author     Linux Build Service Account <lnxbuild@localhost>  2019-10-30 03:39:13 -0700
committer  Linux Build Service Account <lnxbuild@localhost>  2019-10-30 03:39:13 -0700
commit     fb0145f76c31fc7a03164976b4f8c464ef558692 (patch)
tree       1dde67c884d7a25ea2db6da211ba787e1e73e001
parent     d3cff31c2800f4ea83167be7f9826dc9312fa35f (diff)
parent     019eae0dbea12b73b957b1864eda060910a9bab3 (diff)
Merge 019eae0dbea12b73b957b1864eda060910a9bab3 on remote branch LA.UM.8.11.r1-01600-NICOBAR.0
Change-Id: I4016950ae01c9f58d2dd38d7e819f419e701f080
-rw-r--r--  Documentation/devicetree/bindings/arm/msm/sdx-ext-ipc.txt | 31
-rw-r--r--  Documentation/devicetree/bindings/display/msm/sde.txt | 37
-rw-r--r--  Documentation/devicetree/bindings/net/qcom,sdxprairie-ethqos.txt | 179
-rw-r--r--  Documentation/devicetree/bindings/platform/msm/qcom-geni-se.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/thermal/qti-qmi-cdev.txt | 10
-rw-r--r--  Documentation/devicetree/bindings/usb/qpnp-pdphy.txt | 3
-rw-r--r--  arch/arm/configs/vendor/qcs403-perf_defconfig | 1
-rw-r--r--  arch/arm/configs/vendor/qcs403_defconfig | 1
-rw-r--r--  arch/arm/configs/vendor/sdxprairie-perf_defconfig | 1
-rw-r--r--  arch/arm/configs/vendor/sdxprairie_defconfig | 1
-rw-r--r--  arch/arm64/boot/dts/qcom/Makefile | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/atoll-atp.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/atoll-audio-overlay.dtsi | 16
-rw-r--r--  arch/arm64/boot/dts/qcom/atoll-audio.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/atoll-camera.dtsi | 9
-rw-r--r--  arch/arm64/boot/dts/qcom/atoll-idp.dtsi | 5
-rw-r--r--  arch/arm64/boot/dts/qcom/atoll-npu.dtsi | 23
-rw-r--r--  arch/arm64/boot/dts/qcom/atoll-pinctrl.dtsi | 26
-rw-r--r--  arch/arm64/boot/dts/qcom/atoll-qrd.dtsi | 57
-rw-r--r--  arch/arm64/boot/dts/qcom/atoll-sde-display.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/qcom/atoll-sde.dtsi | 32
-rw-r--r--  arch/arm64/boot/dts/qcom/atoll-thermal.dtsi | 69
-rw-r--r--  arch/arm64/boot/dts/qcom/atoll-vidc.dtsi | 83
-rw-r--r--  arch/arm64/boot/dts/qcom/atoll-wcd937x-idp-audio-overlay.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/atoll.dtsi | 14
-rw-r--r--  arch/arm64/boot/dts/qcom/qcs405-iot-sku1.dts | 9
-rw-r--r--  arch/arm64/boot/dts/qcom/qcs405-iot-sku2.dts | 9
-rw-r--r--  arch/arm64/boot/dts/qcom/qcs405-iot-sku4.dts | 9
-rw-r--r--  arch/arm64/boot/dts/qcom/qcs405-iot-sku5.dts | 9
-rw-r--r--  arch/arm64/boot/dts/qcom/qcs405-usb.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/qcom/qcs405.dtsi | 10
-rw-r--r--  arch/arm64/boot/dts/qcom/sa6155-display.dtsi | 5
-rw-r--r--  arch/arm64/boot/dts/qcom/sa8155-adp-star-display.dtsi | 10
-rw-r--r--  arch/arm64/boot/dts/qcom/sa8195p-adp-star-display.dtsi | 10
-rw-r--r--  arch/arm64/boot/dts/qcom/sdmmagpie-gpu.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/sdmmagpie-sde.dtsi | 10
-rw-r--r--  arch/arm64/boot/dts/qcom/sdxprairie-cdp-256.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/sdxprairie-cdp-v1.1-cpe.dts | 24
-rw-r--r--  arch/arm64/boot/dts/qcom/sdxprairie-cdp-v1.1-cpe.dtsi | 18
-rw-r--r--  arch/arm64/boot/dts/qcom/sdxprairie-cdp.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/sdxprairie-mtp-256.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/sdxprairie-mtp-le-cpe.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/sdxprairie-mtp-v1.1-cpe.dts | 22
-rw-r--r--  arch/arm64/boot/dts/qcom/sdxprairie-mtp-v1.1-cpe.dtsi | 18
-rw-r--r--  arch/arm64/boot/dts/qcom/sdxprairie-mtp.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/sdxprairie-pmic-overlay.dtsi | 10
-rw-r--r--  arch/arm64/boot/dts/qcom/sdxprairie-rumi.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/sdxprairie-thermal-common.dtsi | 392
-rw-r--r--  arch/arm64/boot/dts/qcom/sdxprairie-thermal.dtsi | 50
-rw-r--r--  arch/arm64/boot/dts/qcom/sdxprairie-usb.dtsi | 17
-rw-r--r--  arch/arm64/boot/dts/qcom/sdxprairie-v2-mtp-le-cpe.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/qcom/sdxprairie.dtsi | 14
-rw-r--r--  arch/arm64/boot/dts/qcom/sm8150-sdx50m.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/qcom/sm8150-sdxprairie.dtsi | 366
-rw-r--r--  arch/arm64/configs/vendor/atoll-perf_defconfig | 1
-rw-r--r--  arch/arm64/configs/vendor/atoll_defconfig | 3
-rw-r--r--  arch/arm64/configs/vendor/qcs403-perf_defconfig | 2
-rw-r--r--  arch/arm64/configs/vendor/qcs403_defconfig | 1
-rw-r--r--  arch/arm64/configs/vendor/qcs405-perf_defconfig | 2
-rw-r--r--  arch/arm64/configs/vendor/qcs405_defconfig | 1
-rw-r--r--  drivers/bus/mhi/controllers/mhi_arch_qcom.c | 15
-rw-r--r--  drivers/bus/mhi/controllers/mhi_qcom.c | 1
-rw-r--r--  drivers/bus/mhi/controllers/mhi_qcom.h | 1
-rw-r--r--  drivers/bus/mhi/core/mhi_dtr.c | 5
-rw-r--r--  drivers/bus/mhi/devices/mhi_netdev.c | 21
-rw-r--r--  drivers/bus/mhi/devices/mhi_uci.c | 22
-rw-r--r--  drivers/char/adsprpc.c | 84
-rw-r--r--  drivers/char/adsprpc_shared.h | 16
-rw-r--r--  drivers/char/diag/diag_masks.c | 3
-rw-r--r--  drivers/char/diag/diagchar.h | 8
-rw-r--r--  drivers/char/diag/diagchar_core.c | 52
-rw-r--r--  drivers/char/diag/diagfwd.c | 9
-rw-r--r--  drivers/char/diag/diagfwd_mhi.c | 6
-rw-r--r--  drivers/char/diag/diagfwd_peripheral.h | 6
-rw-r--r--  drivers/char/diag/diagfwd_rpmsg.c | 45
-rw-r--r--  drivers/char/diag/diagfwd_socket.c | 30
-rw-r--r--  drivers/char/diag/diagfwd_socket.h | 3
-rw-r--r--  drivers/clk/qcom/clk-alpha-pll.c | 41
-rw-r--r--  drivers/clk/qcom/npucc-atoll.c | 47
-rw-r--r--  drivers/crypto/msm/ice.c | 49
-rw-r--r--  drivers/devfreq/governor_gpubw_mon.c | 11
-rw-r--r--  drivers/esoc/esoc-mdm-drv.c | 6
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_debug.c | 16
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.c | 31
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_hpd.c | 7
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_hpd.h | 7
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_usbpd.c | 18
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_usbpd.h | 17
-rw-r--r--  drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c | 5
-rw-r--r--  drivers/gpu/drm/msm/dsi-staging/dsi_display.c | 61
-rw-r--r--  drivers/gpu/drm/msm/dsi-staging/dsi_display.h | 11
-rw-r--r--  drivers/gpu/drm/msm/dsi-staging/dsi_drm.c | 32
-rw-r--r--  drivers/gpu/drm/msm/dsi-staging/dsi_drm.h | 15
-rw-r--r--  drivers/gpu/drm/msm/dsi-staging/dsi_panel.c | 15
-rw-r--r--  drivers/gpu/drm/msm/dsi-staging/dsi_phy.c | 33
-rw-r--r--  drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 8
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_connector.c | 96
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_connector.h | 24
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_crtc.c | 70
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_crtc.h | 20
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_encoder.c | 21
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c | 22
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hw_catalog.c | 169
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hw_catalog.h | 39
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hw_intf.c | 13
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_kms.c | 20
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_kms.h | 8
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_reg_dma.c | 5
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_rm.c | 13
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_rm.h | 3
-rw-r--r--  drivers/gpu/drm/msm/shd/shd_drm.c | 142
-rw-r--r--  drivers/gpu/drm/msm/shd/shd_drm.h | 2
-rw-r--r--  drivers/gpu/msm/adreno_a5xx_snapshot.c | 6
-rw-r--r--  drivers/gpu/msm/adreno_a6xx_snapshot.c | 14
-rw-r--r--  drivers/gpu/msm/adreno_dispatch.c | 73
-rw-r--r--  drivers/gpu/msm/kgsl_iommu.c | 105
-rw-r--r--  drivers/gpu/msm/kgsl_pwrctrl.c | 5
-rw-r--r--  drivers/gpu/msm/kgsl_trace.c | 5
-rw-r--r--  drivers/gpu/msm/kgsl_trace_power.h | 49
-rw-r--r--  drivers/hwtracing/coresight/coresight-byte-cntr.c | 3
-rw-r--r--  drivers/hwtracing/coresight/coresight-csr.c | 112
-rw-r--r--  drivers/hwtracing/coresight/coresight-priv.h | 4
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc-etr.c | 3
-rw-r--r--  drivers/iommu/arm-smmu.c | 93
-rw-r--r--  drivers/iommu/dma-mapping-fast.c | 1
-rw-r--r--  drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c | 149
-rw-r--r--  drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.h | 44
-rw-r--r--  drivers/media/platform/msm/camera/cam_core/cam_context.h | 16
-rw-r--r--  drivers/media/platform/msm/camera/cam_core/cam_context_utils.c | 79
-rw-r--r--  drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c | 13
-rw-r--r--  drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c | 106
-rw-r--r--  drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h | 9
-rw-r--r--  drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c | 60
-rw-r--r--  drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h | 33
-rw-r--r--  drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c | 14
-rw-r--r--  drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c | 43
-rw-r--r--  drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c | 97
-rw-r--r--  drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h | 8
-rw-r--r--  drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h | 3
-rw-r--r--  drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h | 26
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c | 344
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h | 66
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c | 162
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h | 2
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c | 50
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h | 40
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h | 1
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c | 2
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h | 189
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h | 199
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175_130.h | 195
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c | 241
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h | 34
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c | 117
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h | 31
-rw-r--r--  drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c | 13
-rw-r--r--  drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c | 113
-rw-r--r--  drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h | 10
-rw-r--r--  drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_intf.h | 18
-rw-r--r--  drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_hw_info_ver_4_2_0.h | 6
-rw-r--r--  drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c | 64
-rw-r--r--  drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h | 8
-rw-r--r--  drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c | 15
-rw-r--r--  drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c | 49
-rw-r--r--  drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c | 127
-rw-r--r--  drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h | 18
-rw-r--r--  drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h | 21
-rw-r--r--  drivers/media/platform/msm/npu/npu_mgr.c | 11
-rw-r--r--  drivers/media/platform/msm/npu_v2/npu_common.h | 13
-rw-r--r--  drivers/media/platform/msm/npu_v2/npu_debugfs.c | 105
-rw-r--r--  drivers/media/platform/msm/npu_v2/npu_dev.c | 225
-rw-r--r--  drivers/media/platform/msm/npu_v2/npu_host_ipc.c | 9
-rw-r--r--  drivers/media/platform/msm/npu_v2/npu_host_ipc.h | 50
-rw-r--r--  drivers/media/platform/msm/npu_v2/npu_hw.h | 3
-rw-r--r--  drivers/media/platform/msm/npu_v2/npu_hw_access.c | 100
-rw-r--r--  drivers/media/platform/msm/npu_v2/npu_hw_access.h | 3
-rw-r--r--  drivers/media/platform/msm/npu_v2/npu_mgr.c | 506
-rw-r--r--  drivers/media/platform/msm/npu_v2/npu_mgr.h | 17
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_core.c | 28
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_core.h | 4
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c | 6
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vidc_platform.c | 9
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic-fwd/atl_qcom.c | 52
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Makefile | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/chain_mode.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c | 636
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.h | 183
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-gpio.c | 237
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-pps.c | 444
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 24
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c | 22
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 108
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h | 2
-rw-r--r--  drivers/net/ppp/pppopns.c | 5
-rw-r--r--  drivers/net/wireless/cnss2/pci.c | 6
-rw-r--r--  drivers/phy/qualcomm/phy-qcom-ufs-qmp-v3.h | 4
-rw-r--r--  drivers/platform/msm/ep_pcie/ep_pcie.c | 7
-rw-r--r--  drivers/platform/msm/ep_pcie/ep_pcie_core.c | 30
-rw-r--r--  drivers/platform/msm/ep_pcie/ep_pcie_dbg.c | 4
-rw-r--r--  drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c | 10
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_client.c | 7
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_dp.c | 48
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_i.h | 2
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c | 6
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c | 9
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_odl.c | 17
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c | 5
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_uc.c | 1
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c | 3
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_utils.c | 24
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c | 9
-rw-r--r--  drivers/platform/msm/mhi_dev/mhi.c | 14
-rw-r--r--  drivers/platform/msm/mhi_dev/mhi_sm.c | 16
-rw-r--r--  drivers/platform/msm/mhi_dev/mhi_uci.c | 30
-rw-r--r--  drivers/platform/msm/qcom-geni-se.c | 132
-rw-r--r--  drivers/power/supply/qcom/battery.c | 33
-rw-r--r--  drivers/power/supply/qcom/smb1390-charger-psy.c | 19
-rw-r--r--  drivers/scsi/ufs/ufs-qcom-ice.c | 28
-rw-r--r--  drivers/scsi/ufs/ufs-qcom-ice.h | 7
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 16
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 10
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 12
-rw-r--r--  drivers/soc/qcom/Kconfig | 10
-rw-r--r--  drivers/soc/qcom/Makefile | 1
-rw-r--r--  drivers/soc/qcom/dcc_v2.c | 40
-rw-r--r--  drivers/soc/qcom/icnss.c | 1
-rw-r--r--  drivers/soc/qcom/sdx_ext_ipc.c | 267
-rw-r--r--  drivers/soc/qcom/smcinvoke.c | 138
-rw-r--r--  drivers/spi/spi-geni-qcom.c | 137
-rw-r--r--  drivers/thermal/of-thermal.c | 42
-rw-r--r--  drivers/thermal/qcom/adc-tm5.c | 25
-rw-r--r--  drivers/thermal/qcom/qmi_cooling.c | 16
-rw-r--r--  drivers/tty/serial/msm_geni_serial.c | 51
-rw-r--r--  drivers/tty/tty_ldisc.c | 7
-rw-r--r--  drivers/usb/dwc3/dwc3-msm.c | 21
-rw-r--r--  drivers/usb/dwc3/ep0.c | 3
-rw-r--r--  drivers/usb/dwc3/gadget.c | 4
-rw-r--r--  drivers/usb/gadget/composite.c | 2
-rw-r--r--  drivers/usb/gadget/function/f_cdev.c | 2
-rw-r--r--  drivers/usb/gadget/udc/core.c | 2
-rw-r--r--  drivers/usb/pd/policy_engine.c | 23
-rw-r--r--  drivers/usb/phy/phy-msm-qusb.c | 93
-rw-r--r--  include/linux/mhi.h | 2
-rw-r--r--  include/linux/msm_ep_pcie.h | 5
-rw-r--r--  include/linux/qcom-geni-se.h | 43
-rw-r--r--  include/linux/usb/phy.h | 11
-rw-r--r--  include/soc/qcom/scm.h | 1
-rw-r--r--  kernel/sched/fair.c | 24
-rw-r--r--  kernel/sched/topology.c | 28
-rw-r--r--  net/qrtr/qrtr.c | 38
256 files changed, 9175 insertions, 1625 deletions
diff --git a/Documentation/devicetree/bindings/arm/msm/sdx-ext-ipc.txt b/Documentation/devicetree/bindings/arm/msm/sdx-ext-ipc.txt
new file mode 100644
index 000000000000..689c5b697fa5
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/sdx-ext-ipc.txt
@@ -0,0 +1,31 @@
+Modem chipset attached to Application processor
+
+Modem chipset can be connected to an external APSS processor. The control
+channel between the two chipsets consists of gpios that can relay the
+state of one subsystem to the other. The modem can indicate different events
+(bootup, crash, etc.) to the AP and can get the same information from the AP.
+
+Required Properties:
+- compatible: "qcom,sdx-ext-ipc".
+
+Required named gpio properties:
+- qcom,mdm2ap-status-gpio: gpio for modem to indicate the boot status to APQ.
+
+- qcom,ap2mdm-status-gpio: gpio for APQ to indicate the boot status to modem.
+
+
+
+Optional named gpio properties:
+- qcom,mdm2ap-status2-gpio: gpio for modem to indicate to APQ that it is in
+ E911 call or doing firmware upgrade.
+
+- qcom,ap2mdm-status2-gpio: gpio for APQ to indicate graceful shutdown to modem.
+
+Example:
+ sdx_ext_ipc: qcom,sdx_ext_ipc {
+ compatible = "qcom,sdx-ext-ipc";
+ qcom,ap2mdm-status-gpio = <&tlmm 64 0x00>;
+ qcom,ap2mdm-status2-gpio = <&tlmm 65 0x00>;
+ qcom,mdm2ap-status-gpio = <&tlmm 63 0x00>;
+ qcom,mdm2ap-status2-gpio = <&tlmm 66 0x00>;
+ };
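
As a rough illustration of how a driver could consume the binding above, the sketch below looks up the required and optional status GPIOs by the property names listed earlier. The function name and error handling are illustrative assumptions, not the actual sdx_ext_ipc driver code.

    /* Illustrative sketch only: not the actual sdx_ext_ipc driver. */
    #include <linux/errno.h>
    #include <linux/of.h>
    #include <linux/of_gpio.h>
    #include <linux/platform_device.h>

    static int sdx_ext_ipc_get_gpios(struct platform_device *pdev)
    {
            struct device_node *np = pdev->dev.of_node;
            int mdm2ap, ap2mdm;

            mdm2ap = of_get_named_gpio(np, "qcom,mdm2ap-status-gpio", 0);
            ap2mdm = of_get_named_gpio(np, "qcom,ap2mdm-status-gpio", 0);
            if (mdm2ap < 0 || ap2mdm < 0)
                    return -EINVAL; /* both are required by the binding */

            /* Optional GPIOs: a negative return simply means "not wired". */
            of_get_named_gpio(np, "qcom,mdm2ap-status2-gpio", 0);
            of_get_named_gpio(np, "qcom,ap2mdm-status2-gpio", 0);

            return 0;
    }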
diff --git a/Documentation/devicetree/bindings/display/msm/sde.txt b/Documentation/devicetree/bindings/display/msm/sde.txt
index e7b8b5834f85..4ca0b7128f8e 100644
--- a/Documentation/devicetree/bindings/display/msm/sde.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde.txt
@@ -448,6 +448,15 @@ Optional properties:
- qcom,sde-axi-bus-width: A u32 property to indicate the axi bus width value in bytes
- qcom,sde-mixer-stage-base-layer: A boolean property to indicate if a layer can be staged on base
stage instead of border fill
+- qcom,sde-limits: A node that lists the limits for different properties. This node
+ can have multiple child nodes. Each child node represents a
+ specific usecase limit. The usecase can be defined for properties like
+ sspp linewidth, bw limit etc.
+ e.g. qcom,sde-limits
+ -- qcom,sde-limit-name: name of the usecase
+ -- qcom,sde-limit-cases: different usecases to be considered
+ -- qcom,sde-limit-ids: respective ids for the above usecases
+ -- qcom,sde-limit-values: usecase and value for different combinations
Bus Scaling Subnodes:
- qcom,sde-reg-bus: Property to provide Bus scaling for register access for
@@ -771,6 +780,34 @@ Example:
qcom,sde-dspp-vlut = <0x0 0x00010000>;
};
+ qcom,sde-limits {
+ qcom,sde-linewidth-limits{
+ qcom,sde-limit-cases = "vig", "dma", "scaling", "inline_rot";
+ qcom,sde-limit-ids = <0x1 0x2 0x4 0x8>;
+ /* the qcom,sde-limit-values property consists of two values:
+ one for the usecase and the other for the value. The usecase can be
+ any combination of the values mentioned in qcom,sde-limit-ids.
+ For example: <0x5 2560> means usecase is 0x5 and value is 2560.
+ 0x5 = (0x1 | 0x4) = vig + scaling. Thus the linewidth for usecase
+ vig + scaling = 2560 */
+ qcom,sde-limit-values = <0x1 4096>,
+ <0x5 2560>,
+ <0xd 1088>,
+ <0x2 4096>;
+ };
+ qcom,sde-bw-limits{
+ qcom,sde-limit-cases = "per_pipe", "total_bw", "vfe_on", "cwb_on";
+ qcom,sde-limit-ids = <0x1 0x2 0x4 0x8>;
+ qcom,sde-limit-values = <0x1 2600000>,
+ <0x9 2600000>,
+ <0x5 2600000>,
+ <0xd 2600000>,
+ <0x2 5800000>,
+ <0xa 5500000>,
+ <0x6 4400000>,
+ <0xe 3900000>;
+ };
+ };
qcom,sde-mixer-blocks {
qcom,sde-mixer-gc = <0x3c0 0x00010000>;
};
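
To make the qcom,sde-limits encoding above concrete: each entry pairs a usecase bitmask (an OR of the qcom,sde-limit-ids values) with a limit. A minimal, hypothetical C sketch of such a lookup (not the actual sde_hw_catalog parser) is:

    /* Illustrative sketch only: resolve a limit from (usecase, value) pairs. */
    #include <linux/types.h>

    struct sde_limit_pair {
            u32 usecase;    /* OR of qcom,sde-limit-ids bits, e.g. 0x1 | 0x4 */
            u32 value;      /* limit that applies to that combination */
    };

    static u32 sde_limit_lookup(const struct sde_limit_pair *tbl, int count,
                                u32 active_mask)
    {
            int i;

            for (i = 0; i < count; i++)
                    if (tbl[i].usecase == active_mask)
                            return tbl[i].value;

            return 0;       /* no entry: caller falls back to a default limit */
    }

With the linewidth table above, an active mask of 0x5 (vig + scaling) resolves to 2560.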
diff --git a/Documentation/devicetree/bindings/net/qcom,sdxprairie-ethqos.txt b/Documentation/devicetree/bindings/net/qcom,sdxprairie-ethqos.txt
new file mode 100644
index 000000000000..e8797eb611ee
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/qcom,sdxprairie-ethqos.txt
@@ -0,0 +1,179 @@
+* STMicroelectronics 10/100/1000 Ethernet driver (GMAC)
+
+Required properties:
+- compatible: Should be "qcom,sdxprairie-ethqos",
+ For backwards compatibility: "st,spear600-gmac" is also supported.
+- reg: Address and length of the register set for the device
+- interrupt-parent: Should be the phandle for the interrupt controller
+ that services interrupts for this device
+- interrupts: Should contain the STMMAC interrupts
+- interrupt-names: Should contain a list of interrupt names corresponding to
+ the interrupts in the interrupts property, if available.
+ Valid interrupt names are:
+ - "macirq" (combined signal for various interrupt events)
+ - "eth_wake_irq" (the interrupt to manage the remote wake-up packet detection)
+ - "eth_lpi" (the interrupt that occurs when Tx or Rx enters/exits LPI state)
+- phy-mode: See ethernet.txt file in the same directory.
+- snps,reset-gpio gpio number for phy reset.
+- snps,reset-active-low boolean flag to indicate if phy reset is active low.
+- snps,reset-delays-us is triplet of delays
+ The 1st cell is reset pre-delay in micro seconds.
+ The 2nd cell is reset pulse in micro seconds.
+ The 3rd cell is reset post-delay in micro seconds.
+
+Optional properties:
+- resets: Should contain a phandle to the STMMAC reset signal, if any
+- reset-names: Should contain the reset signal name "stmmaceth", if a
+ reset phandle is given
+- max-frame-size: See ethernet.txt file in the same directory
+- clocks: If present, the first clock should be the GMAC main clock and
+ the second clock should be peripheral's register interface clock. Further
+ clocks may be specified in derived bindings.
+- clock-names: One name for each entry in the clocks property, the
+ first one should be "stmmaceth" and the second one should be "pclk".
+- ptp_ref: this is the PTP reference clock; if PTP is available, this clock
+ is used for programming the Timestamp Addend Register. If not
+ passed then the system clock will be used and this is fine on some
+ platforms.
+- tx-fifo-depth: See ethernet.txt file in the same directory
+- rx-fifo-depth: See ethernet.txt file in the same directory
+- snps,pbl Programmable Burst Length (tx and rx)
+- snps,txpbl Tx Programmable Burst Length. Only for GMAC and newer.
+ If set, DMA tx will use this value rather than snps,pbl.
+- snps,rxpbl Rx Programmable Burst Length. Only for GMAC and newer.
+ If set, DMA rx will use this value rather than snps,pbl.
+- snps,no-pbl-x8 Don't multiply the pbl/txpbl/rxpbl values by 8.
+ For core rev < 3.50, don't multiply the values by 4.
+- snps,aal Address-Aligned Beats
+- snps,fixed-burst Program the DMA to use the fixed burst mode
+- snps,mixed-burst Program the DMA to use the mixed burst mode
+- snps,force_thresh_dma_mode Force DMA to use the threshold mode for
+ both tx and rx
+- snps,force_sf_dma_mode Force DMA to use the Store and Forward
+ mode for both tx and rx. This flag is
+ ignored if force_thresh_dma_mode is set.
+- snps,en-tx-lpi-clockgating Enable gating of the MAC TX clock during
+ TX low-power mode
+- snps,multicast-filter-bins: Number of multicast filter hash bins
+ supported by this device instance
+- snps,perfect-filter-entries: Number of perfect filter entries supported
+ by this device instance
+- snps,ps-speed: port selection speed that can be passed to the core when
+ PCS is supported. For example, this is used in case of SGMII
+ and MAC2MAC connection.
+- snps,tso: this enables the TSO feature otherwise it will be managed by
+ MAC HW capability register. Only for GMAC4 and newer.
+- AXI BUS Mode parameters: listed below are the parameters to program the
+ AXI register inside the DMA module:
+ - snps,lpi_en: enable Low Power Interface
+ - snps,xit_frm: unlock on WoL
+ - snps,wr_osr_lmt: max write outstanding req. limit
+ - snps,rd_osr_lmt: max read outstanding req. limit
+ - snps,kbbe: do not cross 1KiB boundary.
+ - snps,blen: this is a vector of supported burst length.
+ - snps,fb: fixed-burst
+ - snps,mb: mixed-burst
+ - snps,rb: rebuild INCRx Burst
+- mdio: with compatible = "snps,dwmac-mdio", create and register mdio bus.
+- Multiple RX Queues parameters: listed below are the parameters to
+ configure the multiple RX queues:
+ - snps,rx-queues-to-use: number of RX queues to be used in the driver
+ - Choose one of these RX scheduling algorithms:
+ - snps,rx-sched-sp: Strict priority
+ - snps,rx-sched-wsp: Weighted Strict priority
+ - For each RX queue
+ - Choose one of these modes:
+ - snps,dcb-algorithm: Queue to be enabled as DCB
+ - snps,avb-algorithm: Queue to be enabled as AVB
+ - snps,map-to-dma-channel: Channel to map
+ - Specify specific packet routing:
+ - snps,route-avcp: AV Untagged Control packets
+ - snps,route-ptp: PTP Packets
+ - snps,route-dcbcp: DCB Control Packets
+ - snps,route-up: Untagged Packets
+ - snps,route-multi-broad: Multicast & Broadcast Packets
+ - snps,priority: RX queue priority (Range: 0x0 to 0xF)
+- Multiple TX Queues parameters: listed below are the parameters to
+ configure the multiple TX queues:
+ - snps,tx-queues-to-use: number of TX queues to be used in the driver
+ - Choose one of these TX scheduling algorithms:
+ - snps,tx-sched-wrr: Weighted Round Robin
+ - snps,tx-sched-wfq: Weighted Fair Queuing
+ - snps,tx-sched-dwrr: Deficit Weighted Round Robin
+ - snps,tx-sched-sp: Strict priority
+ - For each TX queue
+ - snps,weight: TX queue weight (if using a DCB weight algorithm)
+ - Choose one of these modes:
+ - snps,dcb-algorithm: TX queue will be working in DCB
+ - snps,avb-algorithm: TX queue will be working in AVB
+ [Attention] Queue 0 is reserved for legacy traffic
+ and so no AVB is available in this queue.
+ - Configure Credit Base Shaper (if AVB Mode selected):
+ - snps,send_slope: CBS send slope value
+ - snps,idle_slope: CBS idle slope value
+ - snps,high_credit: CBS hiCredit (maximum credit) value
+ - snps,low_credit: CBS loCredit (minimum credit) value
+ - snps,priority: TX queue priority (Range: 0x0 to 0xF)
+Examples:
+
+ stmmac_axi_setup: stmmac-axi-config {
+ snps,wr_osr_lmt = <0xf>;
+ snps,rd_osr_lmt = <0xf>;
+ snps,blen = <256 128 64 32 0 0 0>;
+ };
+
+ mtl_rx_setup: rx-queues-config {
+ snps,rx-queues-to-use = <1>;
+ snps,rx-sched-sp;
+ queue0 {
+ snps,dcb-algorithm;
+ snps,map-to-dma-channel = <0x0>;
+ snps,priority = <0x0>;
+ };
+ };
+
+ mtl_tx_setup: tx-queues-config {
+ snps,tx-queues-to-use = <2>;
+ snps,tx-sched-wrr;
+ queue0 {
+ snps,weight = <0x10>;
+ snps,dcb-algorithm;
+ snps,priority = <0x0>;
+ };
+
+ queue1 {
+ snps,avb-algorithm;
+ snps,send_slope = <0x1000>;
+ snps,idle_slope = <0x1000>;
+ snps,high_credit = <0x3E800>;
+ snps,low_credit = <0xFFC18000>;
+ snps,priority = <0x1>;
+ };
+ };
+
+ gmac0: ethernet@e0800000 {
+ compatible = "qcom,sdxprairie-ethqos";
+ reg = <0xe0800000 0x8000>;
+ interrupt-parent = <&vic1>;
+ interrupts = <24 23 22>;
+ interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
+ mac-address = [000000000000]; /* Filled in by U-Boot */
+ max-frame-size = <3800>;
+ phy-mode = "gmii";
+ snps,multicast-filter-bins = <256>;
+ snps,perfect-filter-entries = <128>;
+ rx-fifo-depth = <16384>;
+ tx-fifo-depth = <16384>;
+ clocks = <&clock>;
+ clock-names = "stmmaceth";
+ snps,axi-config = <&stmmac_axi_setup>;
+ mdio0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+ phy1: ethernet-phy@0 {
+ };
+ };
+ snps,mtl-rx-config = <&mtl_rx_setup>;
+ snps,mtl-tx-config = <&mtl_tx_setup>;
+ };
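
The snps,reset-gpio, snps,reset-active-low and snps,reset-delays-us properties above describe a simple three-phase PHY reset pulse. A hedged sketch of how a driver might drive it is shown below; the function name and the use of udelay() are illustrative assumptions rather than the dwmac-qcom-ethqos implementation.

    /* Illustrative sketch only: de-assert, assert, then release PHY reset. */
    #include <linux/delay.h>
    #include <linux/gpio.h>
    #include <linux/types.h>

    static void phy_reset_pulse(int gpio, bool active_low, const u32 delays_us[3])
    {
            gpio_set_value(gpio, active_low ? 1 : 0);  /* de-asserted  */
            udelay(delays_us[0]);                      /* pre-delay    */
            gpio_set_value(gpio, active_low ? 0 : 1);  /* assert reset */
            udelay(delays_us[1]);                      /* reset pulse  */
            gpio_set_value(gpio, active_low ? 1 : 0);  /* release      */
            udelay(delays_us[2]);                      /* post-delay   */
    }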
diff --git a/Documentation/devicetree/bindings/platform/msm/qcom-geni-se.txt b/Documentation/devicetree/bindings/platform/msm/qcom-geni-se.txt
index dde8231484a5..ab1d59b31e58 100644
--- a/Documentation/devicetree/bindings/platform/msm/qcom-geni-se.txt
+++ b/Documentation/devicetree/bindings/platform/msm/qcom-geni-se.txt
@@ -18,6 +18,7 @@ Optional properties:
and Corex/2x paths.
- qcom,vote-for-bw: Boolean flag to check if ab/ib vote should be given
as bandwidth or BCM threshold.
+- qcom,subsys-name: SSC QUPv3 subsystem name for SSR notification registration.
Optional subnodes:
qcom,iommu_qupv3_geni_se_cb: Child node representing the QUPV3 context
@@ -31,6 +32,7 @@ Subnode Required properties:
Example:
qupv3_0: qcom,qupv3_0_geni_se@8c0000 {
compatible = "qcom,qupv3-geni-se";
+ qcom,subsys-name = "adsp";
reg = <0x8c0000 0x6000>;
qcom,bus-mas-id = <100>;
qcom,bus-slv-id = <300>;
diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt b/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
index 31b47d5853bf..1348e5696fd1 100644
--- a/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
+++ b/Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt
@@ -29,6 +29,7 @@ Optional properties:
- qcom,rt: Specifies if the framework worker thread for this
controller device should have "real-time" priority.
- qcom,disable-autosuspend: Specifies to disable runtime PM auto suspend.
+- ssr-enable: Required only for SSC QupV3 client for SSR notification.
SPI slave nodes must be children of the SPI master node and can contain
the following properties.
diff --git a/Documentation/devicetree/bindings/thermal/qti-qmi-cdev.txt b/Documentation/devicetree/bindings/thermal/qti-qmi-cdev.txt
index a10b00d94c0a..15d0f4a607b1 100644
--- a/Documentation/devicetree/bindings/thermal/qti-qmi-cdev.txt
+++ b/Documentation/devicetree/bindings/thermal/qti-qmi-cdev.txt
@@ -35,21 +35,23 @@ Subsystem properties:
Definition: Remote subsystem device identifier. Below strings
are the only acceptable device names,
"pa" -> for pa cooling device,
+ "pa_fr1" -> for pa cooling device,
"cpuv_restriction_cold" -> for vdd restriction,
"cx_vdd_limit" -> for vdd limit,
"modem" -> for processor passive cooling device,
"modem_current" -> for current limiting device,
"modem_bw" -> for bus bandwidth limiting device,
"vbatt_low" -> BCL vbat mitigation device,
+ "charge_state" -> Charge state mitigation device,
"mmw0" -> Millimeter wave limiting device 0,
"mmw1" -> Millimeter wave limiting device 1,
"mmw2" -> Millimeter wave limiting device 2,
"mmw3" -> Millimeter wave limiting device 3,
"modem_skin" -> Modem skin mitigation device,
- "modem_skin0" -> Modem skin mitigation device0,
- "modem_skin1" -> Modem skin mitigation device1,
- "modem_skin2" -> Modem skin mitigation device2,
- "modem_skin3" -> Modem skin mitigation device3,
+ "mmw_skin0" -> MMW skin mitigation device0,
+ "mmw_skin1" -> MMW skin mitigation device1,
+ "mmw_skin2" -> MMW skin mitigation device2,
+ "mmw_skin3" -> MMW skin mitigation device3,
"cpr_cold" -> for cpr restriction,
"cdsp_sw" -> for CDSP DCVS mitigation device,
"cdsp_hw" -> for CDSP hardware mitigation device.
diff --git a/Documentation/devicetree/bindings/usb/qpnp-pdphy.txt b/Documentation/devicetree/bindings/usb/qpnp-pdphy.txt
index ab2bbe4ae5e1..ebbb6d5b4770 100644
--- a/Documentation/devicetree/bindings/usb/qpnp-pdphy.txt
+++ b/Documentation/devicetree/bindings/usb/qpnp-pdphy.txt
@@ -40,9 +40,6 @@ Optional properties:
- vconn-supply: Regulator that enables VCONN source output. This will
be supplied on the USB CC line that is not used for
communication when Ra resistance is detected.
-- qcom,vconn-uses-external-source: Indicates whether VCONN supply is sourced
- from an external regulator. If omitted, then it is
- assumed it is connected to VBUS.
- qcom,default-sink-caps: List of 32-bit values representing the nominal sink
capabilities in voltage (millivolts) and current
(milliamps) pairs.
diff --git a/arch/arm/configs/vendor/qcs403-perf_defconfig b/arch/arm/configs/vendor/qcs403-perf_defconfig
index d33cacb1f151..37315244ddfb 100644
--- a/arch/arm/configs/vendor/qcs403-perf_defconfig
+++ b/arch/arm/configs/vendor/qcs403-perf_defconfig
@@ -132,6 +132,7 @@ CONFIG_NTAG_NQ=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_DMA_CMA=y
+CONFIG_CMA_ALIGNMENT=4
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/arm/configs/vendor/qcs403_defconfig b/arch/arm/configs/vendor/qcs403_defconfig
index 5fbe5d4fd121..ced0831f2805 100644
--- a/arch/arm/configs/vendor/qcs403_defconfig
+++ b/arch/arm/configs/vendor/qcs403_defconfig
@@ -211,6 +211,7 @@ CONFIG_DEVTMPFS_MOUNT=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
CONFIG_DMA_CMA=y
+CONFIG_CMA_ALIGNMENT=4
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/arm/configs/vendor/sdxprairie-perf_defconfig b/arch/arm/configs/vendor/sdxprairie-perf_defconfig
index 8c93c93d88fa..5b04a1345653 100644
--- a/arch/arm/configs/vendor/sdxprairie-perf_defconfig
+++ b/arch/arm/configs/vendor/sdxprairie-perf_defconfig
@@ -397,6 +397,7 @@ CONFIG_SSR_SUBSYS_NOTIF_TIMEOUT=20000
CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y
CONFIG_MSM_BOOT_STATS=y
CONFIG_QCOM_DCC_V2=y
+CONFIG_SDX_EXT_IPC=y
CONFIG_QCOM_SECURE_BUFFER=y
CONFIG_QCOM_EUD=y
CONFIG_QCOM_BUS_SCALING=y
diff --git a/arch/arm/configs/vendor/sdxprairie_defconfig b/arch/arm/configs/vendor/sdxprairie_defconfig
index 3a3bfcaf0de3..a9aa31af66d2 100644
--- a/arch/arm/configs/vendor/sdxprairie_defconfig
+++ b/arch/arm/configs/vendor/sdxprairie_defconfig
@@ -408,6 +408,7 @@ CONFIG_SSR_SUBSYS_NOTIF_TIMEOUT=20000
CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y
CONFIG_MSM_BOOT_STATS=y
CONFIG_QCOM_DCC_V2=y
+CONFIG_SDX_EXT_IPC=y
CONFIG_QCOM_SECURE_BUFFER=y
CONFIG_QCOM_EUD=y
CONFIG_QCOM_BUS_SCALING=y
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 6b8c3ad7a6fc..2aad514204ae 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -286,6 +286,8 @@ dtb-$(CONFIG_ARCH_SDXPRAIRIE) += sdxprairie-rumi.dtb \
sdxprairie-v2-pcie-ep-mtp.dtb \
sdxprairie-mtp-cpe.dtb \
sdxprairie-cdp-cpe.dtb \
+ sdxprairie-mtp-v1.1-cpe.dtb \
+ sdxprairie-cdp-v1.1-cpe.dtb \
sdxprairie-mtp-le-cpe.dtb \
sdxprairie-v2-mtp-le-cpe.dtb \
sdxprairie-cdp-256.dtb \
diff --git a/arch/arm64/boot/dts/qcom/atoll-atp.dtsi b/arch/arm64/boot/dts/qcom/atoll-atp.dtsi
index 07617ddd7036..9b075aef642e 100644
--- a/arch/arm64/boot/dts/qcom/atoll-atp.dtsi
+++ b/arch/arm64/boot/dts/qcom/atoll-atp.dtsi
@@ -296,3 +296,7 @@
};
};
};
+
+&sde_dp {
+ qcom,dp-aux-switch = <&fsa4480>;
+};
diff --git a/arch/arm64/boot/dts/qcom/atoll-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/atoll-audio-overlay.dtsi
index b5cd468abda3..dd60c5d58474 100644
--- a/arch/arm64/boot/dts/qcom/atoll-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/atoll-audio-overlay.dtsi
@@ -69,12 +69,12 @@
qcom,swr-mstr-irq-wakeup-capable = <1>;
wcd938x_tx_slave: wcd938x-tx-slave {
compatible = "qcom,wcd938x-slave";
- reg = <0x0 0x01170223>;
+ reg = <0x0D 0x01170223>;
};
wcd937x_tx_slave: wcd937x-tx-slave {
status = "disabled";
compatible = "qcom,wcd937x-slave";
- reg = <0x0 0x01170223>;
+ reg = <0x0A 0x01170223>;
};
};
};
@@ -111,12 +111,12 @@
qcom,swr-clock-stop-mode0 = <1>;
wcd938x_rx_slave: wcd938x-rx-slave {
compatible = "qcom,wcd938x-slave";
- reg = <0x0 0x01170224>;
+ reg = <0x0D 0x01170224>;
};
wcd937x_rx_slave: wcd937x-rx-slave {
status = "disabled";
compatible = "qcom,wcd937x-slave";
- reg = <0x0 0x01170224>;
+ reg = <0x0A 0x01170224>;
};
};
};
@@ -153,28 +153,28 @@
qcom,swr-num-dev = <2>;
wsa881x_0211: wsa881x@20170211 {
compatible = "qcom,wsa881x";
- reg = <0x0 0x20170211>;
+ reg = <0x10 0x20170211>;
qcom,spkr-sd-n-node = <&wsa_spkr_en1>;
qcom,bolero-handle = <&bolero>;
};
wsa881x_0212: wsa881x@20170212 {
compatible = "qcom,wsa881x";
- reg = <0x0 0x20170212>;
+ reg = <0x10 0x20170212>;
qcom,spkr-sd-n-node = <&wsa_spkr_en2>;
qcom,bolero-handle = <&bolero>;
};
wsa881x_0213: wsa881x@21170213 {
compatible = "qcom,wsa881x";
- reg = <0x0 0x21170213>;
+ reg = <0x10 0x21170213>;
qcom,spkr-sd-n-node = <&wsa_spkr_en1>;
qcom,bolero-handle = <&bolero>;
};
wsa881x_0214: wsa881x@21170214 {
compatible = "qcom,wsa881x";
- reg = <0x0 0x21170214>;
+ reg = <0x10 0x21170214>;
qcom,spkr-sd-n-node = <&wsa_spkr_en2>;
qcom,bolero-handle = <&bolero>;
};
diff --git a/arch/arm64/boot/dts/qcom/atoll-audio.dtsi b/arch/arm64/boot/dts/qcom/atoll-audio.dtsi
index 91098e22697b..19c053782057 100644
--- a/arch/arm64/boot/dts/qcom/atoll-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/atoll-audio.dtsi
@@ -92,6 +92,9 @@
qcom,ext-disp-audio-rx = <1>;
qcom,afe-rxtx-lb = <0>;
+ clock-names = "lpass_audio_hw_vote";
+ clocks = <&lpass_audio_hw_vote 0>;
+
asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
<&loopback>, <&compress>, <&hostless>,
<&afe>, <&lsm>, <&routing>, <&compr>,
@@ -184,7 +187,6 @@
};
&qupv3_se9_i2c {
- status = "ok";
fsa4480: fsa4480@42 {
compatible = "qcom,fsa4480-i2c";
reg = <0x42>;
diff --git a/arch/arm64/boot/dts/qcom/atoll-camera.dtsi b/arch/arm64/boot/dts/qcom/atoll-camera.dtsi
index 54c3b13e2dd6..91232e53dc1e 100644
--- a/arch/arm64/boot/dts/qcom/atoll-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/atoll-camera.dtsi
@@ -899,8 +899,9 @@
<&clock_camcc CAM_CC_IFE_0_AXI_CLK>;
clock-rates =
<0 0 0 0 0 0 270000000 0 0 0 360000000 0 0>,
+ <0 0 0 0 0 0 360000000 0 0 0 432000000 0 0>,
<0 0 0 0 0 0 480000000 0 0 0 600000000 0 0>;
- clock-cntl-level = "svs", "turbo";
+ clock-cntl-level = "svs", "svs_l1", "turbo";
src-clock-name = "ife_csid_clk_src";
ppi-enable;
status = "ok";
@@ -987,8 +988,9 @@
<&clock_camcc CAM_CC_IFE_1_AXI_CLK>;
clock-rates =
<0 0 0 0 0 0 270000000 0 0 0 360000000 0 0>,
+ <0 0 0 0 0 0 360000000 0 0 0 432000000 0 0>,
<0 0 0 0 0 0 480000000 0 0 0 600000000 0 0>;
- clock-cntl-level = "svs", "turbo";
+ clock-cntl-level = "svs", "svs_l1", "turbo";
src-clock-name = "ife_csid_clk_src";
ppi-enable;
status = "ok";
@@ -1072,8 +1074,9 @@
<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
clock-rates =
<0 0 0 0 0 0 270000000 0 0 0 360000000 0>,
+ <0 0 0 0 0 0 360000000 0 0 0 432000000 0>,
<0 0 0 0 0 0 480000000 0 0 0 600000000 0>;
- clock-cntl-level = "svs", "turbo";
+ clock-cntl-level = "svs", "svs_l1", "turbo";
src-clock-name = "ife_csid_clk_src";
ppi-enable;
status = "ok";
diff --git a/arch/arm64/boot/dts/qcom/atoll-idp.dtsi b/arch/arm64/boot/dts/qcom/atoll-idp.dtsi
index 87c5eea72532..6f9e3cbd53d8 100644
--- a/arch/arm64/boot/dts/qcom/atoll-idp.dtsi
+++ b/arch/arm64/boot/dts/qcom/atoll-idp.dtsi
@@ -283,3 +283,8 @@
&nfc_clk_req_suspend>;
};
};
+
+&sde_dp {
+ qcom,dp-aux-switch = <&fsa4480>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/atoll-npu.dtsi b/arch/arm64/boot/dts/qcom/atoll-npu.dtsi
index 71e0a6a97530..64394c347739 100644
--- a/arch/arm64/boot/dts/qcom/atoll-npu.dtsi
+++ b/arch/arm64/boot/dts/qcom/atoll-npu.dtsi
@@ -17,8 +17,9 @@
reg = <0x9900000 0x20000>,
<0x99f0000 0x10000>,
<0x9980000 0x10000>,
- <0x17c00000 0x10000>;
- reg-names = "tcm", "core", "cc", "apss_shared";
+ <0x17c00000 0x10000>,
+ <0x01f40000 0x40000>;
+ reg-names = "tcm", "core", "cc", "apss_shared", "tcsr";
interrupts = <GIC_SPI 583 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 585 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 588 IRQ_TYPE_EDGE_RISING>,
@@ -60,6 +61,10 @@
qcom,proxy-reg-names ="vdd", "vdd_cx";
qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_TURBO 100000>;
#cooling-cells = <2>;
+ mboxes = <&apcs_glb2 4>,
+ <&apcs_glb2 6>;
+ mbox-names = "glink", "smp2p";
+ #mbox-cells = <1>;
qcom,npubw-devs = <&npu_npu_ddr_bw>;
qcom,npubw-dev-names = "ddr_bw";
qcom,src-dst-ports = <MSM_BUS_MASTER_NPU MSM_BUS_SLAVE_EBI_CH0>;
@@ -75,8 +80,8 @@
100000000
200000000
200000000
- 120000000
- 40000000
+ 150000000
+ 30000000
200000000
19200000
50000000
@@ -94,7 +99,7 @@
400000000
400000000
200000000
- 40000000
+ 37500000
300000000
19200000
50000000
@@ -112,8 +117,8 @@
515000000
515000000
300000000
- 75000000
- 400000000
+ 37500000
+ 403000000
19200000
50000000
19200000
@@ -135,7 +140,7 @@
19200000
100000000
19200000
- 515000000
+ 650000000
19200000
660000000>;
};
@@ -153,7 +158,7 @@
19200000
100000000
19200000
- 650000000
+ 800000000
19200000
800000000>;
};
diff --git a/arch/arm64/boot/dts/qcom/atoll-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/atoll-pinctrl.dtsi
index 50d8249284df..50f04206237a 100644
--- a/arch/arm64/boot/dts/qcom/atoll-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/atoll-pinctrl.dtsi
@@ -218,6 +218,32 @@
};
};
+ sde_dp_aux_active: sde_dp_aux_active {
+ mux {
+ pins = "gpio55", "gpio33";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio55", "gpio33";
+ bias-disable = <0>;
+ drive-strength = <8>;
+ };
+ };
+
+ sde_dp_aux_suspend: sde_dp_aux_suspend {
+ mux {
+ pins = "gpio55", "gpio33";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio55", "gpio33";
+ bias-pull-down;
+ drive-strength = <2>;
+ };
+ };
+
sde_dp_usbplug_cc_suspend: sde_dp_cc_suspend {
mux {
pins = "gpio104";
diff --git a/arch/arm64/boot/dts/qcom/atoll-qrd.dtsi b/arch/arm64/boot/dts/qcom/atoll-qrd.dtsi
index 16b5fef609fb..c15d330f712e 100644
--- a/arch/arm64/boot/dts/qcom/atoll-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/atoll-qrd.dtsi
@@ -135,6 +135,50 @@
extcon = <&pm6150_pdphy>, <&pm6150_charger>, <&eud>;
};
+&qusb_phy0 {
+ qcom,qusb-phy-init-seq =
+ /* <value reg_offset> */
+ <0x23 0x210 /* PWR_CTRL1 */
+ 0x03 0x04 /* PLL_ANALOG_CONTROLS_TWO */
+ 0x7c 0x18c /* PLL_CLOCK_INVERTERS */
+ 0x80 0x2c /* PLL_CMODE */
+ 0x0a 0x184 /* PLL_LOCK_DELAY */
+ 0x19 0xb4 /* PLL_DIGITAL_TIMERS_TWO */
+ 0x40 0x194 /* PLL_BIAS_CONTROL_1 */
+ 0x22 0x198 /* PLL_BIAS_CONTROL_2 */
+ 0x21 0x214 /* PWR_CTRL2 */
+ 0x08 0x220 /* IMP_CTRL1 */
+ 0x58 0x224 /* IMP_CTRL2 */
+ 0x67 0x240 /* TUNE1 */
+ 0x29 0x244 /* TUNE2 */
+ 0xca 0x248 /* TUNE3 */
+ 0x04 0x24c /* TUNE4 */
+ 0x03 0x250 /* TUNE5 */
+ 0x30 0x23c /* CHG_CTRL2 */
+ 0x22 0x210>; /* PWR_CTRL1 */
+
+ qcom,qusb-phy-host-init-seq =
+ /* <value reg_offset> */
+ <0x23 0x210 /* PWR_CTRL1 */
+ 0x03 0x04 /* PLL_ANALOG_CONTROLS_TWO */
+ 0x7c 0x18c /* PLL_CLOCK_INVERTERS */
+ 0x80 0x2c /* PLL_CMODE */
+ 0x0a 0x184 /* PLL_LOCK_DELAY */
+ 0x19 0xb4 /* PLL_DIGITAL_TIMERS_TWO */
+ 0x40 0x194 /* PLL_BIAS_CONTROL_1 */
+ 0x22 0x198 /* PLL_BIAS_CONTROL_2 */
+ 0x21 0x214 /* PWR_CTRL2 */
+ 0x08 0x220 /* IMP_CTRL1 */
+ 0x58 0x224 /* IMP_CTRL2 */
+ 0x67 0x240 /* TUNE1 */
+ 0x29 0x244 /* TUNE2 */
+ 0xca 0x248 /* TUNE3 */
+ 0x04 0x24c /* TUNE4 */
+ 0x03 0x250 /* TUNE5 */
+ 0x30 0x23c /* CHG_CTRL2 */
+ 0x22 0x210>; /* PWR_CTRL1 */
+};
+
&usb_qmp_dp_phy {
extcon = <&pm6150_pdphy>;
};
@@ -436,3 +480,16 @@
&dsi_rm69299_visionox_amoled_vid_display {
qcom,dsi-display-active;
};
+
+&sde_dp {
+ pinctrl-names = "mdss_dp_active", "mdss_dp_sleep";
+ pinctrl-0 = <&sde_dp_aux_active &sde_dp_usbplug_cc_active>;
+ pinctrl-1 = <&sde_dp_aux_suspend &sde_dp_usbplug_cc_suspend>;
+ qcom,aux-en-gpio = <&tlmm 55 0>;
+ qcom,aux-sel-gpio = <&tlmm 33 0>;
+ qcom,dp-gpio-aux-switch;
+};
+
+&fsa4480 {
+ status = "disabled";
+};
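
The qcom,qusb-phy-init-seq and qcom,qusb-phy-host-init-seq tables above are flat lists of <value reg_offset> pairs. A rough, hypothetical sketch of programming such a table into the PHY register space (the actual phy-msm-qusb driver may differ):

    /* Illustrative sketch only: write each <value reg_offset> pair from DT. */
    #include <linux/io.h>
    #include <linux/types.h>

    static void qusb_phy_write_seq(void __iomem *base, const u32 *seq, int pairs)
    {
            int i;

            for (i = 0; i < pairs; i++)
                    writel_relaxed(seq[2 * i], base + seq[2 * i + 1]);

            /* Ensure the programming sequence lands before the PHY is used. */
            mb();
    }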
diff --git a/arch/arm64/boot/dts/qcom/atoll-sde-display.dtsi b/arch/arm64/boot/dts/qcom/atoll-sde-display.dtsi
index 825e79639f51..c70f002b8e92 100644
--- a/arch/arm64/boot/dts/qcom/atoll-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/atoll-sde-display.dtsi
@@ -198,7 +198,6 @@
&sde_dp {
qcom,dp-usbpd-detection = <&pm6150_pdphy>;
qcom,ext-disp = <&ext_disp>;
- qcom,dp-aux-switch = <&fsa4480>;
qcom,usbplug-cc-gpio = <&tlmm 104 0>;
diff --git a/arch/arm64/boot/dts/qcom/atoll-sde.dtsi b/arch/arm64/boot/dts/qcom/atoll-sde.dtsi
index d23ea4e9452b..af9ccda50f6f 100644
--- a/arch/arm64/boot/dts/qcom/atoll-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/atoll-sde.dtsi
@@ -258,6 +258,38 @@
<1 590 0 150000>,
<1 590 0 300000>;
};
+
+ qcom,sde-limits {
+ qcom,sde-linewidth-limits{
+ qcom,sde-limit-name = "sspp_linewidth_usecases";
+ qcom,sde-limit-cases = "vig", "dma", "scale";
+ qcom,sde-limit-ids = <0x1 0x2 0x4>;
+ qcom,sde-limit-values = <0x1 4096>,
+ <0x5 2560>,
+ <0x2 2160>;
+ };
+ qcom,sde-bw-limits{
+ qcom,sde-limit-name = "sde_bwlimit_usecases";
+ qcom,sde-limit-cases = "per_vig_pipe",
+ "per_dma_pipe",
+ "total_max_bw",
+ "camera_concurrency",
+ "cwb_concurrency";
+ qcom,sde-limit-ids = <0x1 0x2 0x4 0x8 0x10>;
+ qcom,sde-limit-values = <0x1 2600000>,
+ <0x11 2600000>,
+ <0x9 2600000>,
+ <0x19 2600000>,
+ <0x2 2600000>,
+ <0x12 2600000>,
+ <0xa 2600000>,
+ <0x1a 2600000>,
+ <0x4 5800000>,
+ <0x14 5500000>,
+ <0xc 4400000>,
+ <0x1c 3900000>;
+ };
+ };
};
sde_rscc: qcom,sde_rscc@af20000 {
diff --git a/arch/arm64/boot/dts/qcom/atoll-thermal.dtsi b/arch/arm64/boot/dts/qcom/atoll-thermal.dtsi
index a260ec4a5ab0..42ef2ffe8aa5 100644
--- a/arch/arm64/boot/dts/qcom/atoll-thermal.dtsi
+++ b/arch/arm64/boot/dts/qcom/atoll-thermal.dtsi
@@ -1116,35 +1116,59 @@
thermal-sensors = <&pm6150l_adc_tm ADC_AMUX_THM1_PU2>;
wake-capable-sensor;
trips {
- skin_gold_trip: gold-trip {
+ skin_modem_trip0: modem-trip0 {
temperature = <48000>;
- hysteresis = <0>;
+ hysteresis = <4000>;
type = "passive";
};
- skin_cx_trip: skin-cx-trip {
+ skin_modem_trip1: modem-trip1 {
temperature = <50000>;
- hysteresis = <5000>;
+ hysteresis = <2000>;
type = "passive";
};
- skin_silver_trip: silver-trip {
+ skin_gold_hvx_trip: gold-hvx-trip {
+ temperature = <50000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ skin_gpu_trip: skin-gpu-trip {
temperature = <52000>;
+ hysteresis = <4000>;
+ type = "passive";
+ };
+
+ skin_modem_trip2: modem-trip2 {
+ temperature = <52000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+
+ skin_silver_trip: silver-trip {
+ temperature = <54000>;
hysteresis = <0>;
type = "passive";
};
+
+ skin_modem_trip3: modem-trip3 {
+ temperature = <58000>;
+ hysteresis = <4000>;
+ type = "passive";
+ };
};
cooling-maps {
skin_cpu6 {
- trip = <&skin_gold_trip>;
+ trip = <&skin_gold_hvx_trip>;
cooling-device =
/* throttle from fmax to 1708800KHz */
<&CPU6 THERMAL_NO_LIMIT
(THERMAL_MAX_LIMIT-7)>;
};
skin_cpu7 {
- trip = <&skin_gold_trip>;
+ trip = <&skin_gold_hvx_trip>;
cooling-device =
<&CPU7 THERMAL_NO_LIMIT
(THERMAL_MAX_LIMIT-7)>;
@@ -1181,18 +1205,33 @@
(THERMAL_MAX_LIMIT-7)>;
};
skin_gpu {
- trip = <&skin_cx_trip>;
- cooling-device = <&msm_gpu (THERMAL_MAX_LIMIT-1)
- (THERMAL_MAX_LIMIT-1)>;
+ trip = <&skin_gpu_trip>;
+ cooling-device = <&msm_gpu (THERMAL_MAX_LIMIT-3)
+ (THERMAL_MAX_LIMIT-3)>;
};
- skin_modem {
- trip = <&skin_cx_trip>;
+ skin_modem_proc0 {
+ trip = <&skin_modem_trip0>;
+ cooling-device = <&modem_proc 1 1>;
+ };
+ skin_modem_proc1 {
+ trip = <&skin_modem_trip3>;
+ cooling-device = <&modem_proc 3 3>;
+ };
+ skin_modem_pa0 {
+ trip = <&skin_modem_trip1>;
+ cooling-device = <&modem_pa 1 1>;
+ };
+ skin_modem_pa1 {
+ trip = <&skin_modem_trip2>;
+ cooling-device = <&modem_pa 2 2>;
+ };
+ skin_modem_pa2 {
+ trip = <&skin_modem_trip3>;
cooling-device = <&modem_pa 3 3>;
};
cdsp_cdev1 {
- trip = <&skin_cx_trip>;
- cooling-device = <&cdsp_sw THERMAL_MAX_LIMIT
- THERMAL_MAX_LIMIT>;
+ trip = <&skin_gold_hvx_trip>;
+ cooling-device = <&cdsp_sw THERMAL_NO_LIMIT 4>;
};
};
};
diff --git a/arch/arm64/boot/dts/qcom/atoll-vidc.dtsi b/arch/arm64/boot/dts/qcom/atoll-vidc.dtsi
index ec3b77ba77f2..004e10877371 100644
--- a/arch/arm64/boot/dts/qcom/atoll-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/atoll-vidc.dtsi
@@ -15,10 +15,11 @@
#include <dt-bindings/clock/qcom,videocc-atoll.h>
&soc {
- msm_vidc: qcom,vidc@aa00000 {
+ msm_vidc0: qcom,vidc0@aa00000 {
compatible = "qcom,msm-vidc", "qcom,atoll-vidc";
status = "ok";
reg = <0xaa00000 0x200000>;
+ sku-index = <0>;
interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
/* Supply */
@@ -102,5 +103,85 @@
qcom,secure-context-bank;
};
};
+ msm_vidc1: qcom,vidc1@aa00000 {
+ compatible = "qcom,msm-vidc", "qcom,atoll-vidc";
+ status = "ok";
+ reg = <0xaa00000 0x200000>;
+ sku-index = <1>;
+ interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+ /* Supply */
+ venus-supply = <&venus_gdsc>;
+ venus-core0-supply = <&vcodec0_gdsc>;
+ /* Clocks */
+ clock-names = "core_clk", "iface_clk", "bus_clk",
+ "core0_clk", "core0_bus_clk";
+ clocks = <&clock_videocc VIDEO_CC_VENUS_CTL_CORE_CLK>,
+ <&clock_videocc VIDEO_CC_VENUS_AHB_CLK>,
+ <&clock_videocc VIDEO_CC_VENUS_CTL_AXI_CLK>,
+ <&clock_videocc VIDEO_CC_VCODEC0_CORE_CLK>,
+ <&clock_videocc VIDEO_CC_VCODEC0_AXI_CLK>;
+ qcom,proxy-clock-names = "core_clk", "iface_clk",
+ "bus_clk", "core0_clk", "core0_bus_clk";
+ qcom,clock-configs = <0x1 0x0 0x0 0x1 0x0>;
+ qcom,allowed-clock-rates = <150000000 270000000 340000000
+ 380000000>;
+ /* Buses */
+ bus_cnoc {
+ compatible = "qcom,msm-vidc,bus";
+ label = "cnoc";
+ qcom,bus-master = <MSM_BUS_MASTER_AMPSS_M0>;
+ qcom,bus-slave = <MSM_BUS_SLAVE_VENUS_CFG>;
+ qcom,bus-governor = "performance";
+ qcom,bus-range-kbps = <1000 1000>;
+ };
+ venus_bus_ddr {
+ compatible = "qcom,msm-vidc,bus";
+ label = "venus-ddr";
+ qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
+ qcom,bus-slave = <MSM_BUS_SLAVE_EBI_CH0>;
+ qcom,bus-governor = "vidc-ar50-ddr";
+ qcom,bus-range-kbps = <1000 2128000>;
+ };
+ arm9_bus_ddr {
+ compatible = "qcom,msm-vidc,bus";
+ label = "venus-arm9-ddr";
+ qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
+ qcom,bus-slave = <MSM_BUS_SLAVE_EBI_CH0>;
+ qcom,bus-governor = "performance";
+ qcom,bus-range-kbps = <1000 1000>;
+ };
+ /* MMUs */
+ non_secure_cb {
+ compatible = "qcom,msm-vidc,context-bank";
+ label = "venus_ns";
+ iommus = <&apps_smmu 0xC00 0x60>;
+ buffer-types = <0xfff>;
+ virtual-addr-pool = <0x70800000 0x6f800000>;
+ };
+ secure_bitstream_cb {
+ compatible = "qcom,msm-vidc,context-bank";
+ label = "venus_sec_bitstream";
+ iommus = <&apps_smmu 0xC21 0x4>;
+ buffer-types = <0x241>;
+ virtual-addr-pool = <0x4b000000 0x25800000>;
+ qcom,secure-context-bank;
+ };
+ secure_pixel_cb {
+ compatible = "qcom,msm-vidc,context-bank";
+ label = "venus_sec_pixel";
+ iommus = <&apps_smmu 0xC23 0x0>;
+ buffer-types = <0x106>;
+ virtual-addr-pool = <0x25800000 0x25800000>;
+ qcom,secure-context-bank;
+ };
+ secure_non_pixel_cb {
+ compatible = "qcom,msm-vidc,context-bank";
+ label = "venus_sec_non_pixel";
+ iommus = <&apps_smmu 0xC04 0x60>;
+ buffer-types = <0x480>;
+ virtual-addr-pool = <0x1000000 0x24800000>;
+ qcom,secure-context-bank;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/atoll-wcd937x-idp-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/atoll-wcd937x-idp-audio-overlay.dtsi
index 0db8825948c9..4dba3d2aa096 100644
--- a/arch/arm64/boot/dts/qcom/atoll-wcd937x-idp-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/atoll-wcd937x-idp-audio-overlay.dtsi
@@ -101,9 +101,7 @@
"VA SWR_MIC6", "VA_SWR_CLK",
"VA SWR_MIC7", "VA_SWR_CLK",
"VA SWR_ADC0", "ADC1_OUTPUT",
- "VA SWR_ADC1", "ADC2_OUTPUT",
- "VA SWR_ADC2", "ADC3_OUTPUT",
- "VA SWR_ADC3", "ADC4_OUTPUT",
+ "VA SWR_ADC2", "ADC2_OUTPUT",
"VA SWR_MIC0", "DMIC1_OUTPUT",
"VA SWR_MIC1", "DMIC2_OUTPUT",
"VA SWR_MIC2", "DMIC3_OUTPUT",
diff --git a/arch/arm64/boot/dts/qcom/atoll.dtsi b/arch/arm64/boot/dts/qcom/atoll.dtsi
index a13eb0c2fb69..542d5066d0c7 100644
--- a/arch/arm64/boot/dts/qcom/atoll.dtsi
+++ b/arch/arm64/boot/dts/qcom/atoll.dtsi
@@ -2048,8 +2048,9 @@
compatible = "qcom,atoll-npucc", "syscon";
reg = <0x9980000 0x10000>,
<0x9800000 0x10000>,
- <0x9810000 0x10000>;
- reg-names = "cc", "qdsp6ss", "qdsp6ss_pll";
+ <0x9810000 0x10000>,
+ <0x007841e0 0x8>;
+ reg-names = "cc", "qdsp6ss", "qdsp6ss_pll", "efuse";
npu_gdsc-supply = <&npu_core_gdsc>;
vdd_cx-supply = <&VDD_CX_LEVEL>;
#clock-cells = <1>;
@@ -2268,7 +2269,7 @@
glink_npu: npu {
transport = "smem";
qcom,remote-pid = <10>;
- mboxes = <&apcs_glb2 4>;
+ mboxes = <&msm_npu 4>;
mbox-names = "npu_smem";
interrupts = <GIC_SPI 587 IRQ_TYPE_EDGE_RISING>;
@@ -2985,7 +2986,10 @@
vdd-1.8-xo-supply = <&L1C>;
vdd-1.3-rfa-supply = <&L2C>;
vdd-3.3-ch0-supply = <&L10C>;
+ vdd-3.3-ch1-supply = <&L11C>;
qcom,vdd-cx-mx-config = <640000 640000>;
+ qcom,vdd-3.3-ch0-config = <3000000 3312000>;
+ qcom,vdd-3.3-ch1-config = <3000000 3312000>;
qcom,smp2p_map_wlan_1_in {
interrupts-extended = <&smp2p_wlan_1_in 0 0>,
<&smp2p_wlan_1_in 1 0>;
@@ -3065,8 +3069,8 @@
qca,bt-vdd-xtal-supply = <&pm6150l_l1>;/* X0 */
qca,bt-vdd-io-voltage-level = <1700000 1900000>;
- qca,bt-vdd-core-voltage-level = <1245000 1350000>;
- qca,bt-vdd-pa-voltage-level = <3200000 3400000>;
+ qca,bt-vdd-core-voltage-level = <1304000 1304000>;
+ qca,bt-vdd-pa-voltage-level = <3000000 3312000>;
qca,bt-vdd-xtal-voltage-level = <1700000 1900000>;
qca,bt-vdd-io-current-level = <1>; /* LPM/PFM */
diff --git a/arch/arm64/boot/dts/qcom/qcs405-iot-sku1.dts b/arch/arm64/boot/dts/qcom/qcs405-iot-sku1.dts
index c34299fdb28b..ec88f985aafc 100644
--- a/arch/arm64/boot/dts/qcom/qcs405-iot-sku1.dts
+++ b/arch/arm64/boot/dts/qcom/qcs405-iot-sku1.dts
@@ -85,12 +85,3 @@
};
};
};
-
-&sdhc_2 {
- /delete-property/ qcom,nonhotplug;
-
- pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on
- &sdc2_wlan_on>;
- pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off
- &sdc2_wlan_off>;
-};
diff --git a/arch/arm64/boot/dts/qcom/qcs405-iot-sku2.dts b/arch/arm64/boot/dts/qcom/qcs405-iot-sku2.dts
index 307c4ca53ec3..7c3ae6371e92 100644
--- a/arch/arm64/boot/dts/qcom/qcs405-iot-sku2.dts
+++ b/arch/arm64/boot/dts/qcom/qcs405-iot-sku2.dts
@@ -180,12 +180,3 @@
};
};
};
-
-&sdhc_2 {
- /delete-property/ qcom,nonhotplug;
-
- pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on
- &sdc2_wlan_on>;
- pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off
- &sdc2_wlan_off>;
-};
diff --git a/arch/arm64/boot/dts/qcom/qcs405-iot-sku4.dts b/arch/arm64/boot/dts/qcom/qcs405-iot-sku4.dts
index 905214d1e63f..aae3d808cfb2 100644
--- a/arch/arm64/boot/dts/qcom/qcs405-iot-sku4.dts
+++ b/arch/arm64/boot/dts/qcom/qcs405-iot-sku4.dts
@@ -83,12 +83,3 @@
};
};
};
-
-&sdhc_2 {
- /delete-property/ qcom,nonhotplug;
-
- pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on
- &sdc2_wlan_on>;
- pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off
- &sdc2_wlan_off>;
-};
diff --git a/arch/arm64/boot/dts/qcom/qcs405-iot-sku5.dts b/arch/arm64/boot/dts/qcom/qcs405-iot-sku5.dts
index f6aaabf1348a..bd92c176147e 100644
--- a/arch/arm64/boot/dts/qcom/qcs405-iot-sku5.dts
+++ b/arch/arm64/boot/dts/qcom/qcs405-iot-sku5.dts
@@ -95,12 +95,3 @@
};
};
};
-
-&sdhc_2 {
- /delete-property/ qcom,nonhotplug;
-
- pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on
- &sdc2_wlan_on>;
- pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off
- &sdc2_wlan_off>;
-};
diff --git a/arch/arm64/boot/dts/qcom/qcs405-usb.dtsi b/arch/arm64/boot/dts/qcom/qcs405-usb.dtsi
index b9ca3ff635d2..6d4aea8729bc 100644
--- a/arch/arm64/boot/dts/qcom/qcs405-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs405-usb.dtsi
@@ -37,7 +37,7 @@
"utmi_clk", "xo", "noc_aggr_clk";
qcom,core-clk-rate = <200000000>;
- qcom,core-clk-rate-hs = <10000000>;
+ qcom,core-clk-rate-hs = <100000000>;
qcom,pm-qos-latency = <181>;
qcom,msm-bus,name = "usb3";
diff --git a/arch/arm64/boot/dts/qcom/qcs405.dtsi b/arch/arm64/boot/dts/qcom/qcs405.dtsi
index 4bd6fb21e850..739f05071abc 100644
--- a/arch/arm64/boot/dts/qcom/qcs405.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs405.dtsi
@@ -1307,8 +1307,6 @@
<&clock_gcc GCC_SDCC2_APPS_CLK>;
clock-names = "iface_clk", "core_clk";
- qcom,nonhotplug;
-
/* VDD is an external regulator eLDO5 */
vdd-io-supply = <&pms405_l6>;
qcom,vdd-io-voltage-level = <1800000 1800000>;
@@ -1319,8 +1317,10 @@
qcom,nonremovable;
pinctrl-names = "active", "sleep";
- pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on>;
- pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off>;
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on
+ &sdc2_wlan_on>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off
+ &sdc2_wlan_off>;
/delete-property/ qcom,devfreq,freq-table;
/delete-property/ cd-gpios;
@@ -1527,7 +1527,7 @@
* IO and XTAL share the same vreg.
**/
vdd-wlan-io-supply = <&pms405_l5>;
- qcom,cap-tsf-gpio = <&tlmm 42 1>;
+ qcom,cap-tsf-gpio = <&tlmm 53 1>;
qcom,wlan-ramdump-dynamic = <0x200000>;
qcom,msm-bus,name = "msm-cnss";
qcom,msm-bus,num-cases = <4>;
diff --git a/arch/arm64/boot/dts/qcom/sa6155-display.dtsi b/arch/arm64/boot/dts/qcom/sa6155-display.dtsi
index 8e203077e4c9..5bded584b2f3 100644
--- a/arch/arm64/boot/dts/qcom/sa6155-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sa6155-display.dtsi
@@ -179,8 +179,9 @@
qcom,dsi-phy = <&mdss_dsi_phy0>;
clocks = <&mdss_dsi0_pll BYTE0_MUX_CLK>,
- <&mdss_dsi0_pll PIX0_MUX_CLK>;
- clock-names = "mux_byte_clk0", "mux_pixel_clk0";
+ <&mdss_dsi0_pll PIX0_MUX_CLK>,
+ <&clock_rpmh RPMH_CXO_CLK>;
+ clock-names = "mux_byte_clk0", "mux_pixel_clk0", "xo_clk";
qcom,dsi-display-list =
diff --git a/arch/arm64/boot/dts/qcom/sa8155-adp-star-display.dtsi b/arch/arm64/boot/dts/qcom/sa8155-adp-star-display.dtsi
index 1ab460283332..1e558a5a8fa4 100644
--- a/arch/arm64/boot/dts/qcom/sa8155-adp-star-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sa8155-adp-star-display.dtsi
@@ -234,9 +234,10 @@
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>,
<&mdss_dsi1_pll BYTECLK_MUX_1_CLK>,
- <&mdss_dsi1_pll PCLK_MUX_1_CLK>;
+ <&mdss_dsi1_pll PCLK_MUX_1_CLK>,
+ <&clock_rpmh RPMH_CXO_CLK>;
clock-names = "mux_byte_clk0", "mux_pixel_clk0",
- "mux_byte_clk1", "mux_pixel_clk1";
+ "mux_byte_clk1", "mux_pixel_clk1", "xo_clk";
qcom,dsi-display-list =
<&dsi_anx_7625_1>;
@@ -264,9 +265,10 @@
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>,
<&mdss_dsi1_pll BYTECLK_MUX_1_CLK>,
- <&mdss_dsi1_pll PCLK_MUX_1_CLK>;
+ <&mdss_dsi1_pll PCLK_MUX_1_CLK>,
+ <&clock_rpmh RPMH_CXO_CLK>;
clock-names = "mux_byte_clk0", "mux_pixel_clk0",
- "mux_byte_clk1", "mux_pixel_clk1";
+ "mux_byte_clk1", "mux_pixel_clk1", "xo_clk";
qcom,dsi-display-list =
<&dsi_anx_7625_2>;
diff --git a/arch/arm64/boot/dts/qcom/sa8195p-adp-star-display.dtsi b/arch/arm64/boot/dts/qcom/sa8195p-adp-star-display.dtsi
index b2eca5ab8c46..da26faa06524 100644
--- a/arch/arm64/boot/dts/qcom/sa8195p-adp-star-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sa8195p-adp-star-display.dtsi
@@ -228,9 +228,10 @@
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>,
<&mdss_dsi1_pll BYTECLK_MUX_1_CLK>,
- <&mdss_dsi1_pll PCLK_MUX_1_CLK>;
+ <&mdss_dsi1_pll PCLK_MUX_1_CLK>,
+ <&clock_rpmh RPMH_CXO_CLK>;
clock-names = "mux_byte_clk0", "mux_pixel_clk0",
- "mux_byte_clk1", "mux_pixel_clk1";
+ "mux_byte_clk1", "mux_pixel_clk1", "xo_clk";
qcom,dsi-display-list =
<&dsi_anx_7625_1>;
@@ -258,9 +259,10 @@
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
<&mdss_dsi0_pll PCLK_MUX_0_CLK>,
<&mdss_dsi1_pll BYTECLK_MUX_1_CLK>,
- <&mdss_dsi1_pll PCLK_MUX_1_CLK>;
+ <&mdss_dsi1_pll PCLK_MUX_1_CLK>,
+ <&clock_rpmh RPMH_CXO_CLK>;
clock-names = "mux_byte_clk0", "mux_pixel_clk0",
- "mux_byte_clk1", "mux_pixel_clk1";
+ "mux_byte_clk1", "mux_pixel_clk1", "xo_clk";
qcom,dsi-display-list =
<&dsi_anx_7625_2>;
diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-gpu.dtsi
index 1105923f9c36..f345bb99888a 100644
--- a/arch/arm64/boot/dts/qcom/sdmmagpie-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdmmagpie-gpu.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -77,7 +77,7 @@
qcom,ubwc-mode = <2>;
/* size in bytes */
- qcom,snapshot-size = <1048576>;
+ qcom,snapshot-size = <0x200000>;
/* base addr, size */
qcom,gpu-qdss-stm = <0x161c0000 0x40000>;
diff --git a/arch/arm64/boot/dts/qcom/sdmmagpie-sde.dtsi b/arch/arm64/boot/dts/qcom/sdmmagpie-sde.dtsi
index ca96afac6846..724af0c5ddc2 100644
--- a/arch/arm64/boot/dts/qcom/sdmmagpie-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdmmagpie-sde.dtsi
@@ -128,9 +128,9 @@
qcom,sde-mixer-blend-op-off = <0x20 0x38 0x50 0x68 0x80 0x98
0xb0 0xc8 0xe0 0xf8 0x110>;
- qcom,sde-max-per-pipe-bw-kbps = <4500000
- 4500000 4500000
- 4500000 4500000>;
+ qcom,sde-max-per-pipe-bw-kbps = <3500000
+ 3500000 3500000
+ 3500000 3500000>;
/* offsets are relative to "mdp_phys + qcom,sde-off */
qcom,sde-sspp-clk-ctrl =
@@ -155,8 +155,8 @@
qcom,sde-has-dest-scaler;
qcom,sde-max-dest-scaler-input-linewidth = <2048>;
qcom,sde-max-dest-scaler-output-linewidth = <2560>;
- qcom,sde-max-bw-low-kbps = <12800000>;
- qcom,sde-max-bw-high-kbps = <12800000>;
+ qcom,sde-max-bw-low-kbps = <7100000>;
+ qcom,sde-max-bw-high-kbps = <7100000>;
qcom,sde-min-core-ib-kbps = <2400000>;
qcom,sde-min-llcc-ib-kbps = <800000>;
qcom,sde-min-dram-ib-kbps = <800000>;
diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-cdp-256.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-cdp-256.dtsi
index 4db8391f2908..82b5ec0853fa 100644
--- a/arch/arm64/boot/dts/qcom/sdxprairie-cdp-256.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdxprairie-cdp-256.dtsi
@@ -177,8 +177,10 @@
&soc {
bluetooth: bt_qca6390 {
compatible = "qca,qca6390";
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_en_default>;
qca,bt-reset-gpio = <&pmxprairie_gpios 6 0>;
qca,bt-vdd-core-supply = <&pmxprairie_s3>; /* VDD_PMU_AON_I */
- qca,bt-vdd-core-voltage-level = <800000 800000>;
+ qca,bt-vdd-core-voltage-level = <950000 950000>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-cdp-v1.1-cpe.dts b/arch/arm64/boot/dts/qcom/sdxprairie-cdp-v1.1-cpe.dts
new file mode 100644
index 000000000000..5cf1f61cfb2a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdxprairie-cdp-v1.1-cpe.dts
@@ -0,0 +1,24 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdxprairie-cdp-v1.1-cpe.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDXPRAIRIE CDP (CPE-1.1)";
+ compatible = "qcom,sdxprairie-cdp",
+ "qcom,sdxprairie", "qcom,cdp";
+ qcom,board-id = <0x5010101 0x0>;
+};
+
+
diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-cdp-v1.1-cpe.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-cdp-v1.1-cpe.dtsi
new file mode 100644
index 000000000000..f4f6843b16e4
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdxprairie-cdp-v1.1-cpe.dtsi
@@ -0,0 +1,18 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdxprairie.dtsi"
+#include "sdxprairie-cdp.dtsi"
+
+&qnand_1 {
+ status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-cdp.dtsi
index 61d4d60c3b41..f97534b82172 100644
--- a/arch/arm64/boot/dts/qcom/sdxprairie-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdxprairie-cdp.dtsi
@@ -177,9 +177,11 @@
&soc {
bluetooth: bt_qca6390 {
compatible = "qca,qca6390";
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_en_default>;
qca,bt-reset-gpio = <&pmxprairie_gpios 6 0>;
qca,bt-vdd-core-supply = <&pmxprairie_s3>; /* VDD_PMU_AON_I */
- qca,bt-vdd-core-voltage-level = <800000 800000>;
+ qca,bt-vdd-core-voltage-level = <950000 950000>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-mtp-256.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-mtp-256.dtsi
index da30daa0e709..6f9dd6dc172a 100644
--- a/arch/arm64/boot/dts/qcom/sdxprairie-mtp-256.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdxprairie-mtp-256.dtsi
@@ -184,9 +184,11 @@
&soc {
bluetooth: bt_qca6390 {
compatible = "qca,qca6390";
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_en_default>;
qca,bt-reset-gpio = <&pmxprairie_gpios 6 0>;
qca,bt-vdd-core-supply = <&pmxprairie_s3>; /* VDD_PMU_AON_I */
- qca,bt-vdd-core-voltage-level = <800000 800000>;
+ qca,bt-vdd-core-voltage-level = <950000 950000>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-mtp-le-cpe.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-mtp-le-cpe.dtsi
index 18ad2b44f840..0e45fc42fef4 100644
--- a/arch/arm64/boot/dts/qcom/sdxprairie-mtp-le-cpe.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdxprairie-mtp-le-cpe.dtsi
@@ -76,3 +76,7 @@
&mhi_net_device {
status = "ok";
};
+
+&sdx_ext_ipc {
+ status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-mtp-v1.1-cpe.dts b/arch/arm64/boot/dts/qcom/sdxprairie-mtp-v1.1-cpe.dts
new file mode 100644
index 000000000000..c8b07d31a3f8
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdxprairie-mtp-v1.1-cpe.dts
@@ -0,0 +1,22 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "sdxprairie-mtp-v1.1-cpe.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDXPRAIRIE MTP (CPE-1.1)";
+ compatible = "qcom,sdxprairie-mtp",
+ "qcom,sdxprairie", "qcom,mtp";
+ qcom,board-id = <0x7010108 0x0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-mtp-v1.1-cpe.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-mtp-v1.1-cpe.dtsi
new file mode 100644
index 000000000000..007fd6c7faee
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdxprairie-mtp-v1.1-cpe.dtsi
@@ -0,0 +1,18 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdxprairie.dtsi"
+#include "sdxprairie-mtp.dtsi"
+
+&qnand_1 {
+ status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-mtp.dtsi
index c8dc73a40b29..28262a29db70 100644
--- a/arch/arm64/boot/dts/qcom/sdxprairie-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdxprairie-mtp.dtsi
@@ -184,9 +184,11 @@
&soc {
bluetooth: bt_qca6390 {
compatible = "qca,qca6390";
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_en_default>;
qca,bt-reset-gpio = <&pmxprairie_gpios 6 0>;
qca,bt-vdd-core-supply = <&pmxprairie_s3>; /* VDD_PMU_AON_I */
- qca,bt-vdd-core-voltage-level = <800000 800000>;
+ qca,bt-vdd-core-voltage-level = <950000 950000>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-pmic-overlay.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-pmic-overlay.dtsi
index fc9bb5b6f7f3..70e8cfdf244d 100644
--- a/arch/arm64/boot/dts/qcom/sdxprairie-pmic-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdxprairie-pmic-overlay.dtsi
@@ -48,4 +48,14 @@
input-enable;
};
};
+ bt_en {
+ bt_en_default: bt_en_default {
+ pins = "gpio6";
+ function = "normal";
+ output-low;
+ bias-pull-down;
+ qcom,drive-strength = <2>;
+ power-source = <1>;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-rumi.dtsi
index e6647594409f..af31745d80c4 100644
--- a/arch/arm64/boot/dts/qcom/sdxprairie-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdxprairie-rumi.dtsi
@@ -86,8 +86,10 @@
&soc {
bluetooth: bt_qca6390 {
compatible = "qca,qca6390";
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_en_default>;
qca,bt-reset-gpio = <&pmxprairie_gpios 6 0>;
qca,bt-vdd-core-supply = <&pmxprairie_s3>; /* VDD_PMU_AON_I */
- qca,bt-vdd-core-voltage-level = <800000 800000>;
+ qca,bt-vdd-core-voltage-level = <950000 950000>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-thermal-common.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-thermal-common.dtsi
new file mode 100644
index 000000000000..0ad7d91a1659
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/sdxprairie-thermal-common.dtsi
@@ -0,0 +1,392 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/thermal/qmi_thermal.h>
+
+&soc {
+ qmi-tmd-devices {
+ compatible = "qcom,qmi-cooling-devices";
+
+ modem {
+ qcom,instance-id = <QMI_MODEM_NR_INST_ID>;
+
+ modem_pa: modem_pa {
+ qcom,qmi-dev-name = "pa";
+ #cooling-cells = <2>;
+ };
+
+ modem_pa_fr1: modem_pa_fr1 {
+ qcom,qmi-dev-name = "pa_fr1";
+ #cooling-cells = <2>;
+ };
+
+ modem_tj: modem_tj {
+ qcom,qmi-dev-name = "modem";
+ #cooling-cells = <2>;
+ };
+
+ modem_current: modem_current {
+ qcom,qmi-dev-name = "modem_current";
+ #cooling-cells = <2>;
+ };
+
+ modem_skin: modem_skin {
+ qcom,qmi-dev-name = "modem_skin";
+ #cooling-cells = <2>;
+ };
+
+ modem_mmw_skin0: modem_mmw_skin0 {
+ qcom,qmi-dev-name = "mmw_skin0";
+ #cooling-cells = <2>;
+ };
+
+ modem_mmw_skin1: modem_mmw_skin1 {
+ qcom,qmi-dev-name = "mmw_skin1";
+ #cooling-cells = <2>;
+ };
+
+ modem_mmw_skin2: modem_mmw_skin2 {
+ qcom,qmi-dev-name = "mmw_skin2";
+ #cooling-cells = <2>;
+ };
+
+ modem_mmw_skin3: modem_mmw_skin3 {
+ qcom,qmi-dev-name = "mmw_skin3";
+ #cooling-cells = <2>;
+ };
+
+ modem_mmw0: modem_mmw0 {
+ qcom,qmi-dev-name = "mmw0";
+ #cooling-cells = <2>;
+ };
+
+ modem_mmw1: modem_mmw1 {
+ qcom,qmi-dev-name = "mmw1";
+ #cooling-cells = <2>;
+ };
+
+ modem_mmw2: modem_mmw2 {
+ qcom,qmi-dev-name = "mmw2";
+ #cooling-cells = <2>;
+ };
+
+ modem_mmw3: modem_mmw3 {
+ qcom,qmi-dev-name = "mmw3";
+ #cooling-cells = <2>;
+ };
+
+ modem_bcl: modem_bcl {
+ qcom,qmi-dev-name = "vbatt_low";
+ #cooling-cells = <2>;
+ };
+
+ modem_charge_state: modem_charge_state {
+ qcom,qmi-dev-name = "charge_state";
+ #cooling-cells = <2>;
+ };
+
+ modem_vdd: modem_vdd {
+ qcom,qmi-dev-name = "cpuv_restriction_cold";
+ #cooling-cells = <2>;
+ };
+ };
+ };
+
+ qmi_sensor: qmi-ts-sensors {
+ compatible = "qcom,qmi-sensors";
+ #thermal-sensor-cells = <1>;
+
+ modem {
+ qcom,instance-id = <QMI_MODEM_NR_INST_ID>;
+ qcom,qmi-sensor-names = "pa",
+ "pa_1",
+ "qfe_wtr0",
+ "modem_tsens",
+ "qfe_mmw0",
+ "qfe_mmw1",
+ "qfe_mmw2",
+ "qfe_mmw3",
+ "xo_therm",
+ "qfe_mmw_streamer0",
+ "qfe_mmw0_mod",
+ "qfe_mmw1_mod",
+ "qfe_mmw2_mod",
+ "qfe_mmw3_mod",
+ "qfe_ret_pa0",
+ "qfe_wtr_pa0",
+ "qfe_wtr_pa1",
+ "qfe_wtr_pa2",
+ "qfe_wtr_pa3",
+ "sys_therm1",
+ "sys_therm2",
+ "modem_tsens1";
+ };
+ };
+};
+
+&thermal_zones {
+ modem-lte-sub6-pa1 {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&qmi_sensor
+ (QMI_MODEM_NR_INST_ID+QMI_PA)>;
+ wake-capable-sensor;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ modem-lte-sub6-pa2 {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&qmi_sensor
+ (QMI_MODEM_NR_INST_ID+QMI_PA_1)>;
+ wake-capable-sensor;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ modem-mmw0-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&qmi_sensor
+ (QMI_MODEM_NR_INST_ID+QMI_QFE_MMW_0)>;
+ wake-capable-sensor;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ modem-mmw1-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&qmi_sensor
+ (QMI_MODEM_NR_INST_ID+QMI_QFE_MMW_1)>;
+ wake-capable-sensor;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ modem-mmw2-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&qmi_sensor
+ (QMI_MODEM_NR_INST_ID+QMI_QFE_MMW_2)>;
+ wake-capable-sensor;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ modem-mmw3-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&qmi_sensor
+ (QMI_MODEM_NR_INST_ID+QMI_QFE_MMW_3)>;
+ wake-capable-sensor;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ modem-skin-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&qmi_sensor
+ (QMI_MODEM_NR_INST_ID+QMI_XO_THERM)>;
+ wake-capable-sensor;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ modem-wifi-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&qmi_sensor
+ (QMI_MODEM_NR_INST_ID+QMI_SYS_THERM_1)>;
+ wake-capable-sensor;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ modem-ambient-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&qmi_sensor
+ (QMI_MODEM_NR_INST_ID+QMI_SYS_THERM_2)>;
+ wake-capable-sensor;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ modem-0-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&qmi_sensor
+ (QMI_MODEM_NR_INST_ID+QMI_MODEM_TSENS)>;
+ wake-capable-sensor;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ modem-1-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&qmi_sensor
+ (QMI_MODEM_NR_INST_ID+QMI_MODEM_TSENS_1)>;
+ wake-capable-sensor;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ modem-streamer-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&qmi_sensor
+ (QMI_MODEM_NR_INST_ID+QMI_QFE_MMW_STREAMER_0)>;
+ wake-capable-sensor;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ modem-mmw0-mod-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&qmi_sensor
+ (QMI_MODEM_NR_INST_ID+QMI_QFE_MMW_0_MOD)>;
+ wake-capable-sensor;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ modem-mmw1-mod-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&qmi_sensor
+ (QMI_MODEM_NR_INST_ID+QMI_QFE_MMW_1_MOD)>;
+ wake-capable-sensor;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ modem-mmw2-mod-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&qmi_sensor
+ (QMI_MODEM_NR_INST_ID+QMI_QFE_MMW_2_MOD)>;
+ wake-capable-sensor;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ modem-mmw3-mod-usr {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-governor = "user_space";
+ thermal-sensors = <&qmi_sensor
+ (QMI_MODEM_NR_INST_ID+QMI_QFE_MMW_3_MOD)>;
+ wake-capable-sensor;
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-thermal.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-thermal.dtsi
index 8cee48986a78..6cb1eef418e3 100644
--- a/arch/arm64/boot/dts/qcom/sdxprairie-thermal.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdxprairie-thermal.dtsi
@@ -11,55 +11,7 @@
*/
#include <dt-bindings/thermal/thermal.h>
-
-&soc {
- qmi-tmd-devices {
- compatible = "qcom,qmi-cooling-devices";
-
- modem {
- qcom,instance-id = <0x0>;
-
- modem_pa: modem_pa {
- qcom,qmi-dev-name = "pa";
- #cooling-cells = <2>;
- };
-
- modem_proc: modem_proc {
- qcom,qmi-dev-name = "modem";
- #cooling-cells = <2>;
- };
-
- modem_current: modem_current {
- qcom,qmi-dev-name = "modem_current";
- #cooling-cells = <2>;
- };
-
- modem_skin: modem_skin {
- qcom,qmi-dev-name = "modem_skin";
- #cooling-cells = <2>;
- };
-
- modem_vdd: modem_vdd {
- qcom,qmi-dev-name = "cpuv_restriction_cold";
- #cooling-cells = <2>;
- };
- };
-
- };
-
- qmi_sensor: qmi-ts-sensors {
- compatible = "qcom,qmi-sensors";
- #thermal-sensor-cells = <1>;
-
- modem {
- qcom,instance-id = <0x0>;
- qcom,qmi-sensor-names = "pa",
- "pa_1",
- "qfe_pa0",
- "qfe_wtr0";
- };
- };
-};
+#include "sdxprairie-thermal-common.dtsi"
&thermal_zones {
aoss0-usr {
diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-usb.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-usb.dtsi
index 2bb5ae256511..00f57cb9e46a 100644
--- a/arch/arm64/boot/dts/qcom/sdxprairie-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdxprairie-usb.dtsi
@@ -206,8 +206,8 @@
USB3_UNI_QSERDES_COM_SSC_STEP_SIZE1_MODE0 0xde 0
USB3_UNI_QSERDES_COM_SSC_STEP_SIZE2_MODE0 0x07 0
USB3_UNI_QSERDES_COM_VCO_TUNE_MAP 0x02 0
- USB3_UNI_QSERDES_RX_RX_MODE_00_HIGH4 0xb8 0
- USB3_UNI_QSERDES_RX_RX_MODE_00_HIGH3 0xff 0
+ USB3_UNI_QSERDES_RX_RX_MODE_00_HIGH4 0x26 0
+ USB3_UNI_QSERDES_RX_RX_MODE_00_HIGH3 0x7f 0
USB3_UNI_QSERDES_RX_RX_MODE_00_HIGH2 0xbf 0
USB3_UNI_QSERDES_RX_RX_MODE_00_HIGH 0x7f 0
USB3_UNI_QSERDES_RX_RX_MODE_00_LOW 0x7f 0
@@ -217,15 +217,15 @@
USB3_UNI_QSERDES_RX_RX_MODE_01_HIGH 0xdc 0
USB3_UNI_QSERDES_RX_RX_MODE_01_LOW 0xdc 0
USB3_UNI_QSERDES_RX_UCDR_PI_CONTROLS 0x99 0
- USB3_UNI_QSERDES_RX_UCDR_SB2_THRESH1 0x04 0
+ USB3_UNI_QSERDES_RX_UCDR_SB2_THRESH1 0x08 0
USB3_UNI_QSERDES_RX_UCDR_SB2_THRESH2 0x08 0
- USB3_UNI_QSERDES_RX_UCDR_SB2_GAIN1 0x05 0
- USB3_UNI_QSERDES_RX_UCDR_SB2_GAIN2 0x05 0
+ USB3_UNI_QSERDES_RX_UCDR_SB2_GAIN1 0x00 0
+ USB3_UNI_QSERDES_RX_UCDR_SB2_GAIN2 0x04 0
USB3_UNI_QSERDES_RX_UCDR_FASTLOCK_FO_GAIN 0x2f 0
USB3_UNI_QSERDES_RX_UCDR_FASTLOCK_COUNT_LOW 0xff 0
USB3_UNI_QSERDES_RX_UCDR_FASTLOCK_COUNT_HIGH 0x0f 0
USB3_UNI_QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE 0x7f 0
- USB3_UNI_QSERDES_RX_UCDR_FO_GAIN 0x0a 0
+ USB3_UNI_QSERDES_RX_UCDR_FO_GAIN 0x09 0
USB3_UNI_QSERDES_RX_VGA_CAL_CNTRL1 0x54 0
USB3_UNI_QSERDES_RX_VGA_CAL_CNTRL2 0x0c 0
USB3_UNI_QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2 0x0f 0
@@ -239,12 +239,13 @@
USB3_UNI_QSERDES_RX_RX_IDAC_TSETTLE_HIGH 0x00 0
USB3_UNI_QSERDES_RX_RX_IDAC_TSETTLE_LOW 0xc0 0
USB3_UNI_QSERDES_RX_DFE_CTLE_POST_CAL_OFFSET 0x38 0
- USB3_UNI_QSERDES_RX_UCDR_SO_GAIN 0x06 0
+ USB3_UNI_QSERDES_RX_UCDR_SO_GAIN 0x05 0
USB3_UNI_QSERDES_RX_DCC_CTRL1 0x0c 0
USB3_UNI_QSERDES_RX_GM_CAL 0x1f 0
USB3_UNI_QSERDES_TX_RCV_DETECT_LVL_2 0x12 0
USB3_UNI_QSERDES_TX_LANE_MODE_1 0xd5 0
- USB3_UNI_QSERDES_TX_PI_QEC_CTRL 0x40 0
+ USB3_UNI_QSERDES_TX_LANE_MODE_2 0x80 0
+ USB3_UNI_QSERDES_TX_PI_QEC_CTRL 0x20 0
USB3_UNI_QSERDES_TX_RES_CODE_LANE_OFFSET_TX 0x08 0
USB3_UNI_PCS_LOCK_DETECT_CONFIG1 0xd0 0
USB3_UNI_PCS_LOCK_DETECT_CONFIG2 0x07 0
diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-v2-mtp-le-cpe.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-v2-mtp-le-cpe.dtsi
index 5c4e0081fb23..83166517c77b 100644
--- a/arch/arm64/boot/dts/qcom/sdxprairie-v2-mtp-le-cpe.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdxprairie-v2-mtp-le-cpe.dtsi
@@ -77,3 +77,7 @@
&mhi_net_device {
status = "ok";
};
+
+&sdx_ext_ipc {
+ status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdxprairie.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie.dtsi
index 43ed9857eb30..591fd17ed020 100644
--- a/arch/arm64/boot/dts/qcom/sdxprairie.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdxprairie.dtsi
@@ -40,10 +40,10 @@
#size-cells = <1>;
ranges;
- mpss_adsp_mem: mpss_adsp_region@90c00000 {
+ mpss_adsp_mem: mpss_adsp_region@90800000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0x90c00000 0xd400000>;
+ reg = <0x90800000 0xd800000>;
label = "mpss_adsp_mem";
};
@@ -1053,7 +1053,7 @@
pinctrl-0 = <&cnss_wlan_en_active>;
pinctrl-1 = <&cnss_wlan_en_sleep>;
vdd-wlan-aon-supply = <&pmxprairie_s3>;
- qcom,vdd-wlan-aon-info = <800000 800000 0 0>;
+ qcom,vdd-wlan-aon-info = <950000 950000 0 0>;
qcom,wlan-rc-num = <0>;
qcom,wlan-ramdump-dynamic = <0x400000>;
qcom,smmu-s1-enable;
@@ -1463,6 +1463,14 @@
qcom,iova-mapping = <0x80000000 0x40000000>;
};
};
+
+ sdx_ext_ipc: qcom,sdx_ext_ipc {
+ compatible = "qcom,sdx-ext-ipc";
+ qcom,ap2mdm-status-gpio = <&tlmm 64 0x00>;
+ qcom,mdm2ap-status-gpio = <&tlmm 63 0x00>;
+ qcom,mdm2ap-status2-gpio = <&tlmm 66 0x00>;
+ status = "disabled";
+ };
};
#include "sdxprairie-pm.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/sm8150-sdx50m.dtsi b/arch/arm64/boot/dts/qcom/sm8150-sdx50m.dtsi
index 170bb8c34d75..ed110c301255 100644
--- a/arch/arm64/boot/dts/qcom/sm8150-sdx50m.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8150-sdx50m.dtsi
@@ -29,6 +29,7 @@
qcom,smmu-cfg = <0>;
memory-region = <&mhi_mem>;
mhi,use-bb;
+ mhi,allow-m1; /* use legacy mode for suspends */
mhi,buffer-len = <0x8000>;
qcom,addr-win = <0x0 0xa0000000 0x0 0xa4bfffff>;
diff --git a/arch/arm64/boot/dts/qcom/sm8150-sdxprairie.dtsi b/arch/arm64/boot/dts/qcom/sm8150-sdxprairie.dtsi
index 1ff61d0f3c71..39a9b0f300bf 100644
--- a/arch/arm64/boot/dts/qcom/sm8150-sdxprairie.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8150-sdxprairie.dtsi
@@ -10,8 +10,8 @@
* GNU General Public License for more details.
*/
-#include <dt-bindings/thermal/qmi_thermal.h>
#include <dt-bindings/msm/msm-bus-ids.h>
+#include "sdxprairie-thermal-common.dtsi"
&mdm3 {
compatible = "qcom,ext-sdxprairie";
@@ -275,367 +275,3 @@
&wil6210 {
status = "disabled";
};
-
-&soc {
- qmi-tmd-devices {
- compatible = "qcom,qmi-cooling-devices";
-
- modem {
- qcom,instance-id = <QMI_MODEM_NR_INST_ID>;
-
- modem_pa: modem_pa {
- qcom,qmi-dev-name = "pa";
- #cooling-cells = <2>;
- };
-
- modem_tj: modem_tj {
- qcom,qmi-dev-name = "modem";
- #cooling-cells = <2>;
- };
-
- modem_current: modem_current {
- qcom,qmi-dev-name = "modem_current";
- #cooling-cells = <2>;
- };
-
- modem_skin: modem_skin {
- qcom,qmi-dev-name = "modem_skin";
- #cooling-cells = <2>;
- };
-
- modem_mmw_skin0: modem_mmw_skin0 {
- qcom,qmi-dev-name = "modem_skin0";
- #cooling-cells = <2>;
- };
-
- modem_mmw_skin1: modem_mmw_skin1 {
- qcom,qmi-dev-name = "modem_skin1";
- #cooling-cells = <2>;
- };
-
- modem_mmw_skin2: modem_mmw_skin2 {
- qcom,qmi-dev-name = "modem_skin2";
- #cooling-cells = <2>;
- };
-
- modem_mmw_skin3: modem_mmw_skin3 {
- qcom,qmi-dev-name = "modem_skin3";
- #cooling-cells = <2>;
- };
-
- modem_mmw0: modem_mmw0 {
- qcom,qmi-dev-name = "mmw0";
- #cooling-cells = <2>;
- };
-
- modem_mmw1: modem_mmw1 {
- qcom,qmi-dev-name = "mmw1";
- #cooling-cells = <2>;
- };
-
- modem_mmw2: modem_mmw2 {
- qcom,qmi-dev-name = "mmw2";
- #cooling-cells = <2>;
- };
-
- modem_mmw3: modem_mmw3 {
- qcom,qmi-dev-name = "mmw3";
- #cooling-cells = <2>;
- };
-
- modem_bcl: modem_bcl {
- qcom,qmi-dev-name = "vbatt_low";
- #cooling-cells = <2>;
- };
- };
- };
-
- qmi_sensor: qmi-ts-sensors {
- compatible = "qcom,qmi-sensors";
- #thermal-sensor-cells = <1>;
-
- modem {
- qcom,instance-id = <QMI_MODEM_NR_INST_ID>;
- qcom,qmi-sensor-names = "pa",
- "pa_1",
- "qfe_wtr0",
- "modem_tsens",
- "qfe_mmw0",
- "qfe_mmw1",
- "qfe_mmw2",
- "qfe_mmw3",
- "xo_therm",
- "qfe_mmw_streamer0",
- "qfe_mmw0_mod",
- "qfe_mmw1_mod",
- "qfe_mmw2_mod",
- "qfe_mmw3_mod",
- "qfe_ret_pa0",
- "qfe_wtr_pa0",
- "qfe_wtr_pa1",
- "qfe_wtr_pa2",
- "qfe_wtr_pa3",
- "sys_therm1",
- "sys_therm2",
- "modem_tsens1";
- };
- };
-};
-
-&thermal_zones {
- modem-lte-sub6-pa1 {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "user_space";
- thermal-sensors = <&qmi_sensor
- (QMI_MODEM_NR_INST_ID+QMI_PA)>;
- wake-capable-sensor;
- trips {
- active-config0 {
- temperature = <125000>;
- hysteresis = <1000>;
- type = "passive";
- };
- };
- };
-
- modem-lte-sub6-pa2 {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "user_space";
- thermal-sensors = <&qmi_sensor
- (QMI_MODEM_NR_INST_ID+QMI_PA_1)>;
- wake-capable-sensor;
- trips {
- active-config0 {
- temperature = <125000>;
- hysteresis = <1000>;
- type = "passive";
- };
- };
- };
-
- modem-mmw0-usr {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "user_space";
- thermal-sensors = <&qmi_sensor
- (QMI_MODEM_NR_INST_ID+QMI_QFE_MMW_0)>;
- wake-capable-sensor;
- trips {
- active-config0 {
- temperature = <125000>;
- hysteresis = <1000>;
- type = "passive";
- };
- };
- };
-
- modem-mmw1-usr {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "user_space";
- thermal-sensors = <&qmi_sensor
- (QMI_MODEM_NR_INST_ID+QMI_QFE_MMW_1)>;
- wake-capable-sensor;
- trips {
- active-config0 {
- temperature = <125000>;
- hysteresis = <1000>;
- type = "passive";
- };
- };
- };
-
- modem-mmw2-usr {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "user_space";
- thermal-sensors = <&qmi_sensor
- (QMI_MODEM_NR_INST_ID+QMI_QFE_MMW_2)>;
- wake-capable-sensor;
- trips {
- active-config0 {
- temperature = <125000>;
- hysteresis = <1000>;
- type = "passive";
- };
- };
- };
-
- modem-mmw3-usr {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "user_space";
- thermal-sensors = <&qmi_sensor
- (QMI_MODEM_NR_INST_ID+QMI_QFE_MMW_3)>;
- wake-capable-sensor;
- trips {
- active-config0 {
- temperature = <125000>;
- hysteresis = <1000>;
- type = "passive";
- };
- };
- };
-
- modem-skin-usr {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "user_space";
- thermal-sensors = <&qmi_sensor
- (QMI_MODEM_NR_INST_ID+QMI_XO_THERM)>;
- wake-capable-sensor;
- trips {
- active-config0 {
- temperature = <125000>;
- hysteresis = <1000>;
- type = "passive";
- };
- };
- };
-
- modem-wifi-usr {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "user_space";
- thermal-sensors = <&qmi_sensor
- (QMI_MODEM_NR_INST_ID+QMI_SYS_THERM_1)>;
- wake-capable-sensor;
- trips {
- active-config0 {
- temperature = <125000>;
- hysteresis = <1000>;
- type = "passive";
- };
- };
- };
-
- modem-ambient-usr {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "user_space";
- thermal-sensors = <&qmi_sensor
- (QMI_MODEM_NR_INST_ID+QMI_SYS_THERM_2)>;
- wake-capable-sensor;
- trips {
- active-config0 {
- temperature = <125000>;
- hysteresis = <1000>;
- type = "passive";
- };
- };
- };
-
- modem-0-usr {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "user_space";
- thermal-sensors = <&qmi_sensor
- (QMI_MODEM_NR_INST_ID+QMI_MODEM_TSENS)>;
- wake-capable-sensor;
- trips {
- active-config0 {
- temperature = <125000>;
- hysteresis = <1000>;
- type = "passive";
- };
- };
- };
-
- modem-1-usr {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "user_space";
- thermal-sensors = <&qmi_sensor
- (QMI_MODEM_NR_INST_ID+QMI_MODEM_TSENS_1)>;
- wake-capable-sensor;
- trips {
- active-config0 {
- temperature = <125000>;
- hysteresis = <1000>;
- type = "passive";
- };
- };
- };
-
- modem-streamer-usr {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "user_space";
- thermal-sensors = <&qmi_sensor
- (QMI_MODEM_NR_INST_ID+QMI_QFE_MMW_STREAMER_0)>;
- wake-capable-sensor;
- trips {
- active-config0 {
- temperature = <125000>;
- hysteresis = <1000>;
- type = "passive";
- };
- };
- };
-
- modem-mmw0-mod-usr {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "user_space";
- thermal-sensors = <&qmi_sensor
- (QMI_MODEM_NR_INST_ID+QMI_QFE_MMW_0_MOD)>;
- wake-capable-sensor;
- trips {
- active-config0 {
- temperature = <125000>;
- hysteresis = <1000>;
- type = "passive";
- };
- };
- };
-
- modem-mmw1-mod-usr {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "user_space";
- thermal-sensors = <&qmi_sensor
- (QMI_MODEM_NR_INST_ID+QMI_QFE_MMW_1_MOD)>;
- wake-capable-sensor;
- trips {
- active-config0 {
- temperature = <125000>;
- hysteresis = <1000>;
- type = "passive";
- };
- };
- };
-
- modem-mmw2-mod-usr {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "user_space";
- thermal-sensors = <&qmi_sensor
- (QMI_MODEM_NR_INST_ID+QMI_QFE_MMW_2_MOD)>;
- wake-capable-sensor;
- trips {
- active-config0 {
- temperature = <125000>;
- hysteresis = <1000>;
- type = "passive";
- };
- };
- };
-
- modem-mmw3-mod-usr {
- polling-delay-passive = <0>;
- polling-delay = <0>;
- thermal-governor = "user_space";
- thermal-sensors = <&qmi_sensor
- (QMI_MODEM_NR_INST_ID+QMI_QFE_MMW_3_MOD)>;
- wake-capable-sensor;
- trips {
- active-config0 {
- temperature = <125000>;
- hysteresis = <1000>;
- type = "passive";
- };
- };
- };
-};
diff --git a/arch/arm64/configs/vendor/atoll-perf_defconfig b/arch/arm64/configs/vendor/atoll-perf_defconfig
index ea000c0f5943..5f555a29d70d 100644
--- a/arch/arm64/configs/vendor/atoll-perf_defconfig
+++ b/arch/arm64/configs/vendor/atoll-perf_defconfig
@@ -71,7 +71,6 @@ CONFIG_CMA=y
CONFIG_ZSMALLOC=y
CONFIG_BALANCE_ANON_FILE_RECLAIM=y
CONFIG_SECCOMP=y
-# CONFIG_UNMAP_KERNEL_AT_EL0 is not set
CONFIG_ARM64_SSBD=y
CONFIG_ARMV8_DEPRECATED=y
CONFIG_SWP_EMULATION=y
diff --git a/arch/arm64/configs/vendor/atoll_defconfig b/arch/arm64/configs/vendor/atoll_defconfig
index f8505fa63a47..1e7670d86b28 100644
--- a/arch/arm64/configs/vendor/atoll_defconfig
+++ b/arch/arm64/configs/vendor/atoll_defconfig
@@ -75,7 +75,6 @@ CONFIG_CMA_DEBUGFS=y
CONFIG_ZSMALLOC=y
CONFIG_BALANCE_ANON_FILE_RECLAIM=y
CONFIG_SECCOMP=y
-# CONFIG_UNMAP_KERNEL_AT_EL0 is not set
CONFIG_PRINT_VMEMLAYOUT=y
CONFIG_ARM64_SSBD=y
CONFIG_ARMV8_DEPRECATED=y
@@ -591,6 +590,8 @@ CONFIG_QCOM_APCS_IPC=y
CONFIG_MSM_QMP=y
CONFIG_IOMMU_IO_PGTABLE_FAST=y
CONFIG_ARM_SMMU=y
+CONFIG_IOMMU_TLBSYNC_DEBUG=y
+CONFIG_ARM_SMMU_TESTBUS_DUMP=y
CONFIG_QCOM_LAZY_MAPPING=y
CONFIG_IOMMU_DEBUG=y
CONFIG_IOMMU_DEBUG_TRACKING=y
diff --git a/arch/arm64/configs/vendor/qcs403-perf_defconfig b/arch/arm64/configs/vendor/qcs403-perf_defconfig
index cf04736e1c47..0427d4dd5e8b 100644
--- a/arch/arm64/configs/vendor/qcs403-perf_defconfig
+++ b/arch/arm64/configs/vendor/qcs403-perf_defconfig
@@ -216,6 +216,7 @@ CONFIG_RFKILL=y
CONFIG_NTAG_NQ=y
CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
CONFIG_DMA_CMA=y
+CONFIG_CMA_ALIGNMENT=4
CONFIG_MHI_BUS=y
CONFIG_MHI_QCOM=y
CONFIG_MHI_NETDEV=y
@@ -452,6 +453,7 @@ CONFIG_MSM_SPM=y
CONFIG_MSM_L2_SPM=y
CONFIG_QCOM_SCM=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_MSM_DEBUG_LAR_UNLOCK=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_QCOM_WDOG_IPI_ENABLE=y
diff --git a/arch/arm64/configs/vendor/qcs403_defconfig b/arch/arm64/configs/vendor/qcs403_defconfig
index c859d4b29da2..cf6263c0fd35 100644
--- a/arch/arm64/configs/vendor/qcs403_defconfig
+++ b/arch/arm64/configs/vendor/qcs403_defconfig
@@ -222,6 +222,7 @@ CONFIG_RFKILL=y
CONFIG_NTAG_NQ=y
CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
CONFIG_DMA_CMA=y
+CONFIG_CMA_ALIGNMENT=4
CONFIG_MHI_BUS=y
CONFIG_MHI_DEBUG=y
CONFIG_MHI_QCOM=y
diff --git a/arch/arm64/configs/vendor/qcs405-perf_defconfig b/arch/arm64/configs/vendor/qcs405-perf_defconfig
index e03204ce138d..771726316907 100644
--- a/arch/arm64/configs/vendor/qcs405-perf_defconfig
+++ b/arch/arm64/configs/vendor/qcs405-perf_defconfig
@@ -211,7 +211,6 @@ CONFIG_MAC80211=m
CONFIG_MAC80211_RC_MINSTREL_VHT=y
CONFIG_MAC80211_DEBUGFS=y
CONFIG_RFKILL=y
-CONFIG_NTAG_NQ=y
CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
CONFIG_DMA_CMA=y
CONFIG_MTD=y
@@ -449,6 +448,7 @@ CONFIG_MSM_SPM=y
CONFIG_MSM_L2_SPM=y
CONFIG_QCOM_SCM=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_MSM_DEBUG_LAR_UNLOCK=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_QCOM_WDOG_IPI_ENABLE=y
diff --git a/arch/arm64/configs/vendor/qcs405_defconfig b/arch/arm64/configs/vendor/qcs405_defconfig
index 4539f82613cf..05953a1901cc 100644
--- a/arch/arm64/configs/vendor/qcs405_defconfig
+++ b/arch/arm64/configs/vendor/qcs405_defconfig
@@ -217,7 +217,6 @@ CONFIG_MAC80211=m
CONFIG_MAC80211_RC_MINSTREL_VHT=y
CONFIG_MAC80211_DEBUGFS=y
CONFIG_RFKILL=y
-CONFIG_NTAG_NQ=y
CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
CONFIG_DMA_CMA=y
CONFIG_MTD=y
diff --git a/drivers/bus/mhi/controllers/mhi_arch_qcom.c b/drivers/bus/mhi/controllers/mhi_arch_qcom.c
index 0da28632319c..63f36ed39e33 100644
--- a/drivers/bus/mhi/controllers/mhi_arch_qcom.c
+++ b/drivers/bus/mhi/controllers/mhi_arch_qcom.c
@@ -438,10 +438,11 @@ int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
return -EINVAL;
}
- /* register with pcie rc for WAKE# events */
+ /* register with pcie rc for WAKE# or link state events */
reg_event = &arch_info->pcie_reg_event;
- reg_event->events =
- MSM_PCIE_EVENT_WAKEUP | MSM_PCIE_EVENT_L1SS_TIMEOUT;
+ reg_event->events = mhi_dev->allow_m1 ?
+ (MSM_PCIE_EVENT_WAKEUP) :
+ (MSM_PCIE_EVENT_WAKEUP | MSM_PCIE_EVENT_L1SS_TIMEOUT);
reg_event->user = mhi_dev->pci_dev;
reg_event->callback = mhi_arch_pci_link_state_cb;
@@ -658,7 +659,8 @@ int mhi_arch_link_suspend(struct mhi_controller *mhi_cntrl)
MHI_LOG("Entered\n");
/* disable inactivity timer */
- msm_pcie_l1ss_timeout_disable(pci_dev);
+ if (!mhi_dev->allow_m1)
+ msm_pcie_l1ss_timeout_disable(pci_dev);
switch (mhi_dev->suspend_mode) {
case MHI_DEFAULT_SUSPEND:
@@ -686,7 +688,7 @@ int mhi_arch_link_suspend(struct mhi_controller *mhi_cntrl)
}
exit_suspend:
- if (ret)
+ if (ret && !mhi_dev->allow_m1)
msm_pcie_l1ss_timeout_enable(pci_dev);
MHI_LOG("Exited with ret:%d\n", ret);
@@ -762,7 +764,8 @@ int mhi_arch_link_resume(struct mhi_controller *mhi_cntrl)
return ret;
}
- msm_pcie_l1ss_timeout_enable(pci_dev);
+ if (!mhi_dev->allow_m1)
+ msm_pcie_l1ss_timeout_enable(pci_dev);
MHI_LOG("Exited\n");
diff --git a/drivers/bus/mhi/controllers/mhi_qcom.c b/drivers/bus/mhi/controllers/mhi_qcom.c
index dbb6926b8946..4ed779ab81df 100644
--- a/drivers/bus/mhi/controllers/mhi_qcom.c
+++ b/drivers/bus/mhi/controllers/mhi_qcom.c
@@ -702,6 +702,7 @@ static struct mhi_controller *mhi_register_controller(struct pci_dev *pci_dev)
goto error_register;
use_bb = of_property_read_bool(of_node, "mhi,use-bb");
+ mhi_dev->allow_m1 = of_property_read_bool(of_node, "mhi,allow-m1");
/*
* if s1 translation enabled or using bounce buffer pull iova addr
diff --git a/drivers/bus/mhi/controllers/mhi_qcom.h b/drivers/bus/mhi/controllers/mhi_qcom.h
index 90608020fd71..95ed95322a2f 100644
--- a/drivers/bus/mhi/controllers/mhi_qcom.h
+++ b/drivers/bus/mhi/controllers/mhi_qcom.h
@@ -53,6 +53,7 @@ struct mhi_dev {
int resn;
void *arch_info;
bool powered_on;
+ bool allow_m1;
dma_addr_t iova_start;
dma_addr_t iova_stop;
enum mhi_suspend_mode suspend_mode;
diff --git a/drivers/bus/mhi/core/mhi_dtr.c b/drivers/bus/mhi/core/mhi_dtr.c
index 1db34b69eace..85e27b7214d2 100644
--- a/drivers/bus/mhi/core/mhi_dtr.c
+++ b/drivers/bus/mhi/core/mhi_dtr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -173,6 +173,9 @@ static void mhi_dtr_dl_xfer_cb(struct mhi_device *mhi_dev,
if (dtr_msg->msg & CTRL_MSG_RI)
mhi_dev->tiocm |= TIOCM_RI;
spin_unlock_irq(res_lock);
+
+ /* Notify the update */
+ mhi_notify(mhi_dev, MHI_CB_DTR_SIGNAL);
}
static void mhi_dtr_ul_xfer_cb(struct mhi_device *mhi_dev,
diff --git a/drivers/bus/mhi/devices/mhi_netdev.c b/drivers/bus/mhi/devices/mhi_netdev.c
index 145157b35ba9..1d818a83028b 100644
--- a/drivers/bus/mhi/devices/mhi_netdev.c
+++ b/drivers/bus/mhi/devices/mhi_netdev.c
@@ -861,6 +861,9 @@ static int mhi_netdev_debugfs_stats_show(struct seq_file *m, void *d)
mhi_netdev->abuffers, mhi_netdev->kbuffers,
mhi_netdev->rbuffers);
+ seq_printf(m, "chaining SKBs:%s\n", (mhi_netdev->chain) ?
+ "enabled" : "disabled");
+
return 0;
}
@@ -875,6 +878,22 @@ static const struct file_operations debugfs_stats = {
.read = seq_read,
};
+static int mhi_netdev_debugfs_chain(void *data, u64 val)
+{
+ struct mhi_netdev *mhi_netdev = data;
+ struct mhi_netdev *rsc_dev = mhi_netdev->rsc_dev;
+
+ mhi_netdev->chain = NULL;
+
+ if (rsc_dev)
+ rsc_dev->chain = NULL;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(debugfs_chain, NULL,
+ mhi_netdev_debugfs_chain, "%llu\n");
+
static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev)
{
char node_name[32];
@@ -894,6 +913,8 @@ static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev)
debugfs_create_file_unsafe("stats", 0444, mhi_netdev->dentry,
mhi_netdev, &debugfs_stats);
+ debugfs_create_file_unsafe("chain", 0444, mhi_netdev->dentry,
+ mhi_netdev, &debugfs_chain);
}
static void mhi_netdev_create_debugfs_dir(void)
diff --git a/drivers/bus/mhi/devices/mhi_uci.c b/drivers/bus/mhi/devices/mhi_uci.c
index 7dd2ac71e65f..648ae65e3597 100644
--- a/drivers/bus/mhi/devices/mhi_uci.c
+++ b/drivers/bus/mhi/devices/mhi_uci.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -53,6 +53,7 @@ struct uci_dev {
struct uci_chan ul_chan;
struct uci_chan dl_chan;
size_t mtu;
+ size_t actual_mtu; /* maximum size of incoming buffer */
int ref_count;
bool enabled;
void *ipc_log;
@@ -122,22 +123,24 @@ static int mhi_queue_inbound(struct uci_dev *uci_dev)
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
int nr_trbs = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
size_t mtu = uci_dev->mtu;
+ size_t actual_mtu = uci_dev->actual_mtu;
void *buf;
struct uci_buf *uci_buf;
int ret = -EIO, i;
for (i = 0; i < nr_trbs; i++) {
- buf = kmalloc(mtu + sizeof(*uci_buf), GFP_KERNEL);
+ buf = kmalloc(mtu, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- uci_buf = buf + mtu;
+ uci_buf = buf + actual_mtu;
uci_buf->data = buf;
- MSG_VERB("Allocated buf %d of %d size %ld\n", i, nr_trbs, mtu);
+ MSG_VERB("Allocated buf %d of %d size %ld\n", i, nr_trbs,
+ actual_mtu);
- ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf, mtu,
- MHI_EOT);
+ ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf,
+ actual_mtu, MHI_EOT);
if (ret) {
kfree(buf);
MSG_ERR("Failed to queue buffer %d\n", i);
@@ -412,8 +415,8 @@ static ssize_t mhi_uci_read(struct file *file,
if (uci_dev->enabled)
ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE,
- uci_buf->data, uci_dev->mtu,
- MHI_EOT);
+ uci_buf->data,
+ uci_dev->actual_mtu, MHI_EOT);
else
ret = -ERESTARTSYS;
@@ -600,6 +603,7 @@ static int mhi_uci_probe(struct mhi_device *mhi_dev,
};
uci_dev->mtu = min_t(size_t, id->driver_data, mhi_dev->mtu);
+ uci_dev->actual_mtu = uci_dev->mtu - sizeof(struct uci_buf);
mhi_device_set_devdata(mhi_dev, uci_dev);
uci_dev->enabled = true;
@@ -643,7 +647,7 @@ static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
}
spin_lock_irqsave(&uci_chan->lock, flags);
- buf = mhi_result->buf_addr + uci_dev->mtu;
+ buf = mhi_result->buf_addr + uci_dev->actual_mtu;
buf->data = mhi_result->buf_addr;
buf->len = mhi_result->bytes_xferd;
list_add_tail(&buf->node, &uci_chan->pending);
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 147c73e12e62..a4a8cdbe212d 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -224,6 +224,7 @@ struct smq_invoke_ctx {
struct fastrpc_buf *lbuf;
size_t used;
struct fastrpc_file *fl;
+ uint32_t handle;
uint32_t sc;
struct overlap *overs;
struct overlap **overps;
@@ -231,6 +232,7 @@ struct smq_invoke_ctx {
uint32_t *crc;
unsigned int magic;
uint64_t ctxid;
+ bool pm_awake_voted;
};
struct fastrpc_ctx_lst {
@@ -308,6 +310,7 @@ struct fastrpc_apps {
spinlock_t ctxlock;
struct smq_invoke_ctx *ctxtable[FASTRPC_CTX_MAX];
bool legacy_remote_heap;
+ struct wakeup_source *wake_source;
};
struct fastrpc_mmap {
@@ -389,6 +392,8 @@ struct fastrpc_file {
/* Identifies the device (MINOR_NUM_DEV / MINOR_NUM_SECURE_DEV) */
int dev_minor;
char *debug_buf;
+ /* Flag to enable PM wake/relax voting for every remote invoke */
+ int wake_enable;
};
static struct fastrpc_apps gfa;
@@ -446,6 +451,9 @@ static struct fastrpc_channel_ctx gcinfo[NUM_CHANNELS] = {
static int hlosvm[1] = {VMID_HLOS};
static int hlosvmperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
+static void fastrpc_pm_awake(int fl_wake_enable, bool *pm_awake_voted);
+static void fastrpc_pm_relax(bool *pm_awake_voted);
+
static inline int64_t getnstimediff(struct timespec *start)
{
int64_t ns;
@@ -1199,6 +1207,7 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
goto bail;
}
ctx->crc = (uint32_t *)invokefd->crc;
+ ctx->handle = invoke->handle;
ctx->sc = invoke->sc;
if (bufs) {
VERIFY(err, 0 == context_build_overlap(ctx));
@@ -1210,6 +1219,7 @@ static int context_alloc(struct fastrpc_file *fl, uint32_t kernel,
ctx->tgid = fl->tgid;
init_completion(&ctx->work);
ctx->magic = FASTRPC_CTX_MAGIC;
+ ctx->pm_awake_voted = false;
spin_lock(&fl->hlock);
hlist_add_head(&ctx->hn, &clst->pending);
@@ -1280,6 +1290,7 @@ static void context_free(struct smq_invoke_ctx *ctx)
static void context_notify_user(struct smq_invoke_ctx *ctx, int retval)
{
ctx->retval = retval;
+ fastrpc_pm_awake(ctx->fl->wake_enable, &ctx->pm_awake_voted);
complete(&ctx->work);
}
@@ -1872,7 +1883,25 @@ static void fastrpc_init(struct fastrpc_apps *me)
me->channel[CDSP_DOMAIN_ID].secure = NON_SECURE_CHANNEL;
}
-static int fastrpc_release_current_dsp_process(struct fastrpc_file *fl);
+static inline void fastrpc_pm_awake(int fl_wake_enable, bool *pm_awake_voted)
+{
+ struct fastrpc_apps *me = &gfa;
+
+ if (!fl_wake_enable || *pm_awake_voted)
+ return;
+ __pm_stay_awake(me->wake_source);
+ *pm_awake_voted = true;
+}
+
+static inline void fastrpc_pm_relax(bool *pm_awake_voted)
+{
+ struct fastrpc_apps *me = &gfa;
+
+ if (!(*pm_awake_voted))
+ return;
+ __pm_relax(me->wake_source);
+ *pm_awake_voted = false;
+}
static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
uint32_t kernel,
@@ -1880,12 +1909,12 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
{
struct smq_invoke_ctx *ctx = NULL;
struct fastrpc_ioctl_invoke *invoke = &inv->inv;
- int cid = fl->cid;
- int interrupted = 0;
- int err = 0;
+ int err = 0, interrupted = 0, cid = fl->cid;
struct timespec invoket = {0};
int64_t *perf_counter = getperfcounter(fl, PERF_COUNT);
+ bool pm_awake_voted = false;
+ fastrpc_pm_awake(fl->wake_enable, &pm_awake_voted);
if (fl->profile)
getnstimeofday(&invoket);
@@ -1900,7 +1929,7 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
VERIFY(err, fl->cid >= 0 && fl->cid < NUM_CHANNELS && fl->sctx != NULL);
if (err) {
- pr_err("adsprpc: ERROR: %s: user application %s domain is not set\n",
+ pr_err("adsprpc: ERROR: %s: kernel session not initialized yet for %s\n",
__func__, current->comm);
err = -EBADR;
goto bail;
@@ -1946,14 +1975,16 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
if (err)
goto bail;
wait:
+ fastrpc_pm_relax(&pm_awake_voted);
if (kernel)
wait_for_completion(&ctx->work);
- else {
+ else
interrupted = wait_for_completion_interruptible(&ctx->work);
- VERIFY(err, 0 == (err = interrupted));
- if (err)
- goto bail;
- }
+
+ pm_awake_voted = ctx->pm_awake_voted;
+ VERIFY(err, 0 == (err = interrupted));
+ if (err)
+ goto bail;
PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
inv_args(ctx);
@@ -1990,6 +2021,7 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
*count = *count+1;
}
}
+ fastrpc_pm_relax(&pm_awake_voted);
return err;
}
@@ -2760,9 +2792,12 @@ static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
break;
}
}
- VERIFY(err, idx < chan->sesscount);
- if (err)
+ if (idx >= chan->sesscount) {
+ err = -EUSERS;
+ pr_err("adsprpc: ERROR %d: %s: max concurrent sessions limit (%d) already reached on %s\n",
+ err, __func__, chan->sesscount, chan->subsys);
goto bail;
+ }
chan->session[idx].smmu.faults = 0;
} else {
VERIFY(err, me->dev != NULL);
@@ -3218,7 +3253,7 @@ static int fastrpc_channel_open(struct fastrpc_file *fl)
VERIFY(err, fl && fl->sctx && fl->cid >= 0 && fl->cid < NUM_CHANNELS);
if (err) {
- pr_err("adsprpc: ERROR: %s: user application %s domain is not set\n",
+ pr_err("adsprpc: ERROR: %s: kernel session not initialized yet for %s\n",
__func__, current->comm);
err = -EBADR;
return err;
@@ -3361,8 +3396,8 @@ static int fastrpc_get_info(struct fastrpc_file *fl, uint32_t *info)
fl->cid = cid;
fl->ssrcount = fl->apps->channel[cid].ssrcount;
mutex_lock(&fl->apps->channel[cid].smd_mutex);
- VERIFY(err, !fastrpc_session_alloc_locked(
- &fl->apps->channel[cid], 0, &fl->sctx));
+ err = fastrpc_session_alloc_locked(&fl->apps->channel[cid],
+ 0, &fl->sctx);
mutex_unlock(&fl->apps->channel[cid].smd_mutex);
if (err)
goto bail;
@@ -3405,8 +3440,11 @@ static int fastrpc_internal_control(struct fastrpc_file *fl,
case FASTRPC_CONTROL_KALLOC:
cp->kalloc.kalloc_support = 1;
break;
+ case FASTRPC_CONTROL_WAKELOCK:
+ fl->wake_enable = cp->wp.enable;
+ break;
default:
- err = -ENOTTY;
+ err = -EBADRQC;
break;
}
bail:
@@ -4303,11 +4341,19 @@ static int __init fastrpc_device_init(void)
err = register_rpmsg_driver(&fastrpc_rpmsg_client);
if (err) {
- pr_err("adsprpc: register_rpmsg_driver: failed with err %d\n",
- err);
+ pr_err("adsprpc: %s: register_rpmsg_driver failed with err %d\n",
+ __func__, err);
goto device_create_bail;
}
me->rpmsg_register = 1;
+
+ me->wake_source = wakeup_source_register("adsprpc");
+ VERIFY(err, !IS_ERR_OR_NULL(me->wake_source));
+ if (err) {
+ pr_err("adsprpc: Error: %s: wakeup_source_register failed with err %d\n",
+ __func__, PTR_ERR(me->wake_source));
+ goto device_create_bail;
+ }
return 0;
device_create_bail:
for (i = 0; i < NUM_CHANNELS; i++) {
@@ -4356,6 +4402,8 @@ static void __exit fastrpc_device_exit(void)
unregister_chrdev_region(me->dev_no, NUM_CHANNELS);
if (me->rpmsg_register == 1)
unregister_rpmsg_driver(&fastrpc_rpmsg_client);
+ if (me->wake_source)
+ wakeup_source_unregister(me->wake_source);
debugfs_remove_recursive(debugfs_root);
}
diff --git a/drivers/char/adsprpc_shared.h b/drivers/char/adsprpc_shared.h
index 91ce793d00aa..43026050135b 100644
--- a/drivers/char/adsprpc_shared.h
+++ b/drivers/char/adsprpc_shared.h
@@ -241,22 +241,32 @@ struct fastrpc_ioctl_perf { /* kernel performance data */
uintptr_t keys;
};
-#define FASTRPC_CONTROL_LATENCY (1)
+enum fastrpc_control_type {
+ FASTRPC_CONTROL_LATENCY = 1,
+ FASTRPC_CONTROL_SMMU = 2,
+ FASTRPC_CONTROL_KALLOC = 3,
+ FASTRPC_CONTROL_WAKELOCK = 4,
+};
+
struct fastrpc_ctrl_latency {
uint32_t enable; /* latency control enable */
uint32_t level; /* level of control */
};
-#define FASTRPC_CONTROL_KALLOC (3)
struct fastrpc_ctrl_kalloc {
uint32_t kalloc_support; /* Remote memory allocation from kernel */
};
-/* FASTRPC_CONTROL value 2 is reserved in user space */
+
+struct fastrpc_ctrl_wakelock {
+ uint32_t enable; /* wakelock control enable */
+};
+
struct fastrpc_ioctl_control {
uint32_t req;
union {
struct fastrpc_ctrl_latency lp;
struct fastrpc_ctrl_kalloc kalloc;
+ struct fastrpc_ctrl_wakelock wp;
};
};
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index 15b828e5057c..b89e35229bc7 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -909,7 +909,8 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
goto end;
if (mask_size + write_len > dest_len)
mask_size = dest_len - write_len;
- memcpy(dest_buf + write_len, src_buf + header_len, mask_size);
+ if (mask_size && src_len >= header_len + mask_size)
+ memcpy(dest_buf + write_len, src_buf + header_len, mask_size);
write_len += mask_size;
for (i = 0; i < NUM_MD_SESSIONS; i++) {
if (i == APPS_DATA) {
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 489ab7828bf8..6cf7c1cc809d 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -73,6 +73,7 @@
#define DIAG_CON_SENSORS (0x0010) /* Bit mask for Sensors */
#define DIAG_CON_WDSP (0x0020) /* Bit mask for WDSP */
#define DIAG_CON_CDSP (0x0040) /* Bit mask for CDSP */
+#define DIAG_CON_NPU (0x0080) /* Bit mask for NPU */
#define DIAG_CON_UPD_WLAN (0x1000) /*Bit mask for WLAN PD*/
#define DIAG_CON_UPD_AUDIO (0x2000) /*Bit mask for AUDIO PD*/
@@ -82,7 +83,7 @@
#define DIAG_CON_ALL (DIAG_CON_APSS | DIAG_CON_MPSS \
| DIAG_CON_LPASS | DIAG_CON_WCNSS \
| DIAG_CON_SENSORS | DIAG_CON_WDSP \
- | DIAG_CON_CDSP)
+ | DIAG_CON_CDSP | DIAG_CON_NPU)
#define DIAG_CON_UPD_ALL (DIAG_CON_UPD_WLAN \
| DIAG_CON_UPD_AUDIO \
| DIAG_CON_UPD_SENSORS)
@@ -93,6 +94,7 @@
#define DIAG_STM_APPS 0x08
#define DIAG_STM_SENSORS 0x10
#define DIAG_STM_CDSP 0x20
+#define DIAG_STM_NPU 0x40
#define INVALID_PID -1
#define DIAG_CMD_FOUND 1
@@ -220,7 +222,8 @@
#define PERIPHERAL_SENSORS 3
#define PERIPHERAL_WDSP 4
#define PERIPHERAL_CDSP 5
-#define NUM_PERIPHERALS 6
+#define PERIPHERAL_NPU 6
+#define NUM_PERIPHERALS 7
#define APPS_DATA (NUM_PERIPHERALS)
#define UPD_WLAN 7
@@ -517,6 +520,7 @@ struct diag_query_pid_t {
struct diag_con_all_param_t {
uint32_t diag_con_all;
uint32_t num_peripherals;
+ uint32_t upd_map_supported;
};
struct diag_md_session_t {
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 2a86413aa1c1..5cafebbe2cb4 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -411,6 +411,8 @@ static uint32_t diag_translate_kernel_to_user_mask(uint32_t peripheral_mask)
ret |= DIAG_CON_WDSP;
if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_CDSP))
ret |= DIAG_CON_CDSP;
+ if (peripheral_mask & MD_PERIPHERAL_MASK(PERIPHERAL_NPU))
+ ret |= DIAG_CON_NPU;
if (peripheral_mask & MD_PERIPHERAL_MASK(UPD_WLAN))
ret |= DIAG_CON_UPD_WLAN;
if (peripheral_mask & MD_PERIPHERAL_MASK(UPD_AUDIO))
@@ -1753,6 +1755,8 @@ static uint32_t diag_translate_mask(uint32_t peripheral_mask)
ret |= (1 << PERIPHERAL_WDSP);
if (peripheral_mask & DIAG_CON_CDSP)
ret |= (1 << PERIPHERAL_CDSP);
+ if (peripheral_mask & DIAG_CON_NPU)
+ ret |= (1 << PERIPHERAL_NPU);
if (peripheral_mask & DIAG_CON_UPD_WLAN)
ret |= (1 << UPD_WLAN);
if (peripheral_mask & DIAG_CON_UPD_AUDIO)
@@ -2431,6 +2435,8 @@ int diag_query_pd(char *process_name)
return PERIPHERAL_SENSORS;
if (diag_query_pd_name(process_name, "cdsp/root_pd"))
return PERIPHERAL_CDSP;
+ if (diag_query_pd_name(process_name, "npu/root_pd"))
+ return PERIPHERAL_NPU;
if (diag_query_pd_name(process_name, "wlan_pd"))
return UPD_WLAN;
if (diag_query_pd_name(process_name, "audio_pd"))
@@ -2852,6 +2858,7 @@ long diagchar_compat_ioctl(struct file *filp,
case DIAG_IOCTL_QUERY_CON_ALL:
con_param.diag_con_all = DIAG_CON_ALL;
con_param.num_peripherals = NUM_PERIPHERALS;
+ con_param.upd_map_supported = 1;
if (copy_to_user((void __user *)ioarg, &con_param,
sizeof(struct diag_con_all_param_t)))
result = -EFAULT;
@@ -3011,6 +3018,7 @@ long diagchar_ioctl(struct file *filp,
case DIAG_IOCTL_QUERY_CON_ALL:
con_param.diag_con_all = DIAG_CON_ALL;
con_param.num_peripherals = NUM_PERIPHERALS;
+ con_param.upd_map_supported = 1;
if (copy_to_user((void __user *)ioarg, &con_param,
sizeof(struct diag_con_all_param_t)))
result = -EFAULT;
@@ -3071,9 +3079,14 @@ static int diag_process_apps_data_hdlc(unsigned char *buf, int len,
send.last = (void *)(buf + len - 1);
send.terminate = 1;
+wait_for_buffer:
wait_event_interruptible(driver->hdlc_wait_q,
(data->flushed == 0));
spin_lock_irqsave(&driver->diagmem_lock, flags);
+ if (data->flushed) {
+ spin_unlock_irqrestore(&driver->diagmem_lock, flags);
+ goto wait_for_buffer;
+ }
if (!data->buf) {
data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
APF_DIAG_PADDING,
@@ -3096,19 +3109,7 @@ static int diag_process_apps_data_hdlc(unsigned char *buf, int len,
ret = -EIO;
goto fail_free_buf;
}
- wait_event_interruptible(driver->hdlc_wait_q,
- (data->flushed == 0));
- spin_lock_irqsave(&driver->diagmem_lock, flags);
- data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
- APF_DIAG_PADDING,
- POOL_TYPE_HDLC);
- if (!data->buf) {
- ret = PKT_DROP;
- spin_unlock_irqrestore(&driver->diagmem_lock, flags);
- goto fail_ret;
- }
- data->allocated = 1;
- data->flushed = 0;
+ goto wait_for_buffer;
}
enc.dest = data->buf + data->len;
@@ -3130,9 +3131,14 @@ static int diag_process_apps_data_hdlc(unsigned char *buf, int len,
ret = -EIO;
goto fail_free_buf;
}
+wait_for_agg_buff:
wait_event_interruptible(driver->hdlc_wait_q,
(data->flushed == 0));
spin_lock_irqsave(&driver->diagmem_lock, flags);
+ if (data->flushed) {
+ spin_unlock_irqrestore(&driver->diagmem_lock, flags);
+ goto wait_for_agg_buff;
+ }
data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
APF_DIAG_PADDING,
POOL_TYPE_HDLC);
@@ -3202,9 +3208,14 @@ static int diag_process_apps_data_non_hdlc(unsigned char *buf, int len,
__func__, buf, len);
return -EIO;
}
+wait_for_buffer:
wait_event_interruptible(driver->hdlc_wait_q,
(data->flushed == 0));
spin_lock_irqsave(&driver->diagmem_lock, flags);
+ if (data->flushed) {
+ spin_unlock_irqrestore(&driver->diagmem_lock, flags);
+ goto wait_for_buffer;
+ }
if (!data->buf) {
data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
APF_DIAG_PADDING,
@@ -3226,20 +3237,7 @@ static int diag_process_apps_data_non_hdlc(unsigned char *buf, int len,
ret = -EIO;
goto fail_free_buf;
}
- wait_event_interruptible(driver->hdlc_wait_q,
- (data->flushed == 0));
-
- spin_lock_irqsave(&driver->diagmem_lock, flags);
- data->buf = diagmem_alloc(driver, DIAG_MAX_HDLC_BUF_SIZE +
- APF_DIAG_PADDING,
- POOL_TYPE_HDLC);
- if (!data->buf) {
- ret = PKT_DROP;
- spin_unlock_irqrestore(&driver->diagmem_lock, flags);
- goto fail_ret;
- }
- data->allocated = 1;
- data->flushed = 0;
+ goto wait_for_buffer;
}
header.start = CONTROL_CHAR;
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index fe9027eaf478..b8194ebaf84f 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -634,6 +634,9 @@ int diag_process_stm_cmd(unsigned char *buf, int len, unsigned char *dest_buf)
if (mask & DIAG_STM_CDSP)
diag_process_stm_mask(cmd, DIAG_STM_CDSP,
PERIPHERAL_CDSP);
+ if (mask & DIAG_STM_NPU)
+ diag_process_stm_mask(cmd, DIAG_STM_NPU,
+ PERIPHERAL_NPU);
if (mask & DIAG_STM_APPS)
diag_process_stm_mask(cmd, DIAG_STM_APPS, APPS_DATA);
@@ -658,6 +661,9 @@ int diag_process_stm_cmd(unsigned char *buf, int len, unsigned char *dest_buf)
if (driver->feature[PERIPHERAL_CDSP].stm_support)
rsp_supported |= DIAG_STM_CDSP;
+ if (driver->feature[PERIPHERAL_NPU].stm_support)
+ rsp_supported |= DIAG_STM_NPU;
+
rsp_supported |= DIAG_STM_APPS;
/* Set mask denoting STM state/status for each peripheral/APSS */
@@ -676,6 +682,9 @@ int diag_process_stm_cmd(unsigned char *buf, int len, unsigned char *dest_buf)
if (driver->stm_state[PERIPHERAL_CDSP])
rsp_status |= DIAG_STM_CDSP;
+ if (driver->stm_state[PERIPHERAL_NPU])
+ rsp_status |= DIAG_STM_NPU;
+
if (driver->stm_state[APPS_DATA])
rsp_status |= DIAG_STM_APPS;
diff --git a/drivers/char/diag/diagfwd_mhi.c b/drivers/char/diag/diagfwd_mhi.c
index e9900d962574..5af2f40e90af 100644
--- a/drivers/char/diag/diagfwd_mhi.c
+++ b/drivers/char/diag/diagfwd_mhi.c
@@ -320,6 +320,8 @@ fail:
static int mhi_open(int id)
{
+ int err = 0;
+
if (id < 0 || id >= NUM_MHI_DEV) {
pr_err("diag: In %s, invalid index %d\n", __func__, id);
return -EINVAL;
@@ -330,7 +332,9 @@ static int mhi_open(int id)
* explicitly by Diag. Open both the read and write channels (denoted by
* OPEN_CHANNELS flag)
*/
- __mhi_open(&diag_mhi[id], OPEN_CHANNELS);
+ err = __mhi_open(&diag_mhi[id], OPEN_CHANNELS);
+ if (err)
+ return err;
diag_remote_dev_open(diag_mhi[id].dev_id);
queue_work(diag_mhi[id].mhi_wq, &(diag_mhi[id].read_work));
diff --git a/drivers/char/diag/diagfwd_peripheral.h b/drivers/char/diag/diagfwd_peripheral.h
index e02f3b8e211e..d9f138152fad 100644
--- a/drivers/char/diag/diagfwd_peripheral.h
+++ b/drivers/char/diag/diagfwd_peripheral.h
@@ -28,7 +28,8 @@
((x == PERIPHERAL_WCNSS) ? DIAG_CON_WCNSS : \
((x == PERIPHERAL_SENSORS) ? DIAG_CON_SENSORS : \
((x == PERIPHERAL_WDSP) ? DIAG_CON_WDSP : \
- ((x == PERIPHERAL_CDSP) ? DIAG_CON_CDSP : 0)))))) \
+ ((x == PERIPHERAL_CDSP) ? DIAG_CON_CDSP : \
+ ((x == PERIPHERAL_NPU) ? DIAG_CON_NPU : 0))))))) \
#define PERIPHERAL_STRING(x) \
((x == PERIPHERAL_MODEM) ? "MODEM" : \
@@ -36,7 +37,8 @@
((x == PERIPHERAL_WCNSS) ? "WCNSS" : \
((x == PERIPHERAL_SENSORS) ? "SENSORS" : \
((x == PERIPHERAL_WDSP) ? "WDSP" : \
- ((x == PERIPHERAL_CDSP) ? "CDSP" : "UNKNOWN")))))) \
+ ((x == PERIPHERAL_CDSP) ? "CDSP" : \
+ ((x == PERIPHERAL_NPU) ? "NPU" : "UNKNOWN"))))))) \
struct diagfwd_buf_t {
unsigned char *data;
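
Both PERIPHERAL_* macros above gain one more ternary level for the NPU. Purely as an illustration of an alternative (this is not what the patch does), the same lookup can be written as a switch, which tends to be easier to extend; the enum values below are assumed stand-ins for the driver's peripheral ids:

/* Illustrative switch-based equivalent of PERIPHERAL_STRING(). */
enum periph { P_MODEM, P_LPASS, P_WCNSS, P_SENSORS, P_WDSP, P_CDSP, P_NPU };

const char *periph_string(enum periph p)
{
    switch (p) {
    case P_MODEM:   return "MODEM";
    case P_LPASS:   return "LPASS";
    case P_WCNSS:   return "WCNSS";
    case P_SENSORS: return "SENSORS";
    case P_WDSP:    return "WDSP";
    case P_CDSP:    return "CDSP";
    case P_NPU:     return "NPU";
    default:        return "UNKNOWN";
    }
}
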
diff --git a/drivers/char/diag/diagfwd_rpmsg.c b/drivers/char/diag/diagfwd_rpmsg.c
index dbad9568e9b7..c4a86172f037 100644
--- a/drivers/char/diag/diagfwd_rpmsg.c
+++ b/drivers/char/diag/diagfwd_rpmsg.c
@@ -93,6 +93,15 @@ struct diag_rpmsg_info rpmsg_data[NUM_PERIPHERALS] = {
.buf1 = NULL,
.buf2 = NULL,
.hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_NPU,
+ .type = TYPE_DATA,
+ .edge = "npu",
+ .name = "DIAG_DATA",
+ .buf1 = NULL,
+ .buf2 = NULL,
+ .hdl = NULL
}
};
@@ -150,6 +159,15 @@ struct diag_rpmsg_info rpmsg_cntl[NUM_PERIPHERALS] = {
.buf1 = NULL,
.buf2 = NULL,
.hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_NPU,
+ .type = TYPE_CNTL,
+ .edge = "npu",
+ .name = "DIAG_CTRL",
+ .buf1 = NULL,
+ .buf2 = NULL,
+ .hdl = NULL
}
};
@@ -207,6 +225,15 @@ struct diag_rpmsg_info rpmsg_dci[NUM_PERIPHERALS] = {
.buf1 = NULL,
.buf2 = NULL,
.hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_NPU,
+ .type = TYPE_DCI,
+ .edge = "npu",
+ .name = "DIAG_DCI_DATA",
+ .buf1 = NULL,
+ .buf2 = NULL,
+ .hdl = NULL
}
};
@@ -264,6 +291,15 @@ struct diag_rpmsg_info rpmsg_cmd[NUM_PERIPHERALS] = {
.buf1 = NULL,
.buf2 = NULL,
.hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_NPU,
+ .type = TYPE_CMD,
+ .edge = "npu",
+ .name = "DIAG_CMD",
+ .buf1 = NULL,
+ .buf2 = NULL,
+ .hdl = NULL
}
};
@@ -321,6 +357,15 @@ struct diag_rpmsg_info rpmsg_dci_cmd[NUM_PERIPHERALS] = {
.buf1 = NULL,
.buf2 = NULL,
.hdl = NULL
+ },
+ {
+ .peripheral = PERIPHERAL_NPU,
+ .type = TYPE_DCI_CMD,
+ .edge = "npu",
+ .name = "DIAG_DCI_CMD",
+ .buf1 = NULL,
+ .buf2 = NULL,
+ .hdl = NULL
}
};
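
Each rpmsg table above is indexed by peripheral and must gain one initializer whenever a peripheral such as the NPU is added. A hedged sketch of a compile-time guard for that kind of table (not something this patch introduces): size the array from its initializers and assert it against the peripheral count. ARRAY_SIZE is re-defined here only so the fragment stands alone, and the two-entry peripheral list is a trimmed, assumed example:

#include <assert.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Trimmed, assumed peripheral list; the real enum lives in the driver. */
enum { PERIPHERAL_MODEM, PERIPHERAL_NPU, NUM_PERIPHERALS };

struct diag_rpmsg_info { int peripheral; const char *edge; };

/* Size the table from its initializers rather than NUM_PERIPHERALS ... */
static struct diag_rpmsg_info rpmsg_data[] = {
    { .peripheral = PERIPHERAL_MODEM, .edge = "mpss" },
    { .peripheral = PERIPHERAL_NPU,   .edge = "npu"  },
};

/* ... so a forgotten (or extra) entry becomes a compile-time error. */
static_assert(ARRAY_SIZE(rpmsg_data) == NUM_PERIPHERALS,
              "rpmsg_data needs one entry per peripheral");
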
diff --git a/drivers/char/diag/diagfwd_socket.c b/drivers/char/diag/diagfwd_socket.c
index 8228e7d54f60..9bad05684617 100644
--- a/drivers/char/diag/diagfwd_socket.c
+++ b/drivers/char/diag/diagfwd_socket.c
@@ -45,6 +45,7 @@
#define SENSORS_INST_BASE 192
#define CDSP_INST_BASE 256
#define WDSP_INST_BASE 320
+#define NPU_INST_BASE 384
#define INST_ID_CNTL 0
#define INST_ID_CMD 1
@@ -89,6 +90,11 @@ struct diag_socket_info socket_data[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_CDSP,
.type = TYPE_DATA,
.name = "CDSP_DATA"
+ },
+ {
+ .peripheral = PERIPHERAL_NPU,
+ .type = TYPE_DATA,
+ .name = "NPU_DATA"
}
};
@@ -122,6 +128,11 @@ struct diag_socket_info socket_cntl[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_CDSP,
.type = TYPE_CNTL,
.name = "CDSP_CNTL"
+ },
+ {
+ .peripheral = PERIPHERAL_NPU,
+ .type = TYPE_CNTL,
+ .name = "NPU_CNTL"
}
};
@@ -155,6 +166,11 @@ struct diag_socket_info socket_dci[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_CDSP,
.type = TYPE_DCI,
.name = "CDSP_DCI"
+ },
+ {
+ .peripheral = PERIPHERAL_NPU,
+ .type = TYPE_DCI,
+ .name = "NPU_DCI"
}
};
@@ -188,6 +204,11 @@ struct diag_socket_info socket_cmd[NUM_PERIPHERALS] = {
.peripheral = PERIPHERAL_CDSP,
.type = TYPE_CMD,
.name = "CDSP_CMD"
+ },
+ {
+ .peripheral = PERIPHERAL_NPU,
+ .type = TYPE_CMD,
+ .name = "NPU_CMD"
}
};
@@ -222,6 +243,11 @@ struct diag_socket_info socket_dci_cmd[NUM_PERIPHERALS] = {
.type = TYPE_DCI_CMD,
.name = "CDSP_DCI_CMD"
},
+ {
+ .peripheral = PERIPHERAL_NPU,
+ .type = TYPE_DCI_CMD,
+ .name = "NPU_DCI_CMD"
+ }
};
struct restart_notifier_block {
@@ -303,6 +329,7 @@ static struct restart_notifier_block restart_notifiers[] = {
{SOCKET_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb},
{SOCKET_SLPI, "slpi", .nb.notifier_call = restart_notifier_cb},
{SOCKET_CDSP, "cdsp", .nb.notifier_call = restart_notifier_cb},
+ {SOCKET_NPU, "npu", .nb.notifier_call = restart_notifier_cb},
};
void diag_socket_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt)
@@ -988,6 +1015,9 @@ static void __diag_socket_init(struct diag_socket_info *info)
case PERIPHERAL_CDSP:
ins_base = CDSP_INST_BASE;
break;
+ case PERIPHERAL_NPU:
+ ins_base = NPU_INST_BASE;
+ break;
}
switch (info->type) {
diff --git a/drivers/char/diag/diagfwd_socket.h b/drivers/char/diag/diagfwd_socket.h
index fb6b78623713..5f1464b91e07 100644
--- a/drivers/char/diag/diagfwd_socket.h
+++ b/drivers/char/diag/diagfwd_socket.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -31,6 +31,7 @@ enum {
SOCKET_WCNSS,
SOCKET_SLPI,
SOCKET_CDSP,
+ SOCKET_NPU,
SOCKET_APPS,
NUM_SOCKET_SUBSYSTEMS,
};
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index a5ed87caac9f..b06796416590 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -124,6 +124,7 @@
#define FABIA_TEST_CTL_HI 0x20
#define FABIA_OPMODE 0x2c
#define FABIA_FRAC_VAL 0x38
+#define FABIA_PLL_CAL_L_VAL 0x3f
#define FABIA_PLL_STANDBY 0x0
#define FABIA_PLL_RUN 0x1
#define FABIA_PLL_OUT_MASK 0x7
@@ -1918,6 +1919,9 @@ int clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
regmap_write(regmap, pll->offset + PLL_L_VAL,
config->l);
+ regmap_write(regmap, pll->offset + FABIA_CAL_L_VAL,
+ FABIA_PLL_CAL_L_VAL);
+
if (config->frac)
regmap_write(regmap, pll->offset + FABIA_FRAC_VAL,
config->frac);
@@ -1976,7 +1980,7 @@ static int clk_fabia_pll_enable(struct clk_hw *hw)
{
int ret;
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
- u32 val, opmode_val, off = pll->offset, l_val;
+ u32 val, opmode_val, off = pll->offset, l_val, cal_val;
ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
if (ret)
@@ -2002,8 +2006,13 @@ static int clk_fabia_pll_enable(struct clk_hw *hw)
if (ret)
return ret;
- /* PLL has lost it's L value, needs reconfiguration */
- if (!l_val)
+ ret = regmap_read(pll->clkr.regmap, pll->offset + FABIA_CAL_L_VAL,
+ &cal_val);
+ if (ret)
+ return ret;
+
+ /* PLL has lost its L or CAL value, needs reconfiguration */
+ if (!l_val || !cal_val)
pll->inited = false;
if (unlikely(!pll->inited)) {
@@ -2013,7 +2022,8 @@ static int clk_fabia_pll_enable(struct clk_hw *hw)
pr_err("Failed to configure %s\n", clk_hw_get_name(hw));
return ret;
}
- pr_warn("PLL configuration lost, reconfiguration of PLL done.\n");
+ pr_warn("%s: PLL configuration lost, reconfiguration of PLL done.\n",
+ clk_hw_get_name(hw));
}
/* Disable PLL output */
@@ -2173,7 +2183,7 @@ static int clk_fabia_pll_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
unsigned long rrate;
- u32 regval, l, off = pll->offset;
+ u32 regval, l, off = pll->offset, cal_val;
u64 a;
int ret;
@@ -2181,6 +2191,26 @@ static int clk_fabia_pll_set_rate(struct clk_hw *hw, unsigned long rate,
if (ret)
return ret;
+ ret = regmap_read(pll->clkr.regmap, pll->offset + FABIA_CAL_L_VAL,
+ &cal_val);
+ if (ret)
+ return ret;
+
+ /* PLL has lost its CAL value, needs reconfiguration */
+ if (!cal_val)
+ pll->inited = false;
+
+ if (unlikely(!pll->inited)) {
+ ret = clk_fabia_pll_configure(pll, pll->clkr.regmap,
+ pll->config);
+ if (ret) {
+ pr_err("Failed to configure %s\n", clk_hw_get_name(hw));
+ return ret;
+ }
+ pr_warn("%s: PLL configuration lost, reconfiguration of PLL done.\n",
+ clk_hw_get_name(hw));
+ }
+
rrate = alpha_pll_round_rate(pll, rate, prate, &l, &a);
/*
* Due to limited number of bits for fractional rate programming, the
@@ -2205,6 +2235,7 @@ static void clk_fabia_pll_list_registers(struct seq_file *f, struct clk_hw *hw)
static struct clk_register_data data[] = {
{"PLL_MODE", 0x0},
{"PLL_L_VAL", 0x4},
+ {"PLL_CAL_L_VAL", 0x8},
{"PLL_FRAC_VAL", 0x38},
{"PLL_USER_CTL_LO", 0xc},
{"PLL_USER_CTL_HI", 0x10},
diff --git a/drivers/clk/qcom/npucc-atoll.c b/drivers/clk/qcom/npucc-atoll.c
index cb44b4bbe5dc..08b95741745f 100644
--- a/drivers/clk/qcom/npucc-atoll.c
+++ b/drivers/clk/qcom/npucc-atoll.c
@@ -42,6 +42,8 @@
#define CRC_MND_CFG 0x11a4
#define CRC_MND_CFG_SETTING 0x15011
+#define NPU_FUSE_OFFSET 0x4
+
static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
enum {
@@ -279,6 +281,17 @@ static const struct freq_tbl ftbl_npu_cc_cal_hm0_clk_src[] = {
{ }
};
+static const struct freq_tbl ftbl_npu_cc_cal_hm0_clk_no_crc_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_NPU_CC_CRC_DIV, 2, 0, 0),
+ F(200000000, P_NPU_CC_CRC_DIV, 2, 0, 0),
+ F(400000000, P_NPU_CC_CRC_DIV, 2, 0, 0),
+ F(515000000, P_NPU_CC_CRC_DIV, 2, 0, 0),
+ F(650000000, P_NPU_CC_CRC_DIV, 2, 0, 0),
+ F(800000000, P_NPU_CC_CRC_DIV, 2, 0, 0),
+ { }
+};
+
static struct clk_rcg2 npu_cc_cal_hm0_clk_src = {
.cmd_rcgr = 0x1100,
.mnd_width = 0,
@@ -718,10 +731,24 @@ static const struct of_device_id npu_cc_atoll_match_table[] = {
};
MODULE_DEVICE_TABLE(of, npu_cc_atoll_match_table);
-static int enable_npu_crc(struct regmap *regmap, struct regulator *npu_gdsc)
+static int enable_npu_crc(struct platform_device *pdev, struct regmap *regmap,
+ struct regulator *npu_gdsc)
{
+ struct resource *res;
+ void __iomem *base;
+ u32 fuse_val, fuse1_val;
int ret;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "efuse");
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ fuse_val = readl_relaxed(base) & GENMASK(31, 27);
+ fuse1_val = readl_relaxed(base + NPU_FUSE_OFFSET) & GENMASK(2, 0);
+
+ devm_iounmap(&pdev->dev, base);
+
/* Set npu_cc_cal_hm0_clk to the lowest supported frequency */
clk_set_rate(npu_cc_cal_hm0_clk.clkr.hw.clk,
clk_round_rate(npu_cc_cal_hm0_clk_src.clkr.hw.clk, 1));
@@ -741,9 +768,19 @@ static int enable_npu_crc(struct regmap *regmap, struct regulator *npu_gdsc)
return ret;
}
- /* Enable MND RC */
- regmap_write(regmap, CRC_MND_CFG, CRC_MND_CFG_SETTING);
- regmap_write(regmap, CRC_SID_FSM_CTRL, CRC_SID_FSM_CTRL_SETTING);
+ if (fuse_val || fuse1_val) {
+ regmap_write(regmap, CRC_MND_CFG, 0x0);
+ regmap_write(regmap, CRC_SID_FSM_CTRL, 0x0);
+
+ npu_cc_crc_div.div = 1;
+ npu_cc_cal_hm0_clk_src.freq_tbl =
+ ftbl_npu_cc_cal_hm0_clk_no_crc_src;
+ } else {
+ /* Enable MND RC */
+ regmap_write(regmap, CRC_MND_CFG, CRC_MND_CFG_SETTING);
+ regmap_write(regmap, CRC_SID_FSM_CTRL,
+ CRC_SID_FSM_CTRL_SETTING);
+ }
/* Wait for 16 cycles before continuing */
udelay(1);
@@ -800,7 +837,7 @@ static int npu_clocks_atoll_probe(struct platform_device *pdev,
}
if (!strcmp("cc", desc->config->name)) {
- ret = enable_npu_crc(regmap, npu_gdsc);
+ ret = enable_npu_crc(pdev, regmap, npu_gdsc);
if (ret) {
dev_err(&pdev->dev,
"Failed to enable CRC for NPU cal RCG\n");
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
index 08218503b2fe..ab9f7f6dec84 100644
--- a/drivers/crypto/msm/ice.c
+++ b/drivers/crypto/msm/ice.c
@@ -26,7 +26,8 @@
#include <soc/qcom/qseecomi.h>
#include "iceregs.h"
#include <linux/pfk.h>
-
+#include <linux/atomic.h>
+#include <linux/wait.h>
#define TZ_SYSCALL_CREATE_SMC_ID(o, s, f) \
((uint32_t)((((o & 0x3f) << 24) | (s & 0xff) << 8) | (f & 0xff)))
@@ -114,6 +115,9 @@ struct ice_device {
struct qcom_ice_bus_vote bus_vote;
ktime_t ice_reset_start_time;
ktime_t ice_reset_complete_time;
+ atomic_t is_ice_suspended;
+ atomic_t is_ice_busy;
+ wait_queue_head_t block_suspend_ice_queue;
};
static int qti_ice_setting_config(struct request *req,
@@ -807,7 +811,6 @@ static int qcom_ice_probe(struct platform_device *pdev)
* operation arrives.
*/
ice_dev->is_ice_enabled = false;
-
platform_set_drvdata(pdev, ice_dev);
list_add_tail(&ice_dev->list, &ice_devices);
@@ -842,6 +845,21 @@ static int qcom_ice_remove(struct platform_device *pdev)
static int qcom_ice_suspend(struct platform_device *pdev)
{
+ struct ice_device *ice_dev;
+
+ ice_dev = (struct ice_device *)platform_get_drvdata(pdev);
+
+ if (!ice_dev)
+ return -EINVAL;
+
+ if (atomic_read(&ice_dev->is_ice_busy) != 0)
+ wait_event_interruptible_timeout(
+ ice_dev->block_suspend_ice_queue,
+ atomic_read(&ice_dev->is_ice_busy) == 0,
+ msecs_to_jiffies(1000));
+
+ atomic_set(&ice_dev->is_ice_suspended, 1);
+
return 0;
}
@@ -1095,7 +1113,7 @@ static int qcom_ice_finish_init(struct ice_device *ice_dev)
err = -EFAULT;
goto out;
}
-
+ init_waitqueue_head(&ice_dev->block_suspend_ice_queue);
qcom_ice_low_power_mode_enable(ice_dev);
qcom_ice_optimization_enable(ice_dev);
qcom_ice_config_proc_ignore(ice_dev);
@@ -1103,7 +1121,8 @@ static int qcom_ice_finish_init(struct ice_device *ice_dev)
qcom_ice_enable(ice_dev);
ice_dev->is_ice_enabled = true;
qcom_ice_enable_intr(ice_dev);
-
+ atomic_set(&ice_dev->is_ice_suspended, 0);
+ atomic_set(&ice_dev->is_ice_busy, 0);
out:
return err;
}
@@ -1200,7 +1219,6 @@ static int qcom_ice_resume(struct platform_device *pdev)
* after receiving this event
*/
struct ice_device *ice_dev;
-
ice_dev = platform_get_drvdata(pdev);
if (!ice_dev)
@@ -1214,7 +1232,7 @@ static int qcom_ice_resume(struct platform_device *pdev)
*/
qcom_ice_enable(ice_dev);
}
-
+ atomic_set(&ice_dev->is_ice_suspended, 0);
return 0;
}
@@ -1456,11 +1474,18 @@ static int qcom_ice_config_start(struct platform_device *pdev,
bool is_pfe = false;
unsigned long sec_end = 0;
sector_t data_size;
+ struct ice_device *ice_dev;
if (!pdev || !req) {
pr_err("%s: Invalid params passed\n", __func__);
return -EINVAL;
}
+ ice_dev = platform_get_drvdata(pdev);
+
+ if (!ice_dev) {
+ pr_err("%s: INVALID ice_dev\n", __func__);
+ return -EINVAL;
+ }
/*
* It is not an error to have a request with no bio
@@ -1477,7 +1502,18 @@ static int qcom_ice_config_start(struct platform_device *pdev,
return 0;
}
+ if (atomic_read(&ice_dev->is_ice_suspended) == 1)
+ return -EINVAL;
+
+ if (async)
+ atomic_set(&ice_dev->is_ice_busy, 1);
+
ret = pfk_load_key_start(req->bio, &pfk_crypto_data, &is_pfe, async);
+
+ if (async) {
+ atomic_set(&ice_dev->is_ice_busy, 0);
+ wake_up_interruptible(&ice_dev->block_suspend_ice_queue);
+ }
if (is_pfe) {
if (ret) {
if (ret != -EBUSY && ret != -EAGAIN)
@@ -1544,7 +1580,6 @@ static int qcom_ice_config_end(struct request *req)
/* It is not an error to have a request with no bio */
return 0;
}
-
ret = pfk_load_key_end(req->bio, &is_pfe);
if (is_pfe) {
if (ret != 0)
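
The ICE changes gate suspend on an is_ice_busy flag: the config-start path marks the device busy around pfk_load_key_start() for async requests and wakes block_suspend_ice_queue when done, while qcom_ice_suspend() waits up to one second for the flag to clear before marking the device suspended. A rough user-space approximation of that bounded wait, using C11 atomics and a millisecond polling loop instead of the kernel wait queue (the 1000 ms budget mirrors msecs_to_jiffies(1000) above; this is a sketch, not the driver's logic verbatim):

#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

atomic_bool ice_busy;
atomic_bool ice_suspended;

/* Wait up to timeout_ms for the busy flag to clear, then suspend anyway,
 * mirroring the driver's best-effort one-second wait. */
void ice_suspend(unsigned int timeout_ms)
{
    struct timespec step = { .tv_sec = 0, .tv_nsec = 1000000 };  /* 1 ms */
    unsigned int waited = 0;

    while (atomic_load(&ice_busy) && waited < timeout_ms) {
        nanosleep(&step, NULL);
        waited++;
    }
    atomic_store(&ice_suspended, true);
}

/* Request path: refuse new work once suspended, mark busy while active. */
bool ice_start_request(void)
{
    if (atomic_load(&ice_suspended))
        return false;

    atomic_store(&ice_busy, true);
    /* ... program the inline-crypto keys here ... */
    atomic_store(&ice_busy, false);
    return true;
}
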
diff --git a/drivers/devfreq/governor_gpubw_mon.c b/drivers/devfreq/governor_gpubw_mon.c
index 6c53704aac9c..e17cf0de19ef 100644
--- a/drivers/devfreq/governor_gpubw_mon.c
+++ b/drivers/devfreq/governor_gpubw_mon.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -232,10 +232,11 @@ static int devfreq_gpubw_event_handler(struct devfreq *devfreq,
case DEVFREQ_GOV_SUSPEND:
{
struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
-
- priv->bus.total_time = 0;
- priv->bus.gpu_time = 0;
- priv->bus.ram_time = 0;
+ if (priv) {
+ priv->bus.total_time = 0;
+ priv->bus.gpu_time = 0;
+ priv->bus.ram_time = 0;
+ }
}
break;
default:
diff --git a/drivers/esoc/esoc-mdm-drv.c b/drivers/esoc/esoc-mdm-drv.c
index 827b84b71cdc..d09458b7d840 100644
--- a/drivers/esoc/esoc-mdm-drv.c
+++ b/drivers/esoc/esoc-mdm-drv.c
@@ -184,6 +184,7 @@ static void mdm_handle_clink_evt(enum esoc_evt evt,
esoc_mdm_log("Modem not up. Ignoring.\n");
if (mdm_drv->mode == CRASH || mdm_drv->mode != RUN)
return;
+ mdm_drv->mode = CRASH;
queue_work(mdm_drv->mdm_queue, &mdm_drv->ssr_work);
break;
case ESOC_REQ_ENG_ON:
@@ -205,8 +206,7 @@ static void mdm_ssr_fn(struct work_struct *work)
mdm_wait_for_status_low(mdm, false);
- esoc_mdm_log("Starting SSR work and setting crash state\n");
- mdm_drv->mode = CRASH;
+ esoc_mdm_log("Starting SSR work\n");
/*
* If restarting esoc fails, the SSR framework triggers a kernel panic
@@ -289,12 +289,14 @@ static int mdm_subsys_shutdown(const struct subsys_desc *crashed_subsys,
container_of(crashed_subsys, struct esoc_clink, subsys);
struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
+ struct mdm_ctrl *mdm = get_esoc_clink_data(mdm_drv->esoc_clink);
esoc_mdm_log("Shutdown request from SSR\n");
mutex_lock(&mdm_drv->poff_lock);
if (mdm_drv->mode == CRASH || mdm_drv->mode == PEER_CRASH) {
esoc_mdm_log("Shutdown in crash mode\n");
+ mdm_wait_for_status_low(mdm, false);
if (mdm_dbg_stall_cmd(ESOC_PREPARE_DEBUG)) {
/* We want to mask debug command.
* In this case return success
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index acb205dec57b..259aca953f15 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -36,6 +36,7 @@ struct dp_debug_private {
u32 dpcd_size;
u32 mst_con_id;
+ bool hotplug;
char exe_mode[SZ_32];
char reg_dump[SZ_32];
@@ -351,9 +352,17 @@ static ssize_t dp_debug_write_hpd(struct file *file,
goto end;
hpd &= hpd_data_mask;
+ debug->hotplug = !!(hpd & BIT(0));
debug->dp_debug.psm_enabled = !!(hpd & BIT(1));
+ /*
+ * print hotplug value as this code is executed
+ * only while running in debug mode which is manually
+ * triggered by a tester or a script.
+ */
+ pr_info("%s\n", debug->hotplug ? "[CONNECT]" : "[DISCONNECT]");
+
debug->hpd->simulate_connect(debug->hpd, !!(hpd & BIT(0)));
end:
return len;
@@ -1479,6 +1488,13 @@ static void dp_debug_set_sim_mode(struct dp_debug_private *debug, bool sim)
debug->aux->set_sim_mode(debug->aux, true,
debug->edid, debug->dpcd);
} else {
+
+ if (debug->hotplug) {
+ pr_warn("sim mode off before hotplug disconnect\n");
+ debug->hpd->simulate_connect(debug->hpd, false);
+ debug->hotplug = false;
+ }
+
debug->aux->abort(debug->aux, false);
debug->ctrl->abort(debug->ctrl, false);
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index e8e6ad472592..d4223fbe913a 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -78,6 +78,7 @@ struct dp_display_private {
atomic_t aborted;
struct platform_device *pdev;
+ struct usbpd *pd;
struct device_node *aux_switch_node;
struct dentry *root;
struct completion notification_comp;
@@ -1046,10 +1047,8 @@ static int dp_display_usbpd_disconnect_cb(struct device *dev)
goto end;
}
- mutex_lock(&dp->session_lock);
if (dp->debug->psm_enabled && dp->core_initialized)
dp->link->psm_config(dp->link, &dp->panel->link_info, true);
- mutex_unlock(&dp->session_lock);
dp_display_disconnect_sync(dp);
@@ -1390,7 +1389,7 @@ static int dp_init_sub_modules(struct dp_display_private *dp)
cb->disconnect = dp_display_usbpd_disconnect_cb;
cb->attention = dp_display_usbpd_attention_cb;
- dp->hpd = dp_hpd_get(dev, dp->parser, &dp->catalog->hpd, cb);
+ dp->hpd = dp_hpd_get(dev, dp->parser, &dp->catalog->hpd, dp->pd, cb);
if (IS_ERR(dp->hpd)) {
rc = PTR_ERR(dp->hpd);
pr_err("failed to initialize hpd, rc = %d\n", rc);
@@ -2157,6 +2156,28 @@ static int dp_display_create_workqueue(struct dp_display_private *dp)
return 0;
}
+static int dp_display_usbpd_get(struct dp_display_private *dp)
+{
+ int rc = 0;
+ char const *phandle = "qcom,dp-usbpd-detection";
+
+ dp->pd = devm_usbpd_get_by_phandle(&dp->pdev->dev, phandle);
+ if (IS_ERR(dp->pd)) {
+ rc = PTR_ERR(dp->pd);
+
+ /*
+ * If the pd handle is not present(if return is -ENXIO) then the
+ * platform might be using a direct hpd connection from sink.
+ * So, return success in this case.
+ */
+ if (rc == -ENXIO)
+ return 0;
+
+ pr_err("usbpd phandle failed (%ld)\n", PTR_ERR(dp->pd));
+ }
+ return rc;
+}
+
static int dp_display_fsa4480_callback(struct notifier_block *self,
unsigned long event, void *data)
{
@@ -2612,6 +2633,10 @@ static int dp_display_probe(struct platform_device *pdev)
memset(&dp->mst, 0, sizeof(dp->mst));
atomic_set(&dp->aborted, 0);
+ rc = dp_display_usbpd_get(dp);
+ if (rc)
+ goto error;
+
rc = dp_display_init_aux_switch(dp);
if (rc) {
rc = -EPROBE_DEFER;
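
dp_display_usbpd_get() treats -ENXIO from devm_usbpd_get_by_phandle() as "no qcom,dp-usbpd-detection phandle, the platform presumably uses a direct HPD line" and returns success, while any other error, including probe deferral, is propagated. The fragment below isolates that optional-resource pattern; get_optional_helper() and its NULL-plus-errno convention are stand-ins invented for the example, not a real kernel API:

#include <errno.h>
#include <stddef.h>

struct usbpd;   /* opaque handle, as in the driver */

/* Stand-in for devm_usbpd_get_by_phandle(): returns NULL and sets *err.
 * Here it always reports "no such phandle". */
struct usbpd *get_optional_helper(int *err)
{
    *err = -ENXIO;
    return NULL;
}

int acquire_usbpd(struct usbpd **out)
{
    int err = 0;
    struct usbpd *pd = get_optional_helper(&err);

    if (!pd) {
        if (err == -ENXIO) {
            *out = NULL;    /* optional resource: absent is not an error */
            return 0;
        }
        return err;         /* real failure (e.g. probe deferral): propagate */
    }

    *out = pd;
    return 0;
}
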
diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.c b/drivers/gpu/drm/msm/dp/dp_hpd.c
index 2d0a1aa6a235..2711fe2867a6 100644
--- a/drivers/gpu/drm/msm/dp/dp_hpd.c
+++ b/drivers/gpu/drm/msm/dp/dp_hpd.c
@@ -48,7 +48,8 @@ static void dp_hpd_isr(struct dp_hpd *dp_hpd)
}
struct dp_hpd *dp_hpd_get(struct device *dev, struct dp_parser *parser,
- struct dp_catalog_hpd *catalog, struct dp_hpd_cb *cb)
+ struct dp_catalog_hpd *catalog, struct usbpd *pd,
+ struct dp_hpd_cb *cb)
{
struct dp_hpd *dp_hpd;
@@ -67,7 +68,7 @@ struct dp_hpd *dp_hpd_get(struct device *dev, struct dp_parser *parser,
}
dp_hpd->type = DP_HPD_GPIO;
} else {
- dp_hpd = dp_usbpd_get(dev, cb);
+ dp_hpd = dp_usbpd_init(dev, pd, cb);
if (IS_ERR(dp_hpd)) {
pr_err("failed to get usbpd\n");
goto out;
@@ -93,7 +94,7 @@ void dp_hpd_put(struct dp_hpd *dp_hpd)
switch (dp_hpd->type) {
case DP_HPD_USBPD:
- dp_usbpd_put(dp_hpd);
+ dp_usbpd_deinit(dp_hpd);
break;
case DP_HPD_GPIO:
dp_gpio_hpd_put(dp_hpd);
diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.h b/drivers/gpu/drm/msm/dp/dp_hpd.h
index ff667143fcdb..bff9400798a6 100644
--- a/drivers/gpu/drm/msm/dp/dp_hpd.h
+++ b/drivers/gpu/drm/msm/dp/dp_hpd.h
@@ -87,14 +87,17 @@ struct dp_hpd {
* dp_hpd_get() - configure and get the DisplayPort HPD module data
*
* @dev: device instance of the caller
- * @parser: DP parser
+ * @parser: pointer to DP parser module
+ * @catalog: pointer to DP catalog module
+ * @pd: handle for the usbpd driver data
* @cb: callback function for HPD response
* return: pointer to allocated hpd module data
*
* This function sets up the hpd module
*/
struct dp_hpd *dp_hpd_get(struct device *dev, struct dp_parser *parser,
- struct dp_catalog_hpd *catalog, struct dp_hpd_cb *cb);
+ struct dp_catalog_hpd *catalog, struct usbpd *pd,
+ struct dp_hpd_cb *cb);
/**
* dp_hpd_put()
diff --git a/drivers/gpu/drm/msm/dp/dp_usbpd.c b/drivers/gpu/drm/msm/dp/dp_usbpd.c
index a9da3219e247..ea9ba7c75e5e 100644
--- a/drivers/gpu/drm/msm/dp/dp_usbpd.c
+++ b/drivers/gpu/drm/msm/dp/dp_usbpd.c
@@ -523,11 +523,10 @@ static void dp_usbpd_wakeup_phy(struct dp_hpd *dp_hpd, bool wakeup)
usbpd_vdm_in_suspend(usbpd->pd, wakeup);
}
-struct dp_hpd *dp_usbpd_get(struct device *dev, struct dp_hpd_cb *cb)
+struct dp_hpd *dp_usbpd_init(struct device *dev, struct usbpd *pd,
+ struct dp_hpd_cb *cb)
{
int rc = 0;
- const char *pd_phandle = "qcom,dp-usbpd-detection";
- struct usbpd *pd = NULL;
struct dp_usbpd_private *usbpd;
struct dp_usbpd *dp_usbpd;
struct usbpd_svid_handler svid_handler = {
@@ -538,19 +537,12 @@ struct dp_hpd *dp_usbpd_get(struct device *dev, struct dp_hpd_cb *cb)
.disconnect = &dp_usbpd_disconnect_cb,
};
- if (!cb) {
- pr_err("invalid cb data\n");
+ if (IS_ERR(pd) || !cb) {
+ pr_err("invalid data\n");
rc = -EINVAL;
goto error;
}
- pd = devm_usbpd_get_by_phandle(dev, pd_phandle);
- if (IS_ERR(pd)) {
- pr_err("usbpd phandle failed (%ld)\n", PTR_ERR(pd));
- rc = PTR_ERR(pd);
- goto error;
- }
-
usbpd = devm_kzalloc(dev, sizeof(*usbpd), GFP_KERNEL);
if (!usbpd) {
rc = -ENOMEM;
@@ -573,7 +565,7 @@ error:
return ERR_PTR(rc);
}
-void dp_usbpd_put(struct dp_hpd *dp_hpd)
+void dp_usbpd_deinit(struct dp_hpd *dp_hpd)
{
struct dp_usbpd *dp_usbpd;
struct dp_usbpd_private *usbpd;
diff --git a/drivers/gpu/drm/msm/dp/dp_usbpd.h b/drivers/gpu/drm/msm/dp/dp_usbpd.h
index 959188d406e4..01200cbe727f 100644
--- a/drivers/gpu/drm/msm/dp/dp_usbpd.h
+++ b/drivers/gpu/drm/msm/dp/dp_usbpd.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -57,9 +57,10 @@ struct dp_usbpd {
};
/**
- * dp_usbpd_get() - setup usbpd module
+ * dp_usbpd_init() - initialize the usbpd module
*
* @dev: device instance of the caller
+ * @pd: handle for the usbpd driver data
* @cb: struct containing callback function pointers.
*
* This function allows the client to initialize the usbpd
@@ -68,7 +69,15 @@ struct dp_usbpd {
* sink/usb device. This module will notify the client using
* the callback functions about the connection and status.
*/
-struct dp_hpd *dp_usbpd_get(struct device *dev, struct dp_hpd_cb *cb);
+struct dp_hpd *dp_usbpd_init(struct device *dev, struct usbpd *pd,
+ struct dp_hpd_cb *cb);
-void dp_usbpd_put(struct dp_hpd *pd);
+/**
+ * dp_usbpd_deinit() - deinitialize the usbpd module
+ *
+ * @pd: pointer to the dp_hpd base module
+ *
+ * This function will cleanup the usbpd module
+ */
+void dp_usbpd_deinit(struct dp_hpd *pd);
#endif /* _DP_USBPD_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index 47e347693452..b09b43342295 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -118,7 +118,7 @@ static ssize_t debugfs_state_info_read(struct file *file,
dsi_ctrl->clk_freq.pix_clk_rate,
dsi_ctrl->clk_freq.esc_clk_rate);
- /* TODO: make sure that this does not exceed 4K */
+ len = min_t(size_t, len, SZ_4K);
if (copy_to_user(buff, buf, len)) {
kfree(buf);
return -EFAULT;
@@ -173,8 +173,7 @@ static ssize_t debugfs_reg_dump_read(struct file *file,
return rc;
}
-
- /* TODO: make sure that this does not exceed 4K */
+ len = min_t(size_t, len, SZ_4K);
if (copy_to_user(buff, buf, len)) {
kfree(buf);
return -EFAULT;
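
Both dsi_ctrl.c debugfs read handlers now clamp len with min_t(size_t, len, SZ_4K) before copy_to_user(), replacing the old "make sure that this does not exceed 4K" TODO, so an oversized format result can never copy past the 4 KiB buffer. The same clamp in a stand-alone form (BUF_SZ and min_size() are local to this example):

#include <stddef.h>
#include <string.h>

#define BUF_SZ 4096u    /* analogue of SZ_4K */

size_t min_size(size_t a, size_t b)
{
    return a < b ? a : b;
}

/* Copy at most BUF_SZ bytes of the formatted text into dst. */
size_t copy_clamped(char *dst, const char *src, size_t len)
{
    len = min_size(len, BUF_SZ);    /* mirrors min_t(size_t, len, SZ_4K) */
    memcpy(dst, src, len);
    return len;
}
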
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index c6b2d35fe732..41cdb4d2faff 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -2282,11 +2282,20 @@ error:
return rc;
}
-static int dsi_display_set_clk_src(struct dsi_display *display)
+static int dsi_display_set_clk_src(struct dsi_display *display, bool on)
{
int rc = 0;
int i;
struct dsi_display_ctrl *m_ctrl, *ctrl;
+ struct dsi_clk_link_set *src;
+
+ /* if XO clk is defined, select XO clk src when DSI is disabled */
+ if (on)
+ src = &display->clock_info.mux_clks;
+ else if (display->clock_info.xo_clks.byte_clk)
+ src = &display->clock_info.xo_clks;
+ else
+ return 0;
/*
* In case of split DSI usecases, the clock for master controller should
@@ -2295,8 +2304,7 @@ static int dsi_display_set_clk_src(struct dsi_display *display)
*/
m_ctrl = &display->ctrl[display->clk_master_idx];
- rc = dsi_ctrl_set_clock_source(m_ctrl->ctrl,
- &display->clock_info.mux_clks);
+ rc = dsi_ctrl_set_clock_source(m_ctrl->ctrl, src);
if (rc) {
pr_err("[%s] failed to set source clocks for master, rc=%d\n",
display->name, rc);
@@ -2309,8 +2317,7 @@ static int dsi_display_set_clk_src(struct dsi_display *display)
if (!ctrl->ctrl || (ctrl == m_ctrl))
continue;
- rc = dsi_ctrl_set_clock_source(ctrl->ctrl,
- &display->clock_info.mux_clks);
+ rc = dsi_ctrl_set_clock_source(ctrl->ctrl, src);
if (rc) {
pr_err("[%s] failed to set source clocks, rc=%d\n",
display->name, rc);
@@ -3000,12 +3007,17 @@ static int dsi_display_clocks_init(struct dsi_display *display)
struct dsi_clk_link_set *src = &display->clock_info.src_clks;
struct dsi_clk_link_set *mux = &display->clock_info.mux_clks;
struct dsi_clk_link_set *shadow = &display->clock_info.shadow_clks;
+ struct dsi_clk_link_set *xo = &display->clock_info.xo_clks;
struct dsi_dyn_clk_caps *dyn_clk_caps = &(display->panel->dyn_clk_caps);
num_clk = dsi_display_get_clocks_count(display);
pr_debug("clk count=%d\n", num_clk);
+ dsi_clk = devm_clk_get(&display->pdev->dev, "xo_clk");
+ if (!IS_ERR_OR_NULL(dsi_clk))
+ xo->byte_clk = xo->pixel_clk = dsi_clk;
+
for (i = 0; i < num_clk; i++) {
dsi_display_get_clock_name(display, i, &clk_name);
@@ -6514,7 +6526,7 @@ static int dsi_display_pre_switch(struct dsi_display *display)
goto error_ctrl_clk_off;
}
- rc = dsi_display_set_clk_src(display);
+ rc = dsi_display_set_clk_src(display, true);
if (rc) {
pr_err("[%s] failed to set DSI link clock source, rc=%d\n",
display->name, rc);
@@ -6906,7 +6918,7 @@ int dsi_display_prepare(struct dsi_display *display)
}
}
- rc = dsi_display_set_clk_src(display);
+ rc = dsi_display_set_clk_src(display, true);
if (rc) {
pr_err("[%s] failed to set DSI link clock source, rc=%d\n",
display->name, rc);
@@ -7134,21 +7146,11 @@ int dsi_display_pre_kickoff(struct drm_connector *connector,
{
int rc = 0;
int i;
- bool enable;
/* check and setup MISR */
if (display->misr_enable)
_dsi_display_setup_misr(display);
- if (params->qsync_update) {
- enable = (params->qsync_mode > 0) ? true : false;
- rc = dsi_display_qsync(display, enable);
- if (rc)
- pr_err("%s failed to send qsync commands",
- __func__);
- SDE_EVT32(params->qsync_mode, rc);
- }
-
rc = dsi_display_set_roi(display, params->rois);
/* dynamic DSI clock setting */
@@ -7229,6 +7231,29 @@ error_out:
return rc;
}
+int dsi_display_pre_commit(void *display,
+ struct msm_display_conn_params *params)
+{
+ bool enable = false;
+ int rc = 0;
+
+ if (!display || !params) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ if (params->qsync_update) {
+ enable = (params->qsync_mode > 0) ? true : false;
+ rc = dsi_display_qsync(display, enable);
+ if (rc)
+ pr_err("%s failed to send qsync commands\n",
+ __func__);
+ SDE_EVT32(params->qsync_mode, rc);
+ }
+
+ return rc;
+}
+
int dsi_display_enable(struct dsi_display *display)
{
int rc = 0;
@@ -7501,6 +7526,8 @@ int dsi_display_unprepare(struct dsi_display *display)
pr_err("[%s] panel post-unprepare failed, rc=%d\n",
display->name, rc);
+ dsi_display_set_clk_src(display, false);
+
mutex_unlock(&display->display_lock);
/* Free up DSI ERROR event callback */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
index b4a5cf9fb40e..15b618da4714 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -115,11 +115,13 @@ struct dsi_display_boot_param {
* @src_clks: Source clocks for DSI display.
* @mux_clks: Mux clocks used for DFPS.
* @shadow_clks: Used for DFPS.
+ * @xo_clks: XO clocks for DSI display
*/
struct dsi_display_clk_info {
struct dsi_clk_link_set src_clks;
struct dsi_clk_link_set mux_clks;
struct dsi_clk_link_set shadow_clks;
+ struct dsi_clk_link_set xo_clks;
};
/**
@@ -668,6 +670,15 @@ int dsi_display_set_power(struct drm_connector *connector,
int dsi_display_pre_kickoff(struct drm_connector *connector,
struct dsi_display *display,
struct msm_display_kickoff_params *params);
+/**
+ * dsi_display_pre_commit - program pre commit features
+ * @display: Pointer to private display structure
+ * @params: Parameters for pre commit time programming
+ * Returns: Zero on success
+ */
+int dsi_display_pre_commit(void *display,
+ struct msm_display_conn_params *params);
+
/**
* dsi_display_get_dst_format() - get dst_format from DSI display
* @connector: Pointer to drm connector structure
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index 0e58dd8ed4d9..73ed1e821d29 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -396,6 +396,16 @@ static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge,
(!crtc_state->active_changed ||
display->is_cont_splash_enabled))
dsi_mode.dsi_mode_flags |= DSI_MODE_FLAG_DMS;
+
+ /* Reject seamless transition when active changed. */
+ if (crtc_state->active_changed &&
+ ((dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_VRR) ||
+ (dsi_mode.dsi_mode_flags & DSI_MODE_FLAG_DYN_CLK))) {
+ pr_err("seamless upon active changed 0x%x %d\n",
+ dsi_mode.dsi_mode_flags,
+ crtc_state->active_changed);
+ return false;
+ }
}
/* convert back to drm mode, propagating the private info & flags */
@@ -881,6 +891,17 @@ int dsi_conn_pre_kickoff(struct drm_connector *connector,
return dsi_display_pre_kickoff(connector, display, params);
}
+int dsi_conn_prepare_commit(void *display,
+ struct msm_display_conn_params *params)
+{
+ if (!display || !params) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ return dsi_display_pre_commit(display, params);
+}
+
void dsi_conn_enable_event(struct drm_connector *connector,
uint32_t event_idx, bool enable, void *display)
{
@@ -895,7 +916,8 @@ void dsi_conn_enable_event(struct drm_connector *connector,
event_idx, &event_info, enable);
}
-int dsi_conn_post_kickoff(struct drm_connector *connector)
+int dsi_conn_post_kickoff(struct drm_connector *connector,
+ struct msm_display_conn_params *params)
{
struct drm_encoder *encoder;
struct dsi_bridge *c_bridge;
@@ -903,6 +925,7 @@ int dsi_conn_post_kickoff(struct drm_connector *connector)
struct dsi_display *display;
struct dsi_display_ctrl *m_ctrl, *ctrl;
int i, rc = 0;
+ bool enable;
if (!connector || !connector->state) {
pr_err("invalid connector or connector state");
@@ -948,6 +971,13 @@ int dsi_conn_post_kickoff(struct drm_connector *connector)
/* ensure dynamic clk switch flag is reset */
c_bridge->dsi_mode.dsi_mode_flags &= ~DSI_MODE_FLAG_DYN_CLK;
+ if (params->qsync_update) {
+ enable = (params->qsync_mode > 0) ? true : false;
+ display_for_each_ctrl(i, display) {
+ dsi_ctrl_setup_avr(display->ctrl[i].ctrl, enable);
+ }
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
index 1af30e1d8090..e0d89f8b57ba 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
@@ -127,9 +127,11 @@ int dsi_conn_pre_kickoff(struct drm_connector *connector,
/**
* dsi_display_post_kickoff - program post kickoff-time features
* @connector: Pointer to drm connector structure
+ * @params: Parameters for post kickoff programming
* Returns: Zero on success
*/
-int dsi_conn_post_kickoff(struct drm_connector *connector);
+int dsi_conn_post_kickoff(struct drm_connector *connector,
+ struct msm_display_conn_params *params);
/**
* dsi_convert_to_drm_mode - Update drm mode with dsi mode information
@@ -141,4 +143,15 @@ void dsi_convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
u64 dsi_drm_find_bit_clk_rate(void *display,
const struct drm_display_mode *drm_mode);
+
+/**
+ * dsi_conn_prepare_commit - program pre commit time features
+ * @display: Pointer to private display structure
+ * @params: Parameters for pre commit programming
+ * Returns: Zero on success
+ */
+int dsi_conn_prepare_commit(void *display,
+ struct msm_display_conn_params *params);
+
+
#endif /* _DSI_DRM_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index de28a2e8964d..6ce144458aec 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -3239,12 +3239,14 @@ struct dsi_panel *dsi_panel_get(struct device *parent,
if (rc)
pr_err("failed to parse dfps configuration, rc=%d\n", rc);
- if (!(panel->dfps_caps.dfps_support)) {
- /* qsync and dfps are mutually exclusive features */
- rc = dsi_panel_parse_qsync_caps(panel, of_node);
- if (rc)
- pr_err("failed to parse qsync features, rc=%d\n", rc);
- }
+ rc = dsi_panel_parse_qsync_caps(panel, of_node);
+ if (rc)
+ pr_err("failed to parse qsync features, rc=%d\n", rc);
+
+ /* allow qsync support only if DFPS is with VFP approach */
+ if ((panel->dfps_caps.dfps_support) &&
+ !(panel->dfps_caps.type == DSI_DFPS_IMMEDIATE_VFP))
+ panel->qsync_min_fps = 0;
rc = dsi_panel_parse_dyn_clk_caps(panel);
if (rc)
@@ -3273,7 +3275,6 @@ struct dsi_panel *dsi_panel_get(struct device *parent,
goto error;
}
-
rc = dsi_panel_parse_misc_features(panel);
if (rc)
pr_err("failed to parse misc features, rc=%d\n", rc);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
index e4b369e2f8af..5908e54a1702 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
@@ -281,6 +281,32 @@ static int dsi_phy_parse_dt_per_lane_cfgs(struct platform_device *pdev,
return rc;
}
+static int dsi_phy_parse_dt_per_lane_bits(struct platform_device *pdev,
+ u8 *bits,
+ char *property)
+{
+ int rc = 0, i = 0;
+ const u8 *data;
+ u32 len = 0;
+
+ data = of_get_property(pdev->dev.of_node, property, &len);
+ if (!data)
+ return 0;
+
+ if (len != DSI_LANE_MAX) {
+ pr_err("incorrect phy %s settings, exp=%d, act=%d\n",
+ property, DSI_LANE_MAX, len);
+ return -EINVAL;
+ }
+
+ *bits = 0;
+
+ for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++)
+ *bits |= (data[i] & 0x01) << i;
+
+ return rc;
+}
+
static int dsi_phy_settings_init(struct platform_device *pdev,
struct msm_dsi_phy *phy)
{
@@ -316,6 +342,13 @@ static int dsi_phy_settings_init(struct platform_device *pdev,
}
}
+ rc = dsi_phy_parse_dt_per_lane_bits(pdev, &phy->cfg.lane_pnswap,
+ "qcom,platform-lane-pnswap");
+ if (rc) {
+ pr_err("failed to parse lane P/N swap map, rc=%d\n", rc);
+ goto err;
+ }
+
/* Actual timing values are dependent on panel */
timing->count_per_lane = phy->ver_info->timing_cfg_count;
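
dsi_phy_parse_dt_per_lane_bits() collapses a DSI_LANE_MAX array of 0/1 bytes from the qcom,platform-lane-pnswap property into one bitmask with "*bits |= (data[i] & 0x01) << i", and the v4.0 PHY code later unpacks it per lane with "(cfg->lane_pnswap >> i) & 0x1". A stand-alone round trip of that packing scheme, assuming the usual four data lanes:

#include <stdint.h>
#include <stdio.h>

#define NUM_LANES 4

/* Pack per-lane 0/1 flags (e.g. qcom,platform-lane-pnswap) into a mask. */
uint8_t pack_lane_bits(const uint8_t flags[NUM_LANES])
{
    uint8_t bits = 0;
    int i;

    for (i = 0; i < NUM_LANES; i++)
        bits |= (flags[i] & 0x01) << i;
    return bits;
}

int main(void)
{
    const uint8_t pnswap[NUM_LANES] = { 0, 1, 0, 1 };   /* example DT bytes */
    uint8_t mask = pack_lane_bits(pnswap);
    int i;

    for (i = 0; i < NUM_LANES; i++)     /* unpack one lane at a time */
        printf("lane %d P/N swap: %u\n", i, (mask >> i) & 0x1);

    return 0;
}
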
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
index b8e900e390f9..6b5b1dc643cb 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
@@ -94,6 +94,7 @@ struct dsi_phy_per_lane_cfgs {
* @regulators: Regulator settings for lanes.
* @pll_source: PLL source.
* @lane_map: DSI logical to PHY lane mapping.
+ * @lane_pnswap: P/N swap status on each lane.
* @force_clk_lane_hs:Boolean whether to force clock lane in HS mode.
* @bit_clk_rate_hz: DSI bit clk rate in HZ.
*/
@@ -105,6 +106,7 @@ struct dsi_phy_cfg {
struct dsi_phy_per_lane_cfgs regulators;
enum dsi_phy_pll_source pll_source;
struct dsi_lane_map lane_map;
+ u8 lane_pnswap;
bool force_clk_lane_hs;
unsigned long bit_clk_rate_hz;
};
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c
index f5f1ba7f1e02..b0db818ce39d 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c
@@ -131,7 +131,6 @@ static void dsi_phy_hw_v4_0_lane_settings(struct dsi_phy_hw *phy,
* to the logical data lane 0
*/
DSI_W32(phy, DSIPHY_LNX_LPRX_CTRL(i), 0);
- DSI_W32(phy, DSIPHY_LNX_PIN_SWAP(i), 0x0);
}
dsi_phy_hw_v4_0_config_lpcdrx(phy, cfg, true);
@@ -141,6 +140,8 @@ static void dsi_phy_hw_v4_0_lane_settings(struct dsi_phy_hw *phy,
DSI_W32(phy, DSIPHY_LNX_CFG1(i), cfg->lanecfg.lane[i][1]);
DSI_W32(phy, DSIPHY_LNX_CFG2(i), cfg->lanecfg.lane[i][2]);
DSI_W32(phy, DSIPHY_LNX_TX_DCTRL(i), tx_dctrl[i]);
+ DSI_W32(phy, DSIPHY_LNX_PIN_SWAP(i),
+ (cfg->lane_pnswap >> i) & 0x1);
}
}
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 976744adfd09..d87a2dcb7734 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -531,6 +531,14 @@ struct msm_roi_list {
struct msm_display_kickoff_params {
struct msm_roi_list *rois;
struct drm_msm_ext_hdr_metadata *hdr_meta;
+};
+
+/**
+ * struct msm_display_conn_params - info of dpu display features
+ * @qsync_mode: Qsync mode, where 0: disabled 1: continuous mode
+ * @qsync_update: Qsync settings were changed/updated
+ */
+struct msm_display_conn_params {
uint32_t qsync_mode;
bool qsync_update;
};
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 5dad84b2f4eb..0359f7d2f49c 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -64,6 +64,7 @@ static const struct drm_prop_enum_list e_power_mode[] = {
static const struct drm_prop_enum_list e_qsync_mode[] = {
{SDE_RM_QSYNC_DISABLED, "none"},
{SDE_RM_QSYNC_CONTINUOUS_MODE, "continuous"},
+ {SDE_RM_QSYNC_ONE_SHOT_MODE, "one_shot"},
};
static int sde_backlight_device_update_status(struct backlight_device *bd)
@@ -564,21 +565,30 @@ static int _sde_connector_update_bl_scale(struct sde_connector *c_conn)
void sde_connector_set_qsync_params(struct drm_connector *connector)
{
- struct sde_connector *c_conn = to_sde_connector(connector);
- u32 qsync_propval;
+ struct sde_connector *c_conn;
+ struct sde_connector_state *c_state;
+ u32 qsync_propval = 0;
+ bool prop_dirty;
if (!connector)
return;
+ c_conn = to_sde_connector(connector);
+ c_state = to_sde_connector_state(connector->state);
c_conn->qsync_updated = false;
- qsync_propval = sde_connector_get_property(c_conn->base.state,
- CONNECTOR_PROP_QSYNC_MODE);
- if (qsync_propval != c_conn->qsync_mode) {
- SDE_DEBUG("updated qsync mode %d -> %d\n", c_conn->qsync_mode,
- qsync_propval);
- c_conn->qsync_updated = true;
- c_conn->qsync_mode = qsync_propval;
+ prop_dirty = msm_property_is_dirty(&c_conn->property_info,
+ &c_state->property_state,
+ CONNECTOR_PROP_QSYNC_MODE);
+ if (prop_dirty) {
+ qsync_propval = sde_connector_get_property(c_conn->base.state,
+ CONNECTOR_PROP_QSYNC_MODE);
+ if (qsync_propval != c_conn->qsync_mode) {
+ SDE_DEBUG("updated qsync mode %d -> %d\n",
+ c_conn->qsync_mode, qsync_propval);
+ c_conn->qsync_updated = true;
+ c_conn->qsync_mode = qsync_propval;
+ }
}
}
@@ -659,22 +669,53 @@ int sde_connector_pre_kickoff(struct drm_connector *connector)
params.rois = &c_state->rois;
params.hdr_meta = &c_state->hdr_meta;
- params.qsync_update = false;
+
+ SDE_EVT32_VERBOSE(connector->base.id);
+
+ rc = c_conn->ops.pre_kickoff(connector, c_conn->display, &params);
+
+end:
+ return rc;
+}
+
+int sde_connector_prepare_commit(struct drm_connector *connector)
+{
+ struct sde_connector *c_conn;
+ struct sde_connector_state *c_state;
+ struct msm_display_conn_params params;
+ int rc;
+
+ if (!connector) {
+ SDE_ERROR("invalid argument\n");
+ return -EINVAL;
+ }
+
+ c_conn = to_sde_connector(connector);
+ c_state = to_sde_connector_state(connector->state);
+ if (!c_conn->display) {
+ SDE_ERROR("invalid connector display\n");
+ return -EINVAL;
+ }
+
+ if (!c_conn->ops.prepare_commit)
+ return 0;
+
+ memset(&params, 0, sizeof(params));
if (c_conn->qsync_updated) {
params.qsync_mode = c_conn->qsync_mode;
params.qsync_update = true;
- SDE_EVT32(connector->base.id, params.qsync_mode);
}
- SDE_EVT32_VERBOSE(connector->base.id);
+ rc = c_conn->ops.prepare_commit(c_conn->display, &params);
- rc = c_conn->ops.pre_kickoff(connector, c_conn->display, &params);
+ SDE_EVT32(connector->base.id, params.qsync_mode,
+ params.qsync_update, rc);
-end:
return rc;
}
+
void sde_connector_helper_bridge_disable(struct drm_connector *connector)
{
int rc;
@@ -1209,6 +1250,10 @@ static int sde_connector_atomic_set_property(struct drm_connector *connector,
c_conn->bl_scale_ad = val;
c_conn->bl_scale_dirty = true;
break;
+ case CONNECTOR_PROP_QSYNC_MODE:
+ msm_property_set_dirty(&c_conn->property_info,
+ &c_state->property_state, idx);
+ break;
default:
break;
}
@@ -1805,13 +1850,34 @@ static int sde_connector_atomic_check(struct drm_connector *connector,
struct drm_connector_state *new_conn_state)
{
struct sde_connector *c_conn;
+ struct drm_crtc_state *crtc_state;
+ struct sde_connector_state *c_state;
+ bool qsync_dirty;
if (!connector) {
SDE_ERROR("invalid connector\n");
- return 0;
+ return -EINVAL;
+ }
+
+ if (!new_conn_state) {
+ SDE_ERROR("invalid connector state\n");
+ return -EINVAL;
}
c_conn = to_sde_connector(connector);
+ c_state = to_sde_connector_state(new_conn_state);
+
+ crtc_state = drm_atomic_get_new_crtc_state(new_conn_state->state,
+ new_conn_state->crtc);
+
+ qsync_dirty = msm_property_is_dirty(&c_conn->property_info,
+ &c_state->property_state,
+ CONNECTOR_PROP_QSYNC_MODE);
+
+ if (drm_atomic_crtc_needs_modeset(crtc_state) && qsync_dirty) {
+ SDE_ERROR("invalid qsync update during modeset\n");
+ return -EINVAL;
+ }
if (c_conn->ops.atomic_check)
return c_conn->ops.atomic_check(connector,
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 5d40836e3165..2d43c4a65de7 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -222,9 +222,12 @@ struct sde_connector_ops {
/**
* post_kickoff - display to program post kickoff-time features
* @connector: Pointer to drm connector structure
+ * @params: Parameter bundle of connector-stored information for
+ * post kickoff programming into the display
* Returns: Zero on success
*/
- int (*post_kickoff)(struct drm_connector *connector);
+ int (*post_kickoff)(struct drm_connector *connector,
+ struct msm_display_conn_params *params);
/**
* post_open - calls connector to process post open functionalities
@@ -310,6 +313,16 @@ struct sde_connector_ops {
* Returns: v_front_porch on success error-code on failure
*/
int (*get_panel_vfp)(void *display, int h_active, int v_active);
+
+ /**
+ * prepare_commit - trigger display to program pre-commit time features
+ * @display: Pointer to private display structure
+ * @params: Parameter bundle of connector-stored information for
+ * pre commit time programming into the display
+ * Returns: Zero on success
+ */
+ int (*prepare_commit)(void *display,
+ struct msm_display_conn_params *params);
};
/**
@@ -766,6 +779,13 @@ int sde_connector_register_custom_event(struct sde_kms *kms,
int sde_connector_pre_kickoff(struct drm_connector *connector);
/**
+ * sde_connector_prepare_commit - trigger commit time feature programming
+ * @connector: Pointer to drm connector object
+ * Returns: Zero on success
+ */
+int sde_connector_prepare_commit(struct drm_connector *connector);
+
+/**
* sde_connector_needs_offset - adjust the output fence offset based on
* display type
* @connector: Pointer to drm connector object
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 68362d3e0a0a..73c1532d42b9 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -5552,6 +5552,43 @@ end:
return rc;
}
+/**
+ * sde_crtc_get_num_datapath - get the number of datapath active
+ * of primary connector
+ * @crtc: Pointer to DRM crtc object
+ * @connector: Pointer to DRM connector object of WB in CWB case
+ */
+int sde_crtc_get_num_datapath(struct drm_crtc *crtc,
+ struct drm_connector *connector)
+{
+ struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+ struct sde_connector_state *sde_conn_state = NULL;
+ struct drm_connector *conn;
+ struct drm_connector_list_iter conn_iter;
+
+ if (!sde_crtc || !connector) {
+ SDE_DEBUG("Invalid argument\n");
+ return 0;
+ }
+
+ if (sde_crtc->num_mixers)
+ return sde_crtc->num_mixers;
+
+ drm_connector_list_iter_begin(crtc->dev, &conn_iter);
+ drm_for_each_connector_iter(conn, &conn_iter) {
+ if (conn->state && conn->state->crtc == crtc &&
+ conn != connector)
+ sde_conn_state = to_sde_connector_state(conn->state);
+ }
+
+ drm_connector_list_iter_end(&conn_iter);
+
+ if (sde_conn_state)
+ return sde_conn_state->mode_info.topology.num_lm;
+
+ return 0;
+}
+
int sde_crtc_vblank(struct drm_crtc *crtc, bool en)
{
struct sde_crtc *sde_crtc;
@@ -5589,6 +5626,8 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
struct drm_device *dev;
struct sde_kms_info *info;
struct sde_kms *sde_kms;
+ int i, j;
+
static const struct drm_prop_enum_list e_secure_level[] = {
{SDE_DRM_SEC_NON_SEC, "sec_and_non_sec"},
{SDE_DRM_SEC_ONLY, "sec_only"},
@@ -5800,6 +5839,37 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc,
if (sde_kms->perf.max_core_clk_rate)
sde_kms_info_add_keyint(info, "max_mdp_clk",
sde_kms->perf.max_core_clk_rate);
+
+ for (i = 0; i < catalog->limit_count; i++) {
+ sde_kms_info_add_keyint(info,
+ catalog->limit_cfg[i].name,
+ catalog->limit_cfg[i].lmt_case_cnt);
+
+ for (j = 0; j < catalog->limit_cfg[i].lmt_case_cnt; j++) {
+ sde_kms_info_add_keyint(info,
+ catalog->limit_cfg[i].vector_cfg[j].usecase,
+ catalog->limit_cfg[i].vector_cfg[j].value);
+ }
+
+ if (!strcmp(catalog->limit_cfg[i].name,
+ "sspp_linewidth_usecases"))
+ sde_kms_info_add_keyint(info,
+ "sspp_linewidth_values",
+ catalog->limit_cfg[i].lmt_vec_cnt);
+ else if (!strcmp(catalog->limit_cfg[i].name,
+ "sde_bwlimit_usecases"))
+ sde_kms_info_add_keyint(info,
+ "sde_bwlimit_values",
+ catalog->limit_cfg[i].lmt_vec_cnt);
+
+ for (j = 0; j < catalog->limit_cfg[i].lmt_vec_cnt; j++) {
+ sde_kms_info_add_keyint(info, "limit_usecase",
+ catalog->limit_cfg[i].value_cfg[j].use_concur);
+ sde_kms_info_add_keyint(info, "limit_value",
+ catalog->limit_cfg[i].value_cfg[j].value);
+ }
+ }
+
sde_kms_info_add_keystr(info, "core_ib_ff",
catalog->perf.core_ib_ff);
sde_kms_info_add_keystr(info, "core_clk_ff",
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index 249820cea539..aea5290c0538 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -515,17 +515,6 @@ static inline int sde_crtc_get_mixer_height(struct sde_crtc *sde_crtc,
}
/**
- * sde_crtc_get_num_datapath - get the number of datapath active
- * @crtc: Pointer to drm crtc object
- */
-static inline int sde_crtc_get_num_datapath(struct drm_crtc *crtc)
-{
- struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
-
- return sde_crtc ? sde_crtc->num_mixers : 0;
-}
-
-/**
* sde_crtc_get_rotator_op_mode - get the rotator op mode from the crtc state
* @crtc: Pointer to drm crtc object
*/
@@ -861,4 +850,13 @@ int sde_crtc_calc_vpadding_param(struct drm_crtc_state *state,
uint32_t crtc_y, uint32_t crtc_h, uint32_t *padding_y,
uint32_t *padding_start, uint32_t *padding_height);
+/**
+ * sde_crtc_get_num_datapath - get the number of datapath active
+ * of primary connector
+ * @crtc: Pointer to DRM crtc object
+ * @connector: Pointer to DRM connector object of WB in CWB case
+ */
+int sde_crtc_get_num_datapath(struct drm_crtc *crtc,
+ struct drm_connector *connector);
+
#endif /* _SDE_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 17227bccc3d4..0b27a5a0a645 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -4579,11 +4579,6 @@ int sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
else
ln_cnt1 = -EINVAL;
- /* update the qsync parameters for the current frame */
- if (sde_enc->cur_master)
- sde_connector_set_qsync_params(
- sde_enc->cur_master->connector);
-
/* prepare for next kickoff, may include waiting on previous kickoff */
SDE_ATRACE_BEGIN("sde_encoder_prepare_for_kickoff");
for (i = 0; i < sde_enc->num_phys_encs; i++) {
@@ -4813,6 +4808,7 @@ void sde_encoder_prepare_commit(struct drm_encoder *drm_enc)
struct sde_encoder_phys *phys;
int i;
struct sde_hw_ctl *ctl;
+ int rc = 0;
if (!drm_enc) {
SDE_ERROR("invalid encoder\n");
@@ -4820,6 +4816,11 @@ void sde_encoder_prepare_commit(struct drm_encoder *drm_enc)
}
sde_enc = to_sde_encoder_virt(drm_enc);
+ /* update the qsync parameters for the current frame */
+ if (sde_enc->cur_master)
+ sde_connector_set_qsync_params(
+ sde_enc->cur_master->connector);
+
for (i = 0; i < sde_enc->num_phys_encs; i++) {
phys = sde_enc->phys_encs[i];
if (phys && phys->ops.prepare_commit)
@@ -4836,6 +4837,16 @@ void sde_encoder_prepare_commit(struct drm_encoder *drm_enc)
ctl->ops.clear_pending_flush(ctl);
}
}
+
+ if (sde_enc->cur_master && sde_enc->cur_master->connector) {
+ rc = sde_connector_prepare_commit(
+ sde_enc->cur_master->connector);
+ if (rc)
+ SDE_ERROR_ENC(sde_enc,
+ "prepare commit failed conn %d rc %d\n",
+ sde_enc->cur_master->connector->base.id,
+ rc);
+ }
}
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index 231ffce0bc18..34ebe8d2503d 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -417,7 +417,7 @@ static void _sde_encoder_phys_vid_setup_avr(
return;
}
- if (qsync_min_fps >= default_fps) {
+ if (qsync_min_fps > default_fps) {
SDE_ERROR_VIDENC(vid_enc,
"qsync fps %d must be less than default %d\n",
qsync_min_fps, default_fps);
@@ -1079,9 +1079,6 @@ static int sde_encoder_phys_vid_prepare_for_kickoff(
vid_enc->error_count = 0;
}
- if (sde_connector_is_qsync_updated(phys_enc->connector))
- _sde_encoder_phys_vid_avr_ctrl(phys_enc);
-
programmable_rot_fetch_config(phys_enc,
params->inline_rotate_prefill, params->is_primary);
@@ -1229,6 +1226,20 @@ static void sde_encoder_phys_vid_handle_post_kickoff(
}
}
+static void sde_encoder_phys_vid_prepare_for_commit(
+ struct sde_encoder_phys *phys_enc)
+{
+
+ if (!phys_enc) {
+ SDE_ERROR("invalid encoder parameters\n");
+ return;
+ }
+
+ if (sde_connector_is_qsync_updated(phys_enc->connector))
+ _sde_encoder_phys_vid_avr_ctrl(phys_enc);
+
+}
+
static void sde_encoder_phys_vid_irq_control(struct sde_encoder_phys *phys_enc,
bool enable)
{
@@ -1383,6 +1394,7 @@ static void sde_encoder_phys_vid_init_ops(struct sde_encoder_phys_ops *ops)
ops->get_wr_line_count = sde_encoder_phys_vid_get_line_count;
ops->wait_dma_trigger = sde_encoder_phys_vid_wait_dma_trigger;
ops->wait_for_active = sde_encoder_phys_vid_wait_for_active;
+ ops->prepare_commit = sde_encoder_phys_vid_prepare_for_commit;
}
struct sde_encoder_phys *sde_encoder_phys_vid_init(
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 4d831074d2f6..fe0c22d09cd0 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -173,6 +173,7 @@ enum sde_prop {
SEC_SID_MASK,
LINE_INSERTION,
BASE_LAYER,
+ SDE_LIMITS,
SDE_PROP_MAX,
};
@@ -268,6 +269,14 @@ enum {
};
enum {
+ LIMIT_NAME,
+ LIMIT_USECASE,
+ LIMIT_ID,
+ LIMIT_VALUE,
+ LIMIT_PROP_MAX,
+};
+
+enum {
PP_OFF,
PP_LEN,
TE_OFF,
@@ -463,6 +472,7 @@ static struct sde_prop_type sde_prop[] = {
{SEC_SID_MASK, "qcom,sde-secure-sid-mask", false, PROP_TYPE_U32_ARRAY},
{LINE_INSERTION, "qcom,sde-has-line-insertion", false, PROP_TYPE_BOOL},
{BASE_LAYER, "qcom,sde-mixer-stage-base-layer", false, PROP_TYPE_BOOL},
+ {SDE_LIMITS, "qcom,sde-limits", false, PROP_TYPE_NODE},
};
static struct sde_prop_type sde_perf_prop[] = {
@@ -745,6 +755,14 @@ static struct sde_prop_type inline_rot_prop[INLINE_ROT_PROP_MAX] = {
PROP_TYPE_BIT_OFFSET_ARRAY},
};
+static struct sde_prop_type limit_usecase_prop[] = {
+ {LIMIT_NAME, "qcom,sde-limit-name", false, PROP_TYPE_STRING},
+ {LIMIT_USECASE, "qcom,sde-limit-cases", false, PROP_TYPE_STRING_ARRAY},
+ {LIMIT_ID, "qcom,sde-limit-ids", false, PROP_TYPE_U32_ARRAY},
+ {LIMIT_VALUE, "qcom,sde-limit-values", false,
+ PROP_TYPE_BIT_OFFSET_ARRAY},
+};
+
/*************************************************************
* static API list
*************************************************************/
@@ -2935,6 +2953,148 @@ end:
return rc;
}
+static int sde_read_limit_node(struct device_node *snp,
+ struct sde_prop_value *lmt_val, struct sde_mdss_cfg *cfg)
+{
+ int j, i = 0, rc = 0;
+ const char *type = NULL;
+ struct device_node *node = NULL;
+
+ for_each_child_of_node(snp, node) {
+ cfg->limit_cfg[i].vector_cfg =
+ kcalloc(cfg->limit_cfg[i].lmt_case_cnt,
+ sizeof(struct limit_vector_cfg), GFP_KERNEL);
+ if (!cfg->limit_cfg[i].vector_cfg) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ for (j = 0; j < cfg->limit_cfg[i].lmt_case_cnt; j++) {
+ of_property_read_string_index(node,
+ limit_usecase_prop[LIMIT_USECASE].prop_name,
+ j, &type);
+ cfg->limit_cfg[i].vector_cfg[j].usecase = type;
+ cfg->limit_cfg[i].vector_cfg[j].value =
+ PROP_VALUE_ACCESS(&lmt_val[i * LIMIT_PROP_MAX],
+ LIMIT_ID, j);
+ }
+
+ cfg->limit_cfg[i].value_cfg =
+ kcalloc(cfg->limit_cfg[i].lmt_vec_cnt,
+ sizeof(struct limit_value_cfg), GFP_KERNEL);
+
+ if (!cfg->limit_cfg[i].value_cfg) {
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ for (j = 0; j < cfg->limit_cfg[i].lmt_vec_cnt; j++) {
+ cfg->limit_cfg[i].value_cfg[j].use_concur =
+ PROP_BITVALUE_ACCESS(
+ &lmt_val[i * LIMIT_PROP_MAX],
+ LIMIT_VALUE, j, 0);
+ cfg->limit_cfg[i].value_cfg[j].value =
+ PROP_BITVALUE_ACCESS(
+ &lmt_val[i * LIMIT_PROP_MAX],
+ LIMIT_VALUE, j, 1);
+ }
+ i++;
+ }
+
+ return 0;
+error:
+ for (j = 0; j < cfg->limit_count; j++) {
+ kfree(cfg->limit_cfg[j].vector_cfg);
+ kfree(cfg->limit_cfg[j].value_cfg);
+ }
+
+ cfg->limit_count = 0;
+ return rc;
+}
+
+static int sde_validate_limit_node(struct device_node *snp,
+ struct sde_prop_value *sde_limit_value, struct sde_mdss_cfg *cfg)
+{
+ int i = 0, rc = 0;
+ struct device_node *node = NULL;
+ int limit_value_count[LIMIT_PROP_MAX];
+ bool limit_value_exists[LIMIT_SUBBLK_COUNT_MAX][LIMIT_PROP_MAX];
+ const char *type = NULL;
+
+ for_each_child_of_node(snp, node) {
+ rc = _validate_dt_entry(node, limit_usecase_prop,
+ ARRAY_SIZE(limit_usecase_prop),
+ limit_value_count, NULL);
+ if (rc)
+ goto end;
+
+ rc = _read_dt_entry(node, limit_usecase_prop,
+ ARRAY_SIZE(limit_usecase_prop), limit_value_count,
+ &limit_value_exists[i][0],
+ &sde_limit_value[i * LIMIT_PROP_MAX]);
+ if (rc)
+ goto end;
+
+ cfg->limit_cfg[i].lmt_case_cnt =
+ limit_value_count[LIMIT_ID];
+
+ cfg->limit_cfg[i].lmt_vec_cnt =
+ limit_value_count[LIMIT_VALUE];
+ of_property_read_string(node,
+ limit_usecase_prop[LIMIT_NAME].prop_name, &type);
+ cfg->limit_cfg[i].name = type;
+
+ if (!limit_value_count[LIMIT_ID] ||
+ !limit_value_count[LIMIT_VALUE]) {
+ rc = -EINVAL;
+ goto end;
+ }
+ i++;
+ }
+ return 0;
+end:
+ cfg->limit_count = 0;
+ return rc;
+}
+
+static int sde_limit_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
+{
+ struct device_node *snp = NULL;
+ struct sde_prop_value *sde_limit_value = NULL;
+ int rc = 0;
+
+ snp = of_get_child_by_name(np, sde_prop[SDE_LIMITS].prop_name);
+ if (!snp)
+ goto end;
+
+ cfg->limit_count = of_get_child_count(snp);
+ if (cfg->limit_count < 0) {
+ rc = -EINVAL;
+ goto end;
+ }
+
+ sde_limit_value = kzalloc(cfg->limit_count * LIMIT_PROP_MAX *
+ sizeof(struct sde_prop_value), GFP_KERNEL);
+ if (!sde_limit_value) {
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ rc = sde_validate_limit_node(snp, sde_limit_value, cfg);
+ if (rc) {
+ SDE_ERROR("validating limit node failed\n");
+ goto end;
+ }
+
+ rc = sde_read_limit_node(snp, sde_limit_value, cfg);
+ if (rc)
+ SDE_ERROR("reading limit node failed\n");
+
+end:
+ kfree(sde_limit_value);
+ return rc;
+}
+
static int sde_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
{
int rc, i, dma_rc, len, prop_count[SDE_PROP_MAX];
@@ -3103,6 +3263,10 @@ static int sde_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
cfg->has_line_insertion = PROP_VALUE_ACCESS(prop_value,
LINE_INSERTION, 0);
cfg->has_base_layer = PROP_VALUE_ACCESS(prop_value, BASE_LAYER, 0);
+
+ rc = sde_limit_parse_dt(np, cfg);
+ if (rc)
+ SDE_DEBUG("parsing of sde limit failed\n");
end:
kfree(prop_value);
return rc;
@@ -3919,6 +4083,11 @@ void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg)
kfree(sde_cfg->vbif[i].qos_tbl[j].priority_lvl);
}
+ for (i = 0; i < sde_cfg->limit_count; i++) {
+ kfree(sde_cfg->limit_cfg[i].vector_cfg);
+ kfree(sde_cfg->limit_cfg[i].value_cfg);
+ }
+
for (i = 0; i < SDE_QOS_LUT_USAGE_MAX; i++) {
kfree(sde_cfg->perf.sfe_lut_tbl[i].entries);
kfree(sde_cfg->perf.qos_lut_tbl[i].entries);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 0affc80ce8fb..95c267021de5 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -83,6 +83,7 @@
#define MAX_XIN_COUNT 16
#define SSPP_SUBBLK_COUNT_MAX 2
+#define LIMIT_SUBBLK_COUNT_MAX 10
#define SDE_CTL_CFG_VERSION_1_0_0 0x100
#define MAX_INTF_PER_CTL_V1 2
@@ -1089,6 +1090,42 @@ struct sde_perf_cfg {
};
/**
+ * struct limit_vector_cfg - information on the usecase for each limit
+ * @usecase: usecase for each limit
+ * @value: id corresponding to each usecase
+ */
+struct limit_vector_cfg {
+ const char *usecase;
+ u32 value;
+};
+
+/**
+ * struct limit_value_cfg - information on the value of usecase
+ * @use_concur: usecase for each limit
+ * @value: value corresponding to usecase for each limit
+ */
+struct limit_value_cfg {
+ u32 use_concur;
+ u32 value;
+};
+
+/**
+ * struct sde_limit_cfg - information on different mdp limits
+ * @name: name of the limit property
+ * @lmt_vec_cnt: number of vector values for each limit
+ * @lmt_case_cnt: number of usecases for each limit
+ * @vector_cfg: pointer to the vector entries containing info on usecase
+ * @value_cfg: pointer to the value of each vector entry
+ */
+struct sde_limit_cfg {
+ const char *name;
+ u32 lmt_vec_cnt;
+ u32 lmt_case_cnt;
+ struct limit_vector_cfg *vector_cfg;
+ struct limit_value_cfg *value_cfg;
+};
+
+/**
* struct sde_mdss_cfg - information of MDSS HW
* This is the main catalog data structure representing
* this HW version. Contains number of instances,
@@ -1253,6 +1290,8 @@ struct sde_mdss_cfg {
u32 qdss_count;
struct sde_qdss_cfg qdss[MAX_BLOCKS];
+ u32 limit_count;
+ struct sde_limit_cfg limit_cfg[LIMIT_SUBBLK_COUNT_MAX];
/* Add additional block data structures here */
struct sde_perf_cfg perf;
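For reference, a minimal sketch of how a consumer might resolve one of the parsed limits at runtime; the helper name and lookup flow are illustrative assumptions, and only the sde_limit_cfg/limit_vector_cfg/limit_value_cfg fields come from the catalog structures added above.

/*
 * Illustrative helper (not part of the patch): map a limit name plus a
 * usecase string to its limit value using the tables parsed from
 * qcom,sde-limits.
 */
static u32 sde_limit_lookup(struct sde_mdss_cfg *cfg,
		const char *limit_name, const char *usecase)
{
	int i, j, k;
	u32 id;

	for (i = 0; i < cfg->limit_count; i++) {
		struct sde_limit_cfg *lmt = &cfg->limit_cfg[i];

		if (strcmp(lmt->name, limit_name))
			continue;

		/* usecase string -> id, via the vector table */
		for (j = 0; j < lmt->lmt_case_cnt; j++) {
			if (strcmp(lmt->vector_cfg[j].usecase, usecase))
				continue;
			id = lmt->vector_cfg[j].value;

			/* id -> limit value, via the value table */
			for (k = 0; k < lmt->lmt_vec_cnt; k++)
				if (lmt->value_cfg[k].use_concur == id)
					return lmt->value_cfg[k].value;
		}
	}
	return 0;
}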
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.c b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
index dbbb86ccb990..b88d4b525db2 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_intf.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
@@ -90,9 +90,6 @@
#define INTF_TEAR_AUTOREFRESH_CONFIG 0x2B4
#define INTF_TEAR_TEAR_DETECT_CTRL 0x2B8
-#define AVR_CONTINUOUS_MODE 1
-#define AVR_ONE_SHOT_MODE 2
-
static struct sde_intf_cfg *_intf_offset(enum sde_intf intf,
struct sde_mdss_cfg *m,
void __iomem *addr,
@@ -136,7 +133,7 @@ static int sde_hw_intf_avr_setup(struct sde_hw_intf *ctx,
u32 min_fps, default_fps, diff_fps;
u32 vsync_period_slow;
u32 avr_vtotal;
- u32 add_porches;
+ u32 add_porches = 0;
if (!ctx || !params || !avr_params) {
SDE_ERROR("invalid input parameter(s)\n");
@@ -153,7 +150,10 @@ static int sde_hw_intf_avr_setup(struct sde_hw_intf *ctx,
vsync_period = params->vsync_pulse_width +
params->v_back_porch + params->height +
params->v_front_porch;
- add_porches = mult_frac(vsync_period, diff_fps, min_fps);
+
+ if (diff_fps)
+ add_porches = mult_frac(vsync_period, diff_fps, min_fps);
+
vsync_period_slow = vsync_period + add_porches;
avr_vtotal = vsync_period_slow * hsync_period;
@@ -175,7 +175,8 @@ static void sde_hw_intf_avr_ctrl(struct sde_hw_intf *ctx,
c = &ctx->hw;
if (avr_params->avr_mode) {
avr_ctrl = BIT(0);
- avr_mode = (avr_params->avr_mode == AVR_ONE_SHOT_MODE) ?
+ avr_mode =
+ (avr_params->avr_mode == SDE_RM_QSYNC_ONE_SHOT_MODE) ?
(BIT(0) | BIT(8)) : 0x0;
}
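To make the AVR arithmetic in sde_hw_intf_avr_setup() concrete, here is a worked example with illustrative timing values (the numbers are not from the patch, and diff_fps is assumed to be default_fps - min_fps):

  default_fps = 60, min_fps (qsync) = 48  ->  diff_fps = 12
  vsync_period      = vsync_pulse_width + v_back_porch + height + v_front_porch
                    = 2 + 8 + 1080 + 10 = 1100 lines
  add_porches       = mult_frac(1100, 12, 48) = 275 lines
  vsync_period_slow = 1100 + 275 = 1375 lines
  avr_vtotal        = 1375 * hsync_period

With the guard added above, a diff_fps of 0 (qsync min fps equal to the default) leaves add_porches at 0, so avr_vtotal collapses to the nominal vertical total.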
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 4a5c661603c9..4e1ba5e307f3 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -1054,7 +1054,7 @@ static void sde_kms_commit(struct msm_kms *kms,
SDE_ATRACE_END("sde_kms_commit");
}
-static void _sde_kms_release_splash_resource(struct sde_kms *sde_kms,
+void sde_kms_release_splash_resource(struct sde_kms *sde_kms,
struct drm_crtc *crtc)
{
struct msm_drm_private *priv;
@@ -1117,6 +1117,8 @@ static void sde_kms_complete_commit(struct msm_kms *kms,
struct drm_crtc_state *old_crtc_state;
struct drm_connector *connector;
struct drm_connector_state *old_conn_state;
+ struct msm_display_conn_params params;
+
int i, rc = 0;
if (!kms || !old_state)
@@ -1148,7 +1150,18 @@ static void sde_kms_complete_commit(struct msm_kms *kms,
c_conn = to_sde_connector(connector);
if (!c_conn->ops.post_kickoff)
continue;
- rc = c_conn->ops.post_kickoff(connector);
+
+ memset(&params, 0, sizeof(params));
+
+ if (c_conn->qsync_updated &&
+ (c_conn->qsync_mode == SDE_RM_QSYNC_ONE_SHOT_MODE)) {
+ /* Reset qsync states if mode is one shot */
+ params.qsync_mode = c_conn->qsync_mode = 0;
+ params.qsync_update = true;
+ SDE_EVT32(connector->base.id, c_conn->qsync_mode);
+ }
+
+ rc = c_conn->ops.post_kickoff(connector, &params);
if (rc) {
pr_err("Connector Post kickoff failed rc=%d\n",
rc);
@@ -1158,7 +1171,7 @@ static void sde_kms_complete_commit(struct msm_kms *kms,
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
- _sde_kms_release_splash_resource(sde_kms, crtc);
+ sde_kms_release_splash_resource(sde_kms, crtc);
SDE_EVT32_VERBOSE(SDE_EVTLOG_FUNC_EXIT);
SDE_ATRACE_END("sde_kms_complete_commit");
@@ -1381,6 +1394,7 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
.cmd_transfer = dsi_display_cmd_transfer,
.cont_splash_config = dsi_display_cont_splash_config,
.get_panel_vfp = dsi_display_get_panel_vfp,
+ .prepare_commit = dsi_conn_prepare_commit,
};
static const struct sde_connector_ops wb_ops = {
.post_init = sde_wb_connector_post_init,
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index 9e6f3b09bcd1..1001bc1a48be 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -673,4 +673,12 @@ void sde_kms_timeline_status(struct drm_device *dev);
*/
int sde_kms_handle_recovery(struct drm_encoder *encoder);
+/**
+ * sde_kms_release_splash_resource - release splash resource
+ * @sde_kms: pointer to sde_kms structure
+ * @crtc: crtc that splash resource to be released from
+ */
+void sde_kms_release_splash_resource(struct sde_kms *sde_kms,
+ struct drm_crtc *crtc);
+
#endif /* __sde_kms_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_reg_dma.c b/drivers/gpu/drm/msm/sde/sde_reg_dma.c
index 2bbd7c16df6e..4bb0298b622a 100644
--- a/drivers/gpu/drm/msm/sde/sde_reg_dma.c
+++ b/drivers/gpu/drm/msm/sde/sde_reg_dma.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,6 +17,7 @@
#define REG_DMA_VER_1_0 0x00010000
#define REG_DMA_VER_1_1 0x00010001
+#define REG_DMA_VER_1_2 0x00010002
static int default_check_support(enum sde_reg_dma_features feature,
enum sde_reg_dma_blk blk,
@@ -109,6 +110,7 @@ int sde_reg_dma_init(void __iomem *addr, struct sde_mdss_cfg *m,
DRM_DEBUG("init v1 dma ops failed\n");
break;
case REG_DMA_VER_1_1:
+ case REG_DMA_VER_1_2:
rc = init_v11(&reg_dma);
if (rc)
DRM_DEBUG("init v11 dma ops failed\n");
@@ -142,6 +144,7 @@ void sde_reg_dma_deinit(void)
deinit_v1();
break;
case REG_DMA_VER_1_1:
+ case REG_DMA_VER_1_2:
deinit_v1();
break;
default:
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
index a833f180e99d..66fd28093ffc 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.c
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -1533,7 +1533,7 @@ static int _sde_rm_populate_requirements(
struct sde_rm_requirements *reqs)
{
const struct drm_display_mode *mode = &crtc_state->mode;
- int i;
+ int i, num_lm;
memset(reqs, 0, sizeof(*reqs));
@@ -1583,9 +1583,18 @@ static int _sde_rm_populate_requirements(
*/
reqs->topology =
&rm->topology_tbl[SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE];
- if (sde_crtc_get_num_datapath(crtc_state->crtc) == 1)
+
+ num_lm = sde_crtc_get_num_datapath(crtc_state->crtc,
+ conn_state->connector);
+
+ if (num_lm == 1)
reqs->topology =
&rm->topology_tbl[SDE_RM_TOPOLOGY_SINGLEPIPE];
+ else if (num_lm == 0)
+ SDE_ERROR("Primary layer mixer is not set\n");
+
+ SDE_EVT32(num_lm, reqs->topology->num_lm,
+ reqs->topology->top_name, reqs->topology->num_ctl);
}
SDE_DEBUG("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl,
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.h b/drivers/gpu/drm/msm/sde/sde_rm.h
index 7cb308875ea5..ab6beaf928dc 100644
--- a/drivers/gpu/drm/msm/sde/sde_rm.h
+++ b/drivers/gpu/drm/msm/sde/sde_rm.h
@@ -76,10 +76,13 @@ enum sde_rm_topology_control {
* disable state.
* @SDE_RM_QSYNC_CONTINUOUS_MODE: If set, Qsync is enabled in continuous
* mode.
+ * @SDE_RM_QSYNC_ONE_SHOT_MODE: If set, Qsync is enabled in one shot mode.
+ *
*/
enum sde_rm_qsync_modes {
SDE_RM_QSYNC_DISABLED,
SDE_RM_QSYNC_CONTINUOUS_MODE,
+ SDE_RM_QSYNC_ONE_SHOT_MODE
};
/**
diff --git a/drivers/gpu/drm/msm/shd/shd_drm.c b/drivers/gpu/drm/msm/shd/shd_drm.c
index 58ce011473e7..e57273821722 100644
--- a/drivers/gpu/drm/msm/shd/shd_drm.c
+++ b/drivers/gpu/drm/msm/shd/shd_drm.c
@@ -172,17 +172,23 @@ static int shd_display_init_base_crtc(struct drm_device *dev,
priv = dev->dev_private;
- /* find last crtc for base encoder */
- for (i = priv->num_crtcs - 1; i >= 0; i--) {
- if (base->encoder->possible_crtcs & (1 << i)) {
- crtc = priv->crtcs[i];
- crtc_idx = i;
- break;
+ if (base->encoder->crtc) {
+ /* if cont splash is enabled on crtc */
+ crtc = base->encoder->crtc;
+ crtc_idx = drm_crtc_index(crtc);
+ } else {
+ /* find last crtc for base encoder */
+ for (i = priv->num_crtcs - 1; i >= 0; i--) {
+ if (base->encoder->possible_crtcs & (1 << i)) {
+ crtc = priv->crtcs[i];
+ crtc_idx = i;
+ break;
+ }
}
- }
- if (!crtc)
- return -ENOENT;
+ if (!crtc)
+ return -ENOENT;
+ }
/* disable crtc from other encoders */
for (i = 0; i < priv->num_encoders; i++) {
@@ -219,6 +225,11 @@ static void shd_display_setup_base_mixer_out(struct shd_display_base *base)
hw_lm->cfg.out_height = lm_cfg.out_height;
hw_lm->cfg.right_mixer = lm_cfg.right_mixer;
hw_lm->ops.setup_mixer_out(hw_lm, &lm_cfg);
+ if (sde_crtc->mixers[i].hw_ctl->ops.clear_all_blendstages)
+ sde_crtc->mixers[i].hw_ctl->ops.clear_all_blendstages(
+ sde_crtc->mixers[i].hw_ctl);
+ if (hw_lm->ops.clear_dim_layer)
+ hw_lm->ops.clear_dim_layer(hw_lm);
}
}
@@ -311,6 +322,7 @@ static void shd_display_enable_base(struct drm_device *dev,
drm_bridge_enable(base->encoder->bridge);
base->enabled = true;
+ base->enable_changed = true;
}
static void shd_display_disable_base(struct drm_device *dev,
@@ -350,6 +362,7 @@ static void shd_display_enable(struct shd_display *display)
mutex_lock(&base->base_mutex);
display->enabled = true;
+ display->enable_changed = true;
if (!base->enabled)
shd_display_enable_base(dev, base);
@@ -388,6 +401,110 @@ end:
mutex_unlock(&base->base_mutex);
}
+static void shd_display_complete(struct sde_kms *sde_kms,
+ struct shd_display *display)
+{
+ if (display->enable_changed) {
+ struct shd_display_base *base = display->base;
+
+ display->enable_changed = false;
+
+ mutex_lock(&base->base_mutex);
+
+ if (base->enable_changed) {
+ base->enable_changed = false;
+ sde_kms_release_splash_resource(sde_kms, base->crtc);
+
+ /*
+ * Base display is invisible to both user space
+ * and kernel, here we mark all state as inactive
+ * to avoid update from suspend and resume.
+ */
+ base->crtc->enabled = false;
+ drm_atomic_set_mode_prop_for_crtc(base->crtc->state,
+ NULL);
+ base->crtc->state->active = false;
+ base->connector->state->crtc = NULL;
+ base->connector->state->best_encoder = NULL;
+ }
+
+ mutex_unlock(&base->base_mutex);
+ }
+}
+
+static int shd_display_pm_suspend(struct device *dev)
+{
+ struct drm_device *ddev;
+ struct drm_crtc_state *crtc_state;
+ struct drm_connector_state *conn_state;
+ struct sde_kms *sde_kms;
+ struct shd_display_base *base;
+ struct drm_atomic_state *state;
+ struct drm_atomic_state *suspend_state;
+ int ret;
+
+ if (!dev)
+ return -EINVAL;
+
+ ret = g_shd_kms->orig_funcs->pm_suspend(dev);
+ if (ret)
+ return ret;
+
+ ddev = dev_get_drvdata(dev);
+ if (!ddev || !ddev_to_msm_kms(ddev))
+ return -EINVAL;
+
+ sde_kms = to_sde_kms(ddev_to_msm_kms(ddev));
+
+ state = drm_atomic_state_alloc(ddev);
+ if (!state)
+ return -ENOMEM;
+
+ suspend_state = sde_kms->suspend_state;
+
+ /* initialize connectors structure */
+ state->connectors = kcalloc(suspend_state->num_connector,
+ sizeof(*state->connectors), GFP_KERNEL);
+ if (!state->connectors) {
+ ret = -ENOMEM;
+ goto clear;
+ }
+ state->num_connector = suspend_state->num_connector;
+
+ /*
+ * move base states to temp state and clear later
+ */
+ list_for_each_entry(base, &g_base_list, head) {
+ crtc_state = drm_atomic_get_existing_crtc_state(
+ suspend_state, base->crtc);
+ if (crtc_state) {
+ int index = drm_crtc_index(base->crtc);
+
+ state->crtcs[index] =
+ suspend_state->crtcs[index];
+ memset(&suspend_state->crtcs[index],
+ 0, sizeof(*suspend_state->crtcs));
+ }
+
+ conn_state = drm_atomic_get_existing_connector_state(
+ suspend_state, base->connector);
+ if (conn_state) {
+ int index = drm_connector_index(base->connector);
+
+ state->connectors[index] =
+ suspend_state->connectors[index];
+ memset(&suspend_state->connectors[index],
+ 0, sizeof(*suspend_state->connectors));
+ }
+ }
+
+clear:
+ /* clear base states */
+ drm_atomic_state_put(state);
+
+ return ret;
+}
+
static int shd_crtc_validate_shared_display(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -557,13 +674,15 @@ static void shd_display_complete_commit(struct msm_kms *kms,
shd_crtc_atomic_check)
continue;
+ sde_crtc = to_sde_crtc(crtc);
+ shd_crtc = sde_crtc->priv_handle;
+ shd_display_complete(sde_kms, shd_crtc->display);
+
if (!old_crtc_state->active ||
new_crtc_state->active ||
!drm_atomic_crtc_needs_modeset(new_crtc_state))
continue;
- sde_crtc = to_sde_crtc(crtc);
- shd_crtc = sde_crtc->priv_handle;
shd_display_disable(shd_crtc->display);
}
@@ -1023,6 +1142,7 @@ static int shd_drm_base_init(struct drm_device *ddev,
g_shd_kms->orig_funcs = priv->kms->funcs;
g_shd_kms->funcs.prepare_commit = shd_display_prepare_commit;
g_shd_kms->funcs.complete_commit = shd_display_complete_commit;
+ g_shd_kms->funcs.pm_suspend = shd_display_pm_suspend;
priv->kms->funcs = &g_shd_kms->funcs;
}
diff --git a/drivers/gpu/drm/msm/shd/shd_drm.h b/drivers/gpu/drm/msm/shd/shd_drm.h
index c3924c27c453..fd9779f698bc 100644
--- a/drivers/gpu/drm/msm/shd/shd_drm.h
+++ b/drivers/gpu/drm/msm/shd/shd_drm.h
@@ -49,6 +49,7 @@ struct shd_display_base {
int connector_type;
bool mst_port;
bool enabled;
+ bool enable_changed;
};
struct shd_display {
@@ -70,6 +71,7 @@ struct shd_display {
struct list_head head;
bool enabled;
+ bool enable_changed;
};
/* drm internal header */
diff --git a/drivers/gpu/msm/adreno_a5xx_snapshot.c b/drivers/gpu/msm/adreno_a5xx_snapshot.c
index d8cb99c443cb..544740607a93 100644
--- a/drivers/gpu/msm/adreno_a5xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a5xx_snapshot.c
@@ -363,9 +363,9 @@ static const unsigned int a5xx_registers[] = {
0x04E0, 0x04F4, 0X04F8, 0x0529, 0x0531, 0x0533, 0x0540, 0x0555,
0xF400, 0xF400, 0xF800, 0xF807,
/* CP */
- 0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0,
- 0x0B00, 0x0B12, 0x0B15, 0X0B1C, 0X0B1E, 0x0B28, 0x0B78, 0x0B7F,
- 0x0BB0, 0x0BBD,
+ 0x0800, 0x0803, 0x0806, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860,
+ 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0X0B1C, 0X0B1E, 0x0B28,
+ 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD,
/* VSC */
0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61,
/* GRAS */
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index 472c933aecb1..063c36582b18 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -296,13 +296,13 @@ static const unsigned int a6xx_registers[] = {
0x0500, 0x0500, 0x0505, 0x050B, 0x050E, 0x0511, 0x0533, 0x0533,
0x0540, 0x0555,
/* CP */
- 0x0800, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821, 0x0823, 0x0824,
- 0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843, 0x084F, 0x086F,
- 0x0880, 0x088A, 0x08A0, 0x08AB, 0x08C0, 0x08C4, 0x08D0, 0x08DD,
- 0x08F0, 0x08F3, 0x0900, 0x0903, 0x0908, 0x0911, 0x0928, 0x093E,
- 0x0942, 0x094D, 0x0980, 0x0984, 0x098D, 0x0996, 0x0998, 0x099E,
- 0x09A0, 0x09A6, 0x09A8, 0x09AE, 0x09B0, 0x09B1, 0x09C2, 0x09C8,
- 0x0A00, 0x0A03,
+ 0x0800, 0x0803, 0x0806, 0x0808, 0x0810, 0x0813, 0x0820, 0x0821,
+ 0x0823, 0x0824, 0x0826, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843,
+ 0x084F, 0x086F, 0x0880, 0x088A, 0x08A0, 0x08AB, 0x08C0, 0x08C4,
+ 0x08D0, 0x08DD, 0x08F0, 0x08F3, 0x0900, 0x0903, 0x0908, 0x0911,
+ 0x0928, 0x093E, 0x0942, 0x094D, 0x0980, 0x0984, 0x098D, 0x0996,
+ 0x0998, 0x099E, 0x09A0, 0x09A6, 0x09A8, 0x09AE, 0x09B0, 0x09B1,
+ 0x09C2, 0x09C8, 0x0A00, 0x0A03,
/* VSC */
0x0C00, 0x0C04, 0x0C06, 0x0C06, 0x0C10, 0x0CD9, 0x0E00, 0x0E0E,
/* UCHE */
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 11c69b4e8b7b..4e4be1e64cef 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -1396,6 +1396,22 @@ int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv,
user_ts = *timestamp;
+ /*
+ * If there is only one drawobj in the array and it is of
+ * type SYNCOBJ_TYPE, skip comparing user_ts as it can be 0
+ */
+ if (!(count == 1 && drawobj[0]->type == SYNCOBJ_TYPE) &&
+ (drawctxt->base.flags & KGSL_CONTEXT_USER_GENERATED_TS)) {
+ /*
+ * User specified timestamps need to be greater than the last
+ * issued timestamp in the context
+ */
+ if (timestamp_cmp(drawctxt->timestamp, user_ts) >= 0) {
+ spin_unlock(&drawctxt->lock);
+ return -ERANGE;
+ }
+ }
+
for (i = 0; i < count; i++) {
switch (drawobj[i]->type) {
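A quick illustration of the user-timestamp check added above (numbers are made up):

/*
 * Context created with KGSL_CONTEXT_USER_GENERATED_TS,
 * drawctxt->timestamp (last issued) = 100:
 *
 *   user_ts = 100 or lower             -> rejected with -ERANGE
 *   user_ts = 101                      -> accepted
 *   count == 1 && type == SYNCOBJ_TYPE -> check skipped, user_ts may be 0
 */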
@@ -1675,21 +1691,33 @@ static inline const char *_kgsl_context_comm(struct kgsl_context *context)
static void adreno_fault_header(struct kgsl_device *device,
struct adreno_ringbuffer *rb, struct kgsl_drawobj_cmd *cmdobj,
- int fault, bool gx_on)
+ int fault)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+ struct adreno_context *drawctxt =
+ drawobj ? ADRENO_CONTEXT(drawobj->context) : NULL;
+ struct gmu_dev_ops *gmu_dev_ops = GMU_DEVICE_OPS(device);
unsigned int status, rptr, wptr, ib1sz, ib2sz;
uint64_t ib1base, ib2base;
+ bool gx_on = true;
+ int id = (rb != NULL) ? rb->id : -1;
+ const char *type = fault & ADRENO_GMU_FAULT ? "gmu" : "gpu";
+
+ if (GMU_DEV_OP_VALID(gmu_dev_ops, gx_is_on))
+ gx_on = gmu_dev_ops->gx_is_on(adreno_dev);
- /*
- * GPU registers can't be accessed if the gx headswitch is off.
- * During the gx off case access to GPU gx blocks will show data
- * as 0x5c00bd00. Hence skip adreno fault header dump.
- */
if (!gx_on) {
- dev_err(device->dev, "%s fault and gx is off\n",
- fault & ADRENO_GMU_FAULT ? "GMU" : "GPU");
+ if (drawobj != NULL)
+ pr_fault(device, drawobj,
+ "%s fault ctx %d ctx_type %s ts %d and GX is OFF\n",
+ type, drawobj->context->id,
+ get_api_type_str(drawctxt->type),
+ drawobj->timestamp);
+ else
+ dev_err(device->dev, "RB[%d] : %s fault and GX is OFF\n",
+ id, type);
+
return;
}
@@ -1704,9 +1732,6 @@ static void adreno_fault_header(struct kgsl_device *device,
adreno_readreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ, &ib2sz);
if (drawobj != NULL) {
- struct adreno_context *drawctxt =
- ADRENO_CONTEXT(drawobj->context);
-
drawctxt->base.total_fault_count++;
drawctxt->base.last_faulted_cmd_ts = drawobj->timestamp;
@@ -1716,26 +1741,27 @@ static void adreno_fault_header(struct kgsl_device *device,
ib2base, ib2sz, drawctxt->rb->id);
pr_fault(device, drawobj,
- "gpu fault ctx %d ctx_type %s ts %d status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
- drawobj->context->id, get_api_type_str(drawctxt->type),
+ "%s fault ctx %d ctx_type %s ts %d status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+ type, drawobj->context->id,
+ get_api_type_str(drawctxt->type),
drawobj->timestamp, status,
rptr, wptr, ib1base, ib1sz, ib2base, ib2sz);
if (rb != NULL)
pr_fault(device, drawobj,
- "gpu fault rb %d rb sw r/w %4.4x/%4.4x\n",
- rb->id, rptr, rb->wptr);
+ "%s fault rb %d rb sw r/w %4.4x/%4.4x\n",
+ type, rb->id, rptr, rb->wptr);
} else {
int id = (rb != NULL) ? rb->id : -1;
dev_err(device->dev,
- "RB[%d]: gpu fault status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
- id, status, rptr, wptr, ib1base, ib1sz, ib2base,
+ "RB[%d]: %s fault status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+ id, type, status, rptr, wptr, ib1base, ib1sz, ib2base,
ib2sz);
if (rb != NULL)
dev_err(device->dev,
- "RB[%d] gpu fault rb sw r/w %4.4x/%4.4x\n",
- rb->id, rptr, rb->wptr);
+ "RB[%d] %s fault rb sw r/w %4.4x/%4.4x\n",
+ rb->id, type, rptr, rb->wptr);
}
}
@@ -2051,14 +2077,13 @@ replay:
}
static void do_header_and_snapshot(struct kgsl_device *device, int fault,
- struct adreno_ringbuffer *rb, struct kgsl_drawobj_cmd *cmdobj,
- bool gx_on)
+ struct adreno_ringbuffer *rb, struct kgsl_drawobj_cmd *cmdobj)
{
struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
/* Always dump the snapshot on a non-drawobj failure */
if (cmdobj == NULL) {
- adreno_fault_header(device, rb, NULL, fault, gx_on);
+ adreno_fault_header(device, rb, NULL, fault);
kgsl_device_snapshot(device, NULL, fault & ADRENO_GMU_FAULT);
return;
}
@@ -2068,7 +2093,7 @@ static void do_header_and_snapshot(struct kgsl_device *device, int fault,
return;
/* Print the fault header */
- adreno_fault_header(device, rb, cmdobj, fault, gx_on);
+ adreno_fault_header(device, rb, cmdobj, fault);
if (!(drawobj->context->flags & KGSL_CONTEXT_NO_SNAPSHOT))
kgsl_device_snapshot(device, drawobj->context,
@@ -2196,7 +2221,7 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE,
ADRENO_REG_CP_IB1_BASE_HI, &base);
- do_header_and_snapshot(device, fault, hung_rb, cmdobj, gx_on);
+ do_header_and_snapshot(device, fault, hung_rb, cmdobj);
/* Turn off the KEEPALIVE vote from the ISR for hard fault */
if (gpudev->gpu_keepalive && fault & ADRENO_HARD_FAULT)
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 07bf5dd1a078..735bf5e5f4a3 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -675,10 +675,8 @@ static void _get_entries(struct kgsl_process_private *private,
static void _find_mem_entries(struct kgsl_mmu *mmu, uint64_t faultaddr,
struct _mem_entry *preventry, struct _mem_entry *nextentry,
- struct kgsl_context *context)
+ struct kgsl_process_private *private)
{
- struct kgsl_process_private *private;
-
memset(preventry, 0, sizeof(*preventry));
memset(nextentry, 0, sizeof(*nextentry));
@@ -687,8 +685,7 @@ static void _find_mem_entries(struct kgsl_mmu *mmu, uint64_t faultaddr,
if (ADDR_IN_GLOBAL(mmu, faultaddr)) {
_get_global_entries(faultaddr, preventry, nextentry);
- } else if (context) {
- private = context->proc_priv;
+ } else if (private) {
spin_lock(&private->mem_lock);
_get_entries(private, faultaddr, preventry, nextentry);
spin_unlock(&private->mem_lock);
@@ -755,7 +752,7 @@ kgsl_iommu_uche_overfetch(struct kgsl_process_private *private,
*/
static bool kgsl_iommu_suppress_pagefault(uint64_t faultaddr, int write,
- struct kgsl_context *context)
+ struct kgsl_process_private *private)
{
/*
* If there is no context associated with the pagefault then this
@@ -763,10 +760,28 @@ static bool kgsl_iommu_suppress_pagefault(uint64_t faultaddr, int write,
* on global buffers as they are mainly accessed by the CP bypassing
* the UCHE. Also, write pagefaults are never suppressed.
*/
- if (!context || write)
+ if (!private || write)
return false;
- return kgsl_iommu_uche_overfetch(context->proc_priv, faultaddr);
+ return kgsl_iommu_uche_overfetch(private, faultaddr);
+}
+
+static struct kgsl_process_private *kgsl_iommu_identify_process(u64 ptbase)
+{
+ struct kgsl_process_private *p = NULL;
+ struct kgsl_iommu_pt *iommu_pt;
+
+ mutex_lock(&kgsl_driver.process_mutex);
+ list_for_each_entry(p, &kgsl_driver.process_list, list) {
+ iommu_pt = p->pagetable->priv;
+ if (iommu_pt->ttbr0 == ptbase) {
+ mutex_unlock(&kgsl_driver.process_mutex);
+ return p;
+ }
+ }
+
+ mutex_unlock(&kgsl_driver.process_mutex);
+ return p;
}
static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
@@ -777,7 +792,7 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
struct kgsl_mmu *mmu = pt->mmu;
struct kgsl_iommu *iommu;
struct kgsl_iommu_context *ctx;
- u64 ptbase, proc_ptbase;
+ u64 ptbase;
u32 contextidr;
pid_t pid = 0;
pid_t ptname;
@@ -787,9 +802,8 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
struct adreno_device *adreno_dev;
struct adreno_gpudev *gpudev;
unsigned int no_page_fault_log = 0;
- unsigned int curr_context_id = 0;
- struct kgsl_context *context;
char *fault_type = "unknown";
+ struct kgsl_process_private *private;
static DEFINE_RATELIMIT_STATE(_rs,
DEFAULT_RATELIMIT_INTERVAL,
@@ -804,21 +818,6 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
adreno_dev = ADRENO_DEVICE(device);
gpudev = ADRENO_GPU_DEVICE(adreno_dev);
- if (pt->name == KGSL_MMU_SECURE_PT)
- ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
-
- /*
- * set the fault bits and stuff before any printks so that if fault
- * handler runs then it will know it's dealing with a pagefault.
- * Read the global current timestamp because we could be in middle of
- * RB switch and hence the cur RB may not be reliable but global
- * one will always be reliable
- */
- kgsl_sharedmem_readl(&device->memstore, &curr_context_id,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context));
-
- context = kgsl_context_get(device, curr_context_id);
-
write = (flags & IOMMU_FAULT_WRITE) ? 1 : 0;
if (flags & IOMMU_FAULT_TRANSLATION)
fault_type = "translation";
@@ -829,17 +828,21 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
else if (flags & IOMMU_FAULT_TRANSACTION_STALLED)
fault_type = "transaction stalled";
- if (kgsl_iommu_suppress_pagefault(addr, write, context)) {
+ ptbase = KGSL_IOMMU_GET_CTX_REG_Q(ctx, TTBR0);
+ private = kgsl_iommu_identify_process(ptbase);
+
+ if (!kgsl_process_private_get(private))
+ private = NULL;
+ else
+ pid = private->pid;
+
+ if (kgsl_iommu_suppress_pagefault(addr, write, private)) {
iommu->pagefault_suppression_count++;
- kgsl_context_put(context);
return ret;
}
- if (context != NULL) {
- /* save pagefault timestamp for GFT */
- set_bit(KGSL_CONTEXT_PRIV_PAGEFAULT, &context->priv);
- pid = context->proc_priv->pid;
- }
+ if (pt->name == KGSL_MMU_SECURE_PT)
+ ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
ctx->fault = 1;
@@ -855,9 +858,7 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
mutex_unlock(&device->mutex);
}
- ptbase = KGSL_IOMMU_GET_CTX_REG_Q(ctx, TTBR0);
contextidr = KGSL_IOMMU_GET_CTX_REG(ctx, CONTEXTIDR);
-
ptname = MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE) ?
KGSL_MMU_GLOBAL_PT : pid;
/*
@@ -867,7 +868,7 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
*/
trace_kgsl_mmu_pagefault(ctx->kgsldev, addr,
ptname,
- context != NULL ? context->proc_priv->comm : "unknown",
+ private != NULL ? private->comm : "unknown",
write ? "write" : "read");
if (test_bit(KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE,
@@ -875,34 +876,13 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
no_page_fault_log = kgsl_mmu_log_fault_addr(mmu, ptbase, addr);
if (!no_page_fault_log && __ratelimit(&_rs)) {
- const char *api_str;
-
- if (context != NULL) {
- struct adreno_context *drawctxt =
- ADRENO_CONTEXT(context);
-
- api_str = get_api_type_str(drawctxt->type);
- } else
- api_str = "UNKNOWN";
-
KGSL_MEM_CRIT(ctx->kgsldev,
"GPU PAGE FAULT: addr = %lX pid= %d name=%s\n", addr,
ptname,
- context != NULL ? context->proc_priv->comm : "unknown");
-
- if (context != NULL) {
- proc_ptbase = kgsl_mmu_pagetable_get_ttbr0(
- context->proc_priv->pagetable);
-
- if (ptbase != proc_ptbase)
- KGSL_MEM_CRIT(ctx->kgsldev,
- "Pagetable address mismatch: HW address is 0x%llx but SW expected 0x%llx\n",
- ptbase, proc_ptbase);
- }
-
+ private != NULL ? private->comm : "unknown");
KGSL_MEM_CRIT(ctx->kgsldev,
- "context=%s ctx_type=%s TTBR0=0x%llx CIDR=0x%x (%s %s fault)\n",
- ctx->name, api_str, ptbase, contextidr,
+ "context=%s TTBR0=0x%llx CIDR=0x%x (%s %s fault)\n",
+ ctx->name, ptbase, contextidr,
write ? "write" : "read", fault_type);
if (gpudev->iommu_fault_block) {
@@ -921,7 +901,7 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
KGSL_LOG_DUMP(ctx->kgsldev,
"---- nearby memory ----\n");
- _find_mem_entries(mmu, addr, &prev, &next, context);
+ _find_mem_entries(mmu, addr, &prev, &next, private);
if (prev.gpuaddr)
_print_entry(ctx->kgsldev, &prev);
else
@@ -963,7 +943,8 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
adreno_dispatcher_schedule(device);
}
- kgsl_context_put(context);
+ kgsl_process_private_put(private);
+
return ret;
}
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index 1332749eb63e..d9132af66264 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -28,6 +28,7 @@
#include "kgsl_device.h"
#include "kgsl_trace.h"
#include "kgsl_gmu_core.h"
+#include "kgsl_trace_power.h"
#define KGSL_PWRFLAGS_POWER_ON 0
#define KGSL_PWRFLAGS_CLK_ON 1
@@ -492,6 +493,8 @@ void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
pwr->previous_pwrlevel,
pwr->pwrlevels[old_level].gpu_freq);
+ trace_gpu_frequency(pwrlevel->gpu_freq/1000, 0);
+
/*
* Some targets do not support the bandwidth requirement of
* GPU at TURBO, for such targets we need to set GPU-BIMC
@@ -2840,6 +2843,7 @@ _aware(struct kgsl_device *device)
case KGSL_STATE_RESET:
if (!gmu_core_gpmu_isenabled(device))
break;
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
status = gmu_core_start(device);
break;
case KGSL_STATE_INIT:
@@ -2904,6 +2908,7 @@ _aware(struct kgsl_device *device)
* to make sure next attempt to wake up
* GMU/GPU is indeed a fresh start.
*/
+ kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
gmu_core_suspend(device);
kgsl_pwrctrl_set_state(device, state);
} else {
diff --git a/drivers/gpu/msm/kgsl_trace.c b/drivers/gpu/msm/kgsl_trace.c
index 3541425ff643..5fd48852b3f3 100644
--- a/drivers/gpu/msm/kgsl_trace.c
+++ b/drivers/gpu/msm/kgsl_trace.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, 2013, 2015 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011, 2013, 2015, 2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,8 +19,11 @@
/* Instantiate tracepoints */
#define CREATE_TRACE_POINTS
#include "kgsl_trace.h"
+#include "kgsl_trace_power.h"
EXPORT_TRACEPOINT_SYMBOL(kgsl_regwrite);
EXPORT_TRACEPOINT_SYMBOL(kgsl_issueibcmds);
EXPORT_TRACEPOINT_SYMBOL(kgsl_user_pwrlevel_constraint);
EXPORT_TRACEPOINT_SYMBOL(kgsl_constraint);
+
+EXPORT_TRACEPOINT_SYMBOL(gpu_frequency);
diff --git a/drivers/gpu/msm/kgsl_trace_power.h b/drivers/gpu/msm/kgsl_trace_power.h
new file mode 100644
index 000000000000..08ae0cb493fa
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_trace_power.h
@@ -0,0 +1,49 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#if !defined(_KGSL_TRACE_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _KGSL_TRACE_POWER_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM power
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE kgsl_trace_power
+
+#include <linux/tracepoint.h>
+
+/**
+ * gpu_frequency - Reports frequency changes in GPU clock domains
+ * @state: New frequency (in kHz)
+ * @gpu_id: GPU clock domain
+ */
+TRACE_EVENT(gpu_frequency,
+ TP_PROTO(unsigned int state, unsigned int gpu_id),
+ TP_ARGS(state, gpu_id),
+ TP_STRUCT__entry(
+ __field(unsigned int, state)
+ __field(unsigned int, gpu_id)
+ ),
+ TP_fast_assign(
+ __entry->state = state;
+ __entry->gpu_id = gpu_id;
+ ),
+
+ TP_printk("state=%lu gpu_id=%lu",
+ (unsigned long)__entry->state,
+ (unsigned long)__entry->gpu_id)
+);
+
+#endif /* _KGSL_TRACE_POWER_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
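A minimal sketch of how a driver emits the new tracepoint; it mirrors the kgsl_pwrctrl.c hunk earlier in this change (frequency converted from Hz to kHz, clock domain 0), and the wrapper function name is purely illustrative.

#include "kgsl_trace_power.h"

/* Illustrative wrapper: report a GPU clock change on domain 0. */
static void example_report_gpu_freq(unsigned long gpu_freq_hz)
{
	trace_gpu_frequency(gpu_freq_hz / 1000, 0);
}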
diff --git a/drivers/hwtracing/coresight/coresight-byte-cntr.c b/drivers/hwtracing/coresight/coresight-byte-cntr.c
index ecf92b7756b0..e8fe837c68df 100644
--- a/drivers/hwtracing/coresight/coresight-byte-cntr.c
+++ b/drivers/hwtracing/coresight/coresight-byte-cntr.c
@@ -282,7 +282,7 @@ static int tmc_etr_byte_cntr_open(struct inode *in, struct file *fp)
mutex_lock(&byte_cntr_data->byte_cntr_lock);
- if (!byte_cntr_data->enable || !byte_cntr_data->block_size) {
+ if (!tmcdrvdata->enable || !byte_cntr_data->block_size) {
mutex_unlock(&byte_cntr_data->byte_cntr_lock);
return -EINVAL;
}
@@ -295,6 +295,7 @@ static int tmc_etr_byte_cntr_open(struct inode *in, struct file *fp)
fp->private_data = byte_cntr_data;
nonseekable_open(in, fp);
+ byte_cntr_data->enable = true;
byte_cntr_data->read_active = true;
mutex_unlock(&byte_cntr_data->byte_cntr_lock);
return 0;
diff --git a/drivers/hwtracing/coresight/coresight-csr.c b/drivers/hwtracing/coresight/coresight-csr.c
index 51459334ff83..9101a3293472 100644
--- a/drivers/hwtracing/coresight/coresight-csr.c
+++ b/drivers/hwtracing/coresight/coresight-csr.c
@@ -73,12 +73,15 @@ do { \
#define BLKSIZE_1024 2
#define BLKSIZE_2048 3
+#define FLUSHPERIOD_2048 0x800
+
struct csr_drvdata {
void __iomem *base;
phys_addr_t pbase;
struct device *dev;
struct coresight_device *csdev;
uint32_t blksize;
+ uint32_t flushperiod;
struct coresight_csr csr;
struct clk *clk;
spinlock_t spin_lock;
@@ -86,6 +89,7 @@ struct csr_drvdata {
bool hwctrl_set_support;
bool set_byte_cntr_support;
bool timestamp_support;
+ bool enable_flush;
};
static LIST_HEAD(csr_list);
@@ -93,10 +97,23 @@ static DEFINE_MUTEX(csr_lock);
#define to_csr_drvdata(c) container_of(c, struct csr_drvdata, csr)
+static void msm_qdss_csr_config_flush_period(struct csr_drvdata *drvdata)
+{
+ uint32_t usbflshctrl;
+
+ CSR_UNLOCK(drvdata);
+
+ usbflshctrl = csr_readl(drvdata, CSR_USBFLSHCTRL);
+ usbflshctrl = (usbflshctrl & ~0x3FFFC) | (drvdata->flushperiod << 2);
+ csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
+
+ CSR_LOCK(drvdata);
+}
+
void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr)
{
struct csr_drvdata *drvdata;
- uint32_t usbbamctrl, usbflshctrl;
+ uint32_t usbbamctrl;
unsigned long flags;
if (csr == NULL)
@@ -113,12 +130,6 @@ void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr)
usbbamctrl = (usbbamctrl & ~0x3) | drvdata->blksize;
csr_writel(drvdata, usbbamctrl, CSR_USBBAMCTRL);
- usbflshctrl = csr_readl(drvdata, CSR_USBFLSHCTRL);
- usbflshctrl = (usbflshctrl & ~0x3FFFC) | (0xFFFF << 2);
- csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
- usbflshctrl |= 0x2;
- csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
-
usbbamctrl |= 0x4;
csr_writel(drvdata, usbbamctrl, CSR_USBBAMCTRL);
@@ -127,6 +138,35 @@ void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr)
}
EXPORT_SYMBOL(msm_qdss_csr_enable_bam_to_usb);
+void msm_qdss_csr_enable_flush(struct coresight_csr *csr)
+{
+ struct csr_drvdata *drvdata;
+ uint32_t usbflshctrl;
+ unsigned long flags;
+
+ if (csr == NULL)
+ return;
+
+ drvdata = to_csr_drvdata(csr);
+ if (IS_ERR_OR_NULL(drvdata) || !drvdata->usb_bam_support)
+ return;
+
+ spin_lock_irqsave(&drvdata->spin_lock, flags);
+
+ msm_qdss_csr_config_flush_period(drvdata);
+
+ CSR_UNLOCK(drvdata);
+
+ usbflshctrl = csr_readl(drvdata, CSR_USBFLSHCTRL);
+ usbflshctrl |= 0x2;
+ csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
+
+ CSR_LOCK(drvdata);
+ drvdata->enable_flush = true;
+ spin_unlock_irqrestore(&drvdata->spin_lock, flags);
+}
+EXPORT_SYMBOL(msm_qdss_csr_enable_flush);
+
void msm_qdss_csr_disable_bam_to_usb(struct coresight_csr *csr)
{
struct csr_drvdata *drvdata;
@@ -173,6 +213,7 @@ void msm_qdss_csr_disable_flush(struct coresight_csr *csr)
csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
CSR_LOCK(drvdata);
+ drvdata->enable_flush = false;
spin_unlock_irqrestore(&drvdata->spin_lock, flags);
}
EXPORT_SYMBOL(msm_qdss_csr_disable_flush);
@@ -301,8 +342,59 @@ static ssize_t csr_show_timestamp(struct device *dev,
static DEVICE_ATTR(timestamp, 0444, csr_show_timestamp, NULL);
+static ssize_t flushperiod_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct csr_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (IS_ERR_OR_NULL(drvdata) || !drvdata->usb_bam_support) {
+ dev_err(dev, "Invalid param\n");
+ return -EINVAL;
+ }
+
+ return scnprintf(buf, PAGE_SIZE, "%#x\n", drvdata->flushperiod);
+}
+
+static ssize_t flushperiod_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ unsigned long flags;
+ unsigned long val;
+ struct csr_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (IS_ERR_OR_NULL(drvdata) || !drvdata->usb_bam_support) {
+ dev_err(dev, "Invalid param\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&drvdata->spin_lock, flags);
+
+ if (kstrtoul(buf, 0, &val) || val > 0xffff) {
+ spin_unlock_irqrestore(&drvdata->spin_lock, flags);
+ return -EINVAL;
+ }
+
+ if (drvdata->flushperiod == val)
+ goto out;
+
+ drvdata->flushperiod = val;
+
+ if (drvdata->enable_flush)
+ msm_qdss_csr_config_flush_period(drvdata);
+
+out:
+ spin_unlock_irqrestore(&drvdata->spin_lock, flags);
+ return size;
+}
+
+static DEVICE_ATTR_RW(flushperiod);
+
static struct attribute *csr_attrs[] = {
&dev_attr_timestamp.attr,
+ &dev_attr_flushperiod.attr,
NULL,
};
@@ -380,14 +472,16 @@ static int csr_probe(struct platform_device *pdev)
else
dev_dbg(dev, "timestamp_support operation supported\n");
+ if (drvdata->usb_bam_support)
+ drvdata->flushperiod = FLUSHPERIOD_2048;
+
desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
if (!desc)
return -ENOMEM;
desc->type = CORESIGHT_DEV_TYPE_NONE;
desc->pdata = pdev->dev.platform_data;
desc->dev = &pdev->dev;
- if (drvdata->timestamp_support)
- desc->groups = csr_attr_grps;
+ desc->groups = csr_attr_grps;
drvdata->csdev = coresight_register(desc);
if (IS_ERR(drvdata->csdev))
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 25607feee5f9..2a42e25c51a2 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2012, 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -182,6 +182,7 @@ static inline int etm_writel_cp14(u32 off, u32 val) { return 0; }
#ifdef CONFIG_CORESIGHT_CSR
extern void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr);
+extern void msm_qdss_csr_enable_flush(struct coresight_csr *csr);
extern void msm_qdss_csr_disable_bam_to_usb(struct coresight_csr *csr);
extern void msm_qdss_csr_disable_flush(struct coresight_csr *csr);
extern int coresight_csr_hwctrl_set(struct coresight_csr *csr, uint64_t addr,
@@ -191,6 +192,7 @@ extern void coresight_csr_set_byte_cntr(struct coresight_csr *csr,
extern struct coresight_csr *coresight_csr_get(const char *name);
#else
static inline void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr) {}
+static inline void msm_qdss_csr_enable_flush(struct coresight_csr *csr) {}
static inline void msm_qdss_csr_disable_bam_to_usb(struct coresight_csr *csr) {}
static inline void msm_qdss_csr_disable_flush(struct coresight_csr *csr) {}
static inline int coresight_csr_hwctrl_set(struct coresight_csr *csr,
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index c6c07551b2b8..ebf3540827b2 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
* Copyright(C) 2016 Linaro Limited. All rights reserved.
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*
@@ -667,6 +667,7 @@ static void __tmc_etr_enable_to_bam(struct tmc_drvdata *drvdata)
CS_LOCK(drvdata->base);
+ msm_qdss_csr_enable_flush(drvdata->csr);
drvdata->enable_to_bam = true;
}
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index d779d60d420a..1628d936a901 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -5240,8 +5240,8 @@ static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu,
struct arm_smmu_domain *smmu_domain)
{
unsigned long flags;
- u32 halt, fsr, sctlr_orig, sctlr, status;
- void __iomem *base, *cb_base;
+ u32 halt, fsr, status;
+ void __iomem *tbu_base, *cb_base;
if (of_property_read_bool(tbu->dev->of_node,
"qcom,opt-out-tbu-halting")) {
@@ -5257,47 +5257,44 @@ static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu,
}
cb_base = ARM_SMMU_CB(smmu_domain->smmu, smmu_domain->cfg.cbndx);
- base = tbu->base;
- halt = readl_relaxed(base + DEBUG_SID_HALT_REG);
+ tbu_base = tbu->base;
+ halt = readl_relaxed(tbu_base + DEBUG_SID_HALT_REG);
halt |= DEBUG_SID_HALT_VAL;
- writel_relaxed(halt, base + DEBUG_SID_HALT_REG);
-
- if (!readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG, status,
- (status & DEBUG_SR_HALT_ACK_VAL),
- 0, TBU_DBG_TIMEOUT_US))
- goto out;
+ writel_relaxed(halt, tbu_base + DEBUG_SID_HALT_REG);
fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
- if (!(fsr & FSR_FAULT)) {
- dev_err(tbu->dev, "Couldn't halt TBU!\n");
- spin_unlock_irqrestore(&tbu->halt_lock, flags);
- return -ETIMEDOUT;
- }
+ if ((fsr & FSR_FAULT) && (fsr & FSR_SS)) {
+ u32 sctlr_orig, sctlr;
+ /*
+ * We are in a fault; Our request to halt the bus will not
+ * complete until transactions in front of us (such as the fault
+ * itself) have completed. Disable iommu faults and terminate
+ * any existing transactions.
+ */
+ sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
+ sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
+ writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
- /*
- * We are in a fault; Our request to halt the bus will not complete
- * until transactions in front of us (such as the fault itself) have
- * completed. Disable iommu faults and terminate any existing
- * transactions.
- */
- sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
- sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
- writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
+ writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
+ /*
+ * Barrier required to ensure that the FSR is cleared
+ * before resuming SMMU operation
+ */
+ wmb();
+ writel_relaxed(RESUME_TERMINATE, cb_base +
+ ARM_SMMU_CB_RESUME);
- writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
- writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
+ writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
+ }
- if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG, status,
+ if (readl_poll_timeout_atomic(tbu_base + DEBUG_SR_HALT_ACK_REG, status,
(status & DEBUG_SR_HALT_ACK_VAL),
0, TBU_DBG_TIMEOUT_US)) {
- dev_err(tbu->dev, "Couldn't halt TBU from fault context!\n");
- writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
+ dev_err(tbu->dev, "Couldn't halt TBU!\n");
spin_unlock_irqrestore(&tbu->halt_lock, flags);
return -ETIMEDOUT;
}
- writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
-out:
tbu->halt_count = 1;
spin_unlock_irqrestore(&tbu->halt_lock, flags);
return 0;
@@ -5415,6 +5412,25 @@ static phys_addr_t qsmmuv500_iova_to_phys(
sctlr = sctlr_orig & ~(SCTLR_CFCFG | SCTLR_CFIE);
writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
+ fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
+ if (fsr & FSR_FAULT) {
+ /* Clear pending interrupts */
+ writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
+ /*
+ * Barrier required to ensure that the FSR is cleared
+ * before resuming SMMU operation.
+ */
+ wmb();
+
+ /*
+ * TBU halt takes care of resuming any stalled transaction.
+ * Kept here for completeness.
+ */
+ if (fsr & FSR_SS)
+ writel_relaxed(RESUME_TERMINATE, cb_base +
+ ARM_SMMU_CB_RESUME);
+ }
+
/* Only one concurrent atos operation */
ret = qsmmuv500_ecats_lock(smmu_domain, tbu, &flags);
if (ret)
@@ -5459,10 +5475,12 @@ redo:
val = readq_relaxed(tbu->base + DEBUG_PAR_REG);
fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
- if (fsr & FSR_FAULT) {
+ if (val & DEBUG_PAR_FAULT_VAL) {
dev_err(tbu->dev, "ECATS generated a fault interrupt! FSR = %llx, SID=0x%x\n",
- fsr, sid);
+ fsr, sid);
+ dev_err(tbu->dev, "ECATS translation failed! PAR = %llx\n",
+ val);
/* Clear pending interrupts */
writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
/*
@@ -5470,12 +5488,11 @@ redo:
* before resuming SMMU operation.
*/
wmb();
- writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
- /* Check if ECATS translation failed */
- if (val & DEBUG_PAR_FAULT_VAL)
- dev_err(tbu->dev, "ECATS translation failed! PAR = %llx\n",
- val);
+ if (fsr & FSR_SS)
+ writel_relaxed(RESUME_TERMINATE, cb_base +
+ ARM_SMMU_CB_RESUME);
+
ret = -EINVAL;
}
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index 23c721b072dc..08deff94e783 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -174,7 +174,6 @@ static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping,
nbits, align);
if (unlikely(bit > mapping->num_4k_pages)) {
/* try wrapping */
- mapping->next_start = 0; /* TODO: SHOULD I REALLY DO THIS?!? */
bit = bitmap_find_next_zero_area(
mapping->bitmap, mapping->num_4k_pages, 0, nbits,
align);
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c
index a97a51965ae3..72dbd937b111 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -714,3 +714,150 @@ void cam_cdm_util_dump_cmd_buf(
}
} while (buf_now <= cmd_buf_end);
}
+
+static long cam_cdm_util_dump_reg_cont_cmd_v2(
+ uint32_t *cmd_buf_addr,
+ struct cam_cdm_cmd_buf_dump_info *dump_info)
+{
+ long ret = 0;
+ struct cdm_regcontinuous_cmd *p_regcont_cmd;
+ uint32_t *temp_ptr = cmd_buf_addr;
+ int i = 0;
+ struct cam_cdm_cmd_dump_header *hdr;
+ uint32_t *addr, *start;
+ uint8_t *dst;
+ uint32_t min_len, remain_len;
+
+ p_regcont_cmd = (struct cdm_regcontinuous_cmd *)temp_ptr;
+ temp_ptr += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_CONT];
+ ret += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_CONT];
+ CAM_DBG(CAM_CDM, "REG_CONT: COUNT: %u OFFSET: 0x%X",
+ p_regcont_cmd->count, p_regcont_cmd->offset);
+
+ min_len = (sizeof(uint32_t) * p_regcont_cmd->count) +
+ sizeof(struct cam_cdm_cmd_dump_header);
+ remain_len = dump_info->dst_max_size - dump_info->dst_offset;
+ if (remain_len < min_len) {
+ CAM_ERR_RATE_LIMIT(CAM_CDM, "Dump buffer exhaust %d %d",
+ remain_len, min_len);
+ return ret;
+ }
+ dst = (char *)dump_info->dst_start + dump_info->dst_offset;
+ hdr = (struct cam_cdm_cmd_dump_header *)dst;
+ snprintf(hdr->tag, CAM_CDM_CMD_TAG_MAX_LEN, "CDM_REG_CONT:");
+ hdr->word_size = sizeof(uint32_t);
+ addr = (uint32_t *)(dst + sizeof(struct cam_cdm_cmd_dump_header));
+ start = addr;
+ *addr++ = p_regcont_cmd->offset;
+ *addr++ = p_regcont_cmd->count;
+ for (i = 0; i < p_regcont_cmd->count; i++) {
+ *addr = *temp_ptr;
+ temp_ptr++;
+ addr++;
+ ret++;
+ }
+ hdr->size = hdr->word_size * (addr - start);
+ dump_info->dst_offset += hdr->size +
+ sizeof(struct cam_cdm_cmd_dump_header);
+ return ret;
+}
+
+static long cam_cdm_util_dump_reg_random_cmd_v2(
+ uint32_t *cmd_buf_addr,
+ struct cam_cdm_cmd_buf_dump_info *dump_info)
+{
+ struct cdm_regrandom_cmd *p_regrand_cmd;
+ uint32_t *temp_ptr = cmd_buf_addr;
+ long ret = 0;
+ int i = 0;
+ uint32_t *addr, *start;
+ struct cam_cdm_cmd_dump_header *hdr;
+ uint8_t *dst;
+ uint32_t min_len, remain_len;
+
+ p_regrand_cmd = (struct cdm_regrandom_cmd *)temp_ptr;
+ temp_ptr += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_RANDOM];
+ ret += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_RANDOM];
+
+ min_len = (2 * sizeof(uint32_t) * p_regrand_cmd->count) +
+ sizeof(struct cam_cdm_cmd_dump_header);
+ remain_len = dump_info->dst_max_size - dump_info->dst_offset;
+ if (remain_len < min_len) {
+ CAM_ERR_RATE_LIMIT(CAM_CDM, "Dump buffer exhaust %d %d",
+ remain_len, min_len);
+ return ret;
+ }
+ dst = (char *)dump_info->dst_start + dump_info->dst_offset;
+ hdr = (struct cam_cdm_cmd_dump_header *)dst;
+ snprintf(hdr->tag, CAM_CDM_CMD_TAG_MAX_LEN, "CDM_REG_RANDOM:");
+ hdr->word_size = sizeof(uint32_t);
+ addr = (uint32_t *)(dst + sizeof(struct cam_cdm_cmd_dump_header));
+ start = addr;
+ *addr++ = p_regrand_cmd->count;
+ for (i = 0; i < p_regrand_cmd->count; i++) {
+ addr[0] = temp_ptr[0] & CAM_CDM_REG_OFFSET_MASK;
+ addr[1] = temp_ptr[1];
+ temp_ptr += 2;
+ addr += 2;
+ ret += 2;
+ }
+ hdr->size = hdr->word_size * (addr - start);
+ dump_info->dst_offset += hdr->size +
+ sizeof(struct cam_cdm_cmd_dump_header);
+ return ret;
+}
+
+void cam_cdm_util_dump_cmd_bufs_v2(
+ struct cam_cdm_cmd_buf_dump_info *dump_info)
+{
+ uint32_t cmd = 0;
+ uint32_t *buf_now;
+
+ if (!dump_info || !dump_info->src_start || !dump_info->src_end ||
+ !dump_info->dst_start) {
+ CAM_INFO(CAM_CDM, "Invalid args");
+ return;
+ }
+ buf_now = dump_info->src_start;
+ do {
+ cmd = *buf_now;
+ cmd = cmd >> CAM_CDM_COMMAND_OFFSET;
+
+ switch (cmd) {
+ case CAM_CDM_CMD_DMI:
+ case CAM_CDM_CMD_DMI_32:
+ case CAM_CDM_CMD_DMI_64:
+ buf_now += CDMCmdHeaderSizes[CAM_CDM_CMD_DMI];
+ break;
+ case CAM_CDM_CMD_REG_CONT:
+ buf_now += cam_cdm_util_dump_reg_cont_cmd_v2(buf_now,
+ dump_info);
+ break;
+ case CAM_CDM_CMD_REG_RANDOM:
+ buf_now += cam_cdm_util_dump_reg_random_cmd_v2(buf_now,
+ dump_info);
+ break;
+ case CAM_CDM_CMD_BUFF_INDIRECT:
+ buf_now += CDMCmdHeaderSizes[CAM_CDM_CMD_BUFF_INDIRECT];
+ break;
+ case CAM_CDM_CMD_GEN_IRQ:
+ buf_now += CDMCmdHeaderSizes[CAM_CDM_CMD_GEN_IRQ];
+ break;
+ case CAM_CDM_CMD_WAIT_EVENT:
+ buf_now += CDMCmdHeaderSizes[CAM_CDM_CMD_WAIT_EVENT];
+ break;
+ case CAM_CDM_CMD_CHANGE_BASE:
+ buf_now += CDMCmdHeaderSizes[CAM_CDM_CMD_CHANGE_BASE];
+ break;
+ case CAM_CDM_CMD_PERF_CTRL:
+ buf_now += CDMCmdHeaderSizes[CAM_CDM_CMD_PERF_CTRL];
+ break;
+ default:
+ CAM_INFO(CAM_CDM, "Invalid CMD: 0x%x buf 0x%x",
+ cmd, *buf_now);
+ buf_now++;
+ break;
+ }
+ } while (buf_now <= dump_info->src_end);
+
+}
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.h
index 8f2b48853ca8..d8dc31cc572d 100644
--- a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.h
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -13,6 +13,9 @@
#ifndef _CAM_CDM_UTIL_H_
#define _CAM_CDM_UTIL_H_
+/* Max len for tag name for header while dumping cmd buffer */
+#define CAM_CDM_CMD_TAG_MAX_LEN 32
+
enum cam_cdm_command {
CAM_CDM_CMD_UNUSED = 0x0,
CAM_CDM_CMD_DMI = 0x1,
@@ -152,6 +155,34 @@ void (*cdm_write_genirq)(
};
/**
+ * struct cam_cdm_cmd_buf_dump_info; - Camera CDM dump info
+ * @src_start: source start address
+ * @src_end: source end address
+ * @dst_start: dst start address
+ * @dst_offset: dst offset
+ * @dst_max_size: max size of destination buffer
+ */
+struct cam_cdm_cmd_buf_dump_info {
+ uint32_t *src_start;
+ uint32_t *src_end;
+ uintptr_t dst_start;
+ uint32_t dst_offset;
+ uint32_t dst_max_size;
+};
+
+/**
+ * struct cam_cdm_cmd_dump_header - Camera CDM dump header
+ * @tag: tag name for header
+ * @size: size of data
+ * @word_size: size of each word
+ */
+struct cam_cdm_cmd_dump_header {
+ char tag[CAM_CDM_CMD_TAG_MAX_LEN];
+ uint64_t size;
+ uint32_t word_size;
+};
+
+/**
* cam_cdm_util_log_cmd_bufs()
*
* @brief: Util function to log cdm command buffers
@@ -163,6 +194,17 @@ void (*cdm_write_genirq)(
void cam_cdm_util_dump_cmd_buf(
uint32_t *cmd_buffer_start, uint32_t *cmd_buffer_end);
+/**
+ * cam_cdm_util_dump_cmd_bufs_v2()
+ *
+ * @brief: Util function to dump cdm command buffers
+ * into a destination buffer
+ *
+ * @dump_info: Information about source and destination buffers
+ *
+ */
+void cam_cdm_util_dump_cmd_bufs_v2(
+ struct cam_cdm_cmd_buf_dump_info *dump_info);
#endif /* _CAM_CDM_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.h b/drivers/media/platform/msm/camera/cam_core/cam_context.h
index 84a190a4c5a6..a3577f09dd78 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context.h
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.h
@@ -25,6 +25,9 @@ struct cam_context;
/* max device name string length*/
#define CAM_CTX_DEV_NAME_MAX_LENGTH 20
+/* max tag dump header string length*/
+#define CAM_CONTEXT_DUMP_TAG_MAX_LEN 32
+
/* max request number */
#define CAM_CTX_REQ_MAX 20
#define CAM_CTX_CFG_MAX 20
@@ -230,6 +233,19 @@ struct cam_context {
};
/**
+ * struct cam_context_dump_header - Camera context dump header
+ *
+ * @tag : Tag for context dump header
+ * @size : Size of data
+ * @word_size : Word size of data
+ */
+struct cam_context_dump_header {
+ char tag[CAM_CONTEXT_DUMP_TAG_MAX_LEN];
+ uint64_t size;
+ uint32_t word_size;
+};
+
+/**
* cam_context_shutdown()
*
* @brief: Calls while device close or shutdown
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
index 1705b536bc37..211146d36ea0 100644
--- a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
@@ -1045,6 +1045,72 @@ end:
return rc;
}
+static int cam_context_dump_context(struct cam_context *ctx,
+ struct cam_hw_dump_args *dump_args)
+{
+ int rc = 0;
+ struct cam_context_dump_header *hdr;
+ char *dst;
+ uint64_t *addr, *start;
+ uintptr_t cpu_addr;
+ size_t buf_len;
+ uint32_t min_len, remain_len;
+ struct cam_ctx_request *req;
+ int i;
+
+ if (list_empty(&ctx->active_req_list)) {
+ CAM_ERR(CAM_CTXT, "[%s][%d] no active request",
+ ctx->dev_name, ctx->ctx_id);
+ return -EIO;
+ }
+ rc = cam_mem_get_cpu_buf(dump_args->buf_handle,
+ &cpu_addr, &buf_len);
+ if (!cpu_addr || !buf_len || rc) {
+ CAM_ERR(CAM_CTXT,
+ "lnvalid addr %u len %zu rc %d",
+ dump_args->buf_handle, buf_len, rc);
+ return rc;
+ }
+ remain_len = buf_len - dump_args->offset;
+ min_len = 2 * (sizeof(struct cam_context_dump_header) +
+ CAM_CONTEXT_DUMP_TAG_MAX_LEN);
+ if (remain_len < min_len) {
+ CAM_ERR(CAM_CTXT, "dump buffer exhaust %d %d",
+ remain_len, min_len);
+ goto end;
+ }
+ dst = (char *)cpu_addr + dump_args->offset;
+ hdr = (struct cam_context_dump_header *)dst;
+ snprintf(hdr->tag, CAM_CONTEXT_DUMP_TAG_MAX_LEN,
+ "%s_CTXT_DUMP:", ctx->dev_name);
+ hdr->word_size = sizeof(uint64_t);
+ addr = (uint64_t *)(dst + sizeof(struct cam_context_dump_header));
+ start = addr;
+ req = list_first_entry(&ctx->active_req_list,
+ struct cam_ctx_request, list);
+ *addr++ = ctx->ctx_id;
+ *addr++ = refcount_read(&(ctx->refcount.refcount));
+ *addr++ = ctx->last_flush_req;
+ *addr++ = ctx->state;
+ *addr++ = req->num_out_map_entries;
+ for (i = 0; i < req->num_out_map_entries; i++)
+ if (req->out_map_entries[i].sync_id != -1)
+ *addr++ = req->out_map_entries[i].sync_id;
+ for (i = 0; i < req->num_in_map_entries; i++)
+ if (req->in_map_entries[i].sync_id != -1)
+ *addr++ = req->in_map_entries[i].sync_id;
+ *addr++ = req->num_in_map_entries;
+ hdr->size = hdr->word_size * (addr - start);
+ dump_args->offset += hdr->size +
+ sizeof(struct cam_context_dump_header);
+end:
+ rc = cam_mem_put_cpu_buf(dump_args->buf_handle);
+ if (rc)
+ CAM_ERR(CAM_CTXT, "Cpu put failed handle %u",
+ dump_args->buf_handle);
+ return rc;
+}
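Every producer added in this patch writes records of the same shape: a fixed-size header (tag, size, word_size) followed by size bytes of payload. A hedged sketch of how a reader might walk such a blob, assuming it shares the cam_context_dump_header definition above and that filled_len is the final offset reported back for the buffer:

    /*
     * Illustrative reader: walk a dump blob record by record. Each
     * record is a cam_context_dump_header followed by hdr->size bytes
     * of payload; struct layout is assumed to match the producer.
     */
    static void example_walk_dump_blob(char *blob, size_t filled_len)
    {
        size_t pos = 0;

        while (pos + sizeof(struct cam_context_dump_header) <= filled_len) {
            struct cam_context_dump_header *hdr =
                (struct cam_context_dump_header *)(blob + pos);

            CAM_INFO(CAM_CTXT, "record %s size %llu word_size %u",
                hdr->tag, (unsigned long long)hdr->size,
                hdr->word_size);
            pos += sizeof(*hdr) + hdr->size;
        }
    }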
+
int32_t cam_context_dump_dev_to_hw(struct cam_context *ctx,
struct cam_dump_req_cmd *cmd)
{
@@ -1074,11 +1140,14 @@ int32_t cam_context_dump_dev_to_hw(struct cam_context *ctx,
ctx->dev_name, ctx->ctx_id, dump_args.buf_handle);
return rc;
}
- CAM_INFO(CAM_CTXT, "[%s] ctx: %d Filled Length %d",
- ctx->dev_name, ctx->ctx_id,
- dump_args.offset - cmd->offset);
- /* Drivers update offest upto which the buffer is written*/
- cmd->offset = dump_args.offset;
+ if (dump_args.offset != cmd->offset) {
+ cam_context_dump_context(ctx, &dump_args);
+ CAM_INFO(CAM_CTXT, "[%s] ctx: %d Filled Length %d",
+ ctx->dev_name, ctx->ctx_id,
+ dump_args.offset - cmd->offset);
+ /* Drivers update the offset */
+ cmd->offset = dump_args.offset;
+ }
} else {
CAM_INFO(CAM_CTXT, "%s hw dump not registered", ctx->dev_name);
}
diff --git a/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c b/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
index 70ff72c39028..9e14e40127c0 100644
--- a/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
+++ b/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
@@ -124,6 +124,18 @@ static int __cam_fd_ctx_release_dev_in_activated(struct cam_context *ctx,
return rc;
}
+static int __cam_fd_ctx_dump_dev_in_activated(struct cam_context *ctx,
+ struct cam_dump_req_cmd *cmd)
+{
+ int rc;
+
+ rc = cam_context_dump_dev_to_hw(ctx, cmd);
+ if (rc)
+ CAM_ERR(CAM_FD, "Failed to dump device, rc=%d", rc);
+
+ return rc;
+}
+
static int __cam_fd_ctx_flush_dev_in_activated(struct cam_context *ctx,
struct cam_flush_dev_cmd *cmd)
{
@@ -203,6 +215,7 @@ static struct cam_ctx_ops
.release_dev = __cam_fd_ctx_release_dev_in_activated,
.config_dev = __cam_fd_ctx_config_dev_in_activated,
.flush_dev = __cam_fd_ctx_flush_dev_in_activated,
+ .dump_dev = __cam_fd_ctx_dump_dev_in_activated,
},
.crm_ops = {},
.irq_ops = __cam_fd_ctx_handle_irq_in_activated,
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
index b2838b4a902f..91a1b141e659 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
@@ -910,6 +910,7 @@ static int cam_fd_mgr_util_submit_frame(void *priv, void *data)
hw_device->req_id = frame_req->request_id;
mutex_unlock(&hw_device->lock);
+ cam_common_util_get_curr_timestamp(&frame_req->submit_timestamp);
rc = cam_fd_mgr_util_put_frame_req(
&hw_mgr->frame_processing_list, &frame_req);
if (rc) {
@@ -1530,6 +1531,110 @@ static int cam_fd_mgr_hw_flush(void *hw_mgr_priv,
return rc;
}
+static int cam_fd_mgr_hw_dump(void *hw_mgr_priv,
+ void *hw_dump_args)
+{
+ struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+ struct cam_hw_dump_args *dump_args = hw_dump_args;
+ struct cam_fd_mgr_frame_request *frame_req, *req_temp;
+ uint64_t diff;
+ struct timeval cur_time;
+ int rc = 0;
+ struct cam_fd_hw_mgr_ctx *hw_ctx =
+ (struct cam_fd_hw_mgr_ctx *)dump_args->ctxt_to_hw_map;
+ struct cam_fd_device *hw_device;
+ char *dst;
+ struct cam_fd_hw_dump_args fd_dump_args;
+ struct cam_fd_hw_dump_header *hdr;
+ uint64_t *addr, *start;
+ uint32_t min_len, remain_len;
+
+ rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+ if (rc) {
+ CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+ return rc;
+ }
+ list_for_each_entry_safe(frame_req, req_temp,
+ &hw_mgr->frame_processing_list, list) {
+ if (frame_req->request_id == dump_args->request_id)
+ goto hw_dump;
+ }
+ return rc;
+hw_dump:
+ cam_common_util_get_curr_timestamp(&cur_time);
+ diff = cam_common_util_get_time_diff(&cur_time,
+ &frame_req->submit_timestamp);
+ if (diff < CAM_FD_RESPONSE_TIME_THRESHOLD) {
+ CAM_INFO(CAM_FD, "No Error req %lld %ld:%06ld %ld:%06ld",
+ dump_args->request_id,
+ frame_req->submit_timestamp.tv_sec,
+ frame_req->submit_timestamp.tv_usec,
+ cur_time.tv_sec,
+ cur_time.tv_usec);
+ return 0;
+ }
+ CAM_INFO(CAM_FD, "Error req %lld %ld:%06ld %ld:%06ld",
+ dump_args->request_id,
+ frame_req->submit_timestamp.tv_sec,
+ frame_req->submit_timestamp.tv_usec,
+ cur_time.tv_sec,
+ cur_time.tv_usec);
+ rc = cam_mem_get_cpu_buf(dump_args->buf_handle,
+ &fd_dump_args.cpu_addr, &fd_dump_args.buf_len);
+ if (!fd_dump_args.cpu_addr || !fd_dump_args.buf_len || rc) {
+ CAM_ERR(CAM_FD,
+ "lnvalid addr %u len %zu rc %d",
+ dump_args->buf_handle, fd_dump_args.buf_len, rc);
+ return rc;
+ }
+ remain_len = fd_dump_args.buf_len - dump_args->offset;
+ min_len = 2 * (sizeof(struct cam_fd_hw_dump_header) +
+ CAM_FD_HW_DUMP_TAG_MAX_LEN);
+ if (remain_len < min_len) {
+ CAM_ERR(CAM_FD, "dump buffer exhaust %d %d",
+ remain_len, min_len);
+ goto end;
+ }
+ dst = (char *)fd_dump_args.cpu_addr + dump_args->offset;
+ hdr = (struct cam_fd_hw_dump_header *)dst;
+ snprintf(hdr->tag, CAM_FD_HW_DUMP_TAG_MAX_LEN,
+ "FD_REQ:");
+ hdr->word_size = sizeof(uint64_t);
+ addr = (uint64_t *)(dst + sizeof(struct cam_fd_hw_dump_header));
+ start = addr;
+ *addr++ = frame_req->request_id;
+ *addr++ = frame_req->submit_timestamp.tv_sec;
+ *addr++ = frame_req->submit_timestamp.tv_usec;
+ *addr++ = cur_time.tv_sec;
+ *addr++ = cur_time.tv_usec;
+ hdr->size = hdr->word_size * (addr - start);
+ dump_args->offset += hdr->size +
+ sizeof(struct cam_fd_hw_dump_header);
+
+ fd_dump_args.request_id = dump_args->request_id;
+ fd_dump_args.offset = dump_args->offset;
+ if (hw_device->hw_intf->hw_ops.process_cmd) {
+ rc = hw_device->hw_intf->hw_ops.process_cmd(
+ hw_device->hw_intf->hw_priv,
+ CAM_FD_HW_CMD_HW_DUMP,
+ &fd_dump_args,
+ sizeof(struct
+ cam_fd_hw_dump_args));
+ if (rc) {
+ CAM_ERR(CAM_FD, "Hw Dump cmd fails req %lld rc %d",
+ frame_req->request_id, rc);
+ goto end;
+ }
+ }
+ dump_args->offset = fd_dump_args.offset;
+end:
+ rc = cam_mem_put_cpu_buf(dump_args->buf_handle);
+ if (rc)
+ CAM_ERR(CAM_FD, "Cpu put failed handle %u",
+ dump_args->buf_handle);
+ return rc;
+}
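The dump above is taken only when the request has been outstanding with the FD hardware longer than CAM_FD_RESPONSE_TIME_THRESHOLD. A hedged sketch of that elapsed-time test, assuming cam_common_util_get_time_diff() reports the difference of two struct timeval values in microseconds (the exact unit is defined by that helper, not here):

    /* Illustrative timeval difference in microseconds (end >= start). */
    static uint64_t example_timeval_diff_us(const struct timeval *end,
        const struct timeval *start)
    {
        return (uint64_t)(end->tv_sec - start->tv_sec) * 1000000ULL +
            (uint64_t)(end->tv_usec - start->tv_usec);
    }

    /*
     * Usage sketch: skip the dump when the request has not been with
     * the hardware for longer than the threshold.
     *
     *   if (example_timeval_diff_us(&cur_time,
     *           &frame_req->submit_timestamp) <
     *           CAM_FD_RESPONSE_TIME_THRESHOLD)
     *       return 0;
     */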
+
static int cam_fd_mgr_hw_stop(void *hw_mgr_priv, void *mgr_stop_args)
{
struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
@@ -1977,6 +2082,7 @@ int cam_fd_hw_mgr_init(struct device_node *of_node,
hw_mgr_intf->hw_write = NULL;
hw_mgr_intf->hw_close = NULL;
hw_mgr_intf->hw_flush = cam_fd_mgr_hw_flush;
+ hw_mgr_intf->hw_dump = cam_fd_mgr_hw_dump;
return rc;
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h
index a903ab091c73..87464a37372c 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -27,6 +27,11 @@
#define CAM_FD_HW_MAX 1
#define CAM_FD_WORKQ_NUM_TASK 10
+/*
+ * Response time threshold in ms beyond which a request is not expected to be
+ * with FD hw
+ */
+#define CAM_FD_RESPONSE_TIME_THRESHOLD 100000
struct cam_fd_hw_mgr;
@@ -107,6 +112,7 @@ struct cam_fd_device {
* @hw_update_entries : HW update entries corresponding to this request
* which needs to be submitted to HW through CDM
* @num_hw_update_entries : Number of HW update entries
+ * @submit_timestamp : Timestamp when the request was submitted to hw
*/
struct cam_fd_mgr_frame_request {
struct list_head list;
@@ -115,6 +121,7 @@ struct cam_fd_mgr_frame_request {
struct cam_fd_hw_req_private hw_req_private;
struct cam_hw_update_entry hw_update_entries[CAM_FD_MAX_HW_ENTRIES];
uint32_t num_hw_update_entries;
+ struct timeval submit_timestamp;
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
index 8be20a772129..20378cad81a6 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -523,6 +523,59 @@ static int cam_fd_hw_util_processcmd_frame_done(struct cam_hw_info *fd_hw,
return 0;
}
+static int cam_fd_hw_util_processcmd_hw_dump(struct cam_hw_info *fd_hw,
+ void *args)
+{
+ struct cam_fd_hw_dump_args *dump_args;
+ struct cam_hw_soc_info *soc_info;
+ int i, j;
+ char *dst;
+ uint32_t *addr, *start;
+ struct cam_fd_hw_dump_header *hdr;
+ uint32_t num_reg, min_len, remain_len;
+
+ mutex_lock(&fd_hw->hw_mutex);
+ if (fd_hw->hw_state == CAM_HW_STATE_POWER_DOWN) {
+ CAM_INFO(CAM_FD, "power off state");
+ mutex_unlock(&fd_hw->hw_mutex);
+ return 0;
+ }
+ mutex_unlock(&fd_hw->hw_mutex);
+
+ dump_args = (struct cam_fd_hw_dump_args *)args;
+ soc_info = &fd_hw->soc_info;
+ remain_len = dump_args->buf_len - dump_args->offset;
+ min_len = 2 * (sizeof(struct cam_fd_hw_dump_header) +
+ CAM_FD_HW_DUMP_TAG_MAX_LEN) +
+ soc_info->reg_map[0].size;
+ if (remain_len < min_len) {
+ CAM_ERR(CAM_FD, "dump buffer exhaust %d %d",
+ remain_len, min_len);
+ return 0;
+ }
+ dst = (char *)dump_args->cpu_addr + dump_args->offset;
+ hdr = (struct cam_fd_hw_dump_header *)dst;
+ snprintf(hdr->tag, CAM_FD_HW_DUMP_TAG_MAX_LEN,
+ "FD_REG:");
+ hdr->word_size = sizeof(uint32_t);
+ addr = (uint32_t *)(dst + sizeof(struct cam_fd_hw_dump_header));
+ start = addr;
+ *addr++ = soc_info->index;
+ num_reg = soc_info->reg_map[0].size/4;
+ for (j = 0; j < soc_info->num_reg_map; j++) {
+ for (i = 0; i < num_reg; i++) {
+ *addr++ = soc_info->mem_block[0]->start + i*4;
+ *addr++ = cam_io_r(soc_info->reg_map[0].mem_base +
+ (i*4));
+ }
+ }
+ hdr->size = hdr->word_size * (addr - start);
+ dump_args->offset += hdr->size +
+ sizeof(struct cam_fd_hw_dump_header);
+ CAM_DBG(CAM_FD, "%d", dump_args->offset);
+ return 0;
+}
+
irqreturn_t cam_fd_hw_irq(int irq_num, void *data)
{
struct cam_hw_info *fd_hw = (struct cam_hw_info *)data;
@@ -1163,6 +1216,11 @@ int cam_fd_hw_process_cmd(void *hw_priv, uint32_t cmd_type,
cmd_frame_results);
break;
}
+ case CAM_FD_HW_CMD_HW_DUMP: {
+ rc = cam_fd_hw_util_processcmd_hw_dump(fd_hw,
+ cmd_args);
+ break;
+ }
default:
break;
}
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h
index ef3b6c9314e2..8799a716a733 100644
--- a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -31,6 +31,7 @@
#define CAM_FD_MAX_IO_BUFFERS 5
#define CAM_FD_MAX_HW_ENTRIES 5
+#define CAM_FD_HW_DUMP_TAG_MAX_LEN 32
/**
* enum cam_fd_hw_type - Enum for FD HW type
@@ -87,6 +88,7 @@ enum cam_fd_hw_irq_type {
* @CAM_FD_HW_CMD_FRAME_DONE : Command to process frame done settings
* @CAM_FD_HW_CMD_UPDATE_SOC : Command to process soc update
* @CAM_FD_HW_CMD_REGISTER_CALLBACK : Command to set hw mgr callback
+ * @CAM_FD_HW_CMD_HW_DUMP : Command to dump fd hw information
* @CAM_FD_HW_CMD_MAX : Indicates max cmd
*/
enum cam_fd_hw_cmd_type {
@@ -94,6 +96,7 @@ enum cam_fd_hw_cmd_type {
CAM_FD_HW_CMD_FRAME_DONE,
CAM_FD_HW_CMD_UPDATE_SOC,
CAM_FD_HW_CMD_REGISTER_CALLBACK,
+ CAM_FD_HW_CMD_HW_DUMP,
CAM_FD_HW_CMD_MAX,
};
@@ -286,4 +289,32 @@ struct cam_fd_hw_cmd_set_irq_cb {
void *data;
};
+/**
+ * struct cam_fd_hw_dump_args : Args for dump request
+ *
+ * @cpu_addr : start address of the target buffer
+ * @offset : offset of the buffer
+ * @request_id : Request id of the issued request
+ * @buf_len : Length of target buffer
+ */
+struct cam_fd_hw_dump_args {
+ uintptr_t cpu_addr;
+ uint64_t offset;
+ uint64_t request_id;
+ size_t buf_len;
+};
+
+/**
+ * struct cam_fd_hw_dump_header : fd hw dump header
+ *
+ * @tag : fd hw dump header tag
+ * @size : Size of data
+ * @word_size : size of each word
+ */
+struct cam_fd_hw_dump_header {
+ char tag[CAM_FD_HW_DUMP_TAG_MAX_LEN];
+ uint64_t size;
+ uint32_t word_size;
+};
+
#endif /* _CAM_FD_HW_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
index 41d175188d85..f98ad43acbd6 100644
--- a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
@@ -118,6 +118,18 @@ static int __cam_icp_start_dev_in_acquired(struct cam_context *ctx,
return rc;
}
+static int __cam_icp_dump_dev_in_ready(struct cam_context *ctx,
+ struct cam_dump_req_cmd *cmd)
+{
+ int rc;
+
+ rc = cam_context_dump_dev_to_hw(ctx, cmd);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Failed to dump device");
+
+ return rc;
+}
+
static int __cam_icp_flush_dev_in_ready(struct cam_context *ctx,
struct cam_flush_dev_cmd *cmd)
{
@@ -229,6 +241,7 @@ static struct cam_ctx_ops
.start_dev = __cam_icp_start_dev_in_acquired,
.config_dev = __cam_icp_config_dev_in_ready,
.flush_dev = __cam_icp_flush_dev_in_ready,
+ .dump_dev = __cam_icp_dump_dev_in_ready,
},
.crm_ops = {},
.irq_ops = __cam_icp_handle_buf_done_in_ready,
@@ -241,6 +254,7 @@ static struct cam_ctx_ops
.release_dev = __cam_icp_release_dev_in_ready,
.config_dev = __cam_icp_config_dev_in_ready,
.flush_dev = __cam_icp_flush_dev_in_ready,
+ .dump_dev = __cam_icp_dump_dev_in_ready,
},
.crm_ops = {},
.irq_ops = __cam_icp_handle_buf_done_in_ready,
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
index 4dbc8f1bd991..41d128a074c7 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
@@ -242,6 +242,43 @@ fw_download_failed:
return rc;
}
+static int cam_a5_fw_dump(struct cam_icp_hw_dump_args *dump_args,
+ struct cam_a5_device_core_info *core_info)
+{
+ u8 *dest;
+ u8 *src;
+ uint64_t size_required = 0;
+ struct cam_icp_dump_header *hdr;
+
+ if (!core_info || !dump_args) {
+ CAM_ERR(CAM_ICP, "invalid params %pK %pK",
+ core_info, dump_args);
+ return -EINVAL;
+ }
+ if (!core_info->fw_kva_addr || !dump_args->cpu_addr) {
+ CAM_ERR(CAM_ICP, "invalid params %pK, 0x%zx",
+ core_info->fw_kva_addr, dump_args->cpu_addr);
+ return -EINVAL;
+ }
+ size_required = core_info->fw_buf_len +
+ sizeof(struct cam_icp_dump_header);
+ if ((dump_args->buf_len - dump_args->offset) < size_required) {
+ CAM_WARN(CAM_ICP, "Dump buffer exhaust %lld %lld",
+ size_required, core_info->fw_buf_len);
+ return 0;
+ }
+ dest = (u8 *)dump_args->cpu_addr + dump_args->offset;
+ hdr = (struct cam_icp_dump_header *)dest;
+ snprintf(hdr->tag, CAM_ICP_DUMP_TAG_MAX_LEN, "ICP_FW:");
+ hdr->word_size = sizeof(u8);
+ hdr->size = core_info->fw_buf_len;
+ src = (u8 *)core_info->fw_kva_addr;
+ dest = (u8 *)dest + sizeof(struct cam_icp_dump_header);
+ memcpy_fromio(dest, src, core_info->fw_buf_len);
+ dump_args->offset += hdr->size + sizeof(struct cam_icp_dump_header);
+ return 0;
+}
+
int cam_a5_init_hw(void *device_priv,
void *init_hw_args, uint32_t arg_size)
{
@@ -480,6 +517,12 @@ int cam_a5_process_cmd(void *device_priv, uint32_t cmd_type,
}
rc = hfi_cmd_ubwc_config(a5_soc->ubwc_cfg);
break;
+ case CAM_ICP_A5_CMD_HW_DUMP: {
+ struct cam_icp_hw_dump_args *dump_args = cmd_args;
+
+ rc = cam_a5_fw_dump(dump_args, core_info);
+ break;
+ }
default:
break;
}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 370768a0609d..5a6bc0b644ca 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -3405,7 +3405,8 @@ static int cam_icp_mgr_config_hw(void *hw_mgr_priv, void *config_hw_args)
idx = cam_icp_clk_idx_from_req_id(ctx_data, req_id);
cam_icp_mgr_ipe_bps_clk_update(hw_mgr, ctx_data, idx);
ctx_data->hfi_frame_process.fw_process_flag[idx] = true;
-
+ cam_common_util_get_curr_timestamp(
+ &ctx_data->hfi_frame_process.submit_timestamp[idx]);
CAM_DBG(CAM_ICP, "req_id %llu, io config %llu", req_id,
frame_info->io_config);
@@ -4435,6 +4436,99 @@ static int cam_icp_mgr_flush_req(struct cam_icp_hw_ctx_data *ctx_data,
return 0;
}
+static int cam_icp_mgr_hw_dump(void *hw_priv, void *hw_dump_args)
+{
+ struct cam_hw_dump_args *dump_args = hw_dump_args;
+ struct cam_icp_hw_mgr *hw_mgr = hw_priv;
+ struct cam_hw_intf *a5_dev_intf = NULL;
+ struct cam_icp_hw_dump_args icp_dump_args;
+ int rc = 0;
+ struct cam_icp_hw_ctx_data *ctx_data;
+ struct hfi_frame_process_info *frm_process;
+ struct timeval cur_time;
+ uint64_t diff;
+ int i;
+ struct cam_icp_dump_header *hdr;
+ uint64_t *addr, *start;
+ uint8_t *dst;
+ uint32_t min_len, remain_len;
+
+ if ((!hw_priv) || (!hw_dump_args)) {
+ CAM_ERR(CAM_ICP, "Input params are Null:");
+ return -EINVAL;
+ }
+ ctx_data = dump_args->ctxt_to_hw_map;
+ CAM_DBG(CAM_ICP, "Req %lld", dump_args->request_id);
+ frm_process = &ctx_data->hfi_frame_process;
+ for (i = 0; i < CAM_FRAME_CMD_MAX; i++) {
+ if ((frm_process->request_id[i] ==
+ dump_args->request_id) &&
+ frm_process->fw_process_flag[i])
+ goto hw_dump;
+ }
+ return 0;
+hw_dump:
+ cam_common_util_get_curr_timestamp(&cur_time);
+ diff = cam_common_util_get_time_diff(
+ &cur_time,
+ &frm_process->submit_timestamp[i]);
+ if (diff < CAM_ICP_CTX_RESPONSE_TIME_THRESHOLD) {
+ CAM_INFO(CAM_ICP, "No Error req %lld %ld:%06ld %ld:%06ld",
+ dump_args->request_id,
+ frm_process->submit_timestamp[i].tv_sec,
+ frm_process->submit_timestamp[i].tv_usec,
+ cur_time.tv_sec,
+ cur_time.tv_usec);
+ return 0;
+ }
+ CAM_INFO(CAM_ICP, "Error req %lld %ld:%06ld %ld:%06ld",
+ dump_args->request_id,
+ frm_process->submit_timestamp[i].tv_sec,
+ frm_process->submit_timestamp[i].tv_usec,
+ cur_time.tv_sec,
+ cur_time.tv_usec);
+ rc = cam_mem_get_cpu_buf(dump_args->buf_handle,
+ &icp_dump_args.cpu_addr, &icp_dump_args.buf_len);
+ if (!icp_dump_args.cpu_addr || !icp_dump_args.buf_len || rc) {
+ CAM_ERR(CAM_ICP,
+ "lnvalid addr %u len %zu rc %d",
+ dump_args->buf_handle, icp_dump_args.buf_len, rc);
+ return rc;
+ }
+ remain_len = icp_dump_args.buf_len - dump_args->offset;
+ min_len = 2 * (sizeof(struct cam_icp_dump_header) +
+ CAM_ICP_DUMP_TAG_MAX_LEN);
+ if (remain_len < min_len) {
+ CAM_ERR(CAM_ICP, "dump buffer exhaust %d %d",
+ remain_len, min_len);
+ goto end;
+ }
+ dst = (char *)icp_dump_args.cpu_addr + dump_args->offset;
+ hdr = (struct cam_icp_dump_header *)dst;
+ snprintf(hdr->tag, CAM_ICP_DUMP_TAG_MAX_LEN, "ICP_REQ:");
+ hdr->word_size = sizeof(uint64_t);
+ addr = (uint64_t *)(dst + sizeof(struct cam_icp_dump_header));
+ start = addr;
+ *addr++ = frm_process->request_id[i];
+ *addr++ = frm_process->submit_timestamp[i].tv_sec;
+ *addr++ = frm_process->submit_timestamp[i].tv_usec;
+ *addr++ = cur_time.tv_sec;
+ *addr++ = cur_time.tv_usec;
+ hdr->size = hdr->word_size * (addr - start);
+ dump_args->offset += (hdr->size + sizeof(struct cam_icp_dump_header));
+ /* Dump the fw image */
+ icp_dump_args.offset = dump_args->offset;
+ a5_dev_intf = hw_mgr->a5_dev_intf;
+ rc = a5_dev_intf->hw_ops.process_cmd(
+ a5_dev_intf->hw_priv,
+ CAM_ICP_A5_CMD_HW_DUMP, &icp_dump_args,
+ sizeof(struct cam_icp_hw_dump_args));
+ dump_args->offset = icp_dump_args.offset;
+end:
+ rc = cam_mem_put_cpu_buf(dump_args->buf_handle);
+ return rc;
+}
+
static int cam_icp_mgr_hw_flush(void *hw_priv, void *hw_flush_args)
{
struct cam_hw_flush_args *flush_args = hw_flush_args;
@@ -5353,6 +5447,7 @@ int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl,
hw_mgr_intf->hw_close = cam_icp_mgr_hw_close_u;
hw_mgr_intf->hw_flush = cam_icp_mgr_hw_flush;
hw_mgr_intf->hw_cmd = cam_icp_mgr_cmd;
+ hw_mgr_intf->hw_dump = cam_icp_mgr_hw_dump;
icp_hw_mgr.secure_mode = CAM_SECURE_MODE_NON_SECURE;
mutex_init(&icp_hw_mgr.hw_mgr_mutex);
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
index 731d2355882f..d5eb96f805c6 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -70,6 +70,12 @@
#define CAM_ICP_CTX_MAX_CMD_BUFFERS 0x2
+/*
+ * Response time threshold in ms beyond which a request is not expected
+ * to be with ICP hw
+ */
+#define CAM_ICP_CTX_RESPONSE_TIME_THRESHOLD 300000
+
/**
* struct icp_hfi_mem_info
* @qtbl: Memory info of queue table
@@ -155,6 +161,7 @@ struct icp_frame_info {
* @fw_process_flag: Frame process flag
* @clk_info: Clock information for a request
* @frame_info: information needed to process request
+ * @submit_timestamp: Timestamp when the request was submitted to hw
*/
struct hfi_frame_process_info {
struct hfi_cmd_ipebps_async hfi_frame_cmd[CAM_FRAME_CMD_MAX];
@@ -169,6 +176,7 @@ struct hfi_frame_process_info {
uint32_t fw_process_flag[CAM_FRAME_CMD_MAX];
struct cam_icp_clk_bw_request clk_info[CAM_FRAME_CMD_MAX];
struct icp_frame_info frame_info[CAM_FRAME_CMD_MAX];
+ struct timeval submit_timestamp[CAM_FRAME_CMD_MAX];
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
index 17a79d8d3f30..c892a71f1f1b 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -33,6 +33,7 @@ enum cam_icp_a5_cmd_type {
CAM_ICP_A5_CMD_CPAS_STOP,
CAM_ICP_A5_CMD_UBWC_CFG,
CAM_ICP_A5_CMD_PC_PREP,
+ CAM_ICP_A5_CMD_HW_DUMP,
CAM_ICP_A5_CMD_MAX,
};
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
index 2ebe41417d1d..f4e04ffeea4c 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
@@ -27,6 +27,8 @@
#define CPAS_IPE1_BIT 0x2000
+#define CAM_ICP_DUMP_TAG_MAX_LEN 32
+
int cam_icp_hw_mgr_init(struct device_node *of_node,
uint64_t *hw_mgr_hdl, int *iommu_hdl);
@@ -44,4 +46,28 @@ struct cam_icp_cpas_vote {
uint32_t axi_vote_valid;
};
+/**
+ * struct cam_icp_hw_dump_args
+ * @cpu_addr: kernel vaddr
+ * @buf_len: buffer length
+ * @offset: offset
+ */
+struct cam_icp_hw_dump_args {
+ uintptr_t cpu_addr;
+ size_t buf_len;
+ int32_t offset;
+};
+
+/**
+ * struct cam_icp_dump_header
+ * @tag: tag of the packet
+ * @size: size of data in packet
+ * @word_size: size of each word in packet
+ */
+struct cam_icp_dump_header {
+ char tag[CAM_ICP_DUMP_TAG_MAX_LEN];
+ uint64_t size;
+ int32_t word_size;
+};
+
#endif /* CAM_ICP_HW_MGR_INTF_H */
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index f68cd80ea7cd..ca7380b1d748 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -29,13 +29,123 @@
static const char isp_dev_name[] = "cam-isp";
-#define INC_STATE_MONITOR_HEAD(head) \
+#define INC_HEAD(head, max_entries) \
(atomic64_add_return(1, head) % \
- CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES)
+ max_entries)
static int cam_isp_context_dump_active_request(void *data, unsigned long iova,
uint32_t buf_info);
+static const char *__cam_isp_evt_val_to_type(
+ uint32_t evt_id)
+{
+ switch (evt_id) {
+ case CAM_ISP_CTX_EVENT_SUBMIT:
+ return "SUBMIT";
+ case CAM_ISP_CTX_EVENT_APPLY:
+ return "APPLY";
+ case CAM_ISP_CTX_EVENT_EPOCH:
+ return "EPOCH";
+ case CAM_ISP_CTX_EVENT_RUP:
+ return "RUP";
+ case CAM_ISP_CTX_EVENT_BUFDONE:
+ return "BUFDONE";
+ default:
+ return "CAM_ISP_EVENT_INVALID";
+ }
+}
+
+static void __cam_isp_ctx_update_event_record(
+ struct cam_isp_context *ctx_isp,
+ enum cam_isp_ctx_event event,
+ struct cam_ctx_request *req)
+{
+ int iterator = 0;
+ struct cam_isp_ctx_req *req_isp;
+ struct timeval cur_time;
+
+ switch (event) {
+ case CAM_ISP_CTX_EVENT_EPOCH:
+ case CAM_ISP_CTX_EVENT_RUP:
+ case CAM_ISP_CTX_EVENT_BUFDONE:
+ break;
+ case CAM_ISP_CTX_EVENT_SUBMIT:
+ case CAM_ISP_CTX_EVENT_APPLY:
+ if (!req) {
+ CAM_ERR(CAM_ISP, "Invalid arg for event %d", event);
+ return;
+ }
+ break;
+ default:
+ break;
+ }
+
+ iterator = INC_HEAD(&ctx_isp->event_record_head[event],
+ CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES);
+ cam_common_util_get_curr_timestamp(&cur_time);
+
+ if (req) {
+ req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+ ctx_isp->event_record[event][iterator].req_id =
+ req->request_id;
+ req_isp->event_timestamp[event].tv_sec =
+ cur_time.tv_sec;
+ req_isp->event_timestamp[event].tv_usec =
+ cur_time.tv_usec;
+ } else {
+ ctx_isp->event_record[event][iterator].req_id = 0;
+ }
+ ctx_isp->event_record[event][iterator].timestamp.tv_sec =
+ cur_time.tv_sec;
+ ctx_isp->event_record[event][iterator].timestamp.tv_usec =
+ cur_time.tv_usec;
+}
+
+static void __cam_isp_ctx_dump_event_record(
+ struct cam_isp_context *ctx_isp,
+ uintptr_t cpu_addr,
+ size_t buf_len,
+ uint32_t *offset)
+{
+ int i, j;
+ struct cam_isp_context_event_record *record;
+ int index;
+ uint64_t state_head;
+ struct cam_isp_context_dump_header *hdr;
+ uint64_t *addr, *start;
+ uint8_t *dst;
+
+ if (!cpu_addr || !buf_len || !offset) {
+ CAM_ERR(CAM_ISP, "Invalid args");
+ return;
+ }
+ for (i = 0; i < CAM_ISP_CTX_EVENT_MAX; i++) {
+ state_head = atomic64_read(&ctx_isp->event_record_head[i]);
+ dst = (char *)cpu_addr + *offset;
+ hdr = (struct cam_isp_context_dump_header *)dst;
+ snprintf(hdr->tag,
+ CAM_ISP_CONTEXT_DUMP_TAG_MAX_LEN, "ISP_EVT_%s:",
+ __cam_isp_evt_val_to_type(i));
+ hdr->word_size = sizeof(uint64_t);
+ addr = (uint64_t *)(dst +
+ sizeof(struct cam_isp_context_dump_header));
+ start = addr;
+ for (j = CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES - 1; j >= 0;
+ j--) {
+ index = (((state_head - j) +
+ CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES) %
+ CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES);
+ record = &ctx_isp->event_record[i][index];
+ *addr++ = record->req_id;
+ *addr++ = record->timestamp.tv_sec;
+ *addr++ = record->timestamp.tv_usec;
+ }
+ hdr->size = hdr->word_size * (addr - start);
+ *offset += hdr->size +
+ sizeof(struct cam_isp_context_dump_header);
+ }
+}
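The event record is a per-event ring: event_record_head only grows, and the dump above replays the last CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES entries oldest-first by mapping (head - j) back into the array. A minimal sketch of that index math, assuming the head has already advanced past one full ring:

    /*
     * Sketch of the ring index used above. With max = 20 and head = 25,
     * j = 19..0 visits indices 6, 7, ..., 19, 0, 1, ..., 5, i.e. the
     * oldest retained entry first and the most recent one last.
     */
    static int example_event_record_index(uint64_t head, int j, int max)
    {
        return ((head - j) + max) % max;
    }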
+
static void __cam_isp_ctx_update_state_monitor_array(
struct cam_isp_context *ctx_isp,
enum cam_isp_hw_event_type hw_event,
@@ -44,7 +154,8 @@ static void __cam_isp_ctx_update_state_monitor_array(
{
int iterator = 0;
- iterator = INC_STATE_MONITOR_HEAD(&ctx_isp->state_monitor_head);
+ iterator = INC_HEAD(&ctx_isp->state_monitor_head,
+ CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES);
ctx_isp->cam_isp_ctx_state_monitor[iterator].curr_state =
curr_state;
ctx_isp->cam_isp_ctx_state_monitor[iterator].next_state =
@@ -160,7 +271,11 @@ static void __cam_isp_ctx_dump_state_monitor_array(
}
}
-static void cam_isp_ctx_dump_req(struct cam_isp_ctx_req *req_isp)
+static void cam_isp_ctx_dump_req(struct cam_isp_ctx_req *req_isp,
+ uintptr_t cpu_addr,
+ size_t buf_len,
+ uint32_t *offset,
+ bool dump_to_buff)
{
int i = 0, rc = 0;
size_t len = 0;
@@ -168,6 +283,7 @@ static void cam_isp_ctx_dump_req(struct cam_isp_ctx_req *req_isp)
uint32_t *buf_start, *buf_end;
size_t remain_len = 0;
bool need_put = false;
+ struct cam_cdm_cmd_buf_dump_info dump_info;
for (i = 0; i < req_isp->num_cfg; i++) {
rc = cam_packet_util_get_cmd_mem_addr(
@@ -202,7 +318,21 @@ put:
req_isp->cfg[i].offset);
buf_end = (uint32_t *)((uint8_t *) buf_start +
req_isp->cfg[i].len - 1);
- cam_cdm_util_dump_cmd_buf(buf_start, buf_end);
+ if (dump_to_buff) {
+ if (!cpu_addr || !offset || !buf_len) {
+ CAM_ERR(CAM_ISP, "Invalid args");
+ break;
+ }
+ dump_info.src_start = buf_start;
+ dump_info.src_end = buf_end;
+ dump_info.dst_start = cpu_addr;
+ dump_info.dst_offset = *offset;
+ dump_info.dst_max_size = buf_len;
+ cam_cdm_util_dump_cmd_bufs_v2(&dump_info);
+ *offset = dump_info.dst_offset;
+ } else {
+ cam_cdm_util_dump_cmd_buf(buf_start, buf_end);
+ }
if (cam_mem_put_cpu_buf(req_isp->cfg[i].handle))
CAM_WARN(CAM_ISP, "Failed to put cpu buf: 0x%x",
req_isp->cfg[i].handle);
@@ -216,6 +346,7 @@ static int __cam_isp_ctx_enqueue_request_in_order(
struct cam_ctx_request *req_current;
struct cam_ctx_request *req_prev;
struct list_head temp_list;
+ struct cam_isp_context *ctx_isp;
INIT_LIST_HEAD(&temp_list);
spin_lock_bh(&ctx->lock);
@@ -246,6 +377,9 @@ static int __cam_isp_ctx_enqueue_request_in_order(
}
}
}
+ ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
+ __cam_isp_ctx_update_event_record(ctx_isp,
+ CAM_ISP_CTX_EVENT_SUBMIT, req);
spin_unlock_bh(&ctx->lock);
return 0;
}
@@ -490,7 +624,6 @@ static int __cam_isp_ctx_handle_buf_done_in_activated_state(
req_isp->fence_map_out[j].resource_handle,
req_isp->fence_map_out[j].sync_id,
ctx->ctx_id);
-
rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
CAM_SYNC_STATE_SIGNALED_SUCCESS);
if (rc)
@@ -560,6 +693,8 @@ static int __cam_isp_ctx_handle_buf_done_in_activated_state(
CAM_ISP_HW_EVENT_DONE,
ctx_isp->substate_activated,
ctx_isp->substate_activated);
+ __cam_isp_ctx_update_event_record(ctx_isp,
+ CAM_ISP_CTX_EVENT_BUFDONE, req);
} else {
list_del_init(&req->list);
list_add_tail(&req->list, &ctx->free_req_list);
@@ -574,6 +709,8 @@ static int __cam_isp_ctx_handle_buf_done_in_activated_state(
CAM_ISP_HW_EVENT_DONE,
ctx_isp->substate_activated,
ctx_isp->substate_activated);
+ __cam_isp_ctx_update_event_record(ctx_isp,
+ CAM_ISP_CTX_EVENT_BUFDONE, req);
}
if (ctx_isp->active_req_cnt && ctx_isp->irq_delay_detect) {
@@ -742,6 +879,10 @@ static int __cam_isp_ctx_reg_upd_in_activated_state(
CAM_DBG(CAM_REQ,
"move request %lld to active list(cnt = %d), ctx %u",
req->request_id, ctx_isp->active_req_cnt, ctx->ctx_id);
+
+ __cam_isp_ctx_update_event_record(ctx_isp,
+ CAM_ISP_CTX_EVENT_RUP, req);
+
} else {
/* no io config, so the request is completed. */
list_add_tail(&req->list, &ctx->free_req_list);
@@ -776,7 +917,7 @@ static int __cam_isp_ctx_notify_sof_in_activated_state(
int rc = 0;
struct cam_req_mgr_trigger_notify notify;
struct cam_context *ctx = ctx_isp->base;
- struct cam_ctx_request *req;
+ struct cam_ctx_request *req = NULL;
uint64_t request_id = 0;
/*
@@ -807,6 +948,8 @@ static int __cam_isp_ctx_notify_sof_in_activated_state(
ctx_isp->req_info.reported_req_id = request_id;
ctx_isp->req_info.last_reported_id_time_stamp =
jiffies_to_msecs(jiffies);
+ __cam_isp_ctx_update_event_record(ctx_isp,
+ CAM_ISP_CTX_EVENT_EPOCH, req);
break;
}
}
@@ -973,6 +1116,8 @@ static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+ __cam_isp_ctx_update_event_record(ctx_isp,
+ CAM_ISP_CTX_EVENT_EPOCH, NULL);
goto end;
}
@@ -1018,6 +1163,8 @@ static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
}
__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
CAM_REQ_MGR_SOF_EVENT_ERROR);
+ __cam_isp_ctx_update_event_record(ctx_isp,
+ CAM_ISP_CTX_EVENT_EPOCH, req);
ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
CAM_DBG(CAM_ISP, "next substate %d",
@@ -1128,7 +1275,8 @@ static int __cam_isp_ctx_epoch_in_bubble_applied(
__cam_isp_ctx_dump_state_monitor_array(ctx_isp, true);
__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
CAM_REQ_MGR_SOF_EVENT_SUCCESS);
-
+ __cam_isp_ctx_update_event_record(ctx_isp,
+ CAM_ISP_CTX_EVENT_EPOCH, NULL);
ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
goto end;
}
@@ -1175,13 +1323,20 @@ static int __cam_isp_ctx_epoch_in_bubble_applied(
jiffies_to_msecs(jiffies);
__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
CAM_REQ_MGR_SOF_EVENT_ERROR);
- } else
+ __cam_isp_ctx_update_event_record(ctx_isp,
+ CAM_ISP_CTX_EVENT_EPOCH, req);
+ } else {
__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
CAM_REQ_MGR_SOF_EVENT_SUCCESS);
- } else
+ __cam_isp_ctx_update_event_record(ctx_isp,
+ CAM_ISP_CTX_EVENT_EPOCH, NULL);
+ }
+ } else {
__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
CAM_REQ_MGR_SOF_EVENT_SUCCESS);
-
+ __cam_isp_ctx_update_event_record(ctx_isp,
+ CAM_ISP_CTX_EVENT_EPOCH, NULL);
+ }
ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
end:
@@ -1253,7 +1408,7 @@ static int __cam_isp_ctx_handle_error(struct cam_isp_context *ctx_isp,
req_isp = (struct cam_isp_ctx_req *) req_to_dump->req_priv;
if (error_event_data->enable_reg_dump)
- cam_isp_ctx_dump_req(req_isp);
+ cam_isp_ctx_dump_req(req_isp, 0, 0, NULL, false);
list_for_each_entry_safe(req, req_temp,
&ctx->active_req_list, list) {
@@ -1916,6 +2071,8 @@ static int __cam_isp_ctx_apply_req_in_activated_state(
CAM_DBG(CAM_ISP, "new substate state %d, applied req %lld",
next_state,
ctx_isp->req_info.last_applied_req_id);
+ __cam_isp_ctx_update_event_record(ctx_isp,
+ CAM_ISP_CTX_EVENT_APPLY, req);
spin_unlock_bh(&ctx->lock);
}
end:
@@ -1971,6 +2128,160 @@ static int __cam_isp_ctx_apply_req_in_bubble(
return rc;
}
+static int __cam_isp_ctx_dump_req_info(struct cam_context *ctx,
+ struct cam_ctx_request *req,
+ uintptr_t cpu_addr,
+ size_t buf_len,
+ uint32_t *offset)
+{
+ int rc = 0;
+ struct cam_isp_ctx_req *req_isp;
+ struct cam_isp_context *ctx_isp;
+ int i;
+ struct cam_isp_context_dump_header *hdr;
+ int32_t *addr, *start;
+ uint8_t *dst;
+
+ if (!req || !ctx || !offset || !cpu_addr || !buf_len) {
+ CAM_ERR(CAM_ISP, "Invalid parameters %pK %pK %u %pK %pK %pK ",
+ req, ctx, offset, cpu_addr, buf_len);
+ return -EINVAL;
+ }
+ req_isp = (struct cam_isp_ctx_req *)req->req_priv;
+ ctx_isp = (struct cam_isp_context *)ctx->ctx_priv;
+ dst = (char *)cpu_addr + *offset;
+ hdr = (struct cam_isp_context_dump_header *)dst;
+ hdr->word_size = sizeof(int32_t);
+ snprintf(hdr->tag, CAM_ISP_CONTEXT_DUMP_TAG_MAX_LEN,
+ "ISP_OUT_FENCE:");
+ addr = (int32_t *)(dst + sizeof(struct cam_isp_context_dump_header));
+ start = addr;
+ for (i = 0; i < req_isp->num_fence_map_out; i++) {
+ if (req_isp->fence_map_out[i].sync_id != -1) {
+ *addr++ = req_isp->fence_map_out[i].resource_handle;
+ *addr++ = req_isp->fence_map_out[i].sync_id;
+ }
+ }
+ hdr->size = hdr->word_size * (addr - start);
+ *offset += hdr->size + sizeof(struct cam_isp_context_dump_header);
+ cam_isp_ctx_dump_req(req_isp, cpu_addr, buf_len,
+ offset, true);
+ return rc;
+}
+
+static int __cam_isp_ctx_dump_in_top_state(struct cam_context *ctx,
+ struct cam_req_mgr_dump_info *dump_info)
+{
+ struct cam_ctx_request *req = NULL;
+ struct cam_isp_ctx_req *req_isp;
+ struct cam_ctx_request *req_temp;
+ struct cam_hw_dump_args dump_args;
+ struct cam_isp_context *ctx_isp;
+ uint64_t diff = 0;
+ struct timeval cur_time;
+ int rc = 0;
+ uintptr_t cpu_addr;
+ size_t buf_len;
+ struct cam_isp_context_dump_header *hdr;
+ uint64_t *addr, *start;
+ uint8_t *dst;
+ bool is_dump_only_event_record = false;
+
+ list_for_each_entry_safe(req, req_temp,
+ &ctx->active_req_list, list) {
+ if (req->request_id == dump_info->req_id) {
+ CAM_DBG(CAM_ISP, "isp active dumping req: %lld",
+ dump_info->req_id);
+ goto hw_dump;
+ }
+ }
+ list_for_each_entry_safe(req, req_temp,
+ &ctx->wait_req_list, list) {
+ if (req->request_id == dump_info->req_id) {
+ CAM_ERR(CAM_ISP, "isp dumping wait req: %lld",
+ dump_info->req_id);
+ goto hw_dump;
+ }
+ }
+ return rc;
+hw_dump:
+ if (req) {
+ req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+ cam_common_util_get_curr_timestamp(&cur_time);
+ diff = cam_common_util_get_time_diff(&cur_time,
+ &req_isp->event_timestamp[CAM_ISP_CTX_EVENT_APPLY]);
+ if (diff < CAM_ISP_CTX_RESPONSE_TIME_THRESHOLD) {
+ CAM_INFO(CAM_ISP, "req %lld found no error",
+ req->request_id);
+ is_dump_only_event_record = true;
+ }
+ ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
+ rc = cam_mem_get_cpu_buf(dump_info->buf_handle,
+ &cpu_addr, &buf_len);
+ if (!cpu_addr || !buf_len || rc) {
+ CAM_ERR(CAM_ISP,
+ "lnvalid addr %u len %zu rc %d",
+ dump_info->buf_handle, buf_len, rc);
+ return rc;
+ }
+ /* The isp sw information is assumed to need at most 2048 bytes */
+ if ((buf_len - dump_info->offset) <
+ CAM_ISP_CTX_DUMP_MIN_LENGTH) {
+ CAM_ERR(CAM_ISP, "Dump buffer exhaust %zu %u",
+ buf_len, dump_info->offset);
+ goto end;
+ }
+ dst = (char *)cpu_addr + dump_info->offset;
+ hdr = (struct cam_isp_context_dump_header *)dst;
+ snprintf(hdr->tag, CAM_ISP_CONTEXT_DUMP_TAG_MAX_LEN,
+ "ISP_CTX_DUMP:");
+ hdr->word_size = sizeof(uint64_t);
+ addr = (uint64_t *)(dst +
+ sizeof(struct cam_isp_context_dump_header));
+ start = addr;
+ *addr++ = req->request_id;
+ *addr++ = req_isp->event_timestamp
+ [CAM_ISP_CTX_EVENT_APPLY].tv_sec;
+ *addr++ = req_isp->event_timestamp
+ [CAM_ISP_CTX_EVENT_APPLY].tv_usec;
+ *addr++ = cur_time.tv_sec;
+ *addr++ = cur_time.tv_usec;
+ hdr->size = hdr->word_size * (addr - start);
+ dump_info->offset += hdr->size +
+ sizeof(struct cam_isp_context_dump_header);
+
+ spin_lock_bh(&ctx->lock);
+ __cam_isp_ctx_dump_event_record(ctx_isp, cpu_addr,
+ buf_len, &dump_info->offset);
+ spin_unlock_bh(&ctx->lock);
+ if (is_dump_only_event_record)
+ goto end;
+ rc = __cam_isp_ctx_dump_req_info(ctx, req, cpu_addr,
+ buf_len, &dump_info->offset);
+ if (rc) {
+ CAM_ERR(CAM_ISP, "Dump Req info fail %lld",
+ req->request_id);
+ goto end;
+ }
+ if (ctx->hw_mgr_intf->hw_dump) {
+ dump_args.offset = dump_info->offset;
+ dump_args.request_id = dump_info->req_id;
+ dump_args.buf_handle = dump_info->buf_handle;
+ dump_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
+ rc = ctx->hw_mgr_intf->hw_dump(
+ ctx->hw_mgr_intf->hw_mgr_priv,
+ &dump_args);
+ dump_info->offset = dump_args.offset;
+ }
+end:
+ rc = cam_mem_put_cpu_buf(dump_info->buf_handle);
+ if (rc)
+ CAM_ERR(CAM_ISP, "Cpu put failed handle %u",
+ dump_info->buf_handle);
+ }
+ return rc;
+}
+
static int __cam_isp_ctx_flush_req(struct cam_context *ctx,
struct list_head *req_list, struct cam_req_mgr_flush_request *flush_req)
{
@@ -2510,7 +2821,7 @@ static int __cam_isp_ctx_rdi_only_sof_in_bubble_state(
static int __cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state(
struct cam_isp_context *ctx_isp, void *evt_data)
{
- struct cam_ctx_request *req;
+ struct cam_ctx_request *req = NULL;
struct cam_context *ctx = ctx_isp->base;
struct cam_isp_ctx_req *req_isp = NULL;
struct cam_req_mgr_trigger_notify notify;
@@ -2572,6 +2883,8 @@ static int __cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state(
__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+ __cam_isp_ctx_update_event_record(ctx_isp,
+ CAM_ISP_CTX_EVENT_RUP, req);
CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
return 0;
@@ -2579,6 +2892,8 @@ error:
/* Send SOF event as idle frame*/
__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+ __cam_isp_ctx_update_event_record(ctx_isp,
+ CAM_ISP_CTX_EVENT_RUP, NULL);
/*
* There is no request in the pending list, move the sub state machine
@@ -3839,6 +4154,7 @@ static struct cam_ctx_ops
.unlink = __cam_isp_ctx_unlink_in_acquired,
.get_dev_info = __cam_isp_ctx_get_dev_info_in_acquired,
.flush_req = __cam_isp_ctx_flush_req_in_top_state,
+ .dump_req = __cam_isp_ctx_dump_in_top_state,
},
.irq_ops = NULL,
.pagefault_ops = cam_isp_context_dump_active_request,
@@ -3854,6 +4170,7 @@ static struct cam_ctx_ops
.crm_ops = {
.unlink = __cam_isp_ctx_unlink_in_ready,
.flush_req = __cam_isp_ctx_flush_req_in_ready,
+ .dump_req = __cam_isp_ctx_dump_in_top_state,
},
.irq_ops = NULL,
.pagefault_ops = cam_isp_context_dump_active_request,
@@ -3871,6 +4188,7 @@ static struct cam_ctx_ops
.apply_req = __cam_isp_ctx_apply_req,
.flush_req = __cam_isp_ctx_flush_req_in_top_state,
.process_evt = __cam_isp_ctx_process_evt,
+ .dump_req = __cam_isp_ctx_dump_in_top_state,
},
.irq_ops = __cam_isp_ctx_handle_irq_in_activated,
.pagefault_ops = cam_isp_context_dump_active_request,
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
index cb73252363db..5620e148cb83 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
@@ -44,6 +44,21 @@
*/
#define CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES 20
+/*
+ * Response time in ms threshold beyond which a request is not expected
+ * to be with IFE hw
+ */
+#define CAM_ISP_CTX_RESPONSE_TIME_THRESHOLD 100000
+
+/* Min length for dumping isp context */
+#define CAM_ISP_CTX_DUMP_MIN_LENGTH 2048
+
+/* Maximum entries in event record */
+#define CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES 20
+
+/* Maximum length of tag while dumping */
+#define CAM_ISP_CONTEXT_DUMP_TAG_MAX_LEN 32
+
/* forward declaration */
struct cam_isp_context;
@@ -67,6 +82,19 @@ enum cam_isp_ctx_activated_substate {
};
/**
+ * enum cam_isp_ctx_event - Events recorded for a request
+ *
+ */
+enum cam_isp_ctx_event {
+ CAM_ISP_CTX_EVENT_SUBMIT,
+ CAM_ISP_CTX_EVENT_APPLY,
+ CAM_ISP_CTX_EVENT_EPOCH,
+ CAM_ISP_CTX_EVENT_RUP,
+ CAM_ISP_CTX_EVENT_BUFDONE,
+ CAM_ISP_CTX_EVENT_MAX,
+};
+
+/**
* struct cam_isp_ctx_irq_ops - Function table for handling IRQ callbacks
*
* @irq_ops: Array of handle function pointers.
@@ -92,6 +120,8 @@ struct cam_isp_ctx_irq_ops {
* @bubble_report: Flag to track if bubble report is active on
* current request
* @hw_update_data: HW update data for this request
+ * @event_timestamp: Timestamp for different stage of request
+ * @bubble_detected: Flag to indicate if bubble detected
*
*/
struct cam_isp_ctx_req {
@@ -107,6 +137,8 @@ struct cam_isp_ctx_req {
uint32_t num_acked;
int32_t bubble_report;
struct cam_isp_prepare_hw_update_data hw_update_data;
+ struct timeval event_timestamp
+ [CAM_ISP_CTX_EVENT_MAX];
bool bubble_detected;
};
@@ -156,6 +188,21 @@ struct cam_isp_context_req_id_info {
int64_t last_reported_id_time_stamp;
};
+
+/**
+ * struct cam_isp_context_event_record - ISP context record of the
+ * last CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES request ids per event
+ * (submit, apply, epoch, rup, buf done)
+ *
+ * @req_id: Request id of the event
+ * @timestamp: Timestamp for the event
+ *
+ */
+struct cam_isp_context_event_record {
+ int64_t req_id;
+ struct timeval timestamp;
+};
+
/**
* struct cam_isp_context - ISP context object
*
@@ -180,6 +227,8 @@ struct cam_isp_context_req_id_info {
* @req_info Request id information about last applied,
* reported and buf done
* @cam_isp_ctx_state_monitor: State monitoring array
+ * @event_record_head: Write index to the event record array
+ * @event_record: Event record array
* @rdi_only_context: Get context type information.
* true, if context is rdi only context
* @hw_acquired: Indicate whether HW resources are acquired
@@ -213,6 +262,10 @@ struct cam_isp_context {
struct cam_isp_context_state_monitor cam_isp_ctx_state_monitor[
CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES];
struct cam_isp_context_req_id_info req_info;
+ atomic64_t event_record_head[
+ CAM_ISP_CTX_EVENT_MAX];
+ struct cam_isp_context_event_record event_record[
+ CAM_ISP_CTX_EVENT_MAX][CAM_ISP_CTX_EVENT_RECORD_MAX_ENTRIES];
bool rdi_only_context;
bool hw_acquired;
bool init_received;
@@ -223,6 +276,19 @@ struct cam_isp_context {
};
/**
+ * struct cam_isp_context_dump_header - ISP context dump header
+ * @tag: Tag name for the header
+ * @word_size: Size of word
+ * @size: Size of data
+ *
+ */
+struct cam_isp_context_dump_header {
+ char tag[CAM_ISP_CONTEXT_DUMP_TAG_MAX_LEN];
+ uint64_t size;
+ uint32_t word_size;
+};
+
+/**
* cam_isp_context_init()
*
* @brief: Initialization function for the ISP context
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index 30c0d6efac41..199bf24e8e5a 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -2958,8 +2958,11 @@ static int cam_ife_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args)
struct cam_isp_stop_args stop_isp;
struct cam_ife_hw_mgr_ctx *ctx;
struct cam_ife_hw_mgr_res *hw_mgr_res;
+ struct cam_hw_intf *hw_intf;
struct cam_isp_resource_node *rsrc_node = NULL;
- uint32_t i, camif_debug;
+ uint32_t i, j, camif_debug;
+ uint32_t enable_dmi_dump;
+ struct cam_isp_hw_get_cmd_update cmd_update;
if (!hw_mgr_priv || !start_isp) {
CAM_ERR(CAM_ISP, "Invalid arguments");
@@ -3017,6 +3020,24 @@ static int cam_ife_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args)
}
}
+ enable_dmi_dump = g_ife_hw_mgr.debug_cfg.enable_dmi_dump;
+ for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
+ hw_mgr_res = &ctx->res_list_ife_out[i];
+ for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
+ if (!hw_mgr_res->hw_res[j])
+ continue;
+ hw_intf = hw_mgr_res->hw_res[j]->hw_intf;
+ cmd_update.res = hw_mgr_res->hw_res[j];
+ cmd_update.cmd_type =
+ CAM_ISP_HW_CMD_SET_STATS_DMI_DUMP;
+ cmd_update.data = &enable_dmi_dump;
+ hw_intf->hw_ops.process_cmd(hw_intf->hw_priv,
+ CAM_ISP_HW_CMD_SET_STATS_DMI_DUMP,
+ &cmd_update,
+ sizeof(cmd_update));
+ }
+ }
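The enable flag latched into each bus output resource here comes from the ife_dmi_dump debugfs node created later in this patch; a hedged usage sketch (the debugfs mount point is an assumption):

    /*
     * Usage sketch: enable stats DMI dumps before streaming on, e.g.
     *
     *   echo 1 > /sys/kernel/debug/camera_ife/ife_dmi_dump
     *
     * The value is read here at stream start and sent to every bus
     * output resource via CAM_ISP_HW_CMD_SET_STATS_DMI_DUMP.
     */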
+
rc = cam_ife_hw_mgr_init_hw(ctx);
if (rc) {
CAM_ERR(CAM_ISP, "Init failed");
@@ -4576,6 +4597,114 @@ static int cam_ife_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
return rc;
}
+static int cam_ife_mgr_dump(void *hw_mgr_priv, void *args)
+{
+ struct cam_isp_hw_dump_args isp_hw_dump_args;
+ struct cam_hw_dump_args *dump_args = (struct cam_hw_dump_args *)args;
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ struct cam_hw_intf *hw_intf;
+ struct cam_ife_hw_mgr_ctx *ife_ctx = (struct cam_ife_hw_mgr_ctx *)
+ dump_args->ctxt_to_hw_map;
+ int i;
+ int rc = 0;
+
+ rc = cam_mem_get_cpu_buf(dump_args->buf_handle,
+ &isp_hw_dump_args.cpu_addr,
+ &isp_hw_dump_args.buf_len);
+ if (!isp_hw_dump_args.cpu_addr ||
+ !isp_hw_dump_args.buf_len || rc) {
+ CAM_ERR(CAM_ISP,
+ "lnvalid addr %u len %zu rc %d",
+ dump_args->buf_handle,
+ isp_hw_dump_args.buf_len,
+ rc);
+ return rc;
+ }
+ isp_hw_dump_args.offset = dump_args->offset;
+ isp_hw_dump_args.req_id = dump_args->request_id;
+
+ list_for_each_entry(hw_mgr_res, &ife_ctx->res_list_ife_csid, list) {
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!hw_mgr_res->hw_res[i])
+ continue;
+ hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+ switch (hw_mgr_res->hw_res[i]->res_id) {
+ case CAM_IFE_PIX_PATH_RES_RDI_0:
+ case CAM_IFE_PIX_PATH_RES_RDI_1:
+ case CAM_IFE_PIX_PATH_RES_RDI_2:
+ case CAM_IFE_PIX_PATH_RES_RDI_3:
+ if (ife_ctx->is_rdi_only_context &&
+ hw_intf->hw_ops.process_cmd) {
+ rc = hw_intf->hw_ops.process_cmd(
+ hw_intf->hw_priv,
+ CAM_ISP_HW_CMD_DUMP_HW,
+ &isp_hw_dump_args,
+ sizeof(struct
+ cam_isp_hw_dump_args));
+ }
+ break;
+ case CAM_IFE_PIX_PATH_RES_IPP:
+ if (hw_intf->hw_ops.process_cmd) {
+ rc = hw_intf->hw_ops.process_cmd(
+ hw_intf->hw_priv,
+ CAM_ISP_HW_CMD_DUMP_HW,
+ &isp_hw_dump_args,
+ sizeof(struct
+ cam_isp_hw_dump_args));
+ }
+ break;
+ default:
+ CAM_DBG(CAM_ISP, "not a valid res %d",
+ hw_mgr_res->res_id);
+ break;
+ }
+ }
+ }
+ list_for_each_entry(hw_mgr_res, &ife_ctx->res_list_ife_src, list) {
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!hw_mgr_res->hw_res[i])
+ continue;
+ hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+ switch (hw_mgr_res->res_id) {
+ case CAM_ISP_HW_VFE_IN_RDI0:
+ case CAM_ISP_HW_VFE_IN_RDI1:
+ case CAM_ISP_HW_VFE_IN_RDI2:
+ case CAM_ISP_HW_VFE_IN_RDI3:
+ if (ife_ctx->is_rdi_only_context &&
+ hw_intf->hw_ops.process_cmd) {
+ rc = hw_intf->hw_ops.process_cmd(
+ hw_intf->hw_priv,
+ CAM_ISP_HW_CMD_DUMP_HW,
+ &isp_hw_dump_args,
+ sizeof(struct
+ cam_isp_hw_dump_args));
+ }
+ break;
+ case CAM_ISP_HW_VFE_IN_CAMIF:
+ if (hw_intf->hw_ops.process_cmd) {
+ rc = hw_intf->hw_ops.process_cmd(
+ hw_intf->hw_priv,
+ CAM_ISP_HW_CMD_DUMP_HW,
+ &isp_hw_dump_args,
+ sizeof(struct
+ cam_isp_hw_dump_args));
+ }
+ break;
+ default:
+ CAM_DBG(CAM_ISP, "not a valid res %d",
+ hw_mgr_res->res_id);
+ break;
+ }
+ }
+ }
+ dump_args->offset = isp_hw_dump_args.offset;
+ rc = cam_mem_put_cpu_buf(dump_args->buf_handle);
+ if (rc)
+ CAM_ERR(CAM_FD, "Cpu put failed handle %u",
+ dump_args->buf_handle);
+ return rc;
+}
+
static int cam_ife_mgr_cmd_get_sof_timestamp(
struct cam_ife_hw_mgr_ctx *ife_ctx,
uint64_t *time_stamp,
@@ -5967,6 +6096,27 @@ DEFINE_SIMPLE_ATTRIBUTE(cam_ife_camif_debug,
cam_ife_get_camif_debug,
cam_ife_set_camif_debug, "%16llu");
+static int cam_ife_set_bus_dmi_debug(void *data, u64 val)
+{
+ g_ife_hw_mgr.debug_cfg.enable_dmi_dump = val;
+ CAM_DBG(CAM_ISP,
+ "Set bus enable_dmi_dump_status value :%lld", val);
+ return 0;
+}
+
+static int cam_ife_get_bus_dmi_debug(void *data, u64 *val)
+{
+ *val = g_ife_hw_mgr.debug_cfg.enable_dmi_dump;
+ CAM_DBG(CAM_ISP,
+ "Get bus enable_dmi_dump_status value :%lld",
+ g_ife_hw_mgr.debug_cfg.enable_dmi_dump);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cam_ife_bus_dmi_debug,
+ cam_ife_get_bus_dmi_debug,
+ cam_ife_set_bus_dmi_debug, "%16llu");
+
static int cam_ife_hw_mgr_debug_register(void)
{
g_ife_hw_mgr.debug_cfg.dentry = debugfs_create_dir("camera_ife",
@@ -6008,6 +6158,15 @@ static int cam_ife_hw_mgr_debug_register(void)
CAM_ERR(CAM_ISP, "failed to create cam_ife_camif_debug");
goto err;
}
+
+ if (!debugfs_create_file("ife_dmi_dump",
+ 0644,
+ g_ife_hw_mgr.debug_cfg.dentry, NULL,
+ &cam_ife_bus_dmi_debug)) {
+ CAM_ERR(CAM_ISP, "failed to create cam_ife_dmi_dump");
+ goto err;
+ }
+
g_ife_hw_mgr.debug_cfg.enable_recovery = 0;
return 0;
@@ -6190,6 +6349,7 @@ int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl)
hw_mgr_intf->hw_config = cam_ife_mgr_config_hw;
hw_mgr_intf->hw_cmd = cam_ife_mgr_cmd;
hw_mgr_intf->hw_reset = cam_ife_mgr_reset;
+ hw_mgr_intf->hw_dump = cam_ife_mgr_dump;
if (iommu_hdl)
*iommu_hdl = g_ife_hw_mgr.mgr_common.img_iommu_hdl;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
index 0e6d79b75232..73066c3e0e15 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
@@ -89,6 +89,7 @@ struct ctx_base_info {
* @enable_recovery: enable recovery
* @enable_diag_sensor_status: enable sensor diagnosis status
* @enable_reg_dump: enable register dump on error
+ * @enable_dmi_dump: enable stats dmi and cfg reg dump
*
*/
struct cam_ife_hw_mgr_debug {
@@ -97,6 +98,7 @@ struct cam_ife_hw_mgr_debug {
uint32_t enable_recovery;
uint32_t camif_debug;
uint32_t enable_reg_dump;
+ uint32_t enable_dmi_dump;
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
index a3fce7733503..43eb5321f4d8 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -3257,7 +3257,54 @@ static int cam_ife_csid_set_sensor_dimension(
csid_hw->rdi_path_config[i].height);
}
}
+ return 0;
+}
+static int cam_ife_csid_dump_hw(
+ struct cam_ife_csid_hw *csid_hw, void *cmd_args)
+{
+ struct cam_hw_soc_info *soc_info;
+ struct cam_isp_hw_dump_args *dump_args =
+ (struct cam_isp_hw_dump_args *)cmd_args;
+ int i;
+ uint32_t *addr, *start;
+ uint32_t num_reg;
+ struct cam_isp_hw_dump_header *hdr;
+ uint8_t *dst;
+
+ if (!dump_args->cpu_addr || !dump_args->buf_len) {
+ CAM_ERR(CAM_ISP,
+ "lnvalid len %zu ", dump_args->buf_len);
+ return -EINVAL;
+ }
+ soc_info = &csid_hw->hw_info->soc_info;
+ /* Reserve 100 bytes for the meta info of the dump data */
+ if ((dump_args->buf_len - dump_args->offset) <
+ soc_info->reg_map[0].size + 100) {
+ CAM_ERR(CAM_ISP, "Dump buffer exhaust");
+ return 0;
+ }
+ dst = (char *)dump_args->cpu_addr + dump_args->offset;
+ hdr = (struct cam_isp_hw_dump_header *)dst;
+ snprintf(hdr->tag, CAM_ISP_HW_DUMP_TAG_MAX_LEN,
+ "CSID_REG:");
+ addr = (uint32_t *)(dst + sizeof(struct cam_isp_hw_dump_header));
+
+ start = addr;
+ num_reg = soc_info->reg_map[0].size/4;
+ hdr->word_size = sizeof(uint32_t);
+ *addr = soc_info->index;
+ addr++;
+ for (i = 0; i < num_reg; i++) {
+ addr[0] = soc_info->mem_block[0]->start + (i*4);
+ addr[1] = cam_io_r(soc_info->reg_map[0].mem_base
+ + (i*4));
+ addr += 2;
+ }
+ hdr->size = hdr->word_size * (addr - start);
+ dump_args->offset += hdr->size +
+ sizeof(struct cam_isp_hw_dump_header);
+ CAM_DBG(CAM_ISP, "offset %d", dump_args->offset);
return 0;
}
@@ -3301,6 +3348,9 @@ static int cam_ife_csid_process_cmd(void *hw_priv,
case CAM_IFE_CSID_SET_SENSOR_DIMENSION_CFG:
rc = cam_ife_csid_set_sensor_dimension(csid_hw, cmd_args);
break;
+ case CAM_ISP_HW_CMD_DUMP_HW:
+ rc = cam_ife_csid_dump_hw(csid_hw, cmd_args);
+ break;
default:
CAM_ERR(CAM_ISP, "CSID:%d unsupported cmd:%d",
csid_hw->hw_intf->hw_idx, cmd_type);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
index d90030d9ed16..a793fa76bee2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
@@ -22,6 +22,9 @@
#define CAM_ISP_FPS_60 60
+/* Maximum length of tag while dumping */
+#define CAM_ISP_HW_DUMP_TAG_MAX_LEN 32
+
/*
* struct cam_isp_timestamp:
*
@@ -110,6 +113,8 @@ enum cam_isp_hw_cmd_type {
CAM_ISP_HW_CMD_FE_UPDATE_BUS_RD,
CAM_ISP_HW_CMD_GET_IRQ_REGISTER_DUMP,
CAM_ISP_HW_CMD_FPS_CONFIG,
+ CAM_ISP_HW_CMD_DUMP_HW,
+ CAM_ISP_HW_CMD_SET_STATS_DMI_DUMP,
CAM_ISP_HW_CMD_MAX,
};
@@ -252,4 +257,39 @@ struct cam_isp_hw_dual_isp_update_args {
struct cam_isp_resource_node *res;
struct cam_isp_dual_config *dual_cfg;
};
+
+/*
+ * struct cam_isp_hw_dump_args:
+ *
+ * @Brief: isp hw dump args
+ *
+ * @req_id:         request id
+ * @cpu_addr:       kernel mapping of the dump buffer
+ * @buf_len:        total length of the dump buffer
+ * @offset:         current write offset into the buffer
+ * @ctxt_to_hw_map: context to hw mapping
+ */
+struct cam_isp_hw_dump_args {
+ uint64_t req_id;
+ uintptr_t cpu_addr;
+ size_t buf_len;
+ uint32_t offset;
+ void *ctxt_to_hw_map;
+};
+
+/**
+ * struct cam_isp_hw_dump_header - ISP context dump header
+ *
+ * @Brief: isp hw dump header
+ *
+ * @tag: Tag name for the header
+ * @word_size: Size of word
+ * @size: Size of data
+ *
+ */
+struct cam_isp_hw_dump_header {
+ char tag[CAM_ISP_HW_DUMP_TAG_MAX_LEN];
+ uint64_t size;
+ uint32_t word_size;
+};
#endif /* _CAM_ISP_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
index 3bcedc948a18..2b073d52778d 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
@@ -283,6 +283,7 @@ struct cam_vfe_bus_irq_evt_payload {
uint32_t error_type;
struct cam_isp_timestamp ts;
void *ctx;
+ uint32_t enable_dump;
};
/*
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
index 162ddadd744f..7fbf1c5b1e22 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
@@ -764,6 +764,7 @@ int cam_vfe_process_cmd(void *hw_priv, uint32_t cmd_type,
case CAM_ISP_HW_CMD_BW_CONTROL:
case CAM_ISP_HW_CMD_GET_IRQ_REGISTER_DUMP:
case CAM_ISP_HW_CMD_FPS_CONFIG:
+ case CAM_ISP_HW_CMD_DUMP_HW:
rc = core_info->vfe_top->hw_ops.process_cmd(
core_info->vfe_top->top_priv, cmd_type, cmd_args,
arg_size);
@@ -773,6 +774,7 @@ int cam_vfe_process_cmd(void *hw_priv, uint32_t cmd_type,
case CAM_ISP_HW_CMD_STRIPE_UPDATE:
case CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ:
case CAM_ISP_HW_CMD_UBWC_UPDATE:
+ case CAM_ISP_HW_CMD_SET_STATS_DMI_DUMP:
rc = core_info->vfe_bus->hw_ops.process_cmd(
core_info->vfe_bus->bus_priv, cmd_type, cmd_args,
arg_size);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h
index 61c1e9e01ba2..dc1ee4cd4cac 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h
@@ -108,6 +108,169 @@ struct cam_vfe_top_ver2_reg_offset_module_ctrl zoom_170_reg = {
.enable = 0x0000004C,
};
+static struct cam_vfe_bus_ver2_stats_cfg_info stats_170_info = {
+ .dmi_offset_info = {
+ .auto_increment = 0x00000100,
+ .cfg_offset = 0x00000C24,
+ .addr_offset = 0x00000C28,
+ .data_hi_offset = 0x00000C2C,
+ .data_lo_offset = 0x00000C30,
+ },
+ .stats_cfg_offset = {
+ /* CAM_VFE_BUS_VER2_VFE_OUT_RDI0 */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_RDI1 */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_RDI2 */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_RDI3 */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_FULL */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_DS4 */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_DS16 */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_FD */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_PDAF */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE */
+ {
+ .res_index = CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE,
+ .cfg_offset = 0x00000AB8,
+ .num_cfg = 0x00000ABC,
+ .cfg_size = 0x00000AC0,
+ .is_lut = 0,
+ .lut = {
+ .size = 0,
+ .bank_0 = 0,
+ .bank_1 = 0,
+ },
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST */
+ {
+ .res_index =
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST,
+ .cfg_offset = 0x00000AD4,
+ .num_cfg = 0x00000AD8,
+ .cfg_size = 0x00000000,
+ .is_lut = 1,
+ .lut = {
+ .size = 180,
+ .bank_0 = 0x36,
+ .bank_1 = 0x37,
+ },
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF */
+ {
+ .res_index = CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF,
+ .cfg_offset = 0x00000AE4,
+ .num_cfg = 0x00000000,
+ .cfg_size = 0x00000000,
+ .is_lut = 1,
+ .lut = {
+ .size = 180,
+ .bank_0 = 0x40,
+ .bank_1 = 0x41,
+ },
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG */
+ {
+ .res_index = CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG,
+ .cfg_offset = 0x00000BC8,
+ .num_cfg = 0x00000BCC,
+ .cfg_size = 0x00000BD0,
+ .is_lut = 0,
+ .lut = {
+ .size = 0,
+ .bank_0 = 0,
+ .bank_1 = 0,
+ },
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST */
+ {
+ .res_index = CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST,
+ .cfg_offset = 0x00000BE4,
+ .num_cfg = 0x00000BE8,
+ .cfg_size = 0x00000000,
+ .is_lut = 1,
+ .lut = {
+ .size = 180,
+ .bank_0 = 0x3A,
+ .bank_1 = 0,
+ },
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS */
+ {
+ .res_index = CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS,
+ .cfg_offset = 0x00000BEC,
+ .num_cfg = 0x00000BF0,
+ .cfg_size = 0x00000BF4,
+ .is_lut = 0,
+ .lut = {
+ .size = 0,
+ .bank_0 = 0,
+ .bank_1 = 0,
+ },
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS */
+ {
+ .res_index = CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS,
+ .cfg_offset = 0x00000BF8,
+ .num_cfg = 0x00000BFC,
+ .cfg_size = 0x00000C00,
+ .is_lut = 0,
+ .lut = {
+ .size = 0,
+ .bank_0 = 0,
+ .bank_1 = 0,
+ },
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST */
+ {
+ .res_index = CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST,
+ .cfg_offset = 0x00000C04,
+ .num_cfg = 0x00000C08,
+ .cfg_size = 0x00000000,
+ .is_lut = 1,
+ .lut = {
+ .size = 180,
+ .bank_0 = 0x3B,
+ .bank_1 = 0x3C,
+ },
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_FULL_DISP */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_DS4_DISP */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_DS16_DISP */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_2PD */
+ {
+ },
+ },
+};
+
+
static struct cam_vfe_top_ver2_reg_offset_common vfe170_top_common_reg = {
.hw_version = 0x00000000,
.hw_capability = 0x00000004,
@@ -173,6 +336,31 @@ static struct cam_vfe_top_ver2_hw_info vfe170_top_hw_info = {
NULL,
},
},
+ .dump_data = {
+ .num_reg_dump_entries = 2,
+ .num_lut_dump_entries = 1,
+ .dmi_cfg = 0xc24,
+ .dmi_addr = 0xc28,
+ .dmi_data_path_hi = 0xc2C,
+ .dmi_data_path_lo = 0xc30,
+ .reg_entry = {
+ {
+ .reg_dump_start = 0x0,
+ .reg_dump_end = 0x1160,
+ },
+ {
+ .reg_dump_start = 0x2000,
+ .reg_dump_end = 0x3978,
+ },
+ },
+ .lut_entry = {
+ {
+ .lut_word_size = 64,
+ .lut_bank_sel = 0x40,
+ .lut_addr_size = 180,
+ },
+ },
+ },
.mux_type = {
CAM_VFE_CAMIF_VER_2_0,
CAM_VFE_RDI_VER_1_0,
@@ -834,6 +1022,7 @@ static struct cam_vfe_bus_ver2_hw_info vfe170_bus_hw_info = {
.ubwc_8bit_threshold_lossy_0 = 0,
.ubwc_8bit_threshold_lossy_1 = 0,
},
+ .stats_data = &stats_170_info,
};
struct cam_vfe_hw_info cam_vfe170_hw_info = {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h
index edb595e70ad9..2bdbd2435ef0 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h
@@ -131,6 +131,179 @@ struct cam_vfe_top_ver2_reg_offset_module_ctrl zoom_175_reg = {
.enable = 0x0000004C,
};
+
+static struct cam_vfe_bus_ver2_stats_cfg_info stats_175_info = {
+ .dmi_offset_info = {
+ .auto_increment = 0x00000100,
+ .cfg_offset = 0x00000C24,
+ .addr_offset = 0x00000C28,
+ .data_hi_offset = 0x00000C2C,
+ .data_lo_offset = 0x00000C30,
+ },
+ .stats_cfg_offset = {
+ /* CAM_VFE_BUS_VER2_VFE_OUT_RDI0 */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_RDI1 */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_RDI2 */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_RDI3 */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_FULL */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_DS4 */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_DS16 */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_FD */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_PDAF */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE */
+ {
+ .res_index = CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE,
+ .cfg_offset = 0x00000AB8,
+ .num_cfg = 0x00000ABC,
+ .cfg_size = 0x00000AC0,
+ .is_lut = 0,
+ .lut = {
+ .size = -1,
+ .bank_0 = -1,
+ .bank_1 = -1,
+ },
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST */
+ {
+ .res_index =
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST,
+ .cfg_offset = 0x00000AD4,
+ .num_cfg = 0x00000AD8,
+ .cfg_size = 0x00000000,
+ .is_lut = 1,
+ .lut = {
+ .size = 180,
+ .bank_0 = 0x36,
+ .bank_1 = 0x37,
+ },
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF */
+ {
+ .res_index = CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF,
+ .cfg_offset = 0x00000AE4,
+ .num_cfg = 0x00000000,
+ .cfg_size = 0x00000000,
+ .is_lut = 1,
+ .lut = {
+ .size = 180,
+ .bank_0 = 0x40,
+ .bank_1 = 0x41,
+ },
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG */
+ {
+ .res_index = CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG,
+ .cfg_offset = 0x00000BC8,
+ .num_cfg = 0x00000BCC,
+ .cfg_size = 0x00000BD0,
+ .is_lut = 0,
+ .lut = {
+ .size = -1,
+ .bank_0 = -1,
+ .bank_1 = -1,
+ },
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST */
+ {
+ .res_index = CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST,
+ .cfg_offset = 0x00000BE4,
+ .num_cfg = 0x00000BE8,
+ .cfg_size = 0x00000000,
+ .is_lut = 1,
+ .lut = {
+ .size = 180,
+ .bank_0 = 0x3A,
+ .bank_1 = -1,
+ },
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS */
+ {
+ .res_index = CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS,
+ .cfg_offset = 0x00000BEC,
+ .num_cfg = 0x00000BF0,
+ .cfg_size = 0x00000BF4,
+ .is_lut = 0,
+ .lut = {
+ .size = -1,
+ .bank_0 = -1,
+ .bank_1 = -1,
+ },
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS */
+ {
+ .res_index = CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS,
+ .cfg_offset = 0x00000BF8,
+ .num_cfg = 0x00000BFC,
+ .cfg_size = 0x00000C00,
+ .is_lut = 0,
+ .lut = {
+ .size = -1,
+ .bank_0 = -1,
+ .bank_1 = -1,
+ },
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST */
+ {
+ .res_index = CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST,
+ .cfg_offset = 0x00000C04,
+ .num_cfg = 0x00000C08,
+ .cfg_size = 0x00000000,
+ .is_lut = 1,
+ .lut = {
+ .size = 180,
+ .bank_0 = 0x3B,
+ .bank_1 = 0x3C,
+ },
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_FULL_DISP */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_DS4_DISP */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_DS16_DISP */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_2PD */
+ {
+ .res_index = CAM_VFE_BUS_VER2_VFE_OUT_2PD,
+ .cfg_offset = 0x00000FF0,
+ .num_cfg = 0x00000FF4,
+ .cfg_size = 0x00000FF8,
+ .is_lut = 1,
+ .lut = {
+ .size = 180,
+ .bank_0 = 0x44,
+ .bank_1 = 0x45,
+ },
+ },
+ },
+};
+
static struct cam_vfe_top_ver2_reg_offset_common vfe175_top_common_reg = {
.hw_version = 0x00000000,
.hw_capability = 0x00000004,
@@ -196,6 +369,31 @@ static struct cam_vfe_top_ver2_hw_info vfe175_top_hw_info = {
NULL,
},
},
+ .dump_data = {
+ .num_reg_dump_entries = 2,
+ .num_lut_dump_entries = 1,
+ .dmi_cfg = 0xc24,
+ .dmi_addr = 0xc28,
+ .dmi_data_path_hi = 0xc2C,
+ .dmi_data_path_lo = 0xc30,
+ .reg_entry = {
+ {
+ .reg_dump_start = 0x0,
+ .reg_dump_end = 0x1160,
+ },
+ {
+ .reg_dump_start = 0x2000,
+ .reg_dump_end = 0x3978,
+ },
+ },
+ .lut_entry = {
+ {
+ .lut_word_size = 64,
+ .lut_bank_sel = 0x40,
+ .lut_addr_size = 180,
+ },
+ },
+ },
.mux_type = {
CAM_VFE_CAMIF_VER_2_0,
CAM_VFE_RDI_VER_1_0,
@@ -1001,6 +1199,7 @@ static struct cam_vfe_bus_ver2_hw_info vfe175_bus_hw_info = {
.ubwc_8bit_threshold_lossy_0 = 0x6210022,
.ubwc_8bit_threshold_lossy_1 = 0xE0E,
},
+ .stats_data = &stats_175_info,
};
struct cam_vfe_hw_info cam_vfe175_hw_info = {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175_130.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175_130.h
index 3ca5bec71113..0fce972a8410 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175_130.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175_130.h
@@ -181,6 +181,175 @@ struct cam_vfe_top_ver2_reg_offset_module_ctrl zoom_175_130_reg = {
.enable = 0x0000004C,
};
+static struct cam_vfe_bus_ver2_stats_cfg_info stats_175_130_info = {
+ .dmi_offset_info = {
+ .auto_increment = 0x00000100,
+ .cfg_offset = 0x00000C24,
+ .addr_offset = 0x00000C28,
+ .data_hi_offset = 0x00000C2C,
+ .data_lo_offset = 0x00000C30,
+ },
+ .stats_cfg_offset = {
+ /* CAM_VFE_BUS_VER2_VFE_OUT_RDI0 */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_RDI1 */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_RDI2 */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_RDI3 */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_FULL */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_DS4 */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_DS16 */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_FD */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_PDAF */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE */
+ {
+ .res_index = CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE,
+ .cfg_offset = 0x00000AB8,
+ .num_cfg = 0x00000ABC,
+ .cfg_size = 0x00000AC0,
+ .is_lut = 0,
+ .lut = {
+ .size = -1,
+ .bank_0 = -1,
+ .bank_1 = -1,
+ },
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST */
+ {
+ .res_index =
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST,
+ .cfg_offset = 0x00000AD4,
+ .num_cfg = 0x00000AD8,
+ .cfg_size = 0x00000000,
+ .is_lut = 1,
+ .lut = {
+ .size = 180,
+ .bank_0 = 0x36,
+ .bank_1 = 0x37,
+ },
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF */
+ {
+ .res_index = CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF,
+ .cfg_offset = 0x00000AE4,
+ .num_cfg = 0x00000000,
+ .cfg_size = 0x00000000,
+ .is_lut = 1,
+ .lut = {
+ .size = 180,
+ .bank_0 = 0x40,
+ .bank_1 = 0x41,
+ },
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG */
+ {
+ .res_index = CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG,
+ .cfg_offset = 0x00000BC8,
+ .num_cfg = 0x00000BCC,
+ .cfg_size = 0x00000BD0,
+ .is_lut = 0,
+ .lut = {
+ .size = -1,
+ .bank_0 = -1,
+ .bank_1 = -1,
+ },
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST */
+ {
+ .res_index = CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST,
+ .cfg_offset = 0x00000BE4,
+ .num_cfg = 0x00000BE8,
+ .cfg_size = 0x00000000,
+ .is_lut = 1,
+ .lut = {
+ .size = 180,
+ .bank_0 = 0x3A,
+ .bank_1 = -1,
+ },
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS */
+ {
+ .res_index = CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS,
+ .cfg_offset = 0x00000BEC,
+ .num_cfg = 0x00000BF0,
+ .cfg_size = 0x00000BF4,
+ .is_lut = 0,
+ .lut = {
+ .size = -1,
+ .bank_0 = -1,
+ .bank_1 = -1,
+ },
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS */
+ {
+ .res_index = CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS,
+ .cfg_offset = 0x00000BF8,
+ .num_cfg = 0x00000BFC,
+ .cfg_size = 0x00000C00,
+ .is_lut = 0,
+ .lut = {
+ .size = -1,
+ .bank_0 = -1,
+ .bank_1 = -1,
+ },
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST */
+ {
+ .res_index = CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST,
+ .cfg_offset = 0x00000C04,
+ .num_cfg = 0x00000C08,
+ .cfg_size = 0x00000000,
+ .is_lut = 1,
+ .lut = {
+ .size = 180,
+ .bank_0 = 0x3B,
+ .bank_1 = 0x3C,
+ },
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_FULL_DISP */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_DS4_DISP */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_DS16_DISP */
+ {
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_2PD */
+ {
+ .res_index = CAM_VFE_BUS_VER2_VFE_OUT_2PD,
+ .cfg_offset = 0x00000FF0,
+ .num_cfg = 0x00000FF4,
+ .cfg_size = 0x00000FF8,
+ .is_lut = 1,
+ .lut = {
+ .size = 180,
+ .bank_0 = 0x44,
+ .bank_1 = 0x45,
+ },
+ },
+ },
+};
+
static struct cam_vfe_top_ver2_reg_offset_common vfe175_130_top_common_reg = {
.hw_version = 0x00000000,
.hw_capability = 0x00000004,
@@ -251,6 +420,31 @@ static struct cam_vfe_top_ver2_hw_info vfe175_130_top_hw_info = {
.fe_reg = &vfe175_130_fe_reg,
.reg_data = &vfe_175_130_fe_reg_data,
},
+ .dump_data = {
+ .num_reg_dump_entries = 2,
+ .num_lut_dump_entries = 1,
+ .dmi_cfg = 0xc24,
+ .dmi_addr = 0xc28,
+ .dmi_data_path_hi = 0xc2C,
+ .dmi_data_path_lo = 0xc30,
+ .reg_entry = {
+ {
+ .reg_dump_start = 0x0,
+ .reg_dump_end = 0x1160,
+ },
+ {
+ .reg_dump_start = 0x2000,
+ .reg_dump_end = 0x3978,
+ },
+ },
+ .lut_entry = {
+ {
+ .lut_word_size = 64,
+ .lut_bank_sel = 0x40,
+ .lut_addr_size = 180,
+ },
+ },
+ },
.mux_type = {
CAM_VFE_CAMIF_VER_2_0,
CAM_VFE_RDI_VER_1_0,
@@ -1106,6 +1300,7 @@ static struct cam_vfe_bus_ver2_hw_info vfe175_130_bus_hw_info = {
.ubwc_8bit_threshold_lossy_0 = 0x6210022,
.ubwc_8bit_threshold_lossy_1 = 0xE0E,
},
+ .stats_data = &stats_175_130_info,
};
struct cam_vfe_hw_info cam_vfe175_130_hw_info = {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index 8ad4d96dcff8..52de7b10cbe6 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -34,6 +34,9 @@ static const char drv_name[] = "vfe_bus";
#define CAM_VFE_BUS_IRQ_REG2 2
#define CAM_VFE_BUS_IRQ_MAX 3
+#define CAM_VFE_BUS_LUT_WORD_SIZE_64 1
+#define CAM_VFE_BUS_LUT_WORD_SIZE_32 2
+
#define CAM_VFE_BUS_VER2_PAYLOAD_MAX 256
#define CAM_VFE_BUS_SET_DEBUG_REG 0x82
@@ -116,6 +119,8 @@ struct cam_vfe_bus_ver2_common_data {
uint32_t num_sec_out;
uint32_t addr_no_sync;
uint32_t camera_hw_version;
+ struct cam_vfe_bus_ver2_stats_cfg_info *stats_data;
+ uint32_t enable_dmi_dump;
};
struct cam_vfe_bus_ver2_wm_resource_data {
@@ -1339,20 +1344,58 @@ static int cam_vfe_bus_handle_wm_done_bottom_half(void *wm_node,
return rc;
}
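+/*
+ * cam_vfe_bus_dump_dmi_reg()
+ *
+ * Reads back a stats LUT through the DMI interface: programs the DMI
+ * config register with auto-increment and the LUT bank select, resets
+ * the DMI address, then logs data_lo (and data_hi for 64-bit LUT words)
+ * for every LUT entry before clearing the DMI config/address registers.
+ */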
+static void cam_vfe_bus_dump_dmi_reg(
+ void __iomem *mem_base,
+ uint32_t lut_word_size,
+ uint32_t lut_size,
+ uint32_t lut_bank_sel,
+ struct cam_vfe_bus_ver2_dmi_offset_common dmi_cfg)
+{
+ uint32_t i;
+ uint32_t val_0;
+ uint32_t val_1;
+
+ val_0 = dmi_cfg.auto_increment | lut_bank_sel;
+ cam_io_w_mb(val_0, mem_base + dmi_cfg.cfg_offset);
+ cam_io_w_mb(0, mem_base + dmi_cfg.addr_offset);
+ for (i = 0; i < lut_size; i++) {
+ if (lut_word_size == CAM_VFE_BUS_LUT_WORD_SIZE_64) {
+ val_0 = cam_io_r_mb(mem_base +
+ dmi_cfg.data_lo_offset);
+ val_1 = cam_io_r_mb(mem_base +
+ dmi_cfg.data_hi_offset);
+ CAM_INFO(CAM_ISP,
+ "Bank%d entry %d LO: 0x%x HI: 0x%x",
+ lut_bank_sel, i, val_0, val_1);
+ } else {
+ val_0 = cam_io_r_mb(mem_base +
+ dmi_cfg.data_lo_offset);
+ CAM_INFO(CAM_ISP, "Bank%d entry %d LO: 0x%x",
+ lut_bank_sel, i, val_0);
+ }
+ }
+ cam_io_w_mb(0, mem_base + dmi_cfg.cfg_offset);
+ cam_io_w_mb(0, mem_base + dmi_cfg.addr_offset);
+}
static int cam_vfe_bus_err_bottom_half(void *ctx_priv,
void *evt_payload_priv)
{
struct cam_vfe_bus_irq_evt_payload *evt_payload;
struct cam_vfe_bus_ver2_common_data *common_data;
+ struct cam_vfe_bus_ver2_stats_cfg_offset *stats_cfg;
+ struct cam_vfe_bus_ver2_dmi_offset_common dmi_cfg;
uint32_t val = 0;
+ uint32_t enable_dmi_dump;
if (!ctx_priv || !evt_payload_priv)
return -EINVAL;
evt_payload = evt_payload_priv;
common_data = evt_payload->ctx;
-
+ enable_dmi_dump = evt_payload->enable_dump;
+ stats_cfg = common_data->stats_data->stats_cfg_offset;
+ dmi_cfg = common_data->stats_data->dmi_offset_info;
val = evt_payload->debug_status_0;
CAM_ERR(CAM_ISP, "Bus Violation: debug_status_0 = 0x%x", val);
@@ -1386,35 +1429,202 @@ static int cam_vfe_bus_err_bottom_half(void *ctx_priv,
if (val & 0x0200)
CAM_INFO(CAM_ISP, "RAW DUMP violation");
- if (val & 0x0400)
+ if (val & 0x0400) {
CAM_INFO(CAM_ISP, "PDAF violation");
+ if (enable_dmi_dump & CAM_VFE_BUS_ENABLE_DMI_DUMP) {
+
+ cam_vfe_bus_dump_dmi_reg(
+ common_data->mem_base,
+ CAM_VFE_BUS_LUT_WORD_SIZE_64,
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_PDAF].lut.size,
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_PDAF].lut.bank_0,
+ dmi_cfg);
+ }
+ if (enable_dmi_dump & CAM_VFE_BUS_ENABLE_STATS_REG_DUMP) {
+ CAM_INFO(CAM_ISP, "RGN offset cfg 0x%08x",
+ cam_io_r_mb(common_data->mem_base +
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_PDAF].cfg_offset));
+ }
+ }
- if (val & 0x0800)
+ if (val & 0x0800) {
CAM_INFO(CAM_ISP, "STATs HDR BE violation");
+ if (enable_dmi_dump & CAM_VFE_BUS_ENABLE_STATS_REG_DUMP) {
+
+ CAM_INFO(CAM_ISP, "RGN offset cfg 0x%08x",
+ cam_io_r_mb(common_data->mem_base +
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE].cfg_offset));
- if (val & 0x01000)
+ CAM_INFO(CAM_ISP, "RGN num cfg 0x%08x",
+ cam_io_r_mb(common_data->mem_base +
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE].num_cfg));
+ }
+ }
+
+ if (val & 0x01000) {
CAM_INFO(CAM_ISP, "STATs HDR BHIST violation");
+ if (enable_dmi_dump & CAM_VFE_BUS_ENABLE_DMI_DUMP) {
+
+ cam_vfe_bus_dump_dmi_reg(
+ common_data->mem_base,
+ CAM_VFE_BUS_LUT_WORD_SIZE_64,
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST].lut.size,
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST].lut.bank_0,
+ dmi_cfg);
+
+ cam_vfe_bus_dump_dmi_reg(
+ common_data->mem_base,
+ CAM_VFE_BUS_LUT_WORD_SIZE_64,
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST].lut.size,
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST].lut.bank_1,
+ dmi_cfg);
+ }
+ if (enable_dmi_dump & CAM_VFE_BUS_ENABLE_STATS_REG_DUMP) {
+
+ CAM_INFO(CAM_ISP, "RGN offset cfg 0x%08x",
+ cam_io_r_mb(common_data->mem_base +
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST].cfg_offset));
+
+ CAM_INFO(CAM_ISP, "RGN num cfg 0x%08x",
+ cam_io_r_mb(common_data->mem_base +
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST].num_cfg));
+ }
+ }
if (val & 0x02000)
CAM_INFO(CAM_ISP, "STATs TINTLESS BG violation");
- if (val & 0x04000)
+ if (val & 0x04000) {
CAM_INFO(CAM_ISP, "STATs BF violation");
-
- if (val & 0x08000)
+ if (enable_dmi_dump & CAM_VFE_BUS_ENABLE_DMI_DUMP) {
+
+ cam_vfe_bus_dump_dmi_reg(
+ common_data->mem_base,
+ CAM_VFE_BUS_LUT_WORD_SIZE_64,
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF].lut.size,
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF].lut.bank_0,
+ dmi_cfg);
+
+ cam_vfe_bus_dump_dmi_reg(
+ common_data->mem_base,
+ CAM_VFE_BUS_LUT_WORD_SIZE_64,
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF].lut.size,
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF].lut.bank_1,
+ dmi_cfg);
+ }
+ if (enable_dmi_dump & CAM_VFE_BUS_ENABLE_STATS_REG_DUMP) {
+ CAM_INFO(CAM_ISP, "RGN offset cfg 0x%08x",
+ cam_io_r_mb(common_data->mem_base +
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF].cfg_offset));
+ }
+ }
+
+ if (val & 0x08000) {
CAM_INFO(CAM_ISP, "STATs AWB BG UBWC violation");
+ if (enable_dmi_dump & CAM_VFE_BUS_ENABLE_STATS_REG_DUMP) {
- if (val & 0x010000)
+ CAM_INFO(CAM_ISP, "RGN offset cfg 0x%08x",
+ cam_io_r_mb(common_data->mem_base +
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG].cfg_offset));
+
+ CAM_INFO(CAM_ISP, "RGN num cfg 0x%08x",
+ cam_io_r_mb(common_data->mem_base +
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG].num_cfg));
+ }
+ }
+
+ if (val & 0x010000) {
CAM_INFO(CAM_ISP, "STATs BHIST violation");
+ if (enable_dmi_dump & CAM_VFE_BUS_ENABLE_DMI_DUMP) {
+
+ cam_vfe_bus_dump_dmi_reg(
+ common_data->mem_base,
+ CAM_VFE_BUS_LUT_WORD_SIZE_64,
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST].lut.size,
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST].lut.bank_0,
+ dmi_cfg);
+ }
+ if (enable_dmi_dump & CAM_VFE_BUS_ENABLE_STATS_REG_DUMP) {
- if (val & 0x020000)
+ CAM_INFO(CAM_ISP, "RGN offset cfg 0x%08x",
+ cam_io_r_mb(common_data->mem_base +
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST].cfg_offset));
+
+ CAM_INFO(CAM_ISP, "RGN num cfg 0x%08x",
+ cam_io_r_mb(common_data->mem_base +
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST].num_cfg));
+ }
+ }
+
+ if (val & 0x020000) {
CAM_INFO(CAM_ISP, "STATs RS violation");
+ if (enable_dmi_dump & CAM_VFE_BUS_ENABLE_STATS_REG_DUMP) {
+
+ CAM_INFO(CAM_ISP, "RGN offset cfg 0x%08x",
+ cam_io_r_mb(common_data->mem_base +
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS].cfg_offset));
- if (val & 0x040000)
+ CAM_INFO(CAM_ISP, "RGN num cfg 0x%08x",
+ cam_io_r_mb(common_data->mem_base +
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS].num_cfg));
+ }
+ }
+
+ if (val & 0x040000) {
CAM_INFO(CAM_ISP, "STATs CS violation");
+ if (enable_dmi_dump & CAM_VFE_BUS_ENABLE_STATS_REG_DUMP) {
+
+ CAM_INFO(CAM_ISP, "RGN offset cfg 0x%08x",
+ cam_io_r_mb(common_data->mem_base +
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS].cfg_offset));
+
+ CAM_INFO(CAM_ISP, "RGN num cfg 0x%08x",
+ cam_io_r_mb(common_data->mem_base +
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS].num_cfg));
+ }
+ }
- if (val & 0x080000)
+ if (val & 0x080000) {
CAM_INFO(CAM_ISP, "STATs IHIST violation");
+ if (enable_dmi_dump & CAM_VFE_BUS_ENABLE_STATS_REG_DUMP) {
+
+ CAM_INFO(CAM_ISP, "RGN offset cfg 0x%08x",
+ cam_io_r_mb(common_data->mem_base +
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST].cfg_offset));
+
+ CAM_INFO(CAM_ISP, "RGN num cfg 0x%08x",
+ cam_io_r_mb(common_data->mem_base +
+ stats_cfg[
+ CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST].num_cfg));
+ }
+ }
if (val & 0x0100000)
CAM_INFO(CAM_ISP, "DISP Y 1:1 UBWC violation");
@@ -2504,6 +2714,7 @@ static int cam_vfe_bus_error_irq_top_half(uint32_t evt_id,
evt_payload->debug_status_0 = cam_io_r_mb(
bus_priv->common_data.mem_base +
bus_priv->common_data.common_reg->debug_status_0);
+ evt_payload->enable_dump = bus_priv->common_data.enable_dmi_dump;
th_payload->evt_payload_priv = evt_payload;
@@ -3319,6 +3530,7 @@ static int cam_vfe_bus_process_cmd(
{
int rc = -EINVAL;
struct cam_vfe_bus_ver2_priv *bus_priv;
+ struct cam_isp_hw_get_cmd_update *cmd_update;
if (!priv || !cmd_args) {
CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid input arguments");
@@ -3356,6 +3568,12 @@ static int cam_vfe_bus_process_cmd(
case CAM_ISP_HW_CMD_UBWC_UPDATE:
rc = cam_vfe_bus_update_ubwc_config(cmd_args);
break;
+ case CAM_ISP_HW_CMD_SET_STATS_DMI_DUMP:
+ bus_priv = (struct cam_vfe_bus_ver2_priv *) priv;
+ cmd_update = (struct cam_isp_hw_get_cmd_update *) cmd_args;
+ bus_priv->common_data.enable_dmi_dump =
+ *((uint32_t *)cmd_update->data);
+ break;
default:
CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid camif process command:%d",
cmd_type);
@@ -3427,6 +3645,7 @@ int cam_vfe_bus_ver2_init(
bus_priv->common_data.addr_no_sync =
CAM_VFE_BUS_ADDR_NO_SYNC_DEFAULT_VAL;
bus_priv->common_data.camera_hw_version = camera_hw_version;
+ bus_priv->common_data.stats_data = ver2_hw_info->stats_data;
mutex_init(&bus_priv->common_data.bus_mutex);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h
index 39d8fa561590..fb52335ae0d5 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h
@@ -18,6 +18,9 @@
#define CAM_VFE_BUS_VER2_MAX_CLIENTS 24
+#define CAM_VFE_BUS_ENABLE_DMI_DUMP BIT(0)
+#define CAM_VFE_BUS_ENABLE_STATS_REG_DUMP BIT(1)
+
enum cam_vfe_bus_ver2_vfe_core_id {
CAM_VFE_BUS_VER2_VFE_CORE_0,
CAM_VFE_BUS_VER2_VFE_CORE_1,
@@ -67,6 +70,36 @@ enum cam_vfe_bus_ver2_vfe_out_type {
CAM_VFE_BUS_VER2_VFE_OUT_MAX,
};
+struct cam_vfe_bus_ver2_dmi_lut_bank_info {
+ uint32_t size;
+ uint32_t bank_0;
+ uint32_t bank_1;
+};
+
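+/*
+ * struct cam_vfe_bus_ver2_stats_cfg_offset:
+ *
+ * @res_index:  VFE out resource this entry describes
+ * @cfg_offset: stats region offset config register
+ * @num_cfg:    stats region number config register
+ * @cfg_size:   stats region size config register
+ * @is_lut:     set if the module is programmed through a DMI LUT
+ * @lut:        LUT size and bank selects used for the DMI dump
+ */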
+struct cam_vfe_bus_ver2_stats_cfg_offset {
+ uint32_t res_index;
+ uint32_t cfg_offset;
+ uint32_t num_cfg;
+ uint32_t cfg_size;
+ uint32_t is_lut;
+ struct cam_vfe_bus_ver2_dmi_lut_bank_info lut;
+};
+
+struct cam_vfe_bus_ver2_dmi_offset_common {
+ uint32_t auto_increment;
+ uint32_t cfg_offset;
+ uint32_t addr_offset;
+ uint32_t data_hi_offset;
+ uint32_t data_lo_offset;
+};
+
+struct cam_vfe_bus_ver2_stats_cfg_info {
+ struct cam_vfe_bus_ver2_dmi_offset_common
+ dmi_offset_info;
+ struct cam_vfe_bus_ver2_stats_cfg_offset
+ stats_cfg_offset[CAM_VFE_BUS_VER2_VFE_OUT_MAX];
+};
+
/*
* struct cam_vfe_bus_ver2_reg_offset_common:
*
@@ -211,6 +244,7 @@ struct cam_vfe_bus_ver2_hw_info {
struct cam_vfe_bus_ver2_vfe_out_hw_info
vfe_out_hw_info[CAM_VFE_BUS_VER2_VFE_OUT_MAX];
struct cam_vfe_bus_ver2_reg_data reg_data;
+ struct cam_vfe_bus_ver2_stats_cfg_info *stats_data;
};
/*
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
index 0461b0820b4f..cfc67c676cee 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
@@ -28,6 +28,7 @@ struct cam_vfe_top_ver2_common_data {
struct cam_hw_soc_info *soc_info;
struct cam_hw_intf *hw_intf;
struct cam_vfe_top_ver2_reg_offset_common *common_reg;
+ struct cam_vfe_top_dump_data *dump_data;
};
struct cam_vfe_top_ver2_priv {
@@ -467,6 +468,117 @@ static int cam_vfe_get_irq_register_dump(
return 0;
}
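+/*
+ * cam_vfe_hw_dump()
+ *
+ * Dumps the VFE top into the caller-provided buffer: a "VFE_REG:" header
+ * with (offset, value) pairs for every configured register range,
+ * followed by one "LUT_REG:" header per LUT entry whose contents are
+ * read back through the DMI interface.
+ */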
+static int cam_vfe_hw_dump(
+ struct cam_vfe_top_ver2_priv *top_priv,
+ void *cmd_args,
+ uint32_t arg_size)
+{
+ struct cam_isp_hw_dump_args *dump_args =
+ (struct cam_isp_hw_dump_args *)cmd_args;
+ struct cam_hw_soc_info *soc_info;
+ uint32_t i, j;
+ struct cam_vfe_top_dump_data *dump_data;
+ uint32_t reg_dump_size = 0, lut_dump_size = 0;
+ uint32_t reg_start_offset;
+ uint32_t val = 0;
+ uint32_t num_reg;
+ void __iomem *reg_base;
+ uint32_t *addr, *start;
+ struct cam_isp_hw_dump_header *hdr;
+ uint8_t *dst;
+
+ if (!dump_args->cpu_addr || !dump_args->buf_len) {
+ CAM_ERR(CAM_ISP,
+ "Invalid addr/len %zu", dump_args->buf_len);
+ return -EINVAL;
+ }
+ dump_data = top_priv->common_data.dump_data;
+ soc_info = top_priv->common_data.soc_info;
+
+ /* Dump registers */
+ for (i = 0; i < dump_data->num_reg_dump_entries; i++)
+ reg_dump_size += (dump_data->reg_entry[i].reg_dump_end -
+ dump_data->reg_entry[i].reg_dump_start);
+ /*
+ * Each register is dumped as an (offset, value) pair, so the
+ * total size doubles.
+ */
+ reg_dump_size *= 2;
+ for (i = 0; i < dump_data->num_lut_dump_entries; i++)
+ lut_dump_size += ((dump_data->lut_entry[i].lut_addr_size) *
+ (dump_data->lut_entry[i].lut_word_size/8));
+
+ if ((dump_args->buf_len - dump_args->offset) <
+ (lut_dump_size + reg_dump_size +
+ sizeof(struct cam_isp_hw_dump_header))) {
+ CAM_ERR(CAM_ISP, "Dump buffer exhausted");
+ return 0;
+ }
+ dst = (char *)dump_args->cpu_addr + dump_args->offset;
+ hdr = (struct cam_isp_hw_dump_header *)dst;
+ hdr->word_size = sizeof(uint32_t);
+ snprintf(hdr->tag, CAM_ISP_HW_DUMP_TAG_MAX_LEN, "VFE_REG:");
+ addr = (uint32_t *)(dst + sizeof(struct cam_isp_hw_dump_header));
+ start = addr;
+ *addr++ = soc_info->index;
+ for (i = 0; i < dump_data->num_reg_dump_entries; i++) {
+ num_reg = (dump_data->reg_entry[i].reg_dump_end -
+ dump_data->reg_entry[i].reg_dump_start)/4;
+ reg_start_offset = dump_data->reg_entry[i].reg_dump_start;
+ reg_base = soc_info->reg_map[0].mem_base + reg_start_offset;
+ for (j = 0; j < num_reg; j++) {
+ addr[0] = soc_info->mem_block[0]->start +
+ reg_start_offset + (j*4);
+ addr[1] = cam_io_r(reg_base + (j*4));
+ addr += 2;
+ }
+ }
+ hdr->size = hdr->word_size * (addr - start);
+ dump_args->offset += hdr->size +
+ sizeof(struct cam_isp_hw_dump_header);
+
+ /* Dump LUTs through the DMI interface */
+ for (i = 0; i < dump_data->num_lut_dump_entries; i++) {
+
+ dst = (char *)dump_args->cpu_addr + dump_args->offset;
+ hdr = (struct cam_isp_hw_dump_header *)dst;
+ snprintf(hdr->tag, CAM_ISP_HW_DUMP_TAG_MAX_LEN, "LUT_REG:");
+ hdr->word_size = dump_data->lut_entry[i].lut_word_size/8;
+ addr = (uint32_t *)(dst +
+ sizeof(struct cam_isp_hw_dump_header));
+ start = addr;
+ *addr++ = dump_data->lut_entry[i].lut_bank_sel;
+ *addr++ = soc_info->index;
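+ /* 0x100 enables DMI auto-increment for sequential LUT reads */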
+ val = 0x100 | dump_data->lut_entry[i].lut_bank_sel;
+ cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+ dump_data->dmi_cfg);
+ cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+ dump_data->dmi_addr);
+ for (j = 0; j < dump_data->lut_entry[i].lut_addr_size;
+ j++) {
+ if (dump_data->lut_entry[i].lut_word_size == 64) {
+ addr[0] = cam_io_r(
+ soc_info->reg_map[0].mem_base +
+ dump_data->dmi_data_path_lo);
+ addr[1] = cam_io_r(
+ soc_info->reg_map[0].mem_base +
+ dump_data->dmi_data_path_hi);
+ addr += 2;
+ } else {
+ *addr = cam_io_r(
+ soc_info->reg_map[0].mem_base +
+ dump_data->dmi_data_path_lo);
+ addr++;
+ }
+ }
+ hdr->size = hdr->word_size * (addr - start);
+ dump_args->offset += hdr->size +
+ sizeof(struct cam_isp_hw_dump_header);
+ }
+ CAM_DBG(CAM_ISP, "offset %d", dump_args->offset);
+ return 0;
+}
+
int cam_vfe_top_get_hw_caps(void *device_priv,
void *get_hw_cap_args, uint32_t arg_size)
{
@@ -760,6 +872,10 @@ int cam_vfe_top_process_cmd(void *device_priv, uint32_t cmd_type,
rc = cam_vfe_top_fps_config(top_priv, cmd_args,
arg_size);
break;
+ case CAM_ISP_HW_CMD_DUMP_HW:
+ rc = cam_vfe_hw_dump(top_priv,
+ cmd_args, arg_size);
+ break;
default:
rc = -EINVAL;
CAM_ERR(CAM_ISP, "Error! Invalid cmd:%d", cmd_type);
@@ -903,6 +1019,7 @@ int cam_vfe_top_ver2_init(
top_priv->common_data.soc_info = soc_info;
top_priv->common_data.hw_intf = hw_intf;
top_priv->common_data.common_reg = ver2_hw_info->common_reg;
+ top_priv->common_data.dump_data = &ver2_hw_info->dump_data;
return rc;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
index 86f0d8c26ff5..7cfc023fed12 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,10 @@
#define CAM_VFE_TOP_VER2_MUX_MAX 6
+#define CAM_VFE_MAX_REG_DUMP_ENTRIES 5
+
+#define CAM_VFE_MAX_LUT_DUMP_ENTRIES 6
+
enum cam_vfe_top_ver2_module_type {
CAM_VFE_TOP_VER2_MODULE_LENS,
CAM_VFE_TOP_VER2_MODULE_STATS,
@@ -34,6 +38,30 @@ struct cam_vfe_top_ver2_reg_offset_module_ctrl {
uint32_t enable;
};
+struct cam_vfe_top_reg_dump_entry {
+ uint32_t reg_dump_start;
+ uint32_t reg_dump_end;
+};
+
+struct cam_vfe_top_lut_dump_entry {
+ uint32_t lut_word_size;
+ uint32_t lut_bank_sel;
+ uint32_t lut_addr_size;
+};
+
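+/*
+ * struct cam_vfe_top_dump_data:
+ *
+ * @num_reg_dump_entries: number of valid register ranges in @reg_entry
+ * @num_lut_dump_entries: number of valid LUTs in @lut_entry
+ * @dmi_cfg/@dmi_addr:    DMI config and address register offsets
+ * @dmi_data_path_hi/lo:  DMI data read-back register offsets
+ * @reg_entry:            register ranges dumped by cam_vfe_hw_dump()
+ * @lut_entry:            LUTs dumped through the DMI interface
+ */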
+struct cam_vfe_top_dump_data {
+ uint32_t num_reg_dump_entries;
+ uint32_t num_lut_dump_entries;
+ uint32_t dmi_cfg;
+ uint32_t dmi_addr;
+ uint32_t dmi_data_path_hi;
+ uint32_t dmi_data_path_lo;
+ struct cam_vfe_top_reg_dump_entry
+ reg_entry[CAM_VFE_MAX_REG_DUMP_ENTRIES];
+ struct cam_vfe_top_lut_dump_entry
+ lut_entry[CAM_VFE_MAX_LUT_DUMP_ENTRIES];
+};
+
struct cam_vfe_top_ver2_reg_offset_common {
uint32_t hw_version;
uint32_t hw_capability;
@@ -57,6 +85,7 @@ struct cam_vfe_top_ver2_hw_info {
struct cam_vfe_camif_lite_ver2_hw_info camif_lite_hw_info;
struct cam_vfe_rdi_ver2_hw_info rdi_hw_info;
struct cam_vfe_fe_ver1_hw_info fe_hw_info;
+ struct cam_vfe_top_dump_data dump_data;
uint32_t mux_type[CAM_VFE_TOP_VER2_MUX_MAX];
};
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
index 1c910621b655..e151a7ebd74f 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
@@ -91,6 +91,18 @@ static int __cam_jpeg_ctx_release_dev_in_acquired(struct cam_context *ctx,
return rc;
}
+static int __cam_jpeg_ctx_dump_dev_in_acquired(struct cam_context *ctx,
+ struct cam_dump_req_cmd *cmd)
+{
+ int rc;
+
+ rc = cam_context_dump_dev_to_hw(ctx, cmd);
+ if (rc)
+ CAM_ERR(CAM_ICP, "Failed to dump device, rc=%d", rc);
+
+ return rc;
+}
+
static int __cam_jpeg_ctx_flush_dev_in_acquired(struct cam_context *ctx,
struct cam_flush_dev_cmd *cmd)
{
@@ -153,6 +165,7 @@ static struct cam_ctx_ops
.config_dev = __cam_jpeg_ctx_config_dev_in_acquired,
.stop_dev = __cam_jpeg_ctx_stop_dev_in_acquired,
.flush_dev = __cam_jpeg_ctx_flush_dev_in_acquired,
+ .dump_dev = __cam_jpeg_ctx_dump_dev_in_acquired,
},
.crm_ops = { },
.irq_ops = __cam_jpeg_ctx_handle_buf_done_in_acquired,
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
index 696566d6a37a..1a995646a947 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
@@ -492,7 +492,7 @@ static int cam_jpeg_mgr_process_cmd(void *priv, void *data)
rc);
goto rel_cpu_buf;
}
-
+ cam_common_util_get_curr_timestamp(&p_cfg_req->submit_timestamp);
if (cam_mem_put_cpu_buf(
config_args->hw_update_entries[CAM_JPEG_CHBASE].handle))
CAM_WARN(CAM_JPEG, "unable to put info for cmd buf: 0x%x",
@@ -1043,6 +1043,116 @@ static int cam_jpeg_mgr_hw_flush(void *hw_mgr_priv, void *flush_hw_args)
return rc;
}
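+/*
+ * cam_jpeg_mgr_hw_dump()
+ *
+ * Dumps JPEG state for a request that is still with the hardware. If the
+ * request has been outstanding longer than CAM_JPEG_RESPONSE_TIME_THRESHOLD,
+ * a "JPEG_REQ:" header with the request id and submit/current timestamps is
+ * written to the dump buffer and the device is asked to dump its registers
+ * via CAM_JPEG_CMD_HW_DUMP.
+ */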
+static int cam_jpeg_mgr_hw_dump(void *hw_mgr_priv, void *dump_hw_args)
+{
+ int rc = 0;
+ struct cam_hw_dump_args *dump_args =
+ (struct cam_hw_dump_args *)dump_hw_args;
+ struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+ struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
+ struct timeval cur_time;
+ uint32_t dev_type;
+ uint64_t diff;
+ uint64_t *addr, *start;
+ char *dst;
+ struct cam_jpeg_hw_dump_header *hdr;
+ uint32_t min_len, remain_len;
+ struct cam_jpeg_hw_dump_args jpeg_dump_args;
+
+ if (!hw_mgr || !dump_args || !dump_args->ctxt_to_hw_map) {
+ CAM_ERR(CAM_JPEG, "Invalid args");
+ return -EINVAL;
+ }
+ mutex_lock(&hw_mgr->hw_mgr_mutex);
+ ctx_data = (struct cam_jpeg_hw_ctx_data *)dump_args->ctxt_to_hw_map;
+ if (!ctx_data->in_use) {
+ CAM_ERR(CAM_JPEG, "ctx is not in use");
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ return -EINVAL;
+ }
+ dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+ if (hw_mgr->device_in_use[dev_type][0]) {
+ p_cfg_req = hw_mgr->dev_hw_cfg_args[dev_type][0];
+ if (p_cfg_req && p_cfg_req->req_id ==
+ (uintptr_t)dump_args->request_id)
+ goto hw_dump;
+ }
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ return 0;
+
+hw_dump:
+ cam_common_util_get_curr_timestamp(&cur_time);
+ diff = cam_common_util_get_time_diff(&cur_time,
+ &p_cfg_req->submit_timestamp);
+ if (diff < CAM_JPEG_RESPONSE_TIME_THRESHOLD) {
+ CAM_INFO(CAM_JPEG,
+ "No error req %lld %ld:%06ld %ld:%06ld",
+ dump_args->request_id,
+ p_cfg_req->submit_timestamp.tv_sec,
+ p_cfg_req->submit_timestamp.tv_usec,
+ cur_time.tv_sec,
+ cur_time.tv_usec);
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ return 0;
+ }
+ CAM_INFO(CAM_JPEG,
+ "Error req %lld %ld:%06ld %ld:%06ld",
+ dump_args->request_id,
+ p_cfg_req->submit_timestamp.tv_sec,
+ p_cfg_req->submit_timestamp.tv_usec,
+ cur_time.tv_sec,
+ cur_time.tv_usec);
+ rc = cam_mem_get_cpu_buf(dump_args->buf_handle,
+ &jpeg_dump_args.cpu_addr, &jpeg_dump_args.buf_len);
+ if (!jpeg_dump_args.cpu_addr || !jpeg_dump_args.buf_len || rc) {
+ CAM_ERR(CAM_JPEG,
+ "Invalid addr %u len %zu rc %d",
+ dump_args->buf_handle, jpeg_dump_args.buf_len, rc);
+ goto end;
+ }
+ remain_len = jpeg_dump_args.buf_len - dump_args->offset;
+ min_len = 2 * (sizeof(struct cam_jpeg_hw_dump_header) +
+ CAM_JPEG_HW_DUMP_TAG_MAX_LEN);
+ if (remain_len < min_len) {
+ CAM_ERR(CAM_JPEG, "dump buffer exhausted %d %d",
+ remain_len, min_len);
+ goto end;
+ }
+ dst = (char *)jpeg_dump_args.cpu_addr + dump_args->offset;
+ hdr = (struct cam_jpeg_hw_dump_header *)dst;
+ snprintf(hdr->tag, CAM_JPEG_HW_DUMP_TAG_MAX_LEN,
+ "JPEG_REQ:");
+ hdr->word_size = sizeof(uint64_t);
+ addr = (uint64_t *)(dst + sizeof(struct cam_jpeg_hw_dump_header));
+ start = addr;
+ *addr++ = dump_args->request_id;
+ *addr++ = p_cfg_req->submit_timestamp.tv_sec;
+ *addr++ = p_cfg_req->submit_timestamp.tv_usec;
+ *addr++ = cur_time.tv_sec;
+ *addr++ = cur_time.tv_usec;
+ hdr->size = hdr->word_size * (addr - start);
+ dump_args->offset += hdr->size +
+ sizeof(struct cam_jpeg_hw_dump_header);
+ jpeg_dump_args.request_id = dump_args->request_id;
+ jpeg_dump_args.offset = dump_args->offset;
+
+ if (hw_mgr->devices[dev_type][0]->hw_ops.process_cmd) {
+ rc = hw_mgr->devices[dev_type][0]->hw_ops.process_cmd(
+ hw_mgr->devices[dev_type][0]->hw_priv,
+ CAM_JPEG_CMD_HW_DUMP,
+ &jpeg_dump_args, sizeof(jpeg_dump_args));
+ }
+ dump_args->offset = jpeg_dump_args.offset;
+end:
+ rc = cam_mem_put_cpu_buf(dump_args->buf_handle);
+ if (rc)
+ CAM_ERR(CAM_JPEG, "Cpu put failed handle %u",
+ dump_args->buf_handle);
+ mutex_unlock(&hw_mgr->hw_mgr_mutex);
+ return rc;
+}
+
static int cam_jpeg_mgr_hw_stop(void *hw_mgr_priv, void *stop_hw_args)
{
int rc;
@@ -1564,6 +1674,7 @@ int cam_jpeg_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl,
hw_mgr_intf->hw_flush = cam_jpeg_mgr_hw_flush;
hw_mgr_intf->hw_stop = cam_jpeg_mgr_hw_stop;
hw_mgr_intf->hw_cmd = cam_jpeg_mgr_cmd;
+ hw_mgr_intf->hw_dump = cam_jpeg_mgr_hw_dump;
mutex_init(&g_jpeg_hw_mgr.hw_mgr_mutex);
spin_lock_init(&g_jpeg_hw_mgr.hw_mgr_lock);
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
index 82022ec24a77..2dc11dfa0746 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -28,6 +28,12 @@
#define CAM_JPEG_WORKQ_TASK_MSG_TYPE 2
#define CAM_JPEG_HW_CFG_Q_MAX 50
+/*
+ * Response time threshold beyond which a request is not expected
+ * to still be with the JPEG HW
+ */
+#define CAM_JPEG_RESPONSE_TIME_THRESHOLD 100000
+
/**
* struct cam_jpeg_process_frame_work_data_t
*
@@ -76,12 +82,14 @@ struct cam_jpeg_hw_cdm_info_t {
* @hw_cfg_args: Hw config args
* @dev_type: Dev type for cfg request
* @req_id: Request Id
+ * @submit_timestamp: Timestamp of submitting request
*/
struct cam_jpeg_hw_cfg_req {
struct list_head list;
struct cam_hw_config_args hw_cfg_args;
uint32_t dev_type;
uintptr_t req_id;
+ struct timeval submit_timestamp;
};
/**
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_intf.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_intf.h
index 48d8f14d243d..06fafae792f1 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_intf.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,6 +23,8 @@
#define JPEG_VOTE 640000000
+#define CAM_JPEG_HW_DUMP_TAG_MAX_LEN 32
+
enum cam_jpeg_hw_type {
CAM_JPEG_DEV_ENC,
CAM_JPEG_DEV_DMA,
@@ -35,9 +37,23 @@ struct cam_jpeg_set_irq_cb {
uint32_t b_set_cb;
};
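+/*
+ * struct cam_jpeg_hw_dump_args - arguments for a JPEG hw dump
+ *
+ * @cpu_addr:   kernel mapping of the dump buffer
+ * @offset:     current write offset into the buffer
+ * @request_id: request being dumped
+ * @buf_len:    total length of the dump buffer
+ */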
+struct cam_jpeg_hw_dump_args {
+ uintptr_t cpu_addr;
+ uint64_t offset;
+ uint64_t request_id;
+ size_t buf_len;
+};
+
+struct cam_jpeg_hw_dump_header {
+ char tag[CAM_JPEG_HW_DUMP_TAG_MAX_LEN];
+ uint64_t size;
+ uint32_t word_size;
+};
+
enum cam_jpeg_cmd_type {
CAM_JPEG_CMD_CDM_CFG,
CAM_JPEG_CMD_SET_IRQ_CB,
+ CAM_JPEG_CMD_HW_DUMP,
CAM_JPEG_CMD_MAX,
};
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_hw_info_ver_4_2_0.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_hw_info_ver_4_2_0.h
index c6017afd3c37..8cdf59b70ef4 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_hw_info_ver_4_2_0.h
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_hw_info_ver_4_2_0.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -73,6 +73,10 @@ static struct cam_jpeg_enc_device_hw_info cam_jpeg_enc_hw_info = {
.resetdone = CAM_JPEG_HW_MASK_COMP_RESET_ACK,
.iserror = CAM_JPEG_HW_MASK_COMP_ERR,
.stopdone = CAM_JPEG_HW_IRQ_STATUS_STOP_DONE_MASK,
+ },
+ .reg_dump = {
+ .start_offset = 0x0,
+ .end_offset = 0x33C,
}
};
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c
index 225f859674f1..54dd62c44f67 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c
@@ -410,6 +410,64 @@ int cam_jpeg_enc_stop_hw(void *data,
return 0;
}
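+/*
+ * cam_jpeg_enc_hw_dump()
+ *
+ * Dumps the JPEG encoder register range described by hw_info->reg_dump
+ * into the caller-provided buffer as a "JPEG_REG:" header followed by
+ * (offset, value) pairs; bails out if the HW is powered down or the
+ * remaining buffer space is too small.
+ */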
+int cam_jpeg_enc_hw_dump(
+ struct cam_hw_info *jpeg_enc_dev,
+ struct cam_jpeg_hw_dump_args *dump_args)
+{
+
+ struct cam_hw_soc_info *soc_info = NULL;
+ int i;
+ char *dst;
+ uint32_t *addr, *start;
+ struct cam_jpeg_hw_dump_header *hdr;
+ uint32_t num_reg, min_len, remain_len, reg_start_offset;
+ struct cam_jpeg_enc_device_core_info *core_info;
+ struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
+
+ soc_info = &jpeg_enc_dev->soc_info;
+ core_info = (struct cam_jpeg_enc_device_core_info *)
+ jpeg_enc_dev->core_info;
+ hw_info = core_info->jpeg_enc_hw_info;
+ spin_lock(&jpeg_enc_dev->hw_lock);
+ if (jpeg_enc_dev->hw_state == CAM_HW_STATE_POWER_DOWN) {
+ CAM_ERR(CAM_JPEG, "JPEG HW is in off state");
+ spin_unlock(&jpeg_enc_dev->hw_lock);
+ return -EINVAL;
+ }
+ spin_unlock(&jpeg_enc_dev->hw_lock);
+ remain_len = dump_args->buf_len - dump_args->offset;
+ min_len = 2 * (sizeof(struct cam_jpeg_hw_dump_header) +
+ CAM_JPEG_HW_DUMP_TAG_MAX_LEN) +
+ soc_info->reg_map[0].size;
+ if (remain_len < min_len) {
+ CAM_ERR(CAM_JPEG, "dump buffer exhausted %d %d",
+ remain_len, min_len);
+ return 0;
+ }
+ dst = (char *)dump_args->cpu_addr + dump_args->offset;
+ hdr = (struct cam_jpeg_hw_dump_header *)dst;
+ snprintf(hdr->tag, CAM_JPEG_HW_DUMP_TAG_MAX_LEN,
+ "JPEG_REG:");
+ hdr->word_size = sizeof(uint32_t);
+ addr = (uint32_t *)(dst + sizeof(struct cam_jpeg_hw_dump_header));
+ start = addr;
+ *addr++ = soc_info->index;
+ num_reg = (hw_info->reg_dump.end_offset -
+ hw_info->reg_dump.start_offset)/4;
+ reg_start_offset = hw_info->reg_dump.start_offset;
+ for (i = 0; i < num_reg; i++) {
+ *addr++ = soc_info->mem_block[0]->start +
+ reg_start_offset + i*4;
+ *addr++ = cam_io_r(soc_info->reg_map[0].mem_base +
+ reg_start_offset + (i*4));
+ }
+ hdr->size = hdr->word_size * (addr - start);
+ dump_args->offset += hdr->size +
+ sizeof(struct cam_jpeg_hw_dump_header);
+ CAM_DBG(CAM_JPEG, "offset %llu", dump_args->offset);
+
+ return 0;
+}
+
int cam_jpeg_enc_process_cmd(void *device_priv, uint32_t cmd_type,
void *cmd_args, uint32_t arg_size)
{
@@ -450,6 +508,12 @@ int cam_jpeg_enc_process_cmd(void *device_priv, uint32_t cmd_type,
rc = 0;
break;
}
+ case CAM_JPEG_CMD_HW_DUMP:
+ {
+ rc = cam_jpeg_enc_hw_dump(jpeg_enc_dev,
+ cmd_args);
+ break;
+ }
default:
rc = -EINVAL;
break;
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h
index 5c571ab893f8..0013dbee1681 100644
--- a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -47,10 +47,16 @@ struct cam_jpeg_enc_int_status {
uint32_t stopdone;
};
+struct cam_jpeg_enc_reg_dump {
+ uint32_t start_offset;
+ uint32_t end_offset;
+};
+
struct cam_jpeg_enc_device_hw_info {
struct cam_jpeg_enc_reg_offsets reg_offset;
struct cam_jpeg_enc_regval reg_val;
struct cam_jpeg_enc_int_status int_status;
+ struct cam_jpeg_enc_reg_dump reg_dump;
};
enum cam_jpeg_enc_core_state {
diff --git a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c
index 26bdc31250d1..7f677a1bfc4d 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c
@@ -93,6 +93,20 @@ static int __cam_lrme_ctx_config_dev_in_activated(struct cam_context *ctx,
return rc;
}
+static int __cam_lrme_ctx_dump_dev_in_activated(struct cam_context *ctx,
+ struct cam_dump_req_cmd *cmd)
+{
+ int rc = 0;
+
+ CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
+
+ rc = cam_context_dump_dev_to_hw(ctx, cmd);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Failed to dump device");
+
+ return rc;
+}
+
static int __cam_lrme_ctx_flush_dev_in_activated(struct cam_context *ctx,
struct cam_flush_dev_cmd *cmd)
{
@@ -204,6 +218,7 @@ static struct cam_ctx_ops
.release_dev = __cam_lrme_ctx_release_dev_in_activated,
.stop_dev = __cam_lrme_ctx_stop_dev_in_activated,
.flush_dev = __cam_lrme_ctx_flush_dev_in_activated,
+ .dump_dev = __cam_lrme_ctx_dump_dev_in_activated,
},
.crm_ops = {},
.irq_ops = __cam_lrme_ctx_handle_irq_in_activated,
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
index f46426f50e97..d87ce0202ecf 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
@@ -656,6 +656,54 @@ static int cam_lrme_mgr_hw_release(void *hw_mgr_priv, void *hw_release_args)
return rc;
}
+static int cam_lrme_mgr_hw_dump(void *hw_mgr_priv, void *hw_dump_args)
+{
+ struct cam_hw_dump_args *dump_args = hw_dump_args;
+ struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+ struct cam_lrme_device *hw_device;
+ int rc = 0;
+ uint32_t device_index;
+ struct cam_lrme_hw_dump_args lrme_dump_args;
+
+ device_index = CAM_LRME_DECODE_DEVICE_INDEX(dump_args->ctxt_to_hw_map);
+ if (device_index >= hw_mgr->device_count) {
+ CAM_ERR(CAM_LRME, "Invalid device index %d", device_index);
+ return -EPERM;
+ }
+
+ CAM_DBG(CAM_LRME, "Start device index %d", device_index);
+
+ rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device);
+ if (rc) {
+ CAM_ERR(CAM_LRME, "Failed to get hw device");
+ return rc;
+ }
+ rc = cam_mem_get_cpu_buf(dump_args->buf_handle,
+ &lrme_dump_args.cpu_addr,
+ &lrme_dump_args.buf_len);
+ if (!lrme_dump_args.cpu_addr || !lrme_dump_args.buf_len || rc) {
+ CAM_ERR(CAM_LRME,
+ "Invalid addr %u len %zu rc %d",
+ dump_args->buf_handle, lrme_dump_args.buf_len, rc);
+ return rc;
+ }
+ lrme_dump_args.offset = dump_args->offset;
+ lrme_dump_args.request_id = dump_args->request_id;
+
+ rc = hw_device->hw_intf.hw_ops.process_cmd(
+ hw_device->hw_intf.hw_priv,
+ CAM_LRME_HW_CMD_DUMP,
+ &lrme_dump_args,
+ sizeof(struct cam_lrme_hw_dump_args));
+ dump_args->offset = lrme_dump_args.offset;
+
+ rc = cam_mem_put_cpu_buf(dump_args->buf_handle);
+ if (rc)
+ CAM_ERR(CAM_LRME, "Cpu put failed handle %u",
+ dump_args->buf_handle);
+ return rc;
+}
+
static int cam_lrme_mgr_hw_flush(void *hw_mgr_priv, void *hw_flush_args)
{
 int rc = 0, i;
struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
@@ -1153,6 +1201,7 @@ int cam_lrme_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf,
hw_mgr_intf->hw_write = NULL;
hw_mgr_intf->hw_close = NULL;
hw_mgr_intf->hw_flush = cam_lrme_mgr_hw_flush;
+ hw_mgr_intf->hw_dump = cam_lrme_mgr_hw_dump;
g_lrme_hw_mgr.event_cb = cam_lrme_dev_buf_done_cb;
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
index 9e14f1b2958d..880eaa4c20a6 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
@@ -10,6 +10,7 @@
* GNU General Public License for more details.
*/
+#include <linux/timer.h>
#include "cam_lrme_hw_core.h"
#include "cam_lrme_hw_soc.h"
#include "cam_smmu_api.h"
@@ -28,6 +29,124 @@ static void cam_lrme_dump_registers(void __iomem *base)
cam_io_dump(base, 0x900, (0x928 - 0x900) / 0x4);
}
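+/*
+ * cam_lrme_dump_regs_to_buf()
+ *
+ * Appends an "LRME_REG:" header followed by (offset, value) pairs for
+ * the whole LRME register space to the dump buffer.
+ */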
+static int cam_lrme_dump_regs_to_buf(
+ struct cam_lrme_frame_request *req,
+ struct cam_hw_info *lrme_hw,
+ struct cam_lrme_hw_dump_args *dump_args)
+{
+ struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+ int i;
+ char *dst;
+ uint32_t *addr, *start;
+ uint32_t num_reg, min_len, remain_len;
+ struct cam_lrme_hw_dump_header *hdr;
+
+ remain_len = dump_args->buf_len - dump_args->offset;
+ min_len = 2 * (sizeof(struct cam_lrme_hw_dump_header) +
+ CAM_LRME_HW_DUMP_TAG_MAX_LEN) +
+ soc_info->reg_map[0].size;
+ if (remain_len < min_len) {
+ CAM_ERR(CAM_LRME, "dump buffer exhausted %d %d",
+ remain_len, min_len);
+ return 0;
+ }
+ dst = (char *)dump_args->cpu_addr + dump_args->offset;
+ hdr = (struct cam_lrme_hw_dump_header *)dst;
+ snprintf(hdr->tag, CAM_LRME_HW_DUMP_TAG_MAX_LEN,
+ "LRME_REG:");
+ hdr->word_size = sizeof(uint32_t);
+ addr = (uint32_t *)(dst + sizeof(struct cam_lrme_hw_dump_header));
+ start = addr;
+ *addr++ = soc_info->index;
+ num_reg = soc_info->reg_map[0].size/4;
+ for (i = 0; i < num_reg; i++) {
+ *addr++ = soc_info->mem_block[0]->start + (i*4);
+ *addr++ = cam_io_r(soc_info->reg_map[0].mem_base + (i*4));
+ }
+ hdr->size = hdr->word_size * (addr - start);
+ dump_args->offset += hdr->size +
+ sizeof(struct cam_lrme_hw_dump_header);
+ CAM_DBG(CAM_LRME, "offset %d", dump_args->offset);
+ return 0;
+}
+
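+/*
+ * cam_lrme_hw_dump()
+ *
+ * If the request is still with the LRME core (submitted or in process)
+ * and has exceeded CAM_LRME_RESPONSE_TIME_THRESHOLD, writes an
+ * "LRME_REQ:" header with the request id and submit/current timestamps
+ * to the dump buffer and then dumps the LRME registers.
+ */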
+static int cam_lrme_hw_dump(struct cam_hw_info *lrme_hw,
+ struct cam_lrme_hw_dump_args *dump_args)
+{
+ struct cam_lrme_core *lrme_core =
+ (struct cam_lrme_core *)lrme_hw->core_info;
+ struct cam_lrme_frame_request *req = NULL;
+ struct timeval cur_time;
+ uint64_t diff = 0;
+ char *dst;
+ uint64_t *addr, *start;
+ uint32_t min_len, remain_len;
+ struct cam_lrme_hw_dump_header *hdr;
+
+ mutex_lock(&lrme_hw->hw_mutex);
+ if (lrme_hw->hw_state == CAM_HW_STATE_POWER_DOWN) {
+ CAM_DBG(CAM_LRME, "LRME HW is in off state");
+ mutex_unlock(&lrme_hw->hw_mutex);
+ return 0;
+ }
+ mutex_unlock(&lrme_hw->hw_mutex);
+ if (lrme_core->req_submit &&
+ lrme_core->req_submit->req_id == dump_args->request_id)
+ req = lrme_core->req_submit;
+ else if (lrme_core->req_proc &&
+ lrme_core->req_proc->req_id == dump_args->request_id)
+ req = lrme_core->req_proc;
+ if (!req) {
+ CAM_DBG(CAM_LRME, "LRME req %lld not with hw",
+ dump_args->request_id);
+ return 0;
+ }
+ cam_common_util_get_curr_timestamp(&cur_time);
+ diff = cam_common_util_get_time_diff(&cur_time,
+ &req->submit_timestamp);
+ if (diff < CAM_LRME_RESPONSE_TIME_THRESHOLD) {
+ CAM_INFO(CAM_LRME, "No error req %lld %ld:%06ld %ld:%06ld",
+ dump_args->request_id,
+ req->submit_timestamp.tv_sec,
+ req->submit_timestamp.tv_usec,
+ cur_time.tv_sec,
+ cur_time.tv_usec);
+ return 0;
+ }
+ CAM_INFO(CAM_LRME, "Error req %lld %ld:%06ld %ld:%06ld",
+ dump_args->request_id,
+ req->submit_timestamp.tv_sec,
+ req->submit_timestamp.tv_usec,
+ cur_time.tv_sec,
+ cur_time.tv_usec);
+ remain_len = dump_args->buf_len - dump_args->offset;
+ min_len = 2 * (sizeof(struct cam_lrme_hw_dump_header) +
+ CAM_LRME_HW_DUMP_TAG_MAX_LEN);
+ if (remain_len < min_len) {
+ CAM_ERR(CAM_LRME, "dump buffer exhaust %d %d",
+ remain_len, min_len);
+ return 0;
+ }
+ dst = (char *)dump_args->cpu_addr + dump_args->offset;
+ hdr = (struct cam_lrme_hw_dump_header *)dst;
+ snprintf(hdr->tag, CAM_LRME_HW_DUMP_TAG_MAX_LEN,
+ "LRME_REQ:");
+ hdr->word_size = sizeof(uint64_t);
+ addr = (uint64_t *)(dst + sizeof(struct cam_lrme_hw_dump_header));
+ start = addr;
+ *addr++ = req->req_id;
+ *addr++ = req->submit_timestamp.tv_sec;
+ *addr++ = req->submit_timestamp.tv_usec;
+ *addr++ = cur_time.tv_sec;
+ *addr++ = cur_time.tv_usec;
+ hdr->size = hdr->word_size * (addr - start);
+ dump_args->offset += hdr->size +
+ sizeof(struct cam_lrme_hw_dump_header);
+ cam_lrme_dump_regs_to_buf(req, lrme_hw, dump_args);
+ return 0;
+}
+
static void cam_lrme_cdm_write_reg_val_pair(uint32_t *buffer,
uint32_t *index, uint32_t reg_offset, uint32_t reg_value)
{
@@ -964,7 +1083,7 @@ int cam_lrme_hw_submit_req(void *hw_priv, void *hw_submit_args,
CAM_ERR(CAM_LRME, "Submit req failed");
goto error;
}
-
+ cam_common_util_get_curr_timestamp(&frame_req->submit_timestamp);
switch (lrme_core->state) {
case CAM_LRME_CORE_STATE_PROCESSING:
lrme_core->state = CAM_LRME_CORE_STATE_REQ_PROC_PEND;
@@ -1274,6 +1393,12 @@ int cam_lrme_hw_process_cmd(void *hw_priv, uint32_t cmd_type,
break;
}
+ case CAM_LRME_HW_CMD_DUMP: {
+ struct cam_lrme_hw_dump_args *dump_args =
+ (struct cam_lrme_hw_dump_args *)cmd_args;
+ rc = cam_lrme_hw_dump(lrme_hw, dump_args);
+ break;
+ }
default:
break;
}
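Note on the record format added above: every entry that cam_lrme_hw_dump() and cam_lrme_dump_regs_to_buf() write starts with a struct cam_lrme_hw_dump_header (tag string, payload size in bytes, payload word size) followed by the payload, and dump_args->offset is advanced past both. A minimal user-space sketch of walking such a buffer; the header definition is copied from this patch, while the walker itself is illustrative and assumes the buffer was captured verbatim with the same structure padding:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define CAM_LRME_HW_DUMP_TAG_MAX_LEN 32

    struct cam_lrme_hw_dump_header {
    	char tag[CAM_LRME_HW_DUMP_TAG_MAX_LEN];
    	uint64_t size;      /* payload size in bytes */
    	uint32_t word_size; /* size of one payload word */
    };

    /* Print every record header until len bytes are consumed. */
    static void walk_lrme_dump(const uint8_t *buf, size_t len)
    {
    	size_t off = 0;
    	struct cam_lrme_hw_dump_header hdr;

    	while (off + sizeof(hdr) <= len) {
    		memcpy(&hdr, buf + off, sizeof(hdr));
    		if (!hdr.size || off + sizeof(hdr) + hdr.size > len)
    			break;
    		printf("%.*s %llu bytes, word size %u\n",
    		       CAM_LRME_HW_DUMP_TAG_MAX_LEN, hdr.tag,
    		       (unsigned long long)hdr.size, hdr.word_size);
    		off += sizeof(hdr) + hdr.size;
    	}
    }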
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h
index c0786f53c19c..1cb6e34f5c69 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -41,6 +41,8 @@
#define CAM_LRME_FE_IRQ_MASK 0x0
#define CAM_LRME_MAX_REG_PAIR_NUM 60
+#define CAM_LRME_RESPONSE_TIME_THRESHOLD 100000
+#define CAM_LRME_HW_DUMP_TAG_MAX_LEN 32
/**
* enum cam_lrme_irq_set
@@ -439,6 +441,20 @@ struct cam_lrme_hw_info {
struct cam_lrme_titan_reg titan_reg;
};
+/**
+ * struct cam_lrme_hw_dump_header : LRME hw dump header
+ *
+ * @tag : LRME hw dump header tag
+ * @size : Size of data
+ * @word_size : size of each word
+ */
+
+struct cam_lrme_hw_dump_header {
+ char tag[CAM_LRME_HW_DUMP_TAG_MAX_LEN];
+ uint64_t size;
+ uint32_t word_size;
+};
+
int cam_lrme_hw_process_irq(void *priv, void *data);
int cam_lrme_hw_submit_req(void *hw_priv, void *hw_submit_args,
uint32_t arg_size);
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h
index 4cd643f0413a..89485e1deeda 100644
--- a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -66,12 +66,14 @@ enum cam_lrme_cb_type {
* @CAM_LRME_HW_CMD_REGISTER_CB : register HW manager callback
* @CAM_LRME_HW_CMD_SUBMIT : Submit frame to HW
* @CAM_LRME_HW_CMD_DUMP_REGISTER : dump register values
+ * @CAM_LRME_HW_CMD_DUMP : dump register values to buffer
*/
enum cam_lrme_hw_cmd_type {
CAM_LRME_HW_CMD_PREPARE_HW_UPDATE,
CAM_LRME_HW_CMD_REGISTER_CB,
CAM_LRME_HW_CMD_SUBMIT,
CAM_LRME_HW_CMD_DUMP_REGISTER,
+ CAM_LRME_HW_CMD_DUMP,
};
/**
@@ -94,6 +96,7 @@ enum cam_lrme_hw_reset_type {
* @hw_device : Pointer to HW device
* @hw_update_entries : List of hw_update_entries
* @num_hw_update_entries : number of hw_update_entries
+ * @submit_timestamp : timestamp of submitting request with hw
*/
struct cam_lrme_frame_request {
struct list_head frame_list;
@@ -102,6 +105,7 @@ struct cam_lrme_frame_request {
struct cam_lrme_device *hw_device;
struct cam_hw_update_entry hw_update_entries[CAM_LRME_MAX_HW_ENTRIES];
uint32_t num_hw_update_entries;
+ struct timeval submit_timestamp;
};
/**
@@ -199,4 +203,19 @@ struct cam_lrme_hw_submit_args {
struct cam_lrme_frame_request *frame_req;
};
+/**
+ * struct cam_lrme_hw_dump_args : Args for dump request
+ *
+ * @cpu_addr : start address of the target buffer
+ * @offset : offset of the buffer
+ * @request_id : Issued request id
+ * @buf_len : Length of target buffer
+ */
+struct cam_lrme_hw_dump_args {
+ uintptr_t cpu_addr;
+ uint64_t offset;
+ uint64_t request_id;
+ size_t buf_len;
+};
+
#endif /* _CAM_LRME_HW_INTF_H_ */
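For orientation, a rough sketch of how a caller in the HW manager might package the new CAM_LRME_HW_CMD_DUMP opcode once it has a kernel mapping of the dump buffer; the process_cmd argument order is assumed from the standard cam_hw_intf ops, and the buffer mapping/validation steps are elided:

    /* Sketch only: all fields come from struct cam_lrme_hw_dump_args above. */
    static int lrme_issue_hw_dump(struct cam_hw_intf *hw_intf,
    	uintptr_t cpu_addr, size_t buf_len, uint64_t offset, uint64_t req_id)
    {
    	struct cam_lrme_hw_dump_args dump_args;

    	dump_args.cpu_addr = cpu_addr;     /* kernel mapping of the dump buffer */
    	dump_args.buf_len = buf_len;
    	dump_args.offset = offset;         /* current write position */
    	dump_args.request_id = req_id;

    	return hw_intf->hw_ops.process_cmd(hw_intf->hw_priv,
    		CAM_LRME_HW_CMD_DUMP, &dump_args, sizeof(dump_args));
    }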
diff --git a/drivers/media/platform/msm/npu/npu_mgr.c b/drivers/media/platform/msm/npu/npu_mgr.c
index 6669e29f532c..29fb49b928df 100644
--- a/drivers/media/platform/msm/npu/npu_mgr.c
+++ b/drivers/media/platform/msm/npu/npu_mgr.c
@@ -580,17 +580,6 @@ static struct npu_network *alloc_network(struct npu_host_ctx *ctx,
WARN_ON(!mutex_is_locked(&ctx->lock));
for (i = 0; i < MAX_LOADED_NETWORK; i++) {
- if ((network->id != 0) &&
- (network->client != client)) {
- pr_err("NPU is used by other client now\n");
- return NULL;
- }
-
- network++;
- }
-
- network = ctx->networks;
- for (i = 0; i < MAX_LOADED_NETWORK; i++) {
if (network->id == 0)
break;
diff --git a/drivers/media/platform/msm/npu_v2/npu_common.h b/drivers/media/platform/msm/npu_v2/npu_common.h
index 505ef6e7529b..28f9cc8d7990 100644
--- a/drivers/media/platform/msm/npu_v2/npu_common.h
+++ b/drivers/media/platform/msm/npu_v2/npu_common.h
@@ -47,11 +47,14 @@
#define NUM_MAX_CLK_NUM 48
#define NPU_MAX_REGULATOR_NUM 2
-#define NPU_MAX_DT_NAME_LEN 21
+#define NPU_MAX_DT_NAME_LEN 21
#define NPU_MAX_PWRLEVELS 8
-#define NPU_MAX_STATS_BUF_SIZE 16384
+#define NPU_MAX_STATS_BUF_SIZE 16384
+#define NPU_MAX_PATCH_NUM 160
#define NPU_MAX_BW_DEVS 4
+#define PERF_MODE_DEFAULT 0
+
enum npu_power_level {
NPU_PWRLEVEL_MINSVS = 0,
NPU_PWRLEVEL_LOWSVS,
@@ -167,6 +170,8 @@ struct npu_reg {
* @uc_pwrlevel - power level from user driver setting
* @perf_mode_override - perf mode from sysfs to override perf mode
* settings from user driver
+ * @dcvs_mode - dcvs mode setting from sysfs, forwarded to
+ * firmware as a property
* @devbw - bw device
*/
struct npu_pwrctrl {
@@ -186,6 +191,8 @@ struct npu_pwrctrl {
uint32_t cdsprm_pwrlevel;
uint32_t fmax_pwrlevel;
uint32_t perf_mode_override;
+ uint32_t dcvs_mode;
+ uint32_t cur_dcvs_activity;
};
/*
@@ -249,7 +256,7 @@ struct npu_device {
struct npu_io_data core_io;
struct npu_io_data tcm_io;
struct npu_io_data cc_io;
- struct npu_io_data qdsp_io;
+ struct npu_io_data tcsr_io;
struct npu_io_data apss_shared_io;
struct npu_io_data qfprom_io;
diff --git a/drivers/media/platform/msm/npu_v2/npu_debugfs.c b/drivers/media/platform/msm/npu_v2/npu_debugfs.c
index f277e446d7fd..137e733981e1 100644
--- a/drivers/media/platform/msm/npu_v2/npu_debugfs.c
+++ b/drivers/media/platform/msm/npu_v2/npu_debugfs.c
@@ -29,8 +29,7 @@
*/
static int npu_debug_open(struct inode *inode, struct file *file);
static int npu_debug_release(struct inode *inode, struct file *file);
-static ssize_t npu_debug_reg_write(struct file *file,
- const char __user *user_buf, size_t count, loff_t *ppos);
+static int npu_debug_reg_release(struct inode *inode, struct file *file);
static ssize_t npu_debug_reg_read(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos);
static ssize_t npu_debug_off_write(struct file *file,
@@ -45,13 +44,12 @@ static ssize_t npu_debug_ctrl_write(struct file *file,
/*
* Variables
*/
-struct npu_device *g_npu_dev;
+static struct npu_device *g_npu_dev;
static const struct file_operations npu_reg_fops = {
.open = npu_debug_open,
- .release = npu_debug_release,
+ .release = npu_debug_reg_release,
.read = npu_debug_reg_read,
- .write = npu_debug_reg_write,
};
static const struct file_operations npu_off_fops = {
@@ -88,6 +86,11 @@ static int npu_debug_open(struct inode *inode, struct file *file)
static int npu_debug_release(struct inode *inode, struct file *file)
{
+ return 0;
+}
+
+static int npu_debug_reg_release(struct inode *inode, struct file *file)
+{
struct npu_device *npu_dev = file->private_data;
struct npu_debugfs_ctx *debugfs;
@@ -102,41 +105,6 @@ static int npu_debug_release(struct inode *inode, struct file *file)
/*
* Function Implementations - Reg Read/Write
*/
-static ssize_t npu_debug_reg_write(struct file *file,
- const char __user *user_buf, size_t count, loff_t *ppos)
-{
- size_t off;
- uint32_t data, cnt;
- struct npu_device *npu_dev = file->private_data;
- char buf[24];
-
- if (count >= sizeof(buf))
- return -EINVAL;
-
- if (copy_from_user(buf, user_buf, count))
- return -EFAULT;
-
- buf[count] = 0; /* end of string */
-
- cnt = sscanf(buf, "%zx %x", &off, &data);
- NPU_DBG("%s 0x%zx, 0x%08x\n", buf, off, data);
-
- return count;
- if (cnt < 2)
- return -EINVAL;
-
- if (npu_enable_core_power(npu_dev))
- return -EPERM;
-
- REGW(npu_dev, off, data);
-
- npu_disable_core_power(npu_dev);
-
- NPU_DBG("write: addr=%zx data=%x\n", off, data);
-
- return count;
-}
-
static ssize_t npu_debug_reg_read(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos)
{
@@ -255,6 +223,7 @@ static ssize_t npu_debug_off_read(struct file *file,
len = scnprintf(buf, sizeof(buf), "offset=0x%08x cnt=%d\n",
debugfs->reg_off, debugfs->reg_cnt);
+ len = min(len, count);
if (copy_to_user(user_buf, buf, len)) {
NPU_ERR("failed to copy to user\n");
@@ -283,49 +252,21 @@ static ssize_t npu_debug_log_read(struct file *file,
mutex_lock(&debugfs->log_lock);
if (debugfs->log_num_bytes_buffered != 0) {
- if ((debugfs->log_read_index +
- debugfs->log_num_bytes_buffered) >
- debugfs->log_buf_size) {
- /* Wrap around case */
- uint32_t remaining_to_end = debugfs->log_buf_size -
- debugfs->log_read_index;
- uint8_t *src_addr = debugfs->log_buf +
- debugfs->log_read_index;
- void __user *dst_addr = (void __user *) user_buf;
-
- if (copy_to_user(dst_addr, src_addr,
- remaining_to_end)) {
- NPU_ERR("failed to copy to user\n");
- mutex_unlock(&debugfs->log_lock);
- return -EFAULT;
- }
- src_addr = debugfs->log_buf;
- dst_addr = (void __user *)(user_buf + remaining_to_end);
- if (copy_to_user(dst_addr, src_addr,
- debugfs->log_num_bytes_buffered -
- remaining_to_end)) {
- NPU_ERR("failed to copy to user\n");
- mutex_unlock(&debugfs->log_lock);
- return -EFAULT;
- }
- debugfs->log_read_index =
- debugfs->log_num_bytes_buffered -
- remaining_to_end;
- } else {
- if (copy_to_user(user_buf, (debugfs->log_buf +
- debugfs->log_read_index),
- debugfs->log_num_bytes_buffered)) {
- NPU_ERR("failed to copy to user\n");
- mutex_unlock(&debugfs->log_lock);
- return -EFAULT;
- }
- debugfs->log_read_index +=
- debugfs->log_num_bytes_buffered;
- if (debugfs->log_read_index == debugfs->log_buf_size)
- debugfs->log_read_index = 0;
+ len = min(debugfs->log_num_bytes_buffered,
+ debugfs->log_buf_size - debugfs->log_read_index);
+ len = min(count, len);
+ if (copy_to_user(user_buf, (debugfs->log_buf +
+ debugfs->log_read_index), len)) {
+ NPU_ERR("failed to copy to user\n");
+ mutex_unlock(&debugfs->log_lock);
+ return -EFAULT;
}
- len = debugfs->log_num_bytes_buffered;
- debugfs->log_num_bytes_buffered = 0;
+ debugfs->log_read_index += len;
+ if (debugfs->log_read_index == debugfs->log_buf_size)
+ debugfs->log_read_index = 0;
+
+ debugfs->log_num_bytes_buffered -= len;
+ *ppos += len;
}
/* mutex log unlock */
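With the simplified log path above, each read() returns at most one contiguous chunk of the ring buffer (bounded by the wrap point and by the caller's count) and advances log_read_index, so a consumer just keeps calling read() until no more data comes back. A hedged user-space sketch; the debugfs node path is assumed and depends on where debugfs is mounted and on the node name the driver registers:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
    	/* Illustrative path; the actual node name may differ. */
    	int fd = open("/sys/kernel/debug/npu/log", O_RDONLY);
    	char buf[4096];
    	ssize_t n;

    	if (fd < 0)
    		return 1;
    	while ((n = read(fd, buf, sizeof(buf))) > 0)
    		fwrite(buf, 1, n, stdout);
    	close(fd);
    	return 0;
    }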
diff --git a/drivers/media/platform/msm/npu_v2/npu_dev.c b/drivers/media/platform/msm/npu_v2/npu_dev.c
index 1ad17424960b..932e73dcbd9e 100644
--- a/drivers/media/platform/msm/npu_v2/npu_dev.c
+++ b/drivers/media/platform/msm/npu_v2/npu_dev.c
@@ -35,7 +35,6 @@
#define DDR_MAPPED_START_ADDR 0x80000000
#define DDR_MAPPED_SIZE 0x60000000
-#define PERF_MODE_DEFAULT 0
#define MBOX_OP_TIMEOUTMS 1000
/*
@@ -62,6 +61,12 @@ static ssize_t perf_mode_override_show(struct device *dev,
static ssize_t perf_mode_override_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count);
+static ssize_t dcvs_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+static ssize_t dcvs_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
static ssize_t boot_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count);
@@ -89,6 +94,11 @@ static int npu_exec_network_v2(struct npu_client *client,
unsigned long arg);
static int npu_receive_event(struct npu_client *client,
unsigned long arg);
+static int npu_set_fw_state(struct npu_client *client, uint32_t enable);
+static int npu_set_property(struct npu_client *client,
+ unsigned long arg);
+static int npu_get_property(struct npu_client *client,
+ unsigned long arg);
static long npu_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
static unsigned int npu_poll(struct file *filp, struct poll_table_struct *p);
@@ -157,11 +167,13 @@ static DEVICE_ATTR_RO(caps);
static DEVICE_ATTR_RW(pwr);
static DEVICE_ATTR_RW(perf_mode_override);
static DEVICE_ATTR_WO(boot);
+static DEVICE_ATTR_RW(dcvs_mode);
static struct attribute *npu_fs_attrs[] = {
&dev_attr_caps.attr,
&dev_attr_pwr.attr,
&dev_attr_perf_mode_override.attr,
+ &dev_attr_dcvs_mode.attr,
&dev_attr_boot.attr,
NULL
};
@@ -277,8 +289,8 @@ static ssize_t perf_mode_override_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct npu_client client;
struct npu_device *npu_dev = dev_get_drvdata(dev);
- struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
uint32_t val;
int rc;
@@ -289,11 +301,53 @@ static ssize_t perf_mode_override_store(struct device *dev,
}
val = min(val, npu_dev->pwrctrl.num_pwrlevels);
- mutex_lock(&host_ctx->lock);
- npu_dev->pwrctrl.perf_mode_override = val;
- NPU_INFO("setting uc_pwrlevel_override to %d\n", val);
- npu_set_power_level(npu_dev, true);
- mutex_unlock(&host_ctx->lock);
+ NPU_INFO("setting perf mode to %d\n", val);
+ client.npu_dev = npu_dev;
+ npu_host_set_perf_mode(&client, 0, val);
+
+ return count;
+}
+
+static ssize_t dcvs_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct npu_device *npu_dev = dev_get_drvdata(dev);
+ struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", pwr->dcvs_mode);
+}
+
+static ssize_t dcvs_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct npu_device *npu_dev = dev_get_drvdata(dev);
+ struct msm_npu_property prop;
+ uint32_t val;
+ int ret = 0;
+
+ ret = kstrtou32(buf, 10, &val);
+ if (ret) {
+ NPU_ERR("Invalid input for dcvs mode setting\n");
+ return -EINVAL;
+ }
+
+ val = min(val, (uint32_t)(npu_dev->pwrctrl.num_pwrlevels - 1));
+ NPU_DBG("sysfs: setting dcvs_mode to %d\n", val);
+
+ prop.prop_id = MSM_NPU_PROP_ID_DCVS_MODE;
+ prop.num_of_params = 1;
+ prop.network_hdl = 0;
+ prop.prop_param[0] = val;
+
+ ret = npu_host_set_fw_property(npu_dev, &prop);
+ if (ret) {
+ NPU_ERR("npu_host_set_fw_property failed %d\n", ret);
+ return ret;
+ }
+
+ npu_dev->pwrctrl.dcvs_mode = val;
return count;
}
@@ -338,20 +392,20 @@ int npu_enable_core_power(struct npu_device *npu_dev)
mutex_lock(&npu_dev->dev_lock);
NPU_DBG("Enable core power %d\n", pwr->pwr_vote_num);
if (!pwr->pwr_vote_num) {
- ret = npu_enable_regulators(npu_dev);
+ ret = npu_set_bw(npu_dev, 100, 100);
if (ret)
goto fail;
- ret = npu_set_bw(npu_dev, 100, 100);
+ ret = npu_enable_regulators(npu_dev);
if (ret) {
- npu_disable_regulators(npu_dev);
+ npu_set_bw(npu_dev, 0, 0);
goto fail;
}
ret = npu_enable_core_clocks(npu_dev);
if (ret) {
- npu_set_bw(npu_dev, 0, 0);
npu_disable_regulators(npu_dev);
+ npu_set_bw(npu_dev, 0, 0);
goto fail;
}
npu_resume_devbw(npu_dev);
@@ -378,11 +432,12 @@ void npu_disable_core_power(struct npu_device *npu_dev)
if (!pwr->pwr_vote_num) {
npu_suspend_devbw(npu_dev);
npu_disable_core_clocks(npu_dev);
- npu_set_bw(npu_dev, 0, 0);
npu_disable_regulators(npu_dev);
+ npu_set_bw(npu_dev, 0, 0);
pwr->active_pwrlevel = pwr->default_pwrlevel;
pwr->uc_pwrlevel = pwr->max_pwrlevel;
pwr->cdsprm_pwrlevel = pwr->max_pwrlevel;
+ pwr->cur_dcvs_activity = pwr->num_pwrlevels;
NPU_DBG("setting back to power level=%d\n",
pwr->active_pwrlevel);
}
@@ -443,14 +498,6 @@ static uint32_t npu_calc_power_level(struct npu_device *npu_dev)
uint32_t uc_pwr_level = npu_dev->pwrctrl.uc_pwrlevel;
/*
- * if perf_mode_override is not 0, use it to override
- * uc_pwrlevel
- */
- if (npu_dev->pwrctrl.perf_mode_override > 0)
- uc_pwr_level = npu_power_level_from_index(npu_dev,
- npu_dev->pwrctrl.perf_mode_override - 1);
-
- /*
* pick the lowest power level between thermal power and usecase power
* settings
*/
@@ -556,11 +603,8 @@ int npu_set_uc_power_level(struct npu_device *npu_dev,
struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
uint32_t uc_pwrlevel_to_set;
- if (perf_mode == PERF_MODE_DEFAULT)
- uc_pwrlevel_to_set = pwr->default_pwrlevel;
- else
- uc_pwrlevel_to_set = npu_power_level_from_index(npu_dev,
- perf_mode - 1);
+ uc_pwrlevel_to_set = npu_power_level_from_index(npu_dev,
+ perf_mode - 1);
if (uc_pwrlevel_to_set > pwr->max_pwrlevel)
uc_pwrlevel_to_set = pwr->max_pwrlevel;
@@ -839,6 +883,7 @@ static void npu_disable_regulators(struct npu_device *npu_dev)
if (host_ctx->power_vote_num > 0) {
for (i = 0; i < npu_dev->regulator_num; i++)
regulator_disable(regulators[i].regulator);
+
host_ctx->power_vote_num--;
}
}
@@ -1130,9 +1175,10 @@ static int npu_load_network_v2(struct npu_client *client,
return -EFAULT;
}
- if (req.patch_info_num > MSM_NPU_MAX_PATCH_LAYER_NUM) {
+ if ((req.patch_info_num > NPU_MAX_PATCH_NUM) ||
+ (req.patch_info_num == 0)) {
NPU_ERR("Invalid patch info num %d[max:%d]\n",
- req.patch_info_num, MSM_NPU_MAX_PATCH_LAYER_NUM);
+ req.patch_info_num, NPU_MAX_PATCH_NUM);
return -EINVAL;
}
@@ -1217,9 +1263,10 @@ static int npu_exec_network_v2(struct npu_client *client,
return -EFAULT;
}
- if (req.patch_buf_info_num > MSM_NPU_MAX_PATCH_LAYER_NUM) {
+ if ((req.patch_buf_info_num > NPU_MAX_PATCH_NUM) ||
+ (req.patch_buf_info_num == 0)) {
NPU_ERR("Invalid patch buf info num %d[max:%d]\n",
- req.patch_buf_info_num, MSM_NPU_MAX_PATCH_LAYER_NUM);
+ req.patch_buf_info_num, NPU_MAX_PATCH_NUM);
return -EINVAL;
}
@@ -1313,6 +1360,43 @@ static int npu_receive_event(struct npu_client *client,
return ret;
}
+static int npu_set_fw_state(struct npu_client *client, uint32_t enable)
+{
+ struct npu_device *npu_dev = client->npu_dev;
+ struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+ int rc = 0;
+
+ if (host_ctx->network_num > 0) {
+ NPU_ERR("Need to unload network first\n");
+ mutex_unlock(&npu_dev->dev_lock);
+ return -EINVAL;
+ }
+
+ if (enable) {
+ NPU_DBG("enable fw\n");
+ rc = enable_fw(npu_dev);
+ if (rc) {
+ NPU_ERR("enable fw failed\n");
+ } else {
+ host_ctx->npu_init_cnt++;
+ NPU_DBG("npu_init_cnt %d\n",
+ host_ctx->npu_init_cnt);
+ /* set npu to lowest power level */
+ if (npu_set_uc_power_level(npu_dev, 1))
+ NPU_WARN("Failed to set uc power level\n");
+ }
+ } else if (host_ctx->npu_init_cnt > 0) {
+ NPU_DBG("disable fw\n");
+ disable_fw(npu_dev);
+ host_ctx->npu_init_cnt--;
+ NPU_DBG("npu_init_cnt %d\n", host_ctx->npu_init_cnt);
+ } else {
+ NPU_ERR("can't disable fw %d\n", host_ctx->npu_init_cnt);
+ }
+
+ return rc;
+}
+
static int npu_set_property(struct npu_client *client,
unsigned long arg)
{
@@ -1327,9 +1411,19 @@ static int npu_set_property(struct npu_client *client,
}
switch (prop.prop_id) {
+ case MSM_NPU_PROP_ID_FW_STATE:
+ ret = npu_set_fw_state(client,
+ (uint32_t)prop.prop_param[0]);
+ break;
+ case MSM_NPU_PROP_ID_PERF_MODE:
+ ret = npu_host_set_perf_mode(client,
+ (uint32_t)prop.network_hdl,
+ (uint32_t)prop.prop_param[0]);
+ break;
default:
- NPU_ERR("Not supported property %d\n", prop.prop_id);
- ret = -EINVAL;
+ ret = npu_host_set_fw_property(client->npu_dev, &prop);
+ if (ret)
+ NPU_ERR("npu_host_set_fw_property failed\n");
break;
}
@@ -1355,6 +1449,10 @@ static int npu_get_property(struct npu_client *client,
case MSM_NPU_PROP_ID_FW_STATE:
prop.prop_param[0] = host_ctx->fw_state;
break;
+ case MSM_NPU_PROP_ID_PERF_MODE:
+ prop.prop_param[0] = npu_host_get_perf_mode(client,
+ (uint32_t)prop.network_hdl);
+ break;
case MSM_NPU_PROP_ID_PERF_MODE_MAX:
prop.prop_param[0] = npu_dev->pwrctrl.num_pwrlevels;
break;
@@ -1365,13 +1463,17 @@ static int npu_get_property(struct npu_client *client,
prop.prop_param[0] = npu_dev->hw_version;
break;
default:
- NPU_ERR("Not supported property %d\n", prop.prop_id);
- return -EINVAL;
+ ret = npu_host_get_fw_property(client->npu_dev, &prop);
+ if (ret) {
+ NPU_ERR("npu_host_set_fw_property failed\n");
+ return ret;
+ }
+ break;
}
ret = copy_to_user(argp, &prop, sizeof(prop));
if (ret) {
- pr_err("fail to copy to user\n");
+ NPU_ERR("fail to copy to user\n");
return -EFAULT;
}
@@ -1730,6 +1832,7 @@ static int npu_of_parse_pwrlevels(struct npu_device *npu_dev,
pwr->uc_pwrlevel = pwr->max_pwrlevel;
pwr->perf_mode_override = 0;
pwr->cdsprm_pwrlevel = pwr->max_pwrlevel;
+ pwr->cur_dcvs_activity = pwr->num_pwrlevels;
return 0;
}
@@ -1854,8 +1957,8 @@ static int npu_ipcc_bridge_mbox_send_data(struct mbox_chan *chan, void *data)
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
unsigned long flags;
- NPU_DBG("Generating IRQ for client_id: %u; signal_id: %u\n",
- ipcc_mbox_chan->client_id, ipcc_mbox_chan->signal_id);
+ NPU_DBG("Generating IRQ for signal_id: %u\n",
+ ipcc_mbox_chan->signal_id);
spin_lock_irqsave(&host_ctx->bridge_mbox_lock, flags);
ipcc_mbox_chan->npu_mbox->send_data_pending = true;
@@ -1887,7 +1990,7 @@ static struct mbox_chan *npu_ipcc_bridge_mbox_xlate(
npu_dev = bridge_data->priv_data;
- if (ph->args_count != 2)
+ if (ph->args_count != 1)
return ERR_PTR(-EINVAL);
for (chan_id = 0; chan_id < mbox->num_chans; chan_id++) {
@@ -1895,8 +1998,7 @@ static struct mbox_chan *npu_ipcc_bridge_mbox_xlate(
if (!ipcc_mbox_chan)
break;
- else if (ipcc_mbox_chan->client_id == ph->args[0] &&
- ipcc_mbox_chan->signal_id == ph->args[1])
+ else if (ipcc_mbox_chan->signal_id == ph->args[0])
return ERR_PTR(-EBUSY);
}
@@ -1906,16 +2008,15 @@ static struct mbox_chan *npu_ipcc_bridge_mbox_xlate(
/* search for target mailbox */
for (i = 0; i < NPU_MAX_MBOX_NUM; i++) {
if (npu_dev->mbox[i].chan &&
- (npu_dev->mbox[i].client_id == ph->args[0]) &&
- (npu_dev->mbox[i].signal_id == ph->args[1])) {
+ (npu_dev->mbox[i].signal_id == ph->args[0])) {
NPU_DBG("Find matched target mailbox %d\n", i);
break;
}
}
if (i == NPU_MAX_MBOX_NUM) {
- NPU_ERR("Can't find matched target mailbox %d:%d\n",
- ph->args[0], ph->args[1]);
+ NPU_ERR("Can't find matched target mailbox %d\n",
+ ph->args[0]);
return ERR_PTR(-EINVAL);
}
@@ -1923,16 +2024,14 @@ static struct mbox_chan *npu_ipcc_bridge_mbox_xlate(
if (!ipcc_mbox_chan)
return ERR_PTR(-ENOMEM);
- ipcc_mbox_chan->client_id = ph->args[0];
- ipcc_mbox_chan->signal_id = ph->args[1];
+ ipcc_mbox_chan->signal_id = ph->args[0];
ipcc_mbox_chan->chan = &bridge_data->chans[chan_id];
ipcc_mbox_chan->npu_dev = npu_dev;
ipcc_mbox_chan->chan->con_priv = ipcc_mbox_chan;
ipcc_mbox_chan->npu_mbox = &npu_dev->mbox[i];
- NPU_DBG("New mailbox channel: %u for client_id: %u; signal_id: %u\n",
- chan_id, ipcc_mbox_chan->client_id,
- ipcc_mbox_chan->signal_id);
+ NPU_DBG("New mailbox channel: %u for signal_id: %u\n",
+ chan_id, ipcc_mbox_chan->signal_id);
return ipcc_mbox_chan->chan;
}
@@ -2037,11 +2136,9 @@ static int npu_mbox_init(struct npu_device *npu_dev)
NPU_WARN("can't get mailbox %s args\n",
mbox_name);
} else {
- mbox->client_id = curr_ph.args[0];
- mbox->signal_id = curr_ph.args[1];
- NPU_DBG("argument for mailbox %x is %x %x\n",
- mbox_name, curr_ph.args[0],
- curr_ph.args[1]);
+ mbox->signal_id = curr_ph.args[0];
+ NPU_DBG("argument for mailbox %x is %x\n",
+ mbox_name, curr_ph.args[0]);
}
}
index++;
@@ -2098,6 +2195,7 @@ static int npu_probe(struct platform_device *pdev)
return -EFAULT;
npu_dev->pdev = pdev;
+ mutex_init(&npu_dev->dev_lock);
platform_set_drvdata(pdev, npu_dev);
res = platform_get_resource_byname(pdev,
@@ -2158,6 +2256,25 @@ static int npu_probe(struct platform_device *pdev)
res->start, npu_dev->cc_io.base);
res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "tcsr");
+ if (!res) {
+ NPU_ERR("unable to get tcsr_mutex resource\n");
+ rc = -ENODEV;
+ goto error_get_dev_num;
+ }
+ npu_dev->tcsr_io.size = resource_size(res);
+ npu_dev->tcsr_io.phy_addr = res->start;
+ npu_dev->tcsr_io.base = devm_ioremap(&pdev->dev, res->start,
+ npu_dev->tcsr_io.size);
+ if (unlikely(!npu_dev->tcsr_io.base)) {
+ NPU_ERR("unable to map tcsr\n");
+ rc = -ENOMEM;
+ goto error_get_dev_num;
+ }
+ NPU_DBG("tcsr phy address=0x%llx virt=%pK\n",
+ res->start, npu_dev->tcsr_io.base);
+
+ res = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "apss_shared");
if (!res) {
NPU_ERR("unable to get apss_shared resource\n");
@@ -2267,8 +2384,6 @@ static int npu_probe(struct platform_device *pdev)
goto error_driver_init;
}
- mutex_init(&npu_dev->dev_lock);
-
rc = npu_host_init(npu_dev);
if (rc) {
NPU_ERR("unable to init host\n");
diff --git a/drivers/media/platform/msm/npu_v2/npu_host_ipc.c b/drivers/media/platform/msm/npu_v2/npu_host_ipc.c
index 9dac4a945591..e2fc1ff9740a 100644
--- a/drivers/media/platform/msm/npu_v2/npu_host_ipc.c
+++ b/drivers/media/platform/msm/npu_v2/npu_host_ipc.c
@@ -42,7 +42,7 @@ static const struct npu_queue_tuple npu_q_setup[6] = {
{ 4096, IPC_QUEUE_APPS_EXEC | TX_HDR_TYPE | RX_HDR_TYPE },
{ 4096, IPC_QUEUE_DSP_EXEC | TX_HDR_TYPE | RX_HDR_TYPE },
{ 4096, IPC_QUEUE_APPS_RSP | TX_HDR_TYPE | RX_HDR_TYPE },
- { 1024, IPC_QUEUE_DSP_RSP | TX_HDR_TYPE | RX_HDR_TYPE },
+ { 4096, IPC_QUEUE_DSP_RSP | TX_HDR_TYPE | RX_HDR_TYPE },
{ 1024, IPC_QUEUE_LOG | TX_HDR_TYPE | RX_HDR_TYPE },
};
@@ -238,6 +238,13 @@ static int ipc_queue_read(struct npu_device *npu_dev,
status = -EPERM;
goto exit;
}
+
+ if (packet_size > NPU_IPC_BUF_LENGTH) {
+ NPU_ERR("Invalid packet size %d\n", packet_size);
+ status = -EINVAL;
+ goto exit;
+ }
+
new_read_idx = queue.qhdr_read_idx + packet_size;
if (new_read_idx < (queue.qhdr_q_size)) {
diff --git a/drivers/media/platform/msm/npu_v2/npu_host_ipc.h b/drivers/media/platform/msm/npu_v2/npu_host_ipc.h
index 9c7ee2784e94..f6136d7a32c5 100644
--- a/drivers/media/platform/msm/npu_v2/npu_host_ipc.h
+++ b/drivers/media/platform/msm/npu_v2/npu_host_ipc.h
@@ -39,6 +39,10 @@
#define NPU_IPC_CMD_EXECUTE_V2 0x0000000A
/* npu_ipc_cmd_notify_pwr_packet_t */
#define NPU_IPC_CMD_NOTIFY_PWR 0x0000000B
+/* ipc_cmd_set_property_packet */
+#define NPU_IPC_CMD_SET_PROPERTY 0x0000000C
+/* ipc_cmd_get_property_packet */
+#define NPU_IPC_CMD_GET_PROPERTY 0x0000000D
/* Messages sent **from** NPU */
/* IPC Message Response -- uint32_t */
@@ -58,6 +62,15 @@
#define NPU_IPC_MSG_EXECUTE_V2_DONE 0x00010006
/* struct ipc_msg_notify_pwr_pkt */
#define NPU_IPC_MSG_NOTIFY_PWR_DONE 0x00010007
+/* ipc_msg_set_property_packet */
+#define NPU_IPC_MSG_SET_PROPERTY_DONE 0x00010008
+/* ipc_msg_get_property_packet */
+#define NPU_IPC_MSG_GET_PROPERTY_DONE 0x00010009
+/* ipc_msg_general_notify_pkt */
+#define NPU_IPC_MSG_GENERAL_NOTIFY 0x00010010
+
+/* IPC Notify Message Type -- uint32_t */
+#define NPU_NOTIFY_DCVS_MODE 0x00002000
/* Logging message size */
/* Number 32-bit elements for the maximum log message size */
@@ -106,6 +119,9 @@
/* Debug stats */
#define NUM_LAYER_STATS_PER_EXE_MSG_MAX 110
+/* DCVS */
+#define NPU_DCVS_ACTIVITY_MAX_PERF 0x100
+
/*
* Data Structures
*/
@@ -274,6 +290,40 @@ struct ipc_cmd_loopback_pkt {
};
/*
+ * Generic property definition
+ */
+struct ipc_cmd_prop_pkt {
+ struct ipc_cmd_header_pkt header;
+ uint32_t prop_id;
+ uint32_t num_params;
+ uint32_t network_hdl;
+ uint32_t prop_param[0];
+};
+
+/*
+ * Generic property response packet definition
+ */
+struct ipc_msg_prop_pkt {
+ struct ipc_msg_header_pkt header;
+ uint32_t prop_id;
+ uint32_t num_params;
+ uint32_t network_hdl;
+ uint32_t prop_param[0];
+};
+
+/*
+ * Generic notify message packet definition
+ */
+struct ipc_msg_general_notify_pkt {
+ struct ipc_msg_header_pkt header;
+ uint32_t notify_id;
+ uint32_t num_params;
+ uint32_t network_hdl;
+ uint32_t notify_param[0];
+};
+
+
+/*
* LOAD response packet definition
*/
struct ipc_msg_load_pkt {
diff --git a/drivers/media/platform/msm/npu_v2/npu_hw.h b/drivers/media/platform/msm/npu_v2/npu_hw.h
index bbc03ef83c95..dff62874a5b7 100644
--- a/drivers/media/platform/msm/npu_v2/npu_hw.h
+++ b/drivers/media/platform/msm/npu_v2/npu_hw.h
@@ -78,4 +78,7 @@
#define NPU_CC_NPU_MASTERn_WDOG_BITE_IRQ_OWNER(n) (0x0006010+4*(n))
#define NPU_CC_NPU_MASTERn_WDOG_BITE_IRQ_STATUS(n) (0x00009030+0x1000*(n))
+#define TCSR_NPU_CPC_PWR_ON (0x0003700C)
+#define NPU_CPC_PWR_ON (1 << 0)
+
#endif /* NPU_HW_H */
diff --git a/drivers/media/platform/msm/npu_v2/npu_hw_access.c b/drivers/media/platform/msm/npu_v2/npu_hw_access.c
index 13b31d846f30..e0269210fe6b 100644
--- a/drivers/media/platform/msm/npu_v2/npu_hw_access.c
+++ b/drivers/media/platform/msm/npu_v2/npu_hw_access.c
@@ -25,73 +25,93 @@
/*
* Functions - Register
*/
-uint32_t npu_core_reg_read(struct npu_device *npu_dev, uint32_t off)
+static uint32_t npu_reg_read(void __iomem *base, size_t size, uint32_t off)
{
- uint32_t ret = 0;
+ if (!base) {
+ NPU_ERR("NULL base address\n");
+ return 0;
+ }
- ret = readl_relaxed(npu_dev->core_io.base + off);
- return ret;
+ if ((off % 4) != 0) {
+ NPU_ERR("offset %x is not aligned\n", off);
+ return 0;
+ }
+
+ if (off >= size) {
+ NPU_ERR("offset exceeds io region %x:%x\n", off, size);
+ return 0;
+ }
+
+ return readl_relaxed(base + off);
}
-void npu_core_reg_write(struct npu_device *npu_dev, uint32_t off, uint32_t val)
+static void npu_reg_write(void __iomem *base, size_t size, uint32_t off,
+ uint32_t val)
{
- writel_relaxed(val, npu_dev->core_io.base + off);
+ if (!base) {
+ NPU_ERR("NULL base address\n");
+ return;
+ }
+
+ if ((off % 4) != 0) {
+ NPU_ERR("offset %x is not aligned\n", off);
+ return;
+ }
+
+ if (off >= size) {
+ NPU_ERR("offset exceeds io region %x:%x\n", off, size);
+ return;
+ }
+
+ writel_relaxed(val, base + off);
__iowmb();
}
-uint32_t npu_qdsp_reg_read(struct npu_device *npu_dev, uint32_t off)
+uint32_t npu_core_reg_read(struct npu_device *npu_dev, uint32_t off)
{
- uint32_t ret = 0;
+ return npu_reg_read(npu_dev->core_io.base, npu_dev->core_io.size, off);
+}
- ret = readl_relaxed(npu_dev->qdsp_io.base + off);
- return ret;
+void npu_core_reg_write(struct npu_device *npu_dev, uint32_t off, uint32_t val)
+{
+ npu_reg_write(npu_dev->core_io.base, npu_dev->core_io.size,
+ off, val);
}
-void npu_qdsp_reg_write(struct npu_device *npu_dev, uint32_t off, uint32_t val)
+uint32_t npu_tcsr_reg_read(struct npu_device *npu_dev, uint32_t off)
{
- writel_relaxed(val, npu_dev->qdsp_io.base + off);
- __iowmb();
+ return npu_reg_read(npu_dev->tcsr_io.base, npu_dev->tcsr_io.size, off);
}
uint32_t npu_apss_shared_reg_read(struct npu_device *npu_dev, uint32_t off)
{
- uint32_t ret = 0;
-
- ret = readl_relaxed(npu_dev->apss_shared_io.base + off);
- return ret;
+ return npu_reg_read(npu_dev->apss_shared_io.base,
+ npu_dev->apss_shared_io.size, off);
}
void npu_apss_shared_reg_write(struct npu_device *npu_dev, uint32_t off,
uint32_t val)
{
- writel_relaxed(val, npu_dev->apss_shared_io.base + off);
- __iowmb();
+ npu_reg_write(npu_dev->apss_shared_io.base,
+ npu_dev->apss_shared_io.size, off, val);
}
uint32_t npu_cc_reg_read(struct npu_device *npu_dev, uint32_t off)
{
- uint32_t ret = 0;
-
- ret = readl_relaxed(npu_dev->cc_io.base + off);
-
- return ret;
+ return npu_reg_read(npu_dev->cc_io.base, npu_dev->cc_io.size, off);
}
void npu_cc_reg_write(struct npu_device *npu_dev, uint32_t off,
uint32_t val)
{
- writel_relaxed(val, npu_dev->cc_io.base + off);
- __iowmb();
+ npu_reg_write(npu_dev->cc_io.base, npu_dev->cc_io.size,
+ off, val);
}
uint32_t npu_qfprom_reg_read(struct npu_device *npu_dev, uint32_t off)
{
- uint32_t ret = 0;
-
- if (npu_dev->qfprom_io.base)
- ret = readl_relaxed(npu_dev->qfprom_io.base + off);
-
- return ret;
+ return npu_reg_read(npu_dev->qfprom_io.base,
+ npu_dev->qfprom_io.size, off);
}
/*
@@ -106,6 +126,13 @@ void npu_mem_write(struct npu_device *npu_dev, void *dst, void *src,
uint32_t i = 0;
uint32_t num = 0;
+ if (dst_off >= npu_dev->tcm_io.size ||
+ (npu_dev->tcm_io.size - dst_off) < size) {
+ NPU_ERR("memory write exceeds io region %x:%x:%x\n",
+ dst_off, size, npu_dev->tcm_io.size);
+ return;
+ }
+
num = size/4;
for (i = 0; i < num; i++) {
writel_relaxed(src_ptr32[i], npu_dev->tcm_io.base + dst_off);
@@ -132,6 +159,13 @@ int32_t npu_mem_read(struct npu_device *npu_dev, void *src, void *dst,
uint32_t i = 0;
uint32_t num = 0;
+ if (src_off >= npu_dev->tcm_io.size ||
+ (npu_dev->tcm_io.size - src_off) < size) {
+ NPU_ERR("memory read exceeds io region %x:%x:%x\n",
+ src_off, size, npu_dev->tcm_io.size);
+ return 0;
+ }
+
num = size/4;
for (i = 0; i < num; i++) {
out32[i] = readl_relaxed(npu_dev->tcm_io.base + src_off);
@@ -163,7 +197,7 @@ void npu_interrupt_ack(struct npu_device *npu_dev, uint32_t intr_num)
int32_t npu_interrupt_raise_m0(struct npu_device *npu_dev)
{
- npu_apss_shared_reg_write(npu_dev, APSS_SHARED_IPC_INTERRUPT_1, 0x40);
+ npu_apss_shared_reg_write(npu_dev, APSS_SHARED_IPC_INTERRUPT_1, 0x20);
return 0;
}
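All register accessors now funnel through npu_reg_read()/npu_reg_write(), which reject a NULL base, an unaligned offset, or an offset outside the mapped region, so callers can stay simple. An illustrative fragment using the new TCSR accessor with the definitions this patch adds (kernel context assumed, not a standalone program):

    	/* Sketch: check once whether the NPU CPC rail is still powered. */
    	uint32_t reg_val = npu_tcsr_reg_read(npu_dev, TCSR_NPU_CPC_PWR_ON);

    	if (reg_val & NPU_CPC_PWR_ON)
    		NPU_DBG("npu cpc still powered\n");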
diff --git a/drivers/media/platform/msm/npu_v2/npu_hw_access.h b/drivers/media/platform/msm/npu_v2/npu_hw_access.h
index 961710dc1cfe..1cd6a17917a7 100644
--- a/drivers/media/platform/msm/npu_v2/npu_hw_access.h
+++ b/drivers/media/platform/msm/npu_v2/npu_hw_access.h
@@ -53,8 +53,7 @@ typedef irqreturn_t (*intr_hdlr_fn)(int32_t irq, void *ptr);
*/
uint32_t npu_core_reg_read(struct npu_device *npu_dev, uint32_t off);
void npu_core_reg_write(struct npu_device *npu_dev, uint32_t off, uint32_t val);
-uint32_t npu_qdsp_reg_read(struct npu_device *npu_dev, uint32_t off);
-void npu_qdsp_reg_write(struct npu_device *npu_dev, uint32_t off, uint32_t val);
+uint32_t npu_tcsr_reg_read(struct npu_device *npu_dev, uint32_t off);
uint32_t npu_apss_shared_reg_read(struct npu_device *npu_dev, uint32_t off);
void npu_apss_shared_reg_write(struct npu_device *npu_dev, uint32_t off,
uint32_t val);
diff --git a/drivers/media/platform/msm/npu_v2/npu_mgr.c b/drivers/media/platform/msm/npu_v2/npu_mgr.c
index 3910ed95aade..ee66db2287ff 100644
--- a/drivers/media/platform/msm/npu_v2/npu_mgr.c
+++ b/drivers/media/platform/msm/npu_v2/npu_mgr.c
@@ -30,7 +30,7 @@
#define LOG_MSG_TOTAL_SIZE_INDEX 0
#define LOG_MSG_MSG_ID_INDEX 1
-#define NPU_FW_TIMEOUT_POLL_INTERVAL_MS 20
+#define NPU_FW_TIMEOUT_POLL_INTERVAL_MS 10
#define NPU_FW_TIMEOUT_MS 5000
/*
@@ -43,7 +43,8 @@ static void npu_disable_fw_work(struct work_struct *work);
static void npu_update_pwr_work(struct work_struct *work);
static void turn_off_fw_logging(struct npu_device *npu_dev);
static int wait_for_status_ready(struct npu_device *npu_dev,
- uint32_t status_reg, uint32_t status_bits);
+ uint32_t status_reg, uint32_t status_bits, bool poll);
+static int wait_npu_cpc_power_off(struct npu_device *npu_dev);
static struct npu_network *alloc_network(struct npu_host_ctx *ctx,
struct npu_client *client);
static struct npu_network *get_network_by_hdl(struct npu_host_ctx *ctx,
@@ -69,10 +70,37 @@ static int npu_notify_fw_pwr_state(struct npu_device *npu_dev,
uint32_t pwr_level, bool post);
static int load_fw_nolock(struct npu_device *npu_dev, bool enable);
static void disable_fw_nolock(struct npu_device *npu_dev);
+static int update_dcvs_activity(struct npu_device *npu_dev, uint32_t activity);
/*
* Function Definitions - Init / Deinit
*/
+
+static int wait_npu_cpc_power_off(struct npu_device *npu_dev)
+{
+ uint32_t reg_val = NPU_CPC_PWR_ON;
+ uint32_t wait_cnt = 0, max_wait_ms;
+
+ max_wait_ms = NPU_FW_TIMEOUT_MS;
+
+ do {
+ reg_val = npu_tcsr_reg_read(npu_dev, TCSR_NPU_CPC_PWR_ON);
+ if (!(reg_val & NPU_CPC_PWR_ON)) {
+ NPU_DBG("npu cpc powers off\n");
+ break;
+ }
+
+ wait_cnt += NPU_FW_TIMEOUT_POLL_INTERVAL_MS;
+ if (wait_cnt > max_wait_ms) {
+ NPU_ERR("timeout wait for cpc power off\n");
+ return -EPERM;
+ }
+ msleep(NPU_FW_TIMEOUT_POLL_INTERVAL_MS);
+ } while (1);
+
+ return 0;
+}
+
static int load_fw_nolock(struct npu_device *npu_dev, bool enable)
{
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
@@ -103,7 +131,7 @@ static int load_fw_nolock(struct npu_device *npu_dev, bool enable)
/* Keep reading ctrl status until NPU is ready */
if (wait_for_status_ready(npu_dev, REG_NPU_FW_CTRL_STATUS,
- FW_CTRL_STATUS_MAIN_THREAD_READY_VAL)) {
+ FW_CTRL_STATUS_MAIN_THREAD_READY_VAL, false)) {
ret = -EPERM;
goto load_fw_fail;
}
@@ -141,7 +169,7 @@ static int load_fw_nolock(struct npu_device *npu_dev, bool enable)
NPU_ERR("Wait for fw shutdown timedout\n");
ret = -ETIMEDOUT;
} else {
- ret = 0;
+ ret = wait_npu_cpc_power_off(npu_dev);
}
load_fw_fail:
@@ -287,7 +315,7 @@ static int enable_fw_nolock(struct npu_device *npu_dev)
goto notify_fw_pwr_fail;
}
- ret = wait_for_completion_interruptible_timeout(
+ ret = wait_for_completion_timeout(
&host_ctx->fw_bringup_done, NW_CMD_TIMEOUT);
if (!ret) {
NPU_ERR("Wait for fw bringup timedout\n");
@@ -336,6 +364,7 @@ int enable_fw(struct npu_device *npu_dev)
static void disable_fw_nolock(struct npu_device *npu_dev)
{
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+ int ret = 0;
if (!host_ctx->fw_ref_cnt) {
NPU_WARN("fw_ref_cnt is 0\n");
@@ -361,10 +390,14 @@ static void disable_fw_nolock(struct npu_device *npu_dev)
msleep(500);
}
- if (!host_ctx->auto_pil_disable
- && !wait_for_completion_interruptible_timeout(
- &host_ctx->fw_shutdown_done, NW_CMD_TIMEOUT))
- NPU_ERR("Wait for fw shutdown timedout\n");
+ if (!host_ctx->auto_pil_disable) {
+ ret = wait_for_completion_timeout(
+ &host_ctx->fw_shutdown_done, NW_CMD_TIMEOUT);
+ if (!ret)
+ NPU_ERR("Wait for fw shutdown timedout\n");
+ else
+ ret = wait_npu_cpc_power_off(npu_dev);
+ }
npu_disable_irq(npu_dev);
npu_disable_sys_cache(npu_dev);
@@ -430,7 +463,7 @@ static int npu_notify_fw_pwr_state(struct npu_device *npu_dev,
NPU_ERR("NPU_IPC_CMD_NOTIFY_PWR sent failed: %d\n", ret);
} else {
ret = wait_for_status_ready(npu_dev, REG_NPU_FW_CTRL_STATUS,
- FW_CTRL_STATUS_PWR_NOTIFY_DONE_VAL);
+ FW_CTRL_STATUS_PWR_NOTIFY_DONE_VAL, true);
if (!ret) {
reg_val = REGR(npu_dev, REG_NPU_FW_CTRL_STATUS);
if (reg_val & FW_CTRL_STATUS_PWR_NOTIFY_ERR_VAL) {
@@ -593,12 +626,16 @@ int npu_host_init(struct npu_device *npu_dev)
if (IS_ERR(host_ctx->notif_hdle)) {
NPU_ERR("register event notification failed\n");
sts = PTR_ERR(host_ctx->notif_hdle);
- return sts;
+ host_ctx->notif_hdle = NULL;
+ goto fail;
}
- host_ctx->wq = create_workqueue("npu_irq_hdl");
- if (!host_ctx->wq) {
+ host_ctx->wq = create_workqueue("npu_general_wq");
+ host_ctx->wq_pri =
+ alloc_workqueue("npu_ipc_wq", WQ_HIGHPRI | WQ_UNBOUND, 0);
+ if (!host_ctx->wq || !host_ctx->wq_pri) {
sts = -EPERM;
+ goto fail;
} else {
INIT_WORK(&host_ctx->ipc_irq_work, npu_ipc_irq_work);
INIT_WORK(&host_ctx->wdg_err_irq_work, npu_wdg_err_irq_work);
@@ -609,8 +646,33 @@ int npu_host_init(struct npu_device *npu_dev)
npu_disable_fw_work);
}
- host_ctx->auto_pil_disable = true;
+ host_ctx->ipc_msg_buf = kzalloc(NPU_IPC_BUF_LENGTH, GFP_KERNEL);
+ if (!host_ctx->ipc_msg_buf) {
+ NPU_ERR("Failed to allocate ipc buffer\n");
+ sts = -ENOMEM;
+ goto fail;
+ }
+ host_ctx->prop_buf = kzalloc(sizeof(struct msm_npu_property),
+ GFP_KERNEL);
+ if (!host_ctx->prop_buf) {
+ sts = -ENOMEM;
+ goto fail;
+ }
+
+ host_ctx->auto_pil_disable = false;
+
+ return sts;
+fail:
+ kfree(host_ctx->ipc_msg_buf);
+ if (host_ctx->wq)
+ destroy_workqueue(host_ctx->wq);
+ if (host_ctx->wq_pri)
+ destroy_workqueue(host_ctx->wq_pri);
+ if (host_ctx->notif_hdle)
+ subsys_notif_unregister_notifier(host_ctx->notif_hdle,
+ &host_ctx->nb);
+ mutex_destroy(&host_ctx->lock);
return sts;
}
@@ -618,7 +680,11 @@ void npu_host_deinit(struct npu_device *npu_dev)
{
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+ kfree(host_ctx->prop_buf);
+ kfree(host_ctx->ipc_msg_buf);
destroy_workqueue(host_ctx->wq);
+ destroy_workqueue(host_ctx->wq_pri);
+ subsys_notif_unregister_notifier(host_ctx->notif_hdle, &host_ctx->nb);
mutex_destroy(&host_ctx->lock);
}
@@ -634,7 +700,7 @@ irqreturn_t npu_ipc_intr_hdlr(int irq, void *ptr)
/* Check that the event thread currently is running */
if (host_ctx->wq)
- queue_work(host_ctx->wq, &host_ctx->ipc_irq_work);
+ queue_work(host_ctx->wq_pri, &host_ctx->ipc_irq_work);
return IRQ_HANDLED;
}
@@ -683,7 +749,7 @@ irqreturn_t npu_err_intr_hdlr(int irq, void *ptr)
NPU_ERR("err_irq_sts %x\n", host_ctx->err_irq_sts);
if (host_ctx->wq)
- queue_work(host_ctx->wq, &host_ctx->wdg_err_irq_work);
+ queue_work(host_ctx->wq_pri, &host_ctx->wdg_err_irq_work);
return IRQ_HANDLED;
}
@@ -698,7 +764,7 @@ irqreturn_t npu_wdg_intr_hdlr(int irq, void *ptr)
NPU_ERR("wdg_irq_sts %x\n", host_ctx->wdg_irq_sts);
if (host_ctx->wq)
- queue_work(host_ctx->wq, &host_ctx->wdg_err_irq_work);
+ queue_work(host_ctx->wq_pri, &host_ctx->wdg_err_irq_work);
return IRQ_HANDLED;
}
@@ -767,7 +833,7 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
/* Keep reading ctrl status until NPU is ready */
if (wait_for_status_ready(npu_dev, REG_NPU_FW_CTRL_STATUS,
- FW_CTRL_STATUS_MAIN_THREAD_READY_VAL)) {
+ FW_CTRL_STATUS_MAIN_THREAD_READY_VAL, false)) {
NPU_ERR("wait for fw status ready timedout\n");
ret = -EPERM;
goto fw_start_done;
@@ -951,7 +1017,7 @@ static void turn_off_fw_logging(struct npu_device *npu_dev)
}
static int wait_for_status_ready(struct npu_device *npu_dev,
- uint32_t status_reg, uint32_t status_bits)
+ uint32_t status_reg, uint32_t status_bits, bool poll)
{
uint32_t ctrl_sts = 0;
uint32_t wait_cnt = 0, max_wait_ms;
@@ -959,19 +1025,34 @@ static int wait_for_status_ready(struct npu_device *npu_dev,
max_wait_ms = (host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
NW_DEBUG_TIMEOUT_MS : NPU_FW_TIMEOUT_MS;
+ if (poll)
+ wait_cnt = max_wait_ms * 10;
+ else
+ wait_cnt = max_wait_ms / NPU_FW_TIMEOUT_POLL_INTERVAL_MS;
/* keep reading status register until bits are set */
- while ((ctrl_sts & status_bits) != status_bits) {
+ do {
ctrl_sts = REGR(npu_dev, status_reg);
- msleep(NPU_FW_TIMEOUT_POLL_INTERVAL_MS);
- wait_cnt += NPU_FW_TIMEOUT_POLL_INTERVAL_MS;
- if (wait_cnt >= max_wait_ms) {
+ if ((ctrl_sts & status_bits) == status_bits) {
+ NPU_DBG("status %x[reg %x] ready received\n",
+ status_bits, status_reg);
+ break;
+ }
+
+ if (!wait_cnt) {
NPU_ERR("timeout wait for status %x[%x] in reg %x\n",
status_bits, ctrl_sts, status_reg);
return -EPERM;
}
- }
- NPU_DBG("status %x[reg %x] ready received\n", status_bits, status_reg);
+
+ if (poll)
+ udelay(100);
+ else
+ msleep(NPU_FW_TIMEOUT_POLL_INTERVAL_MS);
+
+ wait_cnt--;
+ } while (1);
+
return 0;
}
@@ -1159,6 +1240,7 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
uint32_t msg_id;
struct npu_network *network = NULL;
struct npu_kevent kevt;
+ struct npu_device *npu_dev = host_ctx->npu_dev;
msg_id = msg[1];
switch (msg_id) {
@@ -1347,6 +1429,73 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
complete_all(&host_ctx->misc_cmd_done);
break;
}
+ case NPU_IPC_MSG_SET_PROPERTY_DONE:
+ {
+ struct ipc_msg_prop_pkt *prop_rsp_pkt =
+ (struct ipc_msg_prop_pkt *)msg;
+ uint32_t *param = (uint32_t *)((uint8_t *)prop_rsp_pkt +
+ sizeof(struct ipc_msg_prop_pkt));
+ NPU_DBG("NPU_IPC_MSG_SET_PROPERTY_DONE %d:0x%x:%d\n",
+ prop_rsp_pkt->network_hdl,
+ prop_rsp_pkt->prop_id,
+ param[0]);
+
+ host_ctx->misc_cmd_result = prop_rsp_pkt->header.status;
+ host_ctx->misc_cmd_pending = false;
+
+ complete_all(&host_ctx->misc_cmd_done);
+ break;
+ }
+ case NPU_IPC_MSG_GET_PROPERTY_DONE:
+ {
+ struct ipc_msg_prop_pkt *prop_rsp_pkt =
+ (struct ipc_msg_prop_pkt *)msg;
+ uint32_t prop_size = 0;
+ uint32_t *prop_data = (uint32_t *)((uint8_t *)prop_rsp_pkt +
+ sizeof(struct ipc_msg_header_pkt));
+
+ NPU_DBG("NPU_IPC_MSG_GET_PROPERTY_DONE %d:0x%x:%d:%d\n",
+ prop_rsp_pkt->network_hdl,
+ prop_rsp_pkt->prop_id,
+ prop_rsp_pkt->num_params,
+ prop_rsp_pkt->prop_param[0]);
+
+ host_ctx->misc_cmd_result = prop_rsp_pkt->header.status;
+ host_ctx->misc_cmd_pending = false;
+
+ if (prop_rsp_pkt->num_params > 0) {
+ /* Copy prop data to kernel buffer */
+ prop_size = prop_rsp_pkt->header.size -
+ sizeof(struct ipc_msg_header_pkt);
+ memcpy(host_ctx->prop_buf, prop_data, prop_size);
+ }
+
+ complete_all(&host_ctx->misc_cmd_done);
+ break;
+ }
+ case NPU_IPC_MSG_GENERAL_NOTIFY:
+ {
+ struct ipc_msg_general_notify_pkt *notify_msg_pkt =
+ (struct ipc_msg_general_notify_pkt *)msg;
+
+ NPU_DBG("NPU_IPC_MSG_GENERAL_NOTIFY %d:0x%x:%d\n",
+ notify_msg_pkt->network_hdl,
+ notify_msg_pkt->notify_id,
+ notify_msg_pkt->notify_param[0]);
+
+ switch (notify_msg_pkt->notify_id) {
+ case NPU_NOTIFY_DCVS_MODE:
+ NPU_DBG("NPU_IPC_MSG_GENERAL_NOTIFY DCVS_MODE %d\n",
+ notify_msg_pkt->notify_param[0]);
+ update_dcvs_activity(npu_dev,
+ notify_msg_pkt->notify_param[0]);
+ break;
+ default:
+ NPU_ERR("Nothing to do\n");
+ break;
+ }
+ break;
+ }
default:
NPU_ERR("Not supported apps response received %d\n",
msg_id);
@@ -1356,27 +1505,22 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
static void host_session_msg_hdlr(struct npu_device *npu_dev)
{
- uint32_t *msg;
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
- msg = kzalloc(sizeof(uint32_t) * NPU_IPC_BUF_LENGTH, GFP_KERNEL);
- if (!msg)
- return;
-
mutex_lock(&host_ctx->lock);
if (host_ctx->fw_state != FW_ENABLED) {
NPU_WARN("handle npu session msg when FW is disabled\n");
goto skip_read_msg;
}
- while (npu_host_ipc_read_msg(npu_dev, IPC_QUEUE_APPS_RSP, msg) == 0) {
+ while (npu_host_ipc_read_msg(npu_dev, IPC_QUEUE_APPS_RSP,
+ host_ctx->ipc_msg_buf) == 0) {
NPU_DBG("received from msg queue\n");
- app_msg_proc(host_ctx, msg);
+ app_msg_proc(host_ctx, host_ctx->ipc_msg_buf);
}
skip_read_msg:
mutex_unlock(&host_ctx->lock);
- kfree(msg);
}
static void log_msg_proc(struct npu_device *npu_dev, uint32_t *msg)
@@ -1402,28 +1546,22 @@ static void log_msg_proc(struct npu_device *npu_dev, uint32_t *msg)
static void host_session_log_hdlr(struct npu_device *npu_dev)
{
- uint32_t *msg;
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
- msg = kzalloc(sizeof(uint32_t) * NPU_IPC_BUF_LENGTH, GFP_KERNEL);
-
- if (!msg)
- return;
-
mutex_lock(&host_ctx->lock);
if (host_ctx->fw_state != FW_ENABLED) {
NPU_WARN("handle npu session msg when FW is disabled\n");
goto skip_read_msg;
}
- while (npu_host_ipc_read_msg(npu_dev, IPC_QUEUE_LOG, msg) == 0) {
+ while (npu_host_ipc_read_msg(npu_dev, IPC_QUEUE_LOG,
+ host_ctx->ipc_msg_buf) == 0) {
NPU_DBG("received from log queue\n");
- log_msg_proc(npu_dev, msg);
+ log_msg_proc(npu_dev, host_ctx->ipc_msg_buf);
}
skip_read_msg:
mutex_unlock(&host_ctx->lock);
- kfree(msg);
}
/*
@@ -1463,7 +1601,7 @@ int32_t npu_host_unmap_buf(struct npu_client *client,
* fw is disabled
*/
if (host_ctx->fw_error && (host_ctx->fw_state == FW_ENABLED) &&
- !wait_for_completion_interruptible_timeout(
+ !wait_for_completion_timeout(
&host_ctx->fw_deinit_done, NW_CMD_TIMEOUT))
NPU_WARN("npu: wait for fw_deinit_done time out\n");
@@ -1495,6 +1633,7 @@ static int npu_send_network_cmd(struct npu_device *npu_dev,
network->cmd_ret_status = 0;
network->cmd_pending = true;
network->trans_id = atomic_read(&host_ctx->ipc_trans_id);
+ reinit_completion(&network->cmd_done);
NPU_DBG("Send cmd %d network id %llx trans id %d\n",
((struct ipc_cmd_header_pkt *)cmd_ptr)->cmd_type,
network->id, network->trans_id);
@@ -1567,9 +1706,9 @@ static uint32_t find_networks_perf_mode(struct npu_host_ctx *host_ctx)
} else {
/* find the max level among all the networks */
for (i = 0; i < host_ctx->network_num; i++) {
- if ((network->perf_mode != 0) &&
- (network->perf_mode > max_perf_mode))
- max_perf_mode = network->perf_mode;
+ if ((network->cur_perf_mode != 0) &&
+ (network->cur_perf_mode > max_perf_mode))
+ max_perf_mode = network->cur_perf_mode;
network++;
}
}
@@ -1586,6 +1725,12 @@ static int set_perf_mode(struct npu_device *npu_dev)
networks_perf_mode = find_networks_perf_mode(host_ctx);
+ if (npu_dev->pwrctrl.perf_mode_override)
+ networks_perf_mode = npu_dev->pwrctrl.perf_mode_override;
+
+ if (npu_dev->pwrctrl.cur_dcvs_activity != NPU_DCVS_ACTIVITY_MAX_PERF)
+ networks_perf_mode = min_t(uint32_t, networks_perf_mode,
+ npu_dev->pwrctrl.cur_dcvs_activity);
ret = npu_set_uc_power_level(npu_dev, networks_perf_mode);
if (ret)
NPU_ERR("set uc power level %d failed\n", networks_perf_mode);
@@ -1593,12 +1738,182 @@ static int set_perf_mode(struct npu_device *npu_dev)
return ret;
}
+static int update_dcvs_activity(struct npu_device *npu_dev, uint32_t activity)
+{
+ npu_dev->pwrctrl.cur_dcvs_activity = activity;
+ NPU_DBG("update dcvs activity to %d\n", activity);
+
+ return set_perf_mode(npu_dev);
+}
+
+int32_t npu_host_set_fw_property(struct npu_device *npu_dev,
+ struct msm_npu_property *property)
+{
+ int ret = 0, i;
+ uint32_t prop_param, prop_id;
+ struct ipc_cmd_prop_pkt *prop_packet = NULL;
+ struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+ uint32_t num_of_params, pkt_size;
+
+ prop_id = property->prop_id;
+ num_of_params = min_t(uint32_t, property->num_of_params,
+ (uint32_t)PROP_PARAM_MAX_SIZE);
+ pkt_size = sizeof(*prop_packet) + num_of_params * sizeof(uint32_t);
+ prop_packet = kzalloc(pkt_size, GFP_KERNEL);
+
+ if (!prop_packet)
+ return -ENOMEM;
+
+ switch (prop_id) {
+ case MSM_NPU_PROP_ID_DCVS_MODE:
+ prop_param = min_t(uint32_t, property->prop_param[0],
+ (uint32_t)(npu_dev->pwrctrl.num_pwrlevels - 1));
+ property->prop_param[0] = prop_param;
+ NPU_DBG("setting dcvs_mode to %d[%d:%d]\n", prop_param,
+ property->prop_param[0],
+ (uint32_t)(npu_dev->pwrctrl.num_pwrlevels - 1));
+
+ if (property->network_hdl == 0) {
+ npu_dev->pwrctrl.dcvs_mode = prop_param;
+ NPU_DBG("Set global dcvs mode %d\n", prop_param);
+ }
+ break;
+ default:
+ NPU_ERR("unsupported property %d\n", property->prop_id);
+ goto set_prop_exit;
+ }
+
+ prop_packet->header.cmd_type = NPU_IPC_CMD_SET_PROPERTY;
+ prop_packet->header.size = pkt_size;
+ prop_packet->header.trans_id =
+ atomic_add_return(1, &host_ctx->ipc_trans_id);
+ prop_packet->header.flags = 0;
+
+ prop_packet->prop_id = prop_id;
+ prop_packet->num_params = num_of_params;
+ prop_packet->network_hdl = property->network_hdl;
+ for (i = 0; i < num_of_params; i++)
+ prop_packet->prop_param[i] = property->prop_param[i];
+
+ mutex_lock(&host_ctx->lock);
+ ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_APPS_EXEC,
+ prop_packet);
+ NPU_DBG("NPU_IPC_CMD_SET_PROPERTY sent status: %d\n", ret);
+
+ if (ret) {
+ NPU_ERR("NPU_IPC_CMD_SET_PROPERTY failed\n");
+ goto set_prop_exit;
+ }
+ mutex_unlock(&host_ctx->lock);
+
+ ret = wait_for_completion_interruptible_timeout(
+ &host_ctx->misc_cmd_done,
+ (host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
+ NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
+
+ mutex_lock(&host_ctx->lock);
+ if (!ret) {
+ NPU_ERR("NPU_IPC_CMD_SET_PROPERTY time out\n");
+ ret = -ETIMEDOUT;
+ goto set_prop_exit;
+ } else if (ret < 0) {
+ NPU_ERR("Wait for set_property done interrupted by signal\n");
+ goto set_prop_exit;
+ }
+
+ ret = host_ctx->misc_cmd_result;
+ if (ret)
+ NPU_ERR("set fw property failed %d\n", ret);
+
+set_prop_exit:
+ mutex_unlock(&host_ctx->lock);
+ kfree(prop_packet);
+ return ret;
+}
+
+int32_t npu_host_get_fw_property(struct npu_device *npu_dev,
+ struct msm_npu_property *property)
+{
+ int ret = 0, i;
+ struct ipc_cmd_prop_pkt *prop_packet = NULL;
+ struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+ struct msm_npu_property *prop_from_fw;
+ uint32_t num_of_params, pkt_size;
+
+ num_of_params = min_t(uint32_t, property->num_of_params,
+ (uint32_t)PROP_PARAM_MAX_SIZE);
+ pkt_size = sizeof(*prop_packet) + num_of_params * sizeof(uint32_t);
+ prop_packet = kzalloc(pkt_size, GFP_KERNEL);
+
+ if (!prop_packet)
+ return -ENOMEM;
+
+ prop_packet->header.cmd_type = NPU_IPC_CMD_GET_PROPERTY;
+ prop_packet->header.size = pkt_size;
+ prop_packet->header.trans_id =
+ atomic_add_return(1, &host_ctx->ipc_trans_id);
+ prop_packet->header.flags = 0;
+
+ prop_packet->prop_id = property->prop_id;
+ prop_packet->num_params = num_of_params;
+ prop_packet->network_hdl = property->network_hdl;
+ for (i = 0; i < num_of_params; i++)
+ prop_packet->prop_param[i] = property->prop_param[i];
+
+ mutex_lock(&host_ctx->lock);
+ ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_APPS_EXEC,
+ prop_packet);
+ NPU_DBG("NPU_IPC_CMD_GET_PROPERTY sent status: %d\n", ret);
+
+ if (ret) {
+ NPU_ERR("NPU_IPC_CMD_GET_PROPERTY failed\n");
+ goto get_prop_exit;
+ }
+ mutex_unlock(&host_ctx->lock);
+
+ ret = wait_for_completion_interruptible_timeout(
+ &host_ctx->misc_cmd_done,
+ (host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
+ NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
+
+ mutex_lock(&host_ctx->lock);
+ if (!ret) {
+ pr_err_ratelimited("npu: NPU_IPC_CMD_GET_PROPERTY time out\n");
+ ret = -ETIMEDOUT;
+ goto get_prop_exit;
+ } else if (ret < 0) {
+ NPU_ERR("Wait for get_property done interrupted by signal\n");
+ goto get_prop_exit;
+ }
+
+ ret = host_ctx->misc_cmd_result;
+ if (!ret) {
+ /* Return prop data retrieved from fw to user */
+ prop_from_fw = (struct msm_npu_property *)(host_ctx->prop_buf);
+ if (property->prop_id == prop_from_fw->prop_id &&
+ property->network_hdl == prop_from_fw->network_hdl) {
+ property->num_of_params = num_of_params;
+ for (i = 0; i < num_of_params; i++)
+ property->prop_param[i] =
+ prop_from_fw->prop_param[i];
+ }
+ } else {
+ NPU_ERR("get fw property failed %d\n", ret);
+ }
+
+get_prop_exit:
+ mutex_unlock(&host_ctx->lock);
+ kfree(prop_packet);
+ return ret;
+}
+
int32_t npu_host_load_network_v2(struct npu_client *client,
struct msm_npu_load_network_ioctl_v2 *load_ioctl,
struct msm_npu_patch_info_v2 *patch_info)
{
int ret = 0, i;
struct npu_device *npu_dev = client->npu_dev;
+ struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
struct npu_network *network;
struct ipc_cmd_load_pkt_v2 *load_packet = NULL;
struct ipc_cmd_unload_pkt unload_packet;
@@ -1636,7 +1951,9 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
network->phy_add = load_ioctl->buf_phys_addr;
network->first_block_size = load_ioctl->first_block_size;
network->priority = load_ioctl->priority;
- network->perf_mode = load_ioctl->perf_mode;
+ network->cur_perf_mode = network->init_perf_mode =
+ (load_ioctl->perf_mode == PERF_MODE_DEFAULT) ?
+ pwr->num_pwrlevels : load_ioctl->perf_mode;
network->num_layers = load_ioctl->num_layers;
/* verify mapped physical address */
@@ -1665,8 +1982,6 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
load_packet->buf_pkt.num_layers = network->num_layers;
load_packet->num_patch_params = num_patch_params;
- /* NPU_IPC_CMD_LOAD_V2 will go onto IPC_QUEUE_APPS_EXEC */
- reinit_completion(&network->cmd_done);
ret = npu_send_network_cmd(npu_dev, network, load_packet, false, false);
if (ret) {
NPU_ERR("NPU_IPC_CMD_LOAD_V2 sent failed: %d\n", ret);
@@ -1769,8 +2084,6 @@ int32_t npu_host_unload_network(struct npu_client *client,
unload_packet.header.flags = 0;
unload_packet.network_hdl = (uint32_t)network->network_hdl;
- /* NPU_IPC_CMD_UNLOAD will go onto IPC_QUEUE_APPS_EXEC */
- reinit_completion(&network->cmd_done);
ret = npu_send_network_cmd(npu_dev, network, &unload_packet, false,
false);
@@ -1823,9 +2136,10 @@ free_network:
*/
network_put(network);
free_network(host_ctx, client, network->id);
- /* update perf mode */
- if (set_perf_mode(npu_dev))
- NPU_WARN("set_perf_mode failed\n");
+
+ /* recalculate uc_power_level after unload network */
+ if (npu_dev->pwrctrl.cur_dcvs_activity)
+ set_perf_mode(npu_dev);
mutex_unlock(&host_ctx->lock);
@@ -1912,8 +2226,6 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
NPU_DBG("Execute_v2 flags %x stats_buf_size %d\n",
exec_packet->header.flags, exec_ioctl->stats_buf_size);
- /* Send it on the high priority queue */
- reinit_completion(&network->cmd_done);
ret = npu_send_network_cmd(npu_dev, network, exec_packet, async_ioctl,
false);
@@ -2064,3 +2376,85 @@ void npu_host_cleanup_networks(struct npu_client *client)
npu_host_unmap_buf(client, &unmap_req);
}
}
+
+/*
+ * Set the network or global perf_mode.
+ * If network_hdl is 0, set the global perf_mode_override;
+ * otherwise set the network's perf_mode. A perf_mode of 0
+ * restores the network's initial perf_mode recorded at
+ * load_network time.
+ */
+int32_t npu_host_set_perf_mode(struct npu_client *client, uint32_t network_hdl,
+ uint32_t perf_mode)
+{
+ int ret = 0;
+ struct npu_device *npu_dev = client->npu_dev;
+ struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+ struct npu_network *network = NULL;
+
+ mutex_lock(&host_ctx->lock);
+
+ if (network_hdl == 0) {
+ NPU_DBG("change perf_mode_override to %d\n", perf_mode);
+ npu_dev->pwrctrl.perf_mode_override = perf_mode;
+ } else {
+ network = get_network_by_hdl(host_ctx, client, network_hdl);
+ if (!network) {
+ NPU_ERR("invalid network handle %x\n", network_hdl);
+ mutex_unlock(&host_ctx->lock);
+ return -EINVAL;
+ }
+
+ if (perf_mode == 0) {
+ network->cur_perf_mode = network->init_perf_mode;
+ NPU_DBG("change network %d perf_mode back to %d\n",
+ network_hdl, network->cur_perf_mode);
+ } else {
+ network->cur_perf_mode = perf_mode;
+ NPU_DBG("change network %d perf_mode to %d\n",
+ network_hdl, network->cur_perf_mode);
+ }
+ }
+
+ ret = set_perf_mode(npu_dev);
+ if (ret)
+ NPU_ERR("set_perf_mode failed\n");
+
+ if (network)
+ network_put(network);
+ mutex_unlock(&host_ctx->lock);
+
+ return ret;
+}
+
+/*
+ * Get the currently set network or global perf_mode.
+ * If network_hdl is 0, return the global perf_mode_override;
+ * otherwise return the network's current perf_mode.
+ */
+int32_t npu_host_get_perf_mode(struct npu_client *client, uint32_t network_hdl)
+{
+ int param_val = 0;
+ struct npu_device *npu_dev = client->npu_dev;
+ struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+ struct npu_network *network = NULL;
+
+ mutex_lock(&host_ctx->lock);
+
+ if (network_hdl == 0) {
+ param_val = npu_dev->pwrctrl.perf_mode_override;
+ } else {
+ network = get_network_by_hdl(host_ctx, client, network_hdl);
+ if (!network) {
+ NPU_ERR("invalid network handle %x\n", network_hdl);
+ mutex_unlock(&host_ctx->lock);
+ return -EINVAL;
+ }
+ param_val = network->cur_perf_mode;
+ network_put(network);
+ }
+
+ mutex_unlock(&host_ctx->lock);
+
+ return param_val;
+}
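For reference, a hedged sketch of how a caller might route the two new entry points above; the dispatcher function itself is hypothetical and not part of this patch:

static long npu_perf_mode_ctl(struct npu_client *client, uint32_t network_hdl,
			      uint32_t perf_mode, bool set)
{
	if (set)
		/* perf_mode == 0 restores the mode captured at load time */
		return npu_host_set_perf_mode(client, network_hdl, perf_mode);

	/* returns the current mode (>= 0) or -EINVAL for a bad handle */
	return npu_host_get_perf_mode(client, network_hdl);
}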
diff --git a/drivers/media/platform/msm/npu_v2/npu_mgr.h b/drivers/media/platform/msm/npu_v2/npu_mgr.h
index 14e526d2b01f..0ffe083fa14b 100644
--- a/drivers/media/platform/msm/npu_v2/npu_mgr.h
+++ b/drivers/media/platform/msm/npu_v2/npu_mgr.h
@@ -31,7 +31,7 @@
#define NPU_MBOX_IDLE_TIMEOUT msecs_to_jiffies(NPU_MBOX_IDLE_TIMEOUT_MS)
#define FIRMWARE_VERSION 0x00001000
#define MAX_LOADED_NETWORK 32
-#define NPU_IPC_BUF_LENGTH 512
+#define NPU_IPC_BUF_LENGTH 4096
#define FW_DBG_MODE_PAUSE (1 << 0)
#define FW_DBG_MODE_INC_TIMEOUT (1 << 1)
@@ -48,7 +48,8 @@ struct npu_network {
uint32_t first_block_size;
uint32_t network_hdl;
uint32_t priority;
- uint32_t perf_mode;
+ uint32_t cur_perf_mode;
+ uint32_t init_perf_mode;
uint32_t num_layers;
void *stats_buf;
void __user *stats_buf_u;
@@ -77,6 +78,7 @@ struct npu_host_ctx {
void *subsystem_handle;
enum fw_state fw_state;
int32_t fw_ref_cnt;
+ int32_t npu_init_cnt;
int32_t power_vote_num;
struct work_struct ipc_irq_work;
struct work_struct wdg_err_irq_work;
@@ -85,11 +87,13 @@ struct npu_host_ctx {
struct work_struct update_pwr_work;
struct delayed_work disable_fw_work;
struct workqueue_struct *wq;
+ struct workqueue_struct *wq_pri;
struct completion misc_cmd_done;
struct completion fw_deinit_done;
struct completion fw_bringup_done;
struct completion fw_shutdown_done;
struct completion npu_power_up_done;
+ void *prop_buf;
int32_t network_num;
struct npu_network networks[MAX_LOADED_NETWORK];
bool sys_cache_disable;
@@ -109,6 +113,7 @@ struct npu_host_ctx {
void *notif_hdle;
spinlock_t bridge_mbox_lock;
bool bridge_mbox_pwr_on;
+ void *ipc_msg_buf;
};
struct npu_device;
@@ -143,11 +148,17 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
struct msm_npu_exec_network_ioctl_v2 *exec_ioctl,
struct msm_npu_patch_buf_info *patch_buf_info);
int32_t npu_host_loopback_test(struct npu_device *npu_dev);
+int32_t npu_host_set_fw_property(struct npu_device *npu_dev,
+ struct msm_npu_property *property);
+int32_t npu_host_get_fw_property(struct npu_device *npu_dev,
+ struct msm_npu_property *property);
void npu_host_cleanup_networks(struct npu_client *client);
int npu_host_notify_fw_pwr_state(struct npu_device *npu_dev,
uint32_t pwr_level, bool post);
int npu_host_update_power(struct npu_device *npu_dev);
-
+int32_t npu_host_set_perf_mode(struct npu_client *client, uint32_t network_hdl,
+ uint32_t perf_mode);
+int32_t npu_host_get_perf_mode(struct npu_client *client, uint32_t network_hdl);
void npu_dump_debug_info(struct npu_device *npu_dev);
void npu_dump_ipc_packet(struct npu_device *npu_dev, void *cmd_ptr);
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
index 6253047a5c5f..fcb010cf87d0 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.c
@@ -3331,6 +3331,7 @@ int sde_rotator_runtime_idle(struct device *dev)
int sde_rotator_pm_suspend(struct device *dev)
{
struct sde_rot_mgr *mgr;
+ int i;
mgr = sde_rot_mgr_from_device(dev);
@@ -3345,8 +3346,20 @@ int sde_rotator_pm_suspend(struct device *dev)
sde_rotator_suspend_cancel_rot_work(mgr);
mgr->minimum_bw_vote = 0;
sde_rotator_update_perf(mgr);
+ mgr->pm_rot_enable_clk_cnt = mgr->rot_enable_clk_cnt;
+
+ if (mgr->pm_rot_enable_clk_cnt) {
+ for (i = 0; i < mgr->pm_rot_enable_clk_cnt; i++)
+ sde_rotator_clk_ctrl(mgr, false);
+
+ sde_rotator_update_clk(mgr);
+ }
+
ATRACE_END("pm_active");
- SDEROT_DBG("end pm active %d\n", atomic_read(&mgr->device_suspended));
+ SDEROT_DBG("end pm active %d clk_cnt %d\n",
+ atomic_read(&mgr->device_suspended), mgr->pm_rot_enable_clk_cnt);
+ SDEROT_EVTLOG(mgr->pm_rot_enable_clk_cnt,
+ atomic_read(&mgr->device_suspended));
sde_rot_mgr_unlock(mgr);
return 0;
}
@@ -3358,6 +3371,7 @@ int sde_rotator_pm_suspend(struct device *dev)
int sde_rotator_pm_resume(struct device *dev)
{
struct sde_rot_mgr *mgr;
+ int i;
mgr = sde_rot_mgr_from_device(dev);
@@ -3377,10 +3391,20 @@ int sde_rotator_pm_resume(struct device *dev)
pm_runtime_enable(dev);
sde_rot_mgr_lock(mgr);
- SDEROT_DBG("begin pm active %d\n", atomic_read(&mgr->device_suspended));
+ SDEROT_DBG("begin pm active %d clk_cnt %d\n",
+ atomic_read(&mgr->device_suspended), mgr->pm_rot_enable_clk_cnt);
ATRACE_BEGIN("pm_active");
+ SDEROT_EVTLOG(mgr->pm_rot_enable_clk_cnt,
+ atomic_read(&mgr->device_suspended));
atomic_dec(&mgr->device_suspended);
sde_rotator_update_perf(mgr);
+
+ if (mgr->pm_rot_enable_clk_cnt) {
+ sde_rotator_update_clk(mgr);
+ for (i = 0; i < mgr->pm_rot_enable_clk_cnt; i++)
+ sde_rotator_clk_ctrl(mgr, true);
+ }
+
sde_rot_mgr_unlock(mgr);
return 0;
}
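The suspend path above saves the live clock vote count into pm_rot_enable_clk_cnt and drops that many votes; resume replays them in the opposite order. A minimal sketch of the shared loop, assuming sde_rotator_clk_ctrl() keeps its (mgr, bool) signature; the helper itself is hypothetical:

static void sde_rot_pm_clk_replay(struct sde_rot_mgr *mgr, bool enable)
{
	int i;

	/* re-apply (or drop) the votes recorded in pm_rot_enable_clk_cnt */
	for (i = 0; i < mgr->pm_rot_enable_clk_cnt; i++)
		sde_rotator_clk_ctrl(mgr, enable);
}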
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
index eca54ee1bfa6..4689d7f649ef 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -393,6 +393,7 @@ struct sde_rot_bus_data_type {
* @regulator_enable: true if foot switch is enabled; false otherwise
* @res_ref_cnt: reference count of how many times resource is requested
* @rot_enable_clk_cnt: reference count of how many times clock is requested
+ * @pm_rot_enable_clk_cnt: clock enable count saved at PM suspend
* @rot_clk: array of rotator and periphery clocks
* @num_rot_clk: size of the rotator clock array
* @rdot_limit: current read OT limit
@@ -439,6 +440,7 @@ struct sde_rot_mgr {
int res_ref_cnt;
int rot_enable_clk_cnt;
+ int pm_rot_enable_clk_cnt;
struct sde_rot_clk *rot_clk;
int num_rot_clk;
u32 rdot_limit;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index 3537d9b304cf..c8daa5b5ed49 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -2399,7 +2399,7 @@ static int sde_rotator_qbuf(struct file *file, void *fh,
ctx->vbinfo_cap[idx].qbuf_ts = ktime_get();
ctx->vbinfo_cap[idx].dqbuf_ts = NULL;
SDEDEV_DBG(ctx->rot_dev->dev,
- "create buffer fence s:%d.%u i:%d f:%p\n",
+ "create buffer fence s:%d.%u i:%d f:%pK\n",
ctx->session_id,
ctx->vbinfo_cap[idx].fence_ts,
idx,
@@ -3088,7 +3088,7 @@ static void sde_rotator_retire_handler(struct kthread_work *work)
if (!src_buf || !dst_buf) {
SDEDEV_ERR(rot_dev->dev,
- "null buffer in retire s:%d sb:%p db:%p\n",
+ "null buffer in retire s:%d sb:%pK db:%pK\n",
ctx->session_id,
src_buf, dst_buf);
}
@@ -3377,7 +3377,7 @@ static void sde_rotator_device_run(void *priv)
dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
if (!src_buf || !dst_buf) {
SDEDEV_ERR(rot_dev->dev,
- "null buffer in device run s:%d sb:%p db:%p\n",
+ "null buffer in device run s:%d sb:%pK db:%pK\n",
ctx->session_id,
src_buf, dst_buf);
goto error_process_buffers;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_platform.c b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
index 7bbeba205895..60e6c9688760 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_platform.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
@@ -742,6 +742,9 @@ static struct msm_vidc_efuse_data sdmmagpie_efuse_data[] = {
EFUSE_ENTRY(0x00786018, 4, 0x00000400, 0x0a, SKU_VERSION),
};
+static struct msm_vidc_efuse_data atoll_efuse_data[] = {
+ EFUSE_ENTRY(0x00786008, 4, 0x08000000, 0x1b, SKU_VERSION),
+};
static struct msm_vidc_ubwc_config trinket_ubwc_data[] = {
UBWC_CONFIG(0, 1, 0, 0, 0, 64, 0, 0),
};
@@ -782,8 +785,10 @@ static struct msm_vidc_platform_data atoll_data = {
.csc_data.vpe_csc_custom_bias_coeff = vpe_csc_custom_bias_coeff,
.csc_data.vpe_csc_custom_matrix_coeff = vpe_csc_custom_matrix_coeff,
.csc_data.vpe_csc_custom_limit_coeff = vpe_csc_custom_limit_coeff,
- .efuse_data = NULL,
- .efuse_data_length = 0,
+ .efuse_data = atoll_efuse_data,
+ .efuse_data_length = ARRAY_SIZE(atoll_efuse_data),
+ .heic_image_capability = &default_heic_image_capability,
+ .hevc_image_capability = &default_hevc_image_capability,
.sku_version = 0,
.vpu_ver = VPU_VERSION_4,
};
diff --git a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_qcom.c b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_qcom.c
index 56023aeeec0a..7245c3be0cbc 100644
--- a/drivers/net/ethernet/aquantia/atlantic-fwd/atl_qcom.c
+++ b/drivers/net/ethernet/aquantia/atlantic-fwd/atl_qcom.c
@@ -125,34 +125,48 @@ err_release_mapping:
static int atl_qcom_attach_smmu(struct device *dev)
{
- int rc = 0;
+ bool dt_present = !!of_find_property(dev->of_node, "qcom,smmu", NULL);
+ bool smmu_attached = !!iommu_get_domain_for_dev(dev);
+
+ if (smmu_attached) {
+ /* On platforms where IOMMU is attached automatically, we do
+ * not expect qcom,smmu property to be present in devicetree.
+ */
+ if (dt_present) {
+ dev_err(dev, "SMMU DT node is not expected\n");
+ return -EEXIST;
+ }
- if (!dev->of_node) {
- dev_dbg(dev, "device tree node is not present\n");
return 0;
}
- if (of_find_property(dev->of_node, "qcom,smmu", NULL))
- rc = __atl_qcom_attach_smmu(dev);
- else
- dev_dbg(dev, "SMMU config not present in DT\n");
+ if (!dt_present) {
+ dev_err(dev, "SMMU DT is required for the device\n");
+ return -EFAULT;
+ }
- return 0;
+ return __atl_qcom_attach_smmu(dev);
}
static void atl_qcom_detach_smmu(struct device *dev)
{
- struct dma_iommu_mapping *mapping;
-
- if (!dev->of_node || !of_find_property(dev->of_node, "qcom,smmu", NULL))
- return;
-
- mapping = to_dma_iommu_mapping(dev);
- if (!mapping)
- return;
-
- arm_iommu_detach_device(dev);
- arm_iommu_release_mapping(mapping);
+ bool dt_present = !!of_find_property(dev->of_node, "qcom,smmu", NULL);
+ bool smmu_attached = !!iommu_get_domain_for_dev(dev);
+
+ /* Perform a manual detach only if we were tasked with doing the
+ * attach originally.
+ */
+ if (dt_present && smmu_attached) {
+ struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
+
+ if (!mapping) {
+ dev_err(dev, "Failed to retrieve IOMMU mapping\n");
+ return;
+ }
+
+ arm_iommu_detach_device(dev);
+ arm_iommu_release_mapping(mapping);
+ }
}
static int atl_qcom_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 1c85eaf58e37..bdaaaa546425 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -87,6 +87,8 @@ static int rmnet_unregister_real_device(struct net_device *real_dev,
if (port->nr_rmnet_devs)
return -EINVAL;
+ netdev_rx_handler_unregister(real_dev);
+
rmnet_map_cmd_exit(port);
rmnet_map_tx_aggregate_exit(port);
@@ -94,8 +96,6 @@ static int rmnet_unregister_real_device(struct net_device *real_dev,
kfree(port);
- netdev_rx_handler_unregister(real_dev);
-
/* release reference on real_dev */
dev_put(real_dev);
@@ -263,9 +263,9 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head)
if (!port->nr_rmnet_devs)
qmi_rmnet_qmi_exit(port->qmi_info, port);
- rmnet_unregister_real_device(real_dev, port);
+ unregister_netdevice(dev);
- unregister_netdevice_queue(dev, head);
+ rmnet_unregister_real_device(real_dev, port);
}
static void rmnet_force_unassociate_device(struct net_device *dev)
@@ -295,7 +295,9 @@ static void rmnet_force_unassociate_device(struct net_device *dev)
synchronize_rcu();
kfree(ep);
}
-
+ /* Unregister the devices in this context before freeing the port.
+ * If this API ever stops running in-context, the order should switch.
+ */
unregister_netdevice_many(&list);
rmnet_unregister_real_device(real_dev, port);
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 78c38ed8e6b1..eb75de68214c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -13,7 +13,9 @@ obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o
obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o
obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o
obj-$(CONFIG_DWMAC_OXNAS) += dwmac-oxnas.o
+obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-gpio.o
obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-ethqos.o
+obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-pps.o
obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index e93c40b4631e..6ece873a1fce 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -43,10 +43,10 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
len = nopaged_len - bmax;
- des2 = dma_map_single(priv->device, skb->data,
+ des2 = dma_map_single(GET_MEM_PDEV_DEV, skb->data,
bmax, DMA_TO_DEVICE);
desc->des2 = cpu_to_le32(des2);
- if (dma_mapping_error(priv->device, des2))
+ if (dma_mapping_error(GET_MEM_PDEV_DEV, des2))
return -1;
tx_q->tx_skbuff_dma[entry].buf = des2;
tx_q->tx_skbuff_dma[entry].len = bmax;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index d3727deda7e1..0cc42f8005c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -6,16 +6,24 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of_gpio.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mii.h>
+#include <linux/of_mdio.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/debugfs.h>
+#include <asm/dma-iommu.h>
+#include <linux/iommu.h>
+
+
#include "stmmac.h"
#include "stmmac_platform.h"
+#include "dwmac-qcom-ethqos.h"
+#include "stmmac_ptp.h"
-#define RGMII_IO_MACRO_CONFIG 0x0
-#define SDCC_HC_REG_DLL_CONFIG 0x4
-#define SDCC_HC_REG_DDR_CONFIG 0xC
-#define SDCC_HC_REG_DLL_CONFIG2 0x10
-#define SDC4_STATUS 0x14
-#define SDCC_USR_CTL 0x18
-#define RGMII_IO_MACRO_CONFIG2 0x1C
#define RGMII_IO_MACRO_DEBUG1 0x20
#define EMAC_SYSTEM_LOW_POWER_DEBUG 0x28
@@ -70,22 +78,34 @@
#define RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL BIT(6)
#define RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN BIT(5)
-struct ethqos_emac_por {
- unsigned int offset;
- unsigned int value;
-};
+#define EMAC_I0_EMAC_CORE_HW_VERSION_RGOFFADDR 0x00000070
+#define EMAC_HW_v2_3_2_RG 0x20030002
-struct qcom_ethqos {
- struct platform_device *pdev;
- void __iomem *rgmii_base;
+#define MII_BUSY 0x00000001
+#define MII_WRITE 0x00000002
- unsigned int rgmii_clk_rate;
- struct clk *rgmii_clk;
- unsigned int speed;
+/* GMAC4 defines */
+#define MII_GMAC4_GOC_SHIFT 2
+#define MII_GMAC4_WRITE BIT(MII_GMAC4_GOC_SHIFT)
+#define MII_GMAC4_READ (3 << MII_GMAC4_GOC_SHIFT)
- const struct ethqos_emac_por *por;
- unsigned int num_por;
-};
+#define MII_BUSY 0x00000001
+#define MII_WRITE 0x00000002
+
+#define DWC_ETH_QOS_PHY_INTR_STATUS 0x0013
+
+#define LINK_UP 1
+#define LINK_DOWN 0
+
+#define LINK_DOWN_STATE 0x800
+#define LINK_UP_STATE 0x400
+
+bool phy_intr_en;
+
+struct qcom_ethqos *pethqos;
+struct emac_emb_smmu_cb_ctx emac_emb_smmu_ctx = {0};
+
+void *ipc_emac_log_ctxt;
static int rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset)
{
@@ -162,23 +182,18 @@ static void ethqos_set_func_clk_en(struct qcom_ethqos *ethqos)
RGMII_CONFIG_FUNC_CLK_EN, RGMII_IO_MACRO_CONFIG);
}
-static const struct ethqos_emac_por emac_v2_3_0_por[] = {
- { .offset = RGMII_IO_MACRO_CONFIG, .value = 0x00C01343 },
- { .offset = SDCC_HC_REG_DLL_CONFIG, .value = 0x2004642C },
- { .offset = SDCC_HC_REG_DDR_CONFIG, .value = 0x00000000 },
- { .offset = SDCC_HC_REG_DLL_CONFIG2, .value = 0x00200000 },
- { .offset = SDCC_USR_CTL, .value = 0x00010800 },
- { .offset = RGMII_IO_MACRO_CONFIG2, .value = 0x00002060 },
-};
-
static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
{
unsigned int val;
int retry = 1000;
/* Set CDR_EN */
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CDR_EN,
- SDCC_DLL_CONFIG_CDR_EN, SDCC_HC_REG_DLL_CONFIG);
+ if (ethqos->emac_ver == EMAC_HW_v2_3_2_RG)
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CDR_EN,
+ 0, SDCC_HC_REG_DLL_CONFIG);
+ else
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CDR_EN,
+ SDCC_DLL_CONFIG_CDR_EN, SDCC_HC_REG_DLL_CONFIG);
/* Set CDR_EXT_EN */
rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CDR_EXT_EN,
@@ -192,12 +207,13 @@ static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN,
SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
- rgmii_updatel(ethqos, SDCC_DLL_MCLK_GATING_EN,
- 0, SDCC_HC_REG_DLL_CONFIG);
-
- rgmii_updatel(ethqos, SDCC_DLL_CDR_FINE_PHASE,
- 0, SDCC_HC_REG_DLL_CONFIG);
+ if (ethqos->emac_ver != EMAC_HW_v2_3_2_RG) {
+ rgmii_updatel(ethqos, SDCC_DLL_MCLK_GATING_EN,
+ 0, SDCC_HC_REG_DLL_CONFIG);
+ rgmii_updatel(ethqos, SDCC_DLL_CDR_FINE_PHASE,
+ 0, SDCC_HC_REG_DLL_CONFIG);
+ }
/* Wait for CK_OUT_EN clear */
do {
val = rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG);
@@ -231,18 +247,20 @@ static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_CAL_EN,
SDCC_DLL_CONFIG2_DDR_CAL_EN, SDCC_HC_REG_DLL_CONFIG2);
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DLL_CLOCK_DIS,
- 0, SDCC_HC_REG_DLL_CONFIG2);
+ if (ethqos->emac_ver != EMAC_HW_v2_3_2_RG) {
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DLL_CLOCK_DIS,
+ 0, SDCC_HC_REG_DLL_CONFIG2);
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_MCLK_FREQ_CALC,
- 0x1A << 10, SDCC_HC_REG_DLL_CONFIG2);
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_MCLK_FREQ_CALC,
+ 0x1A << 10, SDCC_HC_REG_DLL_CONFIG2);
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SEL,
- BIT(2), SDCC_HC_REG_DLL_CONFIG2);
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SEL,
+ BIT(2), SDCC_HC_REG_DLL_CONFIG2);
- rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
- SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
- SDCC_HC_REG_DLL_CONFIG2);
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
+ SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
+ SDCC_HC_REG_DLL_CONFIG2);
+ }
return 0;
}
@@ -280,13 +298,22 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
RGMII_IO_MACRO_CONFIG2);
/* Set PRG_RCLK_DLY to 57 for 1.8 ns delay */
- rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_RCLK_DLY,
- 57, SDCC_HC_REG_DDR_CONFIG);
+ if (ethqos->emac_ver == EMAC_HW_v2_3_2_RG)
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_RCLK_DLY,
+ 69, SDCC_HC_REG_DDR_CONFIG);
+ else
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_RCLK_DLY,
+ 57, SDCC_HC_REG_DDR_CONFIG);
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_DLY_EN,
SDCC_DDR_CONFIG_PRG_DLY_EN,
SDCC_HC_REG_DDR_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
- RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
+ if (ethqos->emac_ver == EMAC_HW_v2_3_2_RG)
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ 0, RGMII_IO_MACRO_CONFIG);
+ else
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ RGMII_CONFIG_LOOPBACK_EN,
+ RGMII_IO_MACRO_CONFIG);
break;
case SPEED_100:
@@ -308,8 +335,13 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
BIT(6), RGMII_IO_MACRO_CONFIG);
rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
0, RGMII_IO_MACRO_CONFIG2);
- rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
- 0, RGMII_IO_MACRO_CONFIG2);
+ if (ethqos->emac_ver == EMAC_HW_v2_3_2_RG)
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
+ RGMII_CONFIG2_RX_PROG_SWAP,
+ RGMII_IO_MACRO_CONFIG2);
+ else
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
+ 0, RGMII_IO_MACRO_CONFIG2);
/* Write 0x5 to PRG_RCLK_DLY_CODE */
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE,
(BIT(29) | BIT(27)), SDCC_HC_REG_DDR_CONFIG);
@@ -319,8 +351,13 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
SDCC_HC_REG_DDR_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
- RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
+ if (ethqos->emac_ver == EMAC_HW_v2_3_2_RG)
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ 0, RGMII_IO_MACRO_CONFIG);
+ else
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ RGMII_CONFIG_LOOPBACK_EN,
+ RGMII_IO_MACRO_CONFIG);
break;
case SPEED_10:
@@ -335,15 +372,27 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
0, RGMII_IO_MACRO_CONFIG);
rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
0, RGMII_IO_MACRO_CONFIG2);
- rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
- 0, RGMII_IO_MACRO_CONFIG2);
+ if (ethqos->emac_ver == EMAC_HW_v2_3_2_RG)
+ rgmii_updatel(ethqos,
+ RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
+ RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
+ RGMII_IO_MACRO_CONFIG2);
+ else
+ rgmii_updatel(ethqos,
+ RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
+ 0, RGMII_IO_MACRO_CONFIG2);
rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_9,
BIT(12) | GENMASK(9, 8),
RGMII_IO_MACRO_CONFIG);
rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
0, RGMII_IO_MACRO_CONFIG2);
- rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
- 0, RGMII_IO_MACRO_CONFIG2);
+ if (ethqos->emac_ver == EMAC_HW_v2_3_2_RG)
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
+ RGMII_CONFIG2_RX_PROG_SWAP,
+ RGMII_IO_MACRO_CONFIG2);
+ else
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
+ 0, RGMII_IO_MACRO_CONFIG2);
/* Write 0x5 to PRG_RCLK_DLY_CODE */
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE,
(BIT(29) | BIT(27)), SDCC_HC_REG_DDR_CONFIG);
@@ -353,8 +402,13 @@ static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
SDCC_HC_REG_DDR_CONFIG);
- rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
- RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
+ if (ethqos->emac_ver == EMAC_HW_v2_3_2_RG)
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ 0, RGMII_IO_MACRO_CONFIG);
+ else
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ RGMII_CONFIG_LOOPBACK_EN,
+ RGMII_IO_MACRO_CONFIG);
break;
default:
dev_err(&ethqos->pdev->dev,
@@ -436,6 +490,400 @@ static void ethqos_fix_mac_speed(void *priv, unsigned int speed)
ethqos_configure(ethqos);
}
+static int ethqos_mdio_read(struct stmmac_priv *priv, int phyaddr, int phyreg)
+{
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
+ u32 v;
+ int data;
+ u32 value = MII_BUSY;
+
+ value |= (phyaddr << priv->hw->mii.addr_shift)
+ & priv->hw->mii.addr_mask;
+ value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
+ value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
+ & priv->hw->mii.clk_csr_mask;
+ if (priv->plat->has_gmac4)
+ value |= MII_GMAC4_READ;
+
+ if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
+ 100, 10000))
+ return -EBUSY;
+
+ writel_relaxed(value, priv->ioaddr + mii_address);
+
+ if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
+ 100, 10000))
+ return -EBUSY;
+
+ /* Read the data from the MII data register */
+ data = (int)readl_relaxed(priv->ioaddr + mii_data);
+
+ return data;
+}
+
+static int ethqos_phy_intr_config(struct qcom_ethqos *ethqos)
+{
+ int ret = 0;
+
+ ethqos->phy_intr = platform_get_irq_byname(ethqos->pdev, "phy-intr");
+
+ if (ethqos->phy_intr < 0) {
+ if (ethqos->phy_intr != -EPROBE_DEFER) {
+ dev_err(&ethqos->pdev->dev,
+ "PHY IRQ configuration information not found\n");
+ }
+ ret = 1;
+ }
+
+ return ret;
+}
+
+static void ethqos_handle_phy_interrupt(struct qcom_ethqos *ethqos)
+{
+ int phy_intr_status = 0;
+ struct platform_device *pdev = ethqos->pdev;
+
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ phy_intr_status = ethqos_mdio_read(priv, priv->plat->phy_addr,
+ DWC_ETH_QOS_PHY_INTR_STATUS);
+
+ if (phy_intr_status & LINK_UP_STATE)
+ phy_mac_interrupt(dev->phydev, LINK_UP);
+ else if (phy_intr_status & LINK_DOWN_STATE)
+ phy_mac_interrupt(dev->phydev, LINK_DOWN);
+}
+
+static void ethqos_defer_phy_isr_work(struct work_struct *work)
+{
+ struct qcom_ethqos *ethqos =
+ container_of(work, struct qcom_ethqos, emac_phy_work);
+
+ ethqos_handle_phy_interrupt(ethqos);
+}
+
+static irqreturn_t ETHQOS_PHY_ISR(int irq, void *dev_data)
+{
+ struct qcom_ethqos *ethqos = (struct qcom_ethqos *)dev_data;
+
+ queue_work(system_wq, &ethqos->emac_phy_work);
+
+ return IRQ_HANDLED;
+}
+
+static int ethqos_phy_intr_enable(struct qcom_ethqos *ethqos)
+{
+ int ret = 0;
+ struct net_device *dev = platform_get_drvdata(ethqos->pdev);
+
+ INIT_WORK(&ethqos->emac_phy_work, ethqos_defer_phy_isr_work);
+ ret = request_irq(ethqos->phy_intr, ETHQOS_PHY_ISR,
+ IRQF_SHARED, "stmmac", ethqos);
+ if (ret) {
+ ETHQOSERR("Unable to register PHY IRQ %d\n",
+ ethqos->phy_intr);
+ return ret;
+ }
+ phy_intr_en = true;
+ return ret;
+}
+
+static void ethqos_pps_irq_config(struct qcom_ethqos *ethqos)
+{
+ ethqos->pps_class_a_irq =
+ platform_get_irq_byname(ethqos->pdev, "ptp_pps_irq_0");
+ if (ethqos->pps_class_a_irq < 0) {
+ if (ethqos->pps_class_a_irq != -EPROBE_DEFER)
+ ETHQOSERR("class_a_irq config info not found\n");
+ }
+ ethqos->pps_class_b_irq =
+ platform_get_irq_byname(ethqos->pdev, "ptp_pps_irq_1");
+ if (ethqos->pps_class_b_irq < 0) {
+ if (ethqos->pps_class_b_irq != -EPROBE_DEFER)
+ ETHQOSERR("class_b_irq config info not found\n");
+ }
+}
+
+static const struct of_device_id qcom_ethqos_match[] = {
+ { .compatible = "qcom,sdxprairie-ethqos", .data = &emac_v2_3_2_por},
+ { .compatible = "qcom,emac-smmu-embedded", },
+ { }
+};
+
+static ssize_t read_phy_reg_dump(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct qcom_ethqos *ethqos = file->private_data;
+ unsigned int len = 0, buf_len = 2000;
+ char *buf;
+ ssize_t ret_cnt;
+ int phydata = 0;
+ int i = 0;
+
+ struct platform_device *pdev = ethqos->pdev;
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (!ethqos || !dev->phydev) {
+ ETHQOSERR("NULL Pointer\n");
+ return -EINVAL;
+ }
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "\n************* PHY Reg dump *************\n");
+
+ for (i = 0; i < 32; i++) {
+ phydata = ethqos_mdio_read(priv, priv->plat->phy_addr, i);
+ len += scnprintf(buf + len, buf_len - len,
+ "MII Register (%#x) = %#x\n",
+ i, phydata);
+ }
+
+ if (len > buf_len) {
+ ETHQOSERR("(len > buf_len) buffer not sufficient\n");
+ len = buf_len;
+ }
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+ return ret_cnt;
+}
+
+static ssize_t read_rgmii_reg_dump(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct qcom_ethqos *ethqos = file->private_data;
+ unsigned int len = 0, buf_len = 2000;
+ char *buf;
+ ssize_t ret_cnt;
+ int rgmii_data = 0;
+ struct platform_device *pdev = ethqos->pdev;
+
+ struct net_device *dev = platform_get_drvdata(pdev);
+
+ if (!ethqos || !dev->phydev) {
+ ETHQOSERR("NULL Pointer\n");
+ return -EINVAL;
+ }
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "\n************* RGMII Reg dump *************\n");
+ rgmii_data = rgmii_readl(ethqos, RGMII_IO_MACRO_CONFIG);
+ len += scnprintf(buf + len, buf_len - len,
+ "RGMII_IO_MACRO_CONFIG Register = %#x\n",
+ rgmii_data);
+ rgmii_data = rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG);
+ len += scnprintf(buf + len, buf_len - len,
+ "SDCC_HC_REG_DLL_CONFIG Register = %#x\n",
+ rgmii_data);
+ rgmii_data = rgmii_readl(ethqos, SDCC_HC_REG_DDR_CONFIG);
+ len += scnprintf(buf + len, buf_len - len,
+ "SDCC_HC_REG_DDR_CONFIG Register = %#x\n",
+ rgmii_data);
+ rgmii_data = rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG2);
+ len += scnprintf(buf + len, buf_len - len,
+ "SDCC_HC_REG_DLL_CONFIG2 Register = %#x\n",
+ rgmii_data);
+ rgmii_data = rgmii_readl(ethqos, SDC4_STATUS);
+ len += scnprintf(buf + len, buf_len - len,
+ "SDC4_STATUS Register = %#x\n",
+ rgmii_data);
+ rgmii_data = rgmii_readl(ethqos, SDCC_USR_CTL);
+ len += scnprintf(buf + len, buf_len - len,
+ "SDCC_USR_CTL Register = %#x\n",
+ rgmii_data);
+ rgmii_data = rgmii_readl(ethqos, RGMII_IO_MACRO_CONFIG2);
+ len += scnprintf(buf + len, buf_len - len,
+ "RGMII_IO_MACRO_CONFIG2 Register = %#x\n",
+ rgmii_data);
+ rgmii_data = rgmii_readl(ethqos, RGMII_IO_MACRO_DEBUG1);
+ len += scnprintf(buf + len, buf_len - len,
+ "RGMII_IO_MACRO_DEBUG1 Register = %#x\n",
+ rgmii_data);
+ rgmii_data = rgmii_readl(ethqos, EMAC_SYSTEM_LOW_POWER_DEBUG);
+ len += scnprintf(buf + len, buf_len - len,
+ "EMAC_SYSTEM_LOW_POWER_DEBUG Register = %#x\n",
+ rgmii_data);
+
+ if (len > buf_len) {
+ ETHQOSERR("(len > buf_len) buffer not sufficient\n");
+ len = buf_len;
+ }
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+ return ret_cnt;
+}
+
+static const struct file_operations fops_phy_reg_dump = {
+ .read = read_phy_reg_dump,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static const struct file_operations fops_rgmii_reg_dump = {
+ .read = read_rgmii_reg_dump,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ethqos_create_debugfs(struct qcom_ethqos *ethqos)
+{
+ static struct dentry *phy_reg_dump;
+ static struct dentry *rgmii_reg_dump;
+
+ if (!ethqos) {
+ ETHQOSERR("Null Param %s\n", __func__);
+ return -EINVAL;
+ }
+
+ ethqos->debugfs_dir = debugfs_create_dir("eth", NULL);
+
+ if (!ethqos->debugfs_dir || IS_ERR(ethqos->debugfs_dir)) {
+ ETHQOSERR("Can't create debugfs dir\n");
+ return -ENOMEM;
+ }
+
+ phy_reg_dump = debugfs_create_file("phy_reg_dump", 0400,
+ ethqos->debugfs_dir, ethqos,
+ &fops_phy_reg_dump);
+ if (!phy_reg_dump || IS_ERR(phy_reg_dump)) {
+ ETHQOSERR("Can't create phy_dump %d\n", (int)phy_reg_dump);
+ goto fail;
+ }
+
+ rgmii_reg_dump = debugfs_create_file("rgmii_reg_dump", 0400,
+ ethqos->debugfs_dir, ethqos,
+ &fops_rgmii_reg_dump);
+ if (!rgmii_reg_dump || IS_ERR(rgmii_reg_dump)) {
+ ETHQOSERR("Can't create rgmii_dump %d\n", (int)rgmii_reg_dump);
+ goto fail;
+ }
+ return 0;
+
+fail:
+ debugfs_remove_recursive(ethqos->debugfs_dir);
+ return -ENOMEM;
+}
+
+static void emac_emb_smmu_exit(void)
+{
+ if (emac_emb_smmu_ctx.valid) {
+ if (emac_emb_smmu_ctx.smmu_pdev)
+ arm_iommu_detach_device
+ (&emac_emb_smmu_ctx.smmu_pdev->dev);
+ if (emac_emb_smmu_ctx.mapping)
+ arm_iommu_release_mapping(emac_emb_smmu_ctx.mapping);
+ emac_emb_smmu_ctx.valid = false;
+ emac_emb_smmu_ctx.mapping = NULL;
+ emac_emb_smmu_ctx.pdev_master = NULL;
+ emac_emb_smmu_ctx.smmu_pdev = NULL;
+ }
+}
+
+static int emac_emb_smmu_cb_probe(struct platform_device *pdev)
+{
+ int result;
+ u32 iova_ap_mapping[2];
+ struct device *dev = &pdev->dev;
+ int atomic_ctx = 1;
+ int fast = 1;
+ int bypass = 1;
+
+ ETHQOSDBG("EMAC EMB SMMU CB probe: smmu pdev=%p\n", pdev);
+
+ result = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
+ iova_ap_mapping, 2);
+ if (result) {
+ ETHQOSERR("Failed to read EMB start/size iova addresses\n");
+ return result;
+ }
+ emac_emb_smmu_ctx.va_start = iova_ap_mapping[0];
+ emac_emb_smmu_ctx.va_size = iova_ap_mapping[1];
+ emac_emb_smmu_ctx.va_end = emac_emb_smmu_ctx.va_start +
+ emac_emb_smmu_ctx.va_size;
+
+ emac_emb_smmu_ctx.smmu_pdev = pdev;
+
+ if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
+ ETHQOSERR("DMA set 32bit mask failed\n");
+ return -EOPNOTSUPP;
+ }
+
+ emac_emb_smmu_ctx.mapping = arm_iommu_create_mapping
+ (dev->bus, emac_emb_smmu_ctx.va_start, emac_emb_smmu_ctx.va_size);
+ if (IS_ERR_OR_NULL(emac_emb_smmu_ctx.mapping)) {
+ ETHQOSDBG("Fail to create mapping\n");
+ /* assume this failure is because iommu driver is not ready */
+ return -EPROBE_DEFER;
+ }
+ ETHQOSDBG("Successfully Created SMMU mapping\n");
+ emac_emb_smmu_ctx.valid = true;
+
+ if (of_property_read_bool(dev->of_node, "qcom,smmu-s1-bypass")) {
+ if (iommu_domain_set_attr(emac_emb_smmu_ctx.mapping->domain,
+ DOMAIN_ATTR_S1_BYPASS,
+ &bypass)) {
+ ETHQOSERR("Couldn't set SMMU S1 bypass\n");
+ result = -EIO;
+ goto err_smmu_probe;
+ }
+ ETHQOSDBG("SMMU S1 BYPASS set\n");
+ } else {
+ if (iommu_domain_set_attr(emac_emb_smmu_ctx.mapping->domain,
+ DOMAIN_ATTR_ATOMIC,
+ &atomic_ctx)) {
+ ETHQOSERR("Couldn't set SMMU domain as atomic\n");
+ result = -EIO;
+ goto err_smmu_probe;
+ }
+ ETHQOSDBG("SMMU atomic set\n");
+ if (iommu_domain_set_attr(emac_emb_smmu_ctx.mapping->domain,
+ DOMAIN_ATTR_FAST,
+ &fast)) {
+ ETHQOSERR("Couldn't set FAST SMMU\n");
+ result = -EIO;
+ goto err_smmu_probe;
+ }
+ ETHQOSDBG("SMMU fast map set\n");
+ }
+
+ result = arm_iommu_attach_device(&emac_emb_smmu_ctx.smmu_pdev->dev,
+ emac_emb_smmu_ctx.mapping);
+ if (result) {
+ ETHQOSERR("couldn't attach to IOMMU ret=%d\n", result);
+ goto err_smmu_probe;
+ }
+
+ emac_emb_smmu_ctx.iommu_domain =
+ iommu_get_domain_for_dev(&emac_emb_smmu_ctx.smmu_pdev->dev);
+
+ ETHQOSDBG("Successfully attached to IOMMU\n");
+ if (emac_emb_smmu_ctx.pdev_master)
+ goto smmu_probe_done;
+
+err_smmu_probe:
+ if (emac_emb_smmu_ctx.mapping)
+ arm_iommu_release_mapping(emac_emb_smmu_ctx.mapping);
+ emac_emb_smmu_ctx.valid = false;
+
+smmu_probe_done:
+ emac_emb_smmu_ctx.ret = result;
+ return result;
+}
+
static int qcom_ethqos_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -445,23 +893,36 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
struct resource *res;
int ret;
+ ipc_emac_log_ctxt = ipc_log_context_create(IPCLOG_STATE_PAGES,
+ "emac", 0);
+ if (!ipc_emac_log_ctxt)
+ ETHQOSERR("Error creating logging context for emac\n");
+ else
+ ETHQOSDBG("IPC logging has been enabled for emac\n");
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "qcom,emac-smmu-embedded"))
+ return emac_emb_smmu_cb_probe(pdev);
ret = stmmac_get_platform_resources(pdev, &stmmac_res);
if (ret)
return ret;
+ ethqos = devm_kzalloc(&pdev->dev, sizeof(*ethqos), GFP_KERNEL);
+ if (!ethqos) {
+ ret = -ENOMEM;
+ goto err_mem;
+ }
+ ethqos->pdev = pdev;
+
+ ethqos_init_reqgulators(ethqos);
+ ethqos_init_gpio(ethqos);
+
plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
if (IS_ERR(plat_dat)) {
dev_err(&pdev->dev, "dt configuration failed\n");
return PTR_ERR(plat_dat);
}
- ethqos = devm_kzalloc(&pdev->dev, sizeof(*ethqos), GFP_KERNEL);
- if (!ethqos) {
- ret = -ENOMEM;
- goto err_mem;
- }
- ethqos->pdev = pdev;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rgmii");
ethqos->rgmii_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(ethqos->rgmii_base)) {
@@ -492,12 +953,48 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->pmt = 1;
plat_dat->tso_en = of_property_read_bool(np, "snps,tso");
+ if (of_property_read_bool(pdev->dev.of_node, "qcom,arm-smmu")) {
+ emac_emb_smmu_ctx.pdev_master = pdev;
+ ret = of_platform_populate(pdev->dev.of_node,
+ qcom_ethqos_match, NULL, &pdev->dev);
+ if (ret)
+ ETHQOSERR("Failed to populate EMAC platform\n");
+ if (emac_emb_smmu_ctx.ret) {
+ ETHQOSERR("smmu probe failed\n");
+ of_platform_depopulate(&pdev->dev);
+ ret = emac_emb_smmu_ctx.ret;
+ emac_emb_smmu_ctx.ret = 0;
+ }
+ }
+
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
goto err_clk;
+ ethqos->emac_ver = rgmii_readl(ethqos,
+ EMAC_I0_EMAC_CORE_HW_VERSION_RGOFFADDR);
+
+ if (!ethqos_phy_intr_config(ethqos))
+ ethqos_phy_intr_enable(ethqos);
+ else
+ ETHQOSERR("Phy interrupt configuration failed");
rgmii_dump(ethqos);
+ if (ethqos->emac_ver == EMAC_HW_v2_3_2_RG) {
+ ethqos_pps_irq_config(ethqos);
+ create_pps_interrupt_device_node(&ethqos->avb_class_a_dev_t,
+ &ethqos->avb_class_a_cdev,
+ &ethqos->avb_class_a_class,
+ AVB_CLASS_A_POLL_DEV_NODE);
+
+ create_pps_interrupt_device_node(&ethqos->avb_class_b_dev_t,
+ &ethqos->avb_class_b_cdev,
+ &ethqos->avb_class_b_class,
+ AVB_CLASS_B_POLL_DEV_NODE);
+ }
+
+ pethqos = ethqos;
+ ethqos_create_debugfs(ethqos);
return ret;
err_clk:
@@ -521,13 +1018,14 @@ static int qcom_ethqos_remove(struct platform_device *pdev)
ret = stmmac_pltfr_remove(pdev);
clk_disable_unprepare(ethqos->rgmii_clk);
+ if (phy_intr_en)
+ free_irq(ethqos->phy_intr, ethqos);
+ emac_emb_smmu_exit();
+ ethqos_disable_regulators(ethqos);
+
return ret;
}
-static const struct of_device_id qcom_ethqos_match[] = {
- { .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_por},
- { }
-};
MODULE_DEVICE_TABLE(of, qcom_ethqos_match);
static struct platform_driver qcom_ethqos_driver = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.h b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.h
new file mode 100644
index 000000000000..9133d9a90f1e
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.h
@@ -0,0 +1,183 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _DWMAC_QCOM_ETHQOS_H
+#define _DWMAC_QCOM_ETHQOS_H
+
+#include <linux/ipc_logging.h>
+
+extern void *ipc_emac_log_ctxt;
+
+#define IPCLOG_STATE_PAGES 50
+#define __FILENAME__ (strrchr(__FILE__, '/') ? \
+ strrchr(__FILE__, '/') + 1 : __FILE__)
+
+#define DRV_NAME "qcom-ethqos"
+#define ETHQOSDBG(fmt, args...) \
+ pr_debug(DRV_NAME " %s:%d " fmt, __func__, ## args)
+#define ETHQOSERR(fmt, args...) \
+do {\
+ pr_err(DRV_NAME " %s:%d " fmt, __func__, ## args);\
+ if (ipc_emac_log_ctxt) { \
+ ipc_log_string(ipc_emac_log_ctxt, \
+ "%s: %s[%u]:[emac] ERROR:" fmt, __FILENAME__,\
+ __func__, __LINE__, ## args); \
+ } \
+} while (0)
+#define ETHQOSINFO(fmt, args...) \
+ pr_info(DRV_NAME " %s:%d " fmt, __func__, ## args)
+#define RGMII_IO_MACRO_CONFIG 0x0
+#define SDCC_HC_REG_DLL_CONFIG 0x4
+#define SDCC_HC_REG_DDR_CONFIG 0xC
+#define SDCC_HC_REG_DLL_CONFIG2 0x10
+#define SDC4_STATUS 0x14
+#define SDCC_USR_CTL 0x18
+#define RGMII_IO_MACRO_CONFIG2 0x1C
+
+#define ETHQOS_CONFIG_PPSOUT_CMD 44
+#define MAC_PPS_CONTROL 0x00000b70
+#define PPS_MAXIDX(x) ((((x) + 1) * 8) - 1)
+#define PPS_MINIDX(x) ((x) * 8)
+#define MCGRENX(x) BIT(PPS_MAXIDX(x))
+#define PPSEN0 BIT(4)
+#define MAC_PPSX_TARGET_TIME_SEC(x) (0x00000b80 + ((x) * 0x10))
+#define MAC_PPSX_TARGET_TIME_NSEC(x) (0x00000b84 + ((x) * 0x10))
+#define TRGTBUSY0 BIT(31)
+#define TTSL0 GENMASK(30, 0)
+#define MAC_PPSX_INTERVAL(x) (0x00000b88 + ((x) * 0x10))
+#define MAC_PPSX_WIDTH(x) (0x00000b8c + ((x) * 0x10))
+
+#define DWC_ETH_QOS_PPS_CH_2 2
+#define DWC_ETH_QOS_PPS_CH_3 3
+
+#define AVB_CLASS_A_POLL_DEV_NODE "avb_class_a_intr"
+
+#define AVB_CLASS_B_POLL_DEV_NODE "avb_class_b_intr"
+
+#define AVB_CLASS_A_CHANNEL_NUM 2
+#define AVB_CLASS_B_CHANNEL_NUM 3
+
+static inline u32 PPSCMDX(u32 x, u32 val)
+{
+ return (GENMASK(PPS_MINIDX(x) + 3, PPS_MINIDX(x)) &
+ ((val) << PPS_MINIDX(x)));
+}
+
+static inline u32 TRGTMODSELX(u32 x, u32 val)
+{
+ return (GENMASK(PPS_MAXIDX(x) - 1, PPS_MAXIDX(x) - 2) &
+ ((val) << (PPS_MAXIDX(x) - 2)));
+}
+
+static inline u32 PPSX_MASK(u32 x)
+{
+ return GENMASK(PPS_MAXIDX(x), PPS_MINIDX(x));
+}
+
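A worked example of the field helpers above for PPS channel 2 (the AVB class A channel); the values below are computed from the definitions in this header and are illustrative only:

/*
 *   PPS_MINIDX(2) = 16, PPS_MAXIDX(2) = 23
 *   PPSCMDX(2, 0x2)     = 0x00020000   (command field, bits 19:16)
 *   TRGTMODSELX(2, 0x2) = 0x00400000   (target mode select, bits 22:21)
 *   PPSX_MASK(2)        = 0x00ff0000   (entire per-channel field)
 */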
+struct ethqos_emac_por {
+ unsigned int offset;
+ unsigned int value;
+};
+
+static const struct ethqos_emac_por emac_v2_3_0_por[] = {
+ { .offset = RGMII_IO_MACRO_CONFIG, .value = 0x00C01343 },
+ { .offset = SDCC_HC_REG_DLL_CONFIG, .value = 0x2004642C },
+ { .offset = SDCC_HC_REG_DDR_CONFIG, .value = 0x00000000 },
+ { .offset = SDCC_HC_REG_DLL_CONFIG2, .value = 0x00200000 },
+ { .offset = SDCC_USR_CTL, .value = 0x00010800 },
+ { .offset = RGMII_IO_MACRO_CONFIG2, .value = 0x00002060 },
+};
+
+static const struct ethqos_emac_por emac_v2_3_2_por[] = {
+ { .offset = RGMII_IO_MACRO_CONFIG, .value = 0x00C01343 },
+ { .offset = SDCC_HC_REG_DLL_CONFIG, .value = 0x2004642C },
+ { .offset = SDCC_HC_REG_DDR_CONFIG, .value = 0x80040800 },
+ { .offset = SDCC_HC_REG_DLL_CONFIG2, .value = 0x00200000 },
+ { .offset = SDCC_USR_CTL, .value = 0x00010800 },
+ { .offset = RGMII_IO_MACRO_CONFIG2, .value = 0x00002060 },
+};
+
+struct qcom_ethqos {
+ struct platform_device *pdev;
+ void __iomem *rgmii_base;
+
+ unsigned int rgmii_clk_rate;
+ struct clk *rgmii_clk;
+ unsigned int speed;
+
+ int gpio_phy_intr_redirect;
+ u32 phy_intr;
+ /* Work struct for handling phy interrupt */
+ struct work_struct emac_phy_work;
+
+ const struct ethqos_emac_por *por;
+ unsigned int num_por;
+ unsigned int emac_ver;
+
+ struct regulator *gdsc_emac;
+ struct regulator *reg_rgmii;
+ struct regulator *reg_emac_phy;
+ struct regulator *reg_rgmii_io_pads;
+
+ u32 pps_class_a_irq;
+ u32 pps_class_b_irq;
+
+ struct pinctrl_state *emac_pps_0;
+
+ /* avb_class_a dev node variables*/
+ dev_t avb_class_a_dev_t;
+ struct cdev *avb_class_a_cdev;
+ struct class *avb_class_a_class;
+
+ /* avb_class_b dev node variables*/
+ dev_t avb_class_b_dev_t;
+ struct cdev *avb_class_b_cdev;
+ struct class *avb_class_b_class;
+
+ unsigned long avb_class_a_intr_cnt;
+ unsigned long avb_class_b_intr_cnt;
+ struct dentry *debugfs_dir;
+};
+
+struct pps_cfg {
+ unsigned int ptpclk_freq;
+ unsigned int ppsout_freq;
+ unsigned int ppsout_ch;
+ unsigned int ppsout_duty;
+ unsigned int ppsout_start;
+};
+
+struct ifr_data_struct {
+ unsigned int flags;
+ unsigned int qinx; /* dma channel no to be configured */
+ unsigned int cmd;
+ unsigned int context_setup;
+ unsigned int connected_speed;
+ unsigned int rwk_filter_values[8];
+ unsigned int rwk_filter_length;
+ int command_error;
+ int test_done;
+ void *ptr;
+};
+
+struct pps_info {
+ int channel_no;
+};
+
+int ethqos_init_reqgulators(struct qcom_ethqos *ethqos);
+void ethqos_disable_regulators(struct qcom_ethqos *ethqos);
+int ethqos_init_gpio(struct qcom_ethqos *ethqos);
+void ethqos_free_gpios(struct qcom_ethqos *ethqos);
+int create_pps_interrupt_device_node(dev_t *pps_dev_t,
+ struct cdev **pps_cdev,
+ struct class **pps_class,
+ char *pps_dev_node_name);
+#endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-gpio.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-gpio.c
new file mode 100644
index 000000000000..892943a9747b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-gpio.c
@@ -0,0 +1,237 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of_gpio.h>
+
+#include "stmmac.h"
+#include "dwmac-qcom-ethqos.h"
+
+#define EMAC_GDSC_EMAC_NAME "gdsc_emac"
+#define EMAC_VREG_RGMII_NAME "vreg_rgmii"
+#define EMAC_VREG_EMAC_PHY_NAME "vreg_emac_phy"
+#define EMAC_VREG_RGMII_IO_PADS_NAME "vreg_rgmii_io_pads"
+#define EMAC_PIN_PPS0 "dev-emac_pin_pps_0"
+
+
+static int setup_gpio_input_common
+ (struct device *dev, const char *name, int *gpio)
+{
+ int ret = 0;
+
+ if (of_find_property(dev->of_node, name, NULL)) {
+ *gpio = ret = of_get_named_gpio(dev->of_node, name, 0);
+ if (ret >= 0) {
+ ret = gpio_request(*gpio, name);
+ if (ret) {
+ ETHQOSERR("%s: Can't get GPIO %s, ret = %d\n",
+ name, *gpio);
+ *gpio = -1;
+ return ret;
+ }
+
+ ret = gpio_direction_input(*gpio);
+ if (ret) {
+ ETHQOSERR(
+ "%s: Can't set GPIO %s direction, ret = %d\n",
+ name, ret);
+ return ret;
+ }
+ } else {
+ if (ret == -EPROBE_DEFER)
+ ETHQOSERR("get EMAC_GPIO probe defer\n");
+ else
+ ETHQOSERR("can't get gpio %s ret %d\n", name,
+ ret);
+ return ret;
+ }
+ } else {
+ ETHQOSERR("can't find gpio %s\n", name);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int ethqos_init_reqgulators(struct qcom_ethqos *ethqos)
+{
+ int ret = 0;
+
+ if (of_property_read_bool(ethqos->pdev->dev.of_node,
+ "gdsc_emac-supply")) {
+ ethqos->gdsc_emac =
+ devm_regulator_get(&ethqos->pdev->dev, EMAC_GDSC_EMAC_NAME);
+ if (IS_ERR(ethqos->gdsc_emac)) {
+ ETHQOSERR("Can not get <%s>\n", EMAC_GDSC_EMAC_NAME);
+ return PTR_ERR(ethqos->gdsc_emac);
+ }
+
+ ret = regulator_enable(ethqos->gdsc_emac);
+ if (ret) {
+ ETHQOSERR("Can not enable <%s>\n", EMAC_GDSC_EMAC_NAME);
+ goto reg_error;
+ }
+
+ ETHQOSDBG("Enabled <%s>\n", EMAC_GDSC_EMAC_NAME);
+ }
+
+ if (of_property_read_bool(ethqos->pdev->dev.of_node,
+ "vreg_rgmii-supply")) {
+ ethqos->reg_rgmii =
+ devm_regulator_get(&ethqos->pdev->dev, EMAC_VREG_RGMII_NAME);
+ if (IS_ERR(ethqos->reg_rgmii)) {
+ ETHQOSERR("Can not get <%s>\n", EMAC_VREG_RGMII_NAME);
+ return PTR_ERR(ethqos->reg_rgmii);
+ }
+
+ ret = regulator_enable(ethqos->reg_rgmii);
+ if (ret) {
+ ETHQOSERR("Can not enable <%s>\n",
+ EMAC_VREG_RGMII_NAME);
+ goto reg_error;
+ }
+
+ ETHQOSDBG("Enabled <%s>\n", EMAC_VREG_RGMII_NAME);
+ }
+
+ if (of_property_read_bool(ethqos->pdev->dev.of_node,
+ "vreg_emac_phy-supply")) {
+ ethqos->reg_emac_phy =
+ devm_regulator_get(&ethqos->pdev->dev, EMAC_VREG_EMAC_PHY_NAME);
+ if (IS_ERR(ethqos->reg_emac_phy)) {
+ ETHQOSERR("Can not get <%s>\n",
+ EMAC_VREG_EMAC_PHY_NAME);
+ return PTR_ERR(ethqos->reg_emac_phy);
+ }
+
+ ret = regulator_enable(ethqos->reg_emac_phy);
+ if (ret) {
+ ETHQOSERR("Can not enable <%s>\n",
+ EMAC_VREG_EMAC_PHY_NAME);
+ goto reg_error;
+ }
+
+ ETHQOSDBG("Enabled <%s>\n", EMAC_VREG_EMAC_PHY_NAME);
+ }
+
+ if (of_property_read_bool(ethqos->pdev->dev.of_node,
+ "vreg_rgmii_io_pads-supply")) {
+ ethqos->reg_rgmii_io_pads = devm_regulator_get
+ (&ethqos->pdev->dev, EMAC_VREG_RGMII_IO_PADS_NAME);
+ if (IS_ERR(ethqos->reg_rgmii_io_pads)) {
+ ETHQOSERR("Can not get <%s>\n",
+ EMAC_VREG_RGMII_IO_PADS_NAME);
+ return PTR_ERR(ethqos->reg_rgmii_io_pads);
+ }
+
+ ret = regulator_enable(ethqos->reg_rgmii_io_pads);
+ if (ret) {
+ ETHQOSERR("Can not enable <%s>\n",
+ EMAC_VREG_RGMII_IO_PADS_NAME);
+ goto reg_error;
+ }
+
+ ETHQOSDBG("Enabled <%s>\n", EMAC_VREG_RGMII_IO_PADS_NAME);
+ }
+
+ return ret;
+
+reg_error:
+ ETHQOSERR("%s failed\n", __func__);
+ ethqos_disable_regulators(ethqos);
+ return ret;
+}
+
+void ethqos_disable_regulators(struct qcom_ethqos *ethqos)
+{
+ if (ethqos->reg_rgmii) {
+ regulator_disable(ethqos->reg_rgmii);
+ devm_regulator_put(ethqos->reg_rgmii);
+ ethqos->reg_rgmii = NULL;
+ }
+
+ if (ethqos->reg_emac_phy) {
+ regulator_disable(ethqos->reg_emac_phy);
+ devm_regulator_put(ethqos->reg_emac_phy);
+ ethqos->reg_emac_phy = NULL;
+ }
+
+ if (ethqos->reg_rgmii_io_pads) {
+ regulator_disable(ethqos->reg_rgmii_io_pads);
+ devm_regulator_put(ethqos->reg_rgmii_io_pads);
+ ethqos->reg_rgmii_io_pads = NULL;
+ }
+
+ if (ethqos->gdsc_emac) {
+ regulator_disable(ethqos->gdsc_emac);
+ devm_regulator_put(ethqos->gdsc_emac);
+ ethqos->gdsc_emac = NULL;
+ }
+}
+
+void ethqos_free_gpios(struct qcom_ethqos *ethqos)
+{
+ if (gpio_is_valid(ethqos->gpio_phy_intr_redirect))
+ gpio_free(ethqos->gpio_phy_intr_redirect);
+ ethqos->gpio_phy_intr_redirect = -1;
+}
+
+int ethqos_init_gpio(struct qcom_ethqos *ethqos)
+{
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *emac_pps_0;
+ int ret = 0;
+
+ ethqos->gpio_phy_intr_redirect = -1;
+
+ pinctrl = devm_pinctrl_get(&ethqos->pdev->dev);
+ if (IS_ERR_OR_NULL(pinctrl)) {
+ ret = PTR_ERR(pinctrl);
+ ETHQOSERR("Failed to get pinctrl, err = %d\n", ret);
+ return ret;
+ }
+ ETHQOSDBG("get pinctrl succeed\n");
+
+ ret = setup_gpio_input_common(
+ &ethqos->pdev->dev, "qcom,phy-intr-redirect",
+ &ethqos->gpio_phy_intr_redirect);
+
+ if (ret) {
+ ETHQOSERR("Failed to setup <%s> gpio\n",
+ "qcom,phy-intr-redirect");
+ goto gpio_error;
+ }
+
+ emac_pps_0 = pinctrl_lookup_state(pinctrl, EMAC_PIN_PPS0);
+ if (IS_ERR_OR_NULL(emac_pps_0)) {
+ ret = PTR_ERR(emac_pps_0);
+ ETHQOSERR("Failed to get emac_pps_0, err = %d\n", ret);
+ return ret;
+ }
+ ETHQOSDBG("Get emac_pps_0 succeed\n");
+ ret = pinctrl_select_state(pinctrl, emac_pps_0);
+ if (ret)
+ ETHQOSERR("Unable to set emac_pps_0 state, err = %d\n",
+ ret);
+ else
+ ETHQOSDBG("Set emac_pps_0 succeed\n");
+
+ return ret;
+
+gpio_error:
+ ethqos_free_gpios(ethqos);
+ return ret;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-pps.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-pps.c
new file mode 100644
index 000000000000..64edb6e2d60b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-pps.c
@@ -0,0 +1,444 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of_gpio.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mii.h>
+#include <linux/of_mdio.h>
+#include <linux/slab.h>
+#include <linux/ipc_logging.h>
+#include <linux/poll.h>
+#include <linux/debugfs.h>
+#include "stmmac.h"
+#include "stmmac_platform.h"
+#include "stmmac_ptp.h"
+#include "dwmac-qcom-ethqos.h"
+
+extern struct qcom_ethqos *pethqos;
+
+static bool avb_class_a_msg_wq_flag;
+static bool avb_class_b_msg_wq_flag;
+
+static DECLARE_WAIT_QUEUE_HEAD(avb_class_a_msg_wq);
+static DECLARE_WAIT_QUEUE_HEAD(avb_class_b_msg_wq);
+
+static int strlcmp(const char *s, const char *t, size_t n)
+{
+ int ret = 0;
+
+ while (n-- && *t != '\0') {
+ if (*s != *t) {
+ ret = ((unsigned char)*s - (unsigned char)*t);
+ n = 0;
+ } else {
+ ++s, ++t;
+ ret = (unsigned char)*s;
+ }
+ }
+ return ret;
+}
+
+static u32 pps_config_sub_second_increment(void __iomem *ioaddr,
+ u32 ptp_clock, int gmac4)
+{
+ u32 value = readl_relaxed(ioaddr + PTP_TCR);
+ unsigned long data;
+ unsigned int sns_inc = 0;
+ u32 reg_value;
+ u32 reg_value2;
+ /* For GMAC 3.x and 4.x, convert ptp_clock to a nanosecond increment:
+ * increment = (1 / ptp_clock) * 1000000000
+ * where ptp_clock is 50 MHz when the fine update method is used.
+ */
+ if (value & PTP_TCR_TSCFUPDT) {
+ data = (1000000000ULL / ptp_clock);
+ sns_inc = 1000000000ull - (data * ptp_clock);
+ sns_inc = (sns_inc * 256) / ptp_clock;
+
+ } else {
+ data = (1000000000ULL / ptp_clock);
+ }
+ /* 0.465ns accuracy */
+ if (!(value & PTP_TCR_TSCTRLSSR))
+ data = (data * 1000) / 465;
+
+ data &= PTP_SSIR_SSINC_MASK;
+
+ reg_value = data;
+ if (gmac4)
+ reg_value <<= GMAC4_PTP_SSIR_SSINC_SHIFT;
+
+ sns_inc &= PTP_SSIR_SNSINC_MASK;
+ reg_value2 = sns_inc;
+ if (gmac4)
+ reg_value2 <<= GMAC4_PTP_SSIR_SNSINC_SHIFT;
+ writel_relaxed(reg_value + reg_value2, ioaddr + PTP_SSIR);
+ return data;
+}
+
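A worked example of the increment computed above (illustrative):

/*
 * With ptp_clock = 50 MHz, fine update (PTP_TCR_TSCFUPDT) and digital
 * rollover (PTP_TCR_TSCTRLSSR) enabled:
 *   data    = 1000000000 / 50000000 = 20          -> SSINC = 20 ns per tick
 *   sns_inc = (1e9 - 20 * 50e6) * 256 / 50e6 = 0  (no sub-ns remainder)
 * With a clock that does not divide evenly, e.g. 66 MHz:
 *   data = 15, sns_inc = (1e9 - 15 * 66e6) * 256 / 66e6 ~= 38
 */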
+static u32 pps_config_default_addend(void __iomem *ioaddr,
+ struct stmmac_priv *priv, u32 ptp_clock)
+{
+ u64 temp;
+
+ /* formula is :
+ * addend = 2^32/freq_div_ratio;
+ *
+ * where, freq_div_ratio = DWC_ETH_QOS_SYSCLOCK/50MHz
+ *
+ * hence, addend = ((2^32) * 50MHz)/DWC_ETH_QOS_SYSCLOCK;
+ *
+ * NOTE: DWC_ETH_QOS_SYSCLOCK should be >= 50MHz to
+ * achieve 20 ns accuracy.
+ *
+ * 2^x * y == (y << x), hence
+ * 2^32 * 50000000 ==> (50000000 << 32)
+ */
+ if (ptp_clock == 250000000) {
+ // If PTP_CLOCK == SYS_CLOCK, best we can do is 2^32 - 1
+ priv->default_addend = 0xFFFFFFFF;
+ } else {
+ temp = (u64)((u64)ptp_clock << 32);
+ priv->default_addend = div_u64(temp, 250000000);
+ }
+ priv->hw->ptp->config_addend(ioaddr, priv->default_addend);
+
+ return 1;
+}
+
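And a worked instance of the addend formula in the comment above, using the fixed 250 MHz divisor in the code (illustrative):

/*
 * For ptp_clock = 50 MHz:
 *   addend = (50000000 << 32) / 250000000 = 2^32 / 5 = 0x33333333
 * so the accumulator wraps (advancing the sub-second counter by SSINC)
 * once every 5 system-clock cycles, i.e. at the requested 50 MHz.
 */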
+int ppsout_stop(struct stmmac_priv *priv, struct pps_cfg *eth_pps_cfg)
+{
+ void __iomem *ioaddr = priv->ioaddr;
+ /* read the current control value so other PPS channels are preserved */
+ u32 val = readl_relaxed(ioaddr + MAC_PPS_CONTROL);
+
+ val |= PPSCMDX(eth_pps_cfg->ppsout_ch, 0x5);
+ val |= TRGTMODSELX(eth_pps_cfg->ppsout_ch, 0x3);
+ val |= PPSEN0;
+ writel_relaxed(val, ioaddr + MAC_PPS_CONTROL);
+ return 0;
+}
+
+static irqreturn_t ethqos_pps_avb_class_a(int irq, void *dev_id)
+{
+ struct stmmac_priv *priv =
+ (struct stmmac_priv *)dev_id;
+
+ struct qcom_ethqos *ethqos = priv->plat->bsp_priv;
+
+ ethqos->avb_class_a_intr_cnt++;
+ avb_class_a_msg_wq_flag = 1;
+ wake_up_interruptible(&avb_class_a_msg_wq);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ethqos_pps_avb_class_b(int irq, void *dev_id)
+{
+ struct stmmac_priv *priv =
+ (struct stmmac_priv *)dev_id;
+
+ struct qcom_ethqos *ethqos = priv->plat->bsp_priv;
+
+ ethqos->avb_class_b_intr_cnt++;
+ avb_class_b_msg_wq_flag = 1;
+ wake_up_interruptible(&avb_class_b_msg_wq);
+ return IRQ_HANDLED;
+}
+
+static void ethqos_register_pps_isr(struct stmmac_priv *priv, int ch)
+{
+ int ret;
+ struct qcom_ethqos *ethqos = priv->plat->bsp_priv;
+
+ if (ch == DWC_ETH_QOS_PPS_CH_2) {
+ ret = request_irq(ethqos->pps_class_a_irq,
+ ethqos_pps_avb_class_a,
+ IRQF_TRIGGER_RISING, "stmmac_pps", priv);
+ if (ret)
+ ETHQOSERR("pps_avb_class_a_irq Failed ret=%d\n", ret);
+ else
+ ETHQOSDBG("pps_avb_class_a_irq pass\n");
+
+ } else if (ch == DWC_ETH_QOS_PPS_CH_3) {
+ ret = request_irq(ethqos->pps_class_b_irq,
+ ethqos_pps_avb_class_b,
+ IRQF_TRIGGER_RISING, "stmmac_pps", priv);
+ if (ret)
+ ETHQOSERR("pps_avb_class_b_irq Failed ret=%d\n", ret);
+ else
+ ETHQOSDBG("pps_avb_class_b_irq pass\n");
+ }
+}
+
+int ppsout_config(struct stmmac_priv *priv, struct ifr_data_struct *req)
+{
+ struct pps_cfg *eth_pps_cfg = (struct pps_cfg *)req->ptr;
+ int interval, width;
+ u32 sub_second_inc, value, val;
+ void __iomem *ioaddr = priv->ioaddr;
+
+ if (!eth_pps_cfg->ppsout_start) {
+ ppsout_stop(priv, eth_pps_cfg);
+ return 0;
+ }
+
+ value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSUPDT);
+ priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
+ priv->hw->ptp->init_systime(priv->ptpaddr, 0, 0);
+ priv->hw->ptp->adjust_systime(priv->ptpaddr, 0, 0, 0, 1);
+
+ val = readl_relaxed(ioaddr + MAC_PPS_CONTROL);
+
+ sub_second_inc = pps_config_sub_second_increment
+ (priv->ptpaddr, eth_pps_cfg->ptpclk_freq,
+ priv->plat->has_gmac4);
+ pps_config_default_addend(priv->ptpaddr, priv,
+ eth_pps_cfg->ptpclk_freq);
+
+ val &= ~PPSX_MASK(eth_pps_cfg->ppsout_ch);
+
+ val |= PPSCMDX(eth_pps_cfg->ppsout_ch, 0x2);
+ val |= TRGTMODSELX(eth_pps_cfg->ppsout_ch, 0x2);
+ val |= PPSEN0;
+
+ if (eth_pps_cfg->ppsout_ch == DWC_ETH_QOS_PPS_CH_2 ||
+ eth_pps_cfg->ppsout_ch == DWC_ETH_QOS_PPS_CH_3)
+ ethqos_register_pps_isr(priv, eth_pps_cfg->ppsout_ch);
+
+ writel_relaxed(0, ioaddr +
+ MAC_PPSX_TARGET_TIME_SEC(eth_pps_cfg->ppsout_ch));
+
+ writel_relaxed(0, ioaddr +
+ MAC_PPSX_TARGET_TIME_NSEC(eth_pps_cfg->ppsout_ch));
+
+ interval = ((eth_pps_cfg->ptpclk_freq + eth_pps_cfg->ppsout_freq / 2)
+ / eth_pps_cfg->ppsout_freq);
+
+ width = ((interval * eth_pps_cfg->ppsout_duty) + 50) / 100 - 1;
+ if (width >= interval)
+ width = interval - 1;
+ if (width < 0)
+ width = 0;
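+ /* Worked example (illustrative only): ptpclk_freq = 250MHz,
+ * ppsout_freq = 1Hz and ppsout_duty = 50 give interval = 250000000
+ * cycles and width = (250000000 * 50 + 50) / 100 - 1 = 124999999
+ * cycles, i.e. a 1Hz output with a ~50% duty cycle.
+ */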
+
+ writel_relaxed(interval, ioaddr +
+ MAC_PPSX_INTERVAL(eth_pps_cfg->ppsout_ch));
+
+ writel_relaxed(width, ioaddr + MAC_PPSX_WIDTH(eth_pps_cfg->ppsout_ch));
+
+ writel_relaxed(val, ioaddr + MAC_PPS_CONTROL);
+
+ return 0;
+}
+
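+/* Private ioctl entry point, reached through SIOCDEVPRIVATE from
+ * stmmac_ioctl(). Userspace passes a struct ifr_data_struct whose cmd
+ * field selects the sub-command and whose ptr field points at the
+ * payload (a struct pps_cfg for ETHQOS_CONFIG_PPSOUT_CMD); both are
+ * copied in with copy_from_user() before use.
+ */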
+int ethqos_handle_prv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct stmmac_priv *pdata = netdev_priv(dev);
+ struct ifr_data_struct req;
+ struct pps_cfg eth_pps_cfg;
+ int ret = 0;
+
+ if (copy_from_user(&req, ifr->ifr_ifru.ifru_data,
+ sizeof(struct ifr_data_struct)))
+ return -EFAULT;
+ if (copy_from_user(&eth_pps_cfg, req.ptr,
+ sizeof(struct pps_cfg)))
+ return -EFAULT;
+ req.ptr = &eth_pps_cfg;
+ switch (req.cmd) {
+ case ETHQOS_CONFIG_PPSOUT_CMD:
+ ret = ppsout_config(pdata, &req);
+ }
+ return ret;
+}
+
+static ssize_t pps_fops_read(struct file *filp, char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ unsigned int len = 0, buf_len = 5000;
+ char *temp_buf;
+ ssize_t ret_cnt = 0;
+ struct pps_info *info;
+
+ info = filp->private_data;
+
+ if (info->channel_no == AVB_CLASS_A_CHANNEL_NUM) {
+ avb_class_a_msg_wq_flag = 0;
+ temp_buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!temp_buf)
+ return -ENOMEM;
+
+ if (pethqos)
+ len = scnprintf(temp_buf, buf_len,
+ "%ld\n", pethqos->avb_class_a_intr_cnt);
+ else
+ len = scnprintf(temp_buf, buf_len, "0\n");
+
+ ret_cnt = simple_read_from_buffer(buf, count, f_pos,
+ temp_buf, len);
+ kfree(temp_buf);
+ if (pethqos)
+ ETHQOSERR("poll pps2intr info=%d sent by kernel\n",
+ pethqos->avb_class_a_intr_cnt);
+ } else if (info->channel_no == AVB_CLASS_B_CHANNEL_NUM) {
+ avb_class_b_msg_wq_flag = 0;
+ temp_buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!temp_buf)
+ return -ENOMEM;
+
+ if (pethqos)
+ len = scnprintf(temp_buf, buf_len,
+ "%ld\n", pethqos->avb_class_b_intr_cnt);
+ else
+ len = scnprintf(temp_buf, buf_len, "0\n");
+
+ ret_cnt = simple_read_from_buffer
+ (buf, count, f_pos, temp_buf, len);
+ kfree(temp_buf);
+
+ } else {
+ ETHQOSERR("invalid channel %d\n", info->channel_no);
+ }
+ return ret_cnt;
+}
+
+static unsigned int pps_fops_poll(struct file *file, poll_table *wait)
+{
+ unsigned int mask = 0;
+ struct pps_info *info;
+
+ info = file->private_data;
+ if (info->channel_no == AVB_CLASS_A_CHANNEL_NUM) {
+ ETHQOSERR("avb_class_a_fops_poll wait\n");
+
+ poll_wait(file, &avb_class_a_msg_wq, wait);
+
+ if (avb_class_a_msg_wq_flag == 1) {
+ /* Sending read mask */
+ mask |= POLLIN | POLLRDNORM;
+ }
+ } else if (info->channel_no == AVB_CLASS_B_CHANNEL_NUM) {
+ poll_wait(file, &avb_class_b_msg_wq, wait);
+
+ if (avb_class_b_msg_wq_flag == 1) {
+ /* Sending read mask */
+ mask |= POLLIN | POLLRDNORM;
+ }
+ } else {
+ ETHQOSERR("invalid channel %d\n", info->channel_no);
+ }
+ return mask;
+}
+
+static int pps_open(struct inode *inode, struct file *file)
+{
+ struct pps_info *info;
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ if (!strncmp(file->f_path.dentry->d_iname,
+ AVB_CLASS_A_POLL_DEV_NODE,
+ strlen(AVB_CLASS_A_POLL_DEV_NODE))) {
+ ETHQOSERR("pps open file name =%s\n",
+ file->f_path.dentry->d_iname);
+ info->channel_no = AVB_CLASS_A_CHANNEL_NUM;
+ } else if (!strncmp(file->f_path.dentry->d_iname,
+ AVB_CLASS_B_POLL_DEV_NODE,
+ strlen(AVB_CLASS_B_POLL_DEV_NODE))) {
+ ETHQOSERR("pps open file name =%s\n",
+ file->f_path.dentry->d_iname);
+ info->channel_no = AVB_CLASS_B_CHANNEL_NUM;
+ } else {
+ ETHQOSERR("strncmp failed for %s\n",
+ file->f_path.dentry->d_iname);
+ /* Unknown node: do not leave channel_no uninitialized */
+ kfree(info);
+ return -EINVAL;
+ }
+ file->private_data = info;
+ return 0;
+}
+
+static int pps_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static const struct file_operations pps_fops = {
+ .owner = THIS_MODULE,
+ .open = pps_open,
+ .release = pps_release,
+ .read = pps_fops_read,
+ .poll = pps_fops_poll,
+};
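+
+/* Expected userspace interaction (illustrative sketch, not part of the
+ * original change): open the AVB class A or class B device node, poll()
+ * until the PPS interrupt handler signals POLLIN, then read() the
+ * accumulated interrupt count as a decimal string. The node names come
+ * from AVB_CLASS_A_POLL_DEV_NODE / AVB_CLASS_B_POLL_DEV_NODE, defined
+ * elsewhere in the driver.
+ */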
+
+int create_pps_interrupt_device_node(dev_t *pps_dev_t,
+ struct cdev **pps_cdev,
+ struct class **pps_class,
+ char *pps_dev_node_name)
+{
+ int ret;
+
+ ret = alloc_chrdev_region(pps_dev_t, 0, 1,
+ pps_dev_node_name);
+ if (ret) {
+ ETHQOSERR("alloc_chrdev_region error for node %s\n",
+ pps_dev_node_name);
+ goto alloc_chrdev1_region_fail;
+ }
+
+ *pps_cdev = cdev_alloc();
+ if (!*pps_cdev) {
+ ret = -ENOMEM;
+ ETHQOSERR("failed to alloc cdev\n");
+ goto fail_alloc_cdev;
+ }
+ cdev_init(*pps_cdev, &pps_fops);
+
+ ret = cdev_add(*pps_cdev, *pps_dev_t, 1);
+ if (ret < 0) {
+ ETHQOSERR(":cdev_add err=%d\n", -ret);
+ goto cdev1_add_fail;
+ }
+
+ *pps_class = class_create(THIS_MODULE, pps_dev_node_name);
+ if (IS_ERR(*pps_class)) {
+ /* class_create() returns ERR_PTR(), never NULL */
+ ret = PTR_ERR(*pps_class);
+ ETHQOSERR("failed to create class\n");
+ goto fail_create_class;
+ }
+
+ if (IS_ERR(device_create(*pps_class, NULL,
+ *pps_dev_t, NULL, pps_dev_node_name))) {
+ /* device_create() also returns ERR_PTR() on failure */
+ ret = -EINVAL;
+ ETHQOSERR("device_create failed\n");
+ goto fail_create_device;
+ }
+
+ return 0;
+
+fail_create_device:
+ class_destroy(*pps_class);
+fail_create_class:
+ cdev_del(*pps_cdev);
+cdev1_add_fail:
+fail_alloc_cdev:
+ unregister_chrdev_region(*pps_dev_t, 1);
+alloc_chrdev1_region_fail:
+ return ret;
+}
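+
+/* Minimal caller sketch (illustrative only; the variable names are
+ * hypothetical):
+ *
+ * static dev_t avb_dev_t;
+ * static struct cdev *avb_cdev;
+ * static struct class *avb_class;
+ *
+ * ret = create_pps_interrupt_device_node(&avb_dev_t, &avb_cdev,
+ *	&avb_class, AVB_CLASS_A_POLL_DEV_NODE);
+ *
+ * On success this registers a /dev node backed by pps_fops above.
+ */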
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index d4c3bf78d928..c6a3d804db3a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -47,10 +47,10 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
if (nopaged_len > BUF_SIZE_8KiB) {
- des2 = dma_map_single(priv->device, skb->data, bmax,
+ des2 = dma_map_single(GET_MEM_PDEV_DEV, skb->data, bmax,
DMA_TO_DEVICE);
desc->des2 = cpu_to_le32(des2);
- if (dma_mapping_error(priv->device, des2))
+ if (dma_mapping_error(GET_MEM_PDEV_DEV, des2))
return -1;
tx_q->tx_skbuff_dma[entry].buf = des2;
@@ -69,10 +69,10 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
else
desc = tx_q->dma_tx + entry;
- des2 = dma_map_single(priv->device, skb->data + bmax, len,
+ des2 = dma_map_single(GET_MEM_PDEV_DEV, skb->data + bmax, len,
DMA_TO_DEVICE);
desc->des2 = cpu_to_le32(des2);
- if (dma_mapping_error(priv->device, des2))
+ if (dma_mapping_error(GET_MEM_PDEV_DEV, des2))
return -1;
tx_q->tx_skbuff_dma[entry].buf = des2;
tx_q->tx_skbuff_dma[entry].len = len;
@@ -83,10 +83,10 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
STMMAC_RING_MODE, 1,
true, skb->len);
} else {
- des2 = dma_map_single(priv->device, skb->data,
+ des2 = dma_map_single(GET_MEM_PDEV_DEV, skb->data,
nopaged_len, DMA_TO_DEVICE);
desc->des2 = cpu_to_le32(des2);
- if (dma_mapping_error(priv->device, des2))
+ if (dma_mapping_error(GET_MEM_PDEV_DEV, des2))
return -1;
tx_q->tx_skbuff_dma[entry].buf = des2;
tx_q->tx_skbuff_dma[entry].len = nopaged_len;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index a916e13624eb..c441a4514dd1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -96,7 +96,9 @@ struct stmmac_priv {
struct net_device *dev;
struct device *device;
struct mac_device_info *hw;
- spinlock_t lock;
+
+ /* Mutex lock */
+ struct mutex lock;
/* RX Queue */
struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
@@ -147,6 +149,26 @@ struct stmmac_priv {
#endif
};
+struct emac_emb_smmu_cb_ctx {
+ bool valid;
+ struct platform_device *pdev_master;
+ struct platform_device *smmu_pdev;
+ struct dma_iommu_mapping *mapping;
+ struct iommu_domain *iommu_domain;
+ u32 va_start;
+ u32 va_size;
+ u32 va_end;
+ int ret;
+};
+
+extern struct emac_emb_smmu_cb_ctx emac_emb_smmu_ctx;
+
+#define GET_MEM_PDEV_DEV (emac_emb_smmu_ctx.valid ? \
+ &emac_emb_smmu_ctx.smmu_pdev->dev : priv->device)
+
+int ethqos_handle_prv_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+
+extern bool phy_intr_en;
int stmmac_mdio_unregister(struct net_device *ndev);
int stmmac_mdio_register(struct net_device *ndev);
int stmmac_mdio_reset(struct mii_bus *mii);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index ecddd9948788..8c7109008185 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -392,13 +392,13 @@ stmmac_ethtool_set_link_ksettings(struct net_device *dev,
ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full);
- spin_lock(&priv->lock);
+ mutex_lock(&priv->lock);
if (priv->hw->mac->pcs_ctrl_ane)
priv->hw->mac->pcs_ctrl_ane(priv->ioaddr, 1,
priv->hw->ps, 0);
- spin_unlock(&priv->lock);
+ mutex_unlock(&priv->lock);
return 0;
}
@@ -615,12 +615,12 @@ static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct stmmac_priv *priv = netdev_priv(dev);
- spin_lock_irq(&priv->lock);
+ mutex_lock(&priv->lock);
if (device_can_wakeup(priv->device)) {
wol->supported = WAKE_MAGIC | WAKE_UCAST;
wol->wolopts = priv->wolopts;
}
- spin_unlock_irq(&priv->lock);
+ mutex_unlock(&priv->lock);
}
static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -649,9 +649,9 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
disable_irq_wake(priv->wol_irq);
}
- spin_lock_irq(&priv->lock);
+ mutex_lock(&priv->lock);
priv->wolopts = wol->wolopts;
- spin_unlock_irq(&priv->lock);
+ mutex_unlock(&priv->lock);
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 08c19ebd5306..66521360b4b0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -24,6 +24,8 @@
#include "common.h"
#include "stmmac_ptp.h"
+#define PTP_LIMIT 100000
+
static void stmmac_config_hw_tstamping(void __iomem *ioaddr, u32 data)
{
writel(data, ioaddr + PTP_TCR);
@@ -65,6 +67,16 @@ static int stmmac_init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
int limit;
u32 value;
+ /* wait for previous(if any) time initialization to complete. */
+ limit = PTP_LIMIT;
+ while (limit--) {
+ if (!(readl_relaxed(ioaddr + PTP_TCR) & PTP_TCR_TSINIT))
+ break;
+ usleep_range(1000, 1500);
+ }
+ if (limit < 0)
+ return -EBUSY;
+
writel(sec, ioaddr + PTP_STSUR);
writel(nsec, ioaddr + PTP_STNSUR);
/* issue command to initialize the system time value */
@@ -115,6 +127,16 @@ static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
u32 value;
int limit;
+ /* wait for previous(if any) time adjust/update to complete. */
+ limit = PTP_LIMIT;
+ while (limit--) {
+ if (!(readl_relaxed(ioaddr + PTP_TCR) & PTP_TCR_TSUPDT))
+ break;
+ usleep_range(1000, 1500);
+ }
+ if (limit < 0)
+ return -EBUSY;
+
if (add_sub) {
/* If the new sec value needs to be subtracted with
* the system time, then MAC_STSUR reg should be
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 0f85e540001f..137503b3112f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -392,7 +392,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
* changed).
* In that case the driver disable own timers.
*/
- spin_lock_irqsave(&priv->lock, flags);
+ mutex_lock(&priv->lock);
if (priv->eee_active) {
netdev_dbg(priv->dev, "disable EEE\n");
del_timer_sync(&priv->eee_ctrl_timer);
@@ -400,11 +400,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
tx_lpi_timer);
}
priv->eee_active = 0;
- spin_unlock_irqrestore(&priv->lock, flags);
+ mutex_unlock(&priv->lock);
goto out;
}
/* Activate the EEE and start timers */
- spin_lock_irqsave(&priv->lock, flags);
+ mutex_lock(&priv->lock);
if (!priv->eee_active) {
priv->eee_active = 1;
setup_timer(&priv->eee_ctrl_timer,
@@ -421,7 +421,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
ret = true;
- spin_unlock_irqrestore(&priv->lock, flags);
+ mutex_unlock(&priv->lock);
netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
}
@@ -562,9 +562,6 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
/* PTP v1, UDP, any kind of event packet */
config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
/* take time stamp for all event messages */
- if (priv->plat->has_gmac4)
- snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
- else
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
@@ -597,9 +594,6 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for all event messages */
- if (priv->plat->has_gmac4)
- snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
- else
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
@@ -634,9 +628,6 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
/* take time stamp for all event messages */
- if (priv->plat->has_gmac4)
- snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
- else
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
@@ -805,7 +796,7 @@ static void stmmac_adjust_link(struct net_device *dev)
if (!phydev)
return;
- spin_lock_irqsave(&priv->lock, flags);
+ mutex_lock(&priv->lock);
if (phydev->link) {
u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
@@ -864,7 +855,7 @@ static void stmmac_adjust_link(struct net_device *dev)
if (new_state && netif_msg_link(priv))
phy_print_status(phydev);
- spin_unlock_irqrestore(&priv->lock, flags);
+ mutex_unlock(&priv->lock);
if (phydev->is_pseudo_fixed_link)
/* Stop PHY layer to call the hook to adjust the link in case
@@ -983,6 +974,16 @@ static int stmmac_init_phy(struct net_device *dev)
if (phydev->is_pseudo_fixed_link)
phydev->irq = PHY_POLL;
+ if (phy_intr_en) {
+ phydev->irq = PHY_IGNORE_INTERRUPT;
+ phydev->interrupts = PHY_INTERRUPT_ENABLED;
+
+ if (phydev->drv->config_intr &&
+ !phydev->drv->config_intr(phydev)) {
+ pr_debug(" qcom-ethqos: %s config_phy_intr successful\n",
+ __func__);
+ }
+ }
phy_attached_info(phydev);
return 0;
}
@@ -1147,10 +1148,10 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
return -ENOMEM;
}
rx_q->rx_skbuff[i] = skb;
- rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
+ rx_q->rx_skbuff_dma[i] = dma_map_single(GET_MEM_PDEV_DEV, skb->data,
priv->dma_buf_sz,
DMA_FROM_DEVICE);
- if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
+ if (dma_mapping_error(GET_MEM_PDEV_DEV, rx_q->rx_skbuff_dma[i])) {
netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
dev_kfree_skb_any(skb);
return -EINVAL;
@@ -1179,7 +1180,7 @@ static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
if (rx_q->rx_skbuff[i]) {
- dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
+ dma_unmap_single(GET_MEM_PDEV_DEV, rx_q->rx_skbuff_dma[i],
priv->dma_buf_sz, DMA_FROM_DEVICE);
dev_kfree_skb_any(rx_q->rx_skbuff[i]);
}
@@ -1198,12 +1199,12 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
if (tx_q->tx_skbuff_dma[i].buf) {
if (tx_q->tx_skbuff_dma[i].map_as_page)
- dma_unmap_page(priv->device,
+ dma_unmap_page(GET_MEM_PDEV_DEV,
tx_q->tx_skbuff_dma[i].buf,
tx_q->tx_skbuff_dma[i].len,
DMA_TO_DEVICE);
else
- dma_unmap_single(priv->device,
+ dma_unmap_single(GET_MEM_PDEV_DEV,
tx_q->tx_skbuff_dma[i].buf,
tx_q->tx_skbuff_dma[i].len,
DMA_TO_DEVICE);
@@ -1444,11 +1445,11 @@ static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
/* Free DMA regions of consistent memory previously allocated */
if (!priv->extend_desc)
- dma_free_coherent(priv->device,
+ dma_free_coherent(GET_MEM_PDEV_DEV,
DMA_RX_SIZE * sizeof(struct dma_desc),
rx_q->dma_rx, rx_q->dma_rx_phy);
else
- dma_free_coherent(priv->device, DMA_RX_SIZE *
+ dma_free_coherent(GET_MEM_PDEV_DEV, DMA_RX_SIZE *
sizeof(struct dma_extended_desc),
rx_q->dma_erx, rx_q->dma_rx_phy);
@@ -1475,11 +1476,11 @@ static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
/* Free DMA regions of consistent memory previously allocated */
if (!priv->extend_desc)
- dma_free_coherent(priv->device,
+ dma_free_coherent(GET_MEM_PDEV_DEV,
DMA_TX_SIZE * sizeof(struct dma_desc),
tx_q->dma_tx, tx_q->dma_tx_phy);
else
- dma_free_coherent(priv->device, DMA_TX_SIZE *
+ dma_free_coherent(GET_MEM_PDEV_DEV, DMA_TX_SIZE *
sizeof(struct dma_extended_desc),
tx_q->dma_etx, tx_q->dma_tx_phy);
@@ -1522,7 +1523,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
goto err_dma;
if (priv->extend_desc) {
- rx_q->dma_erx = dma_zalloc_coherent(priv->device,
+ rx_q->dma_erx = dma_zalloc_coherent(GET_MEM_PDEV_DEV,
DMA_RX_SIZE *
sizeof(struct
dma_extended_desc),
@@ -1532,7 +1533,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
goto err_dma;
} else {
- rx_q->dma_rx = dma_zalloc_coherent(priv->device,
+ rx_q->dma_rx = dma_zalloc_coherent(GET_MEM_PDEV_DEV,
DMA_RX_SIZE *
sizeof(struct
dma_desc),
@@ -1585,7 +1586,7 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
goto err_dma;
if (priv->extend_desc) {
- tx_q->dma_etx = dma_zalloc_coherent(priv->device,
+ tx_q->dma_etx = dma_zalloc_coherent(GET_MEM_PDEV_DEV,
DMA_TX_SIZE *
sizeof(struct
dma_extended_desc),
@@ -1594,7 +1595,7 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
if (!tx_q->dma_etx)
goto err_dma;
} else {
- tx_q->dma_tx = dma_zalloc_coherent(priv->device,
+ tx_q->dma_tx = dma_zalloc_coherent(GET_MEM_PDEV_DEV,
DMA_TX_SIZE *
sizeof(struct
dma_desc),
@@ -1866,12 +1867,12 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
if (tx_q->tx_skbuff_dma[entry].map_as_page)
- dma_unmap_page(priv->device,
+ dma_unmap_page(GET_MEM_PDEV_DEV,
tx_q->tx_skbuff_dma[entry].buf,
tx_q->tx_skbuff_dma[entry].len,
DMA_TO_DEVICE);
else
- dma_unmap_single(priv->device,
+ dma_unmap_single(GET_MEM_PDEV_DEV,
tx_q->tx_skbuff_dma[entry].buf,
tx_q->tx_skbuff_dma[entry].len,
DMA_TO_DEVICE);
@@ -2528,6 +2529,8 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
netdev_warn(priv->dev, "PTP not supported by HW\n");
else if (ret)
netdev_warn(priv->dev, "PTP init failed\n");
+ else
+ ret = clk_set_rate(priv->plat->clk_ptp_ref, 96000000);
}
#ifdef CONFIG_DEBUG_FS
@@ -2860,9 +2863,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
first = desc;
/* first descriptor: fill Headers on Buf1 */
- des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
+ des = dma_map_single(GET_MEM_PDEV_DEV, skb->data, skb_headlen(skb),
DMA_TO_DEVICE);
- if (dma_mapping_error(priv->device, des))
+ if (dma_mapping_error(GET_MEM_PDEV_DEV, des))
goto dma_map_err;
tx_q->tx_skbuff_dma[first_entry].buf = des;
@@ -2883,10 +2886,10 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < nfrags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- des = skb_frag_dma_map(priv->device, frag, 0,
+ des = skb_frag_dma_map(GET_MEM_PDEV_DEV, frag, 0,
skb_frag_size(frag),
DMA_TO_DEVICE);
- if (dma_mapping_error(priv->device, des))
+ if (dma_mapping_error(GET_MEM_PDEV_DEV, des))
goto dma_map_err;
stmmac_tso_allocator(priv, des, skb_frag_size(frag),
@@ -3081,9 +3084,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
else
desc = tx_q->dma_tx + entry;
- des = skb_frag_dma_map(priv->device, frag, 0, len,
+ des = skb_frag_dma_map(GET_MEM_PDEV_DEV, frag, 0, len,
DMA_TO_DEVICE);
- if (dma_mapping_error(priv->device, des))
+ if (dma_mapping_error(GET_MEM_PDEV_DEV, des))
goto dma_map_err; /* should reuse desc w/o issues */
tx_q->tx_skbuff[entry] = NULL;
@@ -3166,9 +3169,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(!is_jumbo)) {
bool last_segment = (nfrags == 0);
- des = dma_map_single(priv->device, skb->data,
+ des = dma_map_single(GET_MEM_PDEV_DEV, skb->data,
nopaged_len, DMA_TO_DEVICE);
- if (dma_mapping_error(priv->device, des))
+ if (dma_mapping_error(GET_MEM_PDEV_DEV, des))
goto dma_map_err;
tx_q->tx_skbuff_dma[first_entry].buf = des;
@@ -3282,9 +3285,9 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
rx_q->rx_skbuff[entry] = skb;
rx_q->rx_skbuff_dma[entry] =
- dma_map_single(priv->device, skb->data, bfsize,
+ dma_map_single(GET_MEM_PDEV_DEV, skb->data, bfsize,
DMA_FROM_DEVICE);
- if (dma_mapping_error(priv->device,
+ if (dma_mapping_error(GET_MEM_PDEV_DEV,
rx_q->rx_skbuff_dma[entry])) {
netdev_err(priv->dev, "Rx DMA map failed\n");
dev_kfree_skb(skb);
@@ -3391,7 +3394,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
*/
dev_kfree_skb_any(rx_q->rx_skbuff[entry]);
rx_q->rx_skbuff[entry] = NULL;
- dma_unmap_single(priv->device,
+ dma_unmap_single(GET_MEM_PDEV_DEV,
rx_q->rx_skbuff_dma[entry],
priv->dma_buf_sz,
DMA_FROM_DEVICE);
@@ -3457,7 +3460,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
break;
}
- dma_sync_single_for_cpu(priv->device,
+ dma_sync_single_for_cpu(GET_MEM_PDEV_DEV,
rx_q->rx_skbuff_dma
[entry], frame_len,
DMA_FROM_DEVICE);
@@ -3467,7 +3470,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
frame_len);
skb_put(skb, frame_len);
- dma_sync_single_for_device(priv->device,
+ dma_sync_single_for_device(GET_MEM_PDEV_DEV,
rx_q->rx_skbuff_dma
[entry], frame_len,
DMA_FROM_DEVICE);
@@ -3486,7 +3489,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
rx_q->rx_zeroc_thresh++;
skb_put(skb, frame_len);
- dma_unmap_single(priv->device,
+ dma_unmap_single(GET_MEM_PDEV_DEV,
rx_q->rx_skbuff_dma[entry],
priv->dma_buf_sz,
DMA_FROM_DEVICE);
@@ -3779,6 +3782,8 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
break;
case SIOCSHWTSTAMP:
ret = stmmac_hwtstamp_ioctl(dev, rq);
+ break;
+ case SIOCDEVPRIVATE:
+ ret = ethqos_handle_prv_ioctl(dev, rq, cmd);
break;
default:
break;
@@ -4275,7 +4280,7 @@ int stmmac_dvr_probe(struct device *device,
(8 * priv->plat->rx_queues_to_use));
}
- spin_lock_init(&priv->lock);
+ mutex_init(&priv->lock);
/* If a specific clk_csr value is passed from the platform
* this means that the CSR Clock Range selection cannot be
@@ -4356,6 +4361,7 @@ int stmmac_dvr_remove(struct device *dev)
priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
+ mutex_destroy(&priv->lock);
free_netdev(ndev);
return 0;
@@ -4381,7 +4387,7 @@ int stmmac_suspend(struct device *dev)
if (ndev->phydev)
phy_stop(ndev->phydev);
- spin_lock_irqsave(&priv->lock, flags);
+ mutex_lock(&priv->lock);
netif_device_detach(ndev);
stmmac_stop_all_queues(priv);
@@ -4402,7 +4408,7 @@ int stmmac_suspend(struct device *dev)
clk_disable(priv->plat->pclk);
clk_disable(priv->plat->stmmac_clk);
}
- spin_unlock_irqrestore(&priv->lock, flags);
+ mutex_unlock(&priv->lock);
priv->oldlink = false;
priv->speed = SPEED_UNKNOWN;
@@ -4458,9 +4464,9 @@ int stmmac_resume(struct device *dev)
* from another devices (e.g. serial console).
*/
if (device_may_wakeup(priv->device)) {
- spin_lock_irqsave(&priv->lock, flags);
+ mutex_lock(&priv->lock);
priv->hw->mac->pmt(priv->hw, 0);
- spin_unlock_irqrestore(&priv->lock, flags);
+ mutex_unlock(&priv->lock);
priv->irq_wake = 0;
} else {
pinctrl_pm_select_default_state(priv->device);
@@ -4474,7 +4480,7 @@ int stmmac_resume(struct device *dev)
netif_device_attach(ndev);
- spin_lock_irqsave(&priv->lock, flags);
+ mutex_lock(&priv->lock);
stmmac_reset_queues_param(priv);
@@ -4493,7 +4499,7 @@ int stmmac_resume(struct device *dev)
stmmac_start_all_queues(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
+ mutex_unlock(&priv->lock);
if (ndev->phydev)
phy_start(ndev->phydev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
index f4b31d69f60e..a030920cd398 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -67,5 +67,7 @@
/* SSIR defines */
#define PTP_SSIR_SSINC_MASK 0xff
#define GMAC4_PTP_SSIR_SSINC_SHIFT 16
+#define PTP_SSIR_SNSINC_MASK 0xff
+#define GMAC4_PTP_SSIR_SNSINC_SHIFT 8
#endif /* __STMMAC_PTP_H__ */
diff --git a/drivers/net/ppp/pppopns.c b/drivers/net/ppp/pppopns.c
index a01a07152c86..5aed96093ae8 100644
--- a/drivers/net/ppp/pppopns.c
+++ b/drivers/net/ppp/pppopns.c
@@ -75,7 +75,10 @@ static int pppopns_recv_core(struct sock *sk_raw, struct sk_buff *skb)
__u32 now = jiffies;
struct header *hdr;
- /* Skip transport header */
+ if (skb_linearize(skb))
+ goto drop;
+
+ /* Skip network header */
skb_pull(skb, skb_transport_header(skb) - skb->data);
/* Drop the packet if GRE header is missing. */
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 4afa40946dd1..5609d47d5ad5 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -850,6 +850,12 @@ static int cnss_qca6290_shutdown(struct cnss_pci_data *pci_priv)
cnss_power_off_device(plat_priv);
+ if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+ cnss_pr_dbg("recovery sleep start\n");
+ msleep(200);
+ cnss_pr_dbg("recovery sleep 200ms done\n");
+ }
+
pci_priv->remap_window = 0;
clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v3.h b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v3.h
index 0d0778e4fc7f..697a13f21b0f 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v3.h
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v3.h
@@ -273,7 +273,7 @@ static struct ufs_qcom_phy_calibration phy_cal_table_rate_A[] = {
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_RES_CODE_LANE_OFFSET_TX, 0x04),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_RES_CODE_LANE_OFFSET_RX, 0x07),
UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_MODE_00, 0x59),
- UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6D),
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6F),
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x0F),
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x02),
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SYM_RESYNC_CTRL, 0x03),
@@ -281,7 +281,7 @@ static struct ufs_qcom_phy_calibration phy_cal_table_rate_A[] = {
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_POST_EMP_LVL, 0x12),
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_POST_EMP_LVL, 0x0F),
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL1, 0x0F),
- UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_MIN_HIBERN8_TIME, 0x9A), /* 8 us */
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_MIN_HIBERN8_TIME, 0xFF),/* 13 us */
};
static struct ufs_qcom_phy_calibration phy_cal_table_2nd_lane[] = {
diff --git a/drivers/platform/msm/ep_pcie/ep_pcie.c b/drivers/platform/msm/ep_pcie/ep_pcie.c
index 9b5d2acbb22d..f560525e14f8 100644
--- a/drivers/platform/msm/ep_pcie/ep_pcie.c
+++ b/drivers/platform/msm/ep_pcie/ep_pcie.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015, 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -154,12 +154,13 @@ int ep_pcie_trigger_msi(struct ep_pcie_hw *phandle, u32 idx)
}
EXPORT_SYMBOL(ep_pcie_trigger_msi);
-int ep_pcie_wakeup_host(struct ep_pcie_hw *phandle)
+int ep_pcie_wakeup_host(struct ep_pcie_hw *phandle,
+ enum ep_pcie_event event)
{
if (WARN_ON(!phandle))
return -EINVAL;
- return phandle->wakeup_host();
+ return phandle->wakeup_host(event);
}
EXPORT_SYMBOL(ep_pcie_wakeup_host);
diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_core.c b/drivers/platform/msm/ep_pcie/ep_pcie_core.c
index 7800c88dd630..b8022aff3b3b 100644
--- a/drivers/platform/msm/ep_pcie/ep_pcie_core.c
+++ b/drivers/platform/msm/ep_pcie/ep_pcie_core.c
@@ -41,6 +41,7 @@
#define PCIE_MHI_STATUS(n) ((n) + 0x148)
#define TCSR_PERST_SEPARATION_ENABLE 0x270
+#define PCIE_ISSUE_WAKE 1
/* debug mask sys interface */
static int ep_pcie_debug_mask;
@@ -2576,9 +2577,36 @@ int ep_pcie_core_trigger_msi(u32 idx)
return EP_PCIE_ERROR;
}
-int ep_pcie_core_wakeup_host(void)
+static void ep_pcie_core_issue_inband_pme(void)
{
struct ep_pcie_dev_t *dev = &ep_pcie_dev;
+ unsigned long irqsave_flags;
+ u32 pm_ctrl = 0;
+
+ spin_lock_irqsave(&dev->isr_lock, irqsave_flags);
+
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: request to assert inband wake\n",
+ dev->rev);
+
+ pm_ctrl = readl_relaxed(dev->parf + PCIE20_PARF_PM_CTRL);
+ ep_pcie_write_reg(dev->parf, PCIE20_PARF_PM_CTRL,
+ (pm_ctrl | BIT(4)));
+ ep_pcie_write_reg(dev->parf, PCIE20_PARF_PM_CTRL, pm_ctrl);
+
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: completed assert for inband wake\n",
+ dev->rev);
+
+ spin_unlock_irqrestore(&dev->isr_lock, irqsave_flags);
+}
+
+static int ep_pcie_core_wakeup_host(enum ep_pcie_event event)
+{
+ struct ep_pcie_dev_t *dev = &ep_pcie_dev;
+
+ if (event == EP_PCIE_EVENT_PM_D3_HOT)
+ ep_pcie_core_issue_inband_pme();
if (dev->perst_deast && !dev->l23_ready) {
EP_PCIE_ERR(dev,
diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_dbg.c b/drivers/platform/msm/ep_pcie/ep_pcie_dbg.c
index e6a2ef96cb42..3ca161997271 100644
--- a/drivers/platform/msm/ep_pcie/ep_pcie_dbg.c
+++ b/drivers/platform/msm/ep_pcie/ep_pcie_dbg.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -357,7 +357,7 @@ static ssize_t ep_pcie_cmd_debug(struct file *file,
ep_pcie_config_outbound_iatu(phandle, entries, 2);
break;
case 13: /* wake up the host */
- ep_pcie_wakeup_host(phandle);
+ ep_pcie_wakeup_host(phandle, EP_PCIE_EVENT_PM_D3_HOT);
break;
case 14: /* Configure routing of doorbells */
ep_pcie_config_db_routing(phandle, chdb_cfg, erdb_cfg);
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
index 0d1c092e9b26..668b4ec7057b 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
@@ -1474,7 +1474,8 @@ static int ipa_mhi_reset_dl_channel(struct ipa_mhi_channel_ctx *channel)
return 0;
}
-static int ipa_mhi_reset_channel(struct ipa_mhi_channel_ctx *channel)
+static int ipa_mhi_reset_channel(struct ipa_mhi_channel_ctx *channel,
+ bool update_state)
{
int res;
@@ -1490,7 +1491,8 @@ static int ipa_mhi_reset_channel(struct ipa_mhi_channel_ctx *channel)
channel->state = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
- if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+ if ((ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) &&
+ update_state) {
res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
&channel->state, channel->channel_context_addr +
offsetof(struct ipa_mhi_ch_ctx, chstate),
@@ -1654,7 +1656,7 @@ int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
return 0;
fail_connect_pipe:
mutex_unlock(&mhi_client_general_mutex);
- ipa_mhi_reset_channel(channel);
+ ipa_mhi_reset_channel(channel, true);
fail_start_channel:
IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
return -EPERM;
@@ -1702,7 +1704,7 @@ int ipa_mhi_disconnect_pipe(u32 clnt_hdl)
IPA_ACTIVE_CLIENTS_INC_EP(ipa_get_client_mapping(clnt_hdl));
- res = ipa_mhi_reset_channel(channel);
+ res = ipa_mhi_reset_channel(channel, false);
if (res) {
IPA_MHI_ERR("ipa_mhi_reset_channel failed %d\n", res);
goto fail_reset_channel;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index 18cf909ac8ed..75926334c79b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -1428,6 +1428,9 @@ int ipa3_release_gsi_channel(u32 clnt_hdl)
if (!ep->keep_ipa_awake)
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+ /* Set the disconnect in progress flag to avoid calling cb.*/
+ atomic_set(&ep->disconnect_in_progress, 1);
+
gsi_res = gsi_dealloc_channel(ep->gsi_chan_hdl);
if (gsi_res != GSI_STATUS_SUCCESS) {
IPAERR("Error deallocating channel: %d\n", gsi_res);
@@ -1735,9 +1738,7 @@ int ipa3_clear_endpoint_delay(u32 clnt_hdl)
/* Set disconnect in progress flag so further flow control events are
* not honored.
*/
- spin_lock(&ipa3_ctx->disconnect_lock);
- ep->disconnect_in_progress = true;
- spin_unlock(&ipa3_ctx->disconnect_lock);
+ atomic_set(&ep->disconnect_in_progress, 1);
/* If flow is disabled at this point, restore the ep state.*/
ep_ctrl.ipa_ep_delay = false;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 5cd2df4d8152..23bb31be6c7a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -2669,25 +2669,26 @@ begin:
sys->status_stat->curr = 0;
}
- if ((status.status_opcode !=
- IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET) &&
- (status.status_opcode !=
- IPAHAL_PKT_STATUS_OPCODE_PACKET) &&
- (status.status_opcode !=
- IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET) &&
- (status.status_opcode !=
- IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS)) {
- IPAERR("unsupported opcode(%d)\n",
+ switch (status.status_opcode) {
+ case IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET:
+ case IPAHAL_PKT_STATUS_OPCODE_PACKET:
+ case IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET:
+ case IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS:
+ case IPAHAL_PKT_STATUS_OPCODE_NEW_FRAG_RULE:
+ break;
+ default:
+ IPAERR_RL("unsupported opcode(%d)\n",
status.status_opcode);
skb_pull(skb, pkt_status_sz);
continue;
}
+
IPA_STATS_EXCP_CNT(status.exception,
ipa3_ctx->stats.rx_excp_pkts);
if (status.endp_dest_idx >= ipa3_ctx->ipa_num_pipes ||
status.endp_src_idx >= ipa3_ctx->ipa_num_pipes) {
- IPAERR("status fields invalid\n");
- IPAERR("STATUS opcode=%d src=%d dst=%d len=%d\n",
+ IPAERR_RL("status fields invalid\n");
+ IPAERR_RL("STATUS opcode=%d src=%d dst=%d len=%d\n",
status.status_opcode, status.endp_src_idx,
status.endp_dest_idx, status.pkt_len);
WARN_ON(1);
@@ -3082,11 +3083,8 @@ void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
metadata = status.metadata;
ucp = status.ucp;
ep = &ipa3_ctx->ep[src_pipe];
- if (unlikely(src_pipe >= ipa3_ctx->ipa_num_pipes ||
- !ep->valid ||
- !ep->client_notify)) {
- IPAERR_RL("drop pipe=%d ep_valid=%d client_notify=%pK\n",
- src_pipe, ep->valid, ep->client_notify);
+ if (unlikely(src_pipe >= ipa3_ctx->ipa_num_pipes)) {
+ IPAERR("drop pipe=%d\n", src_pipe);
dev_kfree_skb_any(rx_skb);
return;
}
@@ -3108,7 +3106,12 @@ void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
metadata, *(u32 *)rx_skb->cb);
IPADBG_LOW("ucp: %d\n", *(u8 *)(rx_skb->cb + 4));
- ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
+ if (likely((!atomic_read(&ep->disconnect_in_progress)) &&
+ ep->valid && ep->client_notify))
+ ep->client_notify(ep->priv, IPA_RECEIVE,
+ (unsigned long)(rx_skb));
+ else
+ dev_kfree_skb_any(rx_skb);
}
static void ipa3_recycle_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
@@ -3720,7 +3723,16 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
} else if (in->client == IPA_CLIENT_ODL_DPL_CONS) {
IPADBG("assigning policy to ODL client:%d\n",
in->client);
- sys->ep->status.status_en = true;
+ /* Status enabling is needed for DPLv2 with
+ * IPA versions < 4.5.
+ * Dont enable ipa_status for APQ, since MDM IPA
+ * has IPA >= 4.5 with DPLv3.
+ */
+ if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ &&
+ ipa3_is_mhip_offload_enabled())
+ sys->ep->status.status_en = false;
+ else
+ sys->ep->status.status_en = true;
sys->policy = IPA_POLICY_INTR_POLL_MODE;
INIT_WORK(&sys->work, ipa3_wq_handle_rx);
INIT_DELAYED_WORK(&sys->switch_to_intr_work,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 1e05cb1bfccc..bcec43951374 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -916,7 +916,7 @@ struct ipa3_ep_context {
struct ipa3_wlan_stats wstats;
u32 uc_offload_state;
u32 gsi_offload_state;
- bool disconnect_in_progress;
+ atomic_t disconnect_in_progress;
u32 qmi_request_sent;
u32 eot_in_poll_err;
bool ep_delay_set;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c
index d5de3513e494..c840e1d817fa 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi_proxy.c
@@ -648,7 +648,8 @@ struct ipa_mhi_clk_vote_resp_msg_v01
* executed from mhi context.
*/
if (vote) {
- ret = mhi_device_get_sync(imp_ctx->md.mhi_dev, MHI_VOTE_BUS);
+ ret = mhi_device_get_sync(imp_ctx->md.mhi_dev,
+ MHI_VOTE_BUS | MHI_VOTE_DEVICE);
if (ret) {
IMP_ERR("mhi_sync_get failed %d\n", ret);
resp->resp.result = IPA_QMI_RESULT_FAILURE_V01;
@@ -658,7 +659,8 @@ struct ipa_mhi_clk_vote_resp_msg_v01
return resp;
}
} else {
- mhi_device_put(imp_ctx->md.mhi_dev, MHI_VOTE_BUS);
+ mhi_device_put(imp_ctx->md.mhi_dev,
+ MHI_VOTE_BUS | MHI_VOTE_DEVICE);
}
mutex_lock(&imp_ctx->mutex);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
index f1243e1ccf10..62f24009e6ab 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
@@ -2187,8 +2187,12 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
case IPA_MPM_TETH_INIT:
if (ul_prod != IPA_CLIENT_MAX) {
/* No teth started yet, disable UL channel */
- ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
- probe_id, MPM_MHIP_STOP);
+ ret = ipa3_stop_gsi_channel(ipa_ep_idx);
+ if (ret) {
+ IPA_MPM_ERR("MHIP Stop channel err = %d\n",
+ ret);
+ goto fail_stop_channel;
+ }
}
if (is_acted)
ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id,
@@ -2228,6 +2232,7 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
fail_gsi_setup:
fail_start_channel:
+fail_stop_channel:
fail_smmu:
if (ipa_mpm_ctx->dev_info.ipa_smmu_enabled)
IPA_MPM_DBG("SMMU failed\n");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c
index 2c2c2336b3b5..01bb8f3fe3e7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_odl.c
@@ -294,10 +294,6 @@ int ipa_setup_odl_pipe(void)
ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr_hard_byte_limit_en = 1;
- if (ipa3_is_mhip_offload_enabled()) {
- IPADBG("MHIP is enabled, disable aggregation for ODL pipe");
- ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
- }
ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr_byte_limit =
IPA_ODL_AGGR_BYTE_LIMIT;
@@ -323,6 +319,19 @@ int ipa_setup_odl_pipe(void)
ipa_odl_ep_cfg->desc_fifo_sz = IPA_ODL_RX_RING_SIZE *
IPA_FIFO_ELEMENT_SIZE;
ipa3_odl_ctx->odl_client_hdl = -1;
+
+ /* For MHIP, the ODL path is a pure DMA pipe, so bypass aggregation and
+ * checksum offload and clear hdr_len.
+ */
+ if (ipa3_ctx->platform_type == IPA_PLAT_TYPE_APQ &&
+ ipa3_is_mhip_offload_enabled()) {
+ IPADBG("MHIP enabled: bypass aggr + csum offload for ODL");
+ ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
+ ipa_odl_ep_cfg->ipa_ep_cfg.cfg.cs_offload_en =
+ IPA_DISABLE_CS_OFFLOAD;
+ ipa_odl_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 0;
+ }
+
ret = ipa3_setup_sys_pipe(ipa_odl_ep_cfg,
&ipa3_odl_ctx->odl_client_hdl);
return ret;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index e8ddd44703a1..30169a554fee 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -1548,6 +1548,11 @@ static void ipa3_q6_clnt_svc_arrive(struct work_struct *work)
ind.master_driver_init_status.result =
IPA_QMI_RESULT_SUCCESS_V01;
+ if (unlikely(!ipa3_svc_handle)) {
+ IPAWANERR("Invalid svc handle.Ignore sending ind.");
+ return;
+ }
+
rc = qmi_send_indication(ipa3_svc_handle,
&ipa3_qmi_ctx->client_sq,
QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_V01,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
index a5006b163817..0da387ec3f22 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
@@ -302,6 +302,7 @@ static void ipa3_uc_save_dbg_stats(u32 size)
break;
default:
IPAERR("unknown protocols %d\n", prot_id);
+ goto unmap;
}
return;
unmap:
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
index ffe4a32c252d..c35c54e78e86 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
@@ -556,6 +556,9 @@ int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul,
return -EFAULT;
}
+ atomic_set(&ep_ul->disconnect_in_progress, 1);
+ atomic_set(&ep_dl->disconnect_in_progress, 1);
+
if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
cmd.size = sizeof(*cmd_data_v4_0);
else
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 1aaf53febb0b..e60402b0cd55 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -2471,6 +2471,12 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
QMB_MASTER_SELECT_DDR,
{ 7, 9, 20, 24, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+ [IPA_4_5_MHI][IPA_CLIENT_APPS_WAN_PROD] = {
+ true, IPA_v4_5_MHI_GROUP_DDR,
+ true,
+ IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+ QMB_MASTER_SELECT_DDR,
+ { 2, 7, 16, 32, IPA_EE_AP, GSI_SMART_PRE_FETCH, 7 } },
[IPA_4_5_MHI][IPA_CLIENT_Q6_WAN_PROD] = {
true, IPA_v4_5_MHI_GROUP_DDR,
true,
@@ -2527,6 +2533,12 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 16, 10, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
+ [IPA_4_5_MHI][IPA_CLIENT_APPS_WAN_CONS] = {
+ true, IPA_v4_5_MHI_GROUP_DDR,
+ false,
+ IPA_DPS_HPS_SEQ_TYPE_INVALID,
+ QMB_MASTER_SELECT_DDR,
+ { 25, 16, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
[IPA_4_5_MHI][IPA_CLIENT_USB_DPL_CONS] = {
true, IPA_v4_5_MHI_GROUP_DDR,
false,
@@ -7229,15 +7241,11 @@ static int __ipa3_stop_gsi_channel(u32 clnt_hdl)
client_type);
}
}
- if (IPA_CLIENT_IS_PROD(ep->client)) {
- IPADBG("Calling gsi_stop_channel ch:%lu\n",
- ep->gsi_chan_hdl);
- res = gsi_stop_channel(ep->gsi_chan_hdl);
- IPADBG("gsi_stop_channel ch: %lu returned %d\n",
- ep->gsi_chan_hdl, res);
- return res;
- }
+ /*
+ * Apply the GSI stop retry logic whenever GSI returns an error code
+ * asking for a retry; the retry now applies to both producer and
+ * consumer clients.
+ */
for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
IPADBG("Calling gsi_stop_channel ch:%lu\n",
ep->gsi_chan_hdl);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
index eb1e81f9f0b7..27ad3c2a47bf 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -933,9 +933,8 @@ static void ipa_pkt_status_parse(
opcode = IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS;
break;
default:
- IPAHAL_ERR("unsupported Status Opcode 0x%x\n",
+ IPAHAL_ERR_RL("unsupported Status Opcode 0x%x\n",
hw_status->status_opcode);
- WARN_ON(1);
};
status->status_opcode = opcode;
@@ -950,9 +949,8 @@ static void ipa_pkt_status_parse(
status->nat_type = IPAHAL_PKT_STATUS_NAT_DST;
break;
default:
- IPAHAL_ERR("unsupported Status NAT type 0x%x\n",
+ IPAHAL_ERR_RL("unsupported Status NAT type 0x%x\n",
hw_status->nat_type);
- WARN_ON(1);
};
switch (hw_status->exception) {
@@ -984,9 +982,8 @@ static void ipa_pkt_status_parse(
exception_type = IPAHAL_PKT_STATUS_EXCEPTION_CSUM;
break;
default:
- IPAHAL_ERR("unsupported Status Exception type 0x%x\n",
+ IPAHAL_ERR_RL("unsupported Status Exception type 0x%x\n",
hw_status->exception);
- WARN_ON(1);
};
status->exception = exception_type;
diff --git a/drivers/platform/msm/mhi_dev/mhi.c b/drivers/platform/msm/mhi_dev/mhi.c
index 99549ff37346..1730526712db 100644
--- a/drivers/platform/msm/mhi_dev/mhi.c
+++ b/drivers/platform/msm/mhi_dev/mhi.c
@@ -1686,9 +1686,6 @@ static int mhi_dev_abort(struct mhi_dev *mhi)
flush_workqueue(mhi->ring_init_wq);
flush_workqueue(mhi->pending_ring_wq);
- /* Initiate MHI IPA reset */
- ipa_mhi_destroy();
-
/* Clean up initialized channels */
rc = mhi_deinit(mhi);
if (rc) {
@@ -2282,6 +2279,7 @@ free_ereqs:
ch->ereqs = NULL;
free_client:
kfree(*handle_client);
+ *handle_client = NULL;
exit:
mutex_unlock(&ch->ch_lock);
return rc;
@@ -2293,6 +2291,11 @@ int mhi_dev_channel_isempty(struct mhi_dev_client *handle)
struct mhi_dev_channel *ch;
int rc;
+ if (!handle) {
+ mhi_log(MHI_MSG_ERROR, "Invalid channel access\n");
+ return -EINVAL;
+ }
+
ch = handle->channel;
if (!ch)
return -EINVAL;
@@ -2308,6 +2311,11 @@ int mhi_dev_close_channel(struct mhi_dev_client *handle)
struct mhi_dev_channel *ch;
int rc = 0;
+ if (!handle) {
+ mhi_log(MHI_MSG_ERROR, "Invalid channel access\n");
+ return -EINVAL;
+ }
+
ch = handle->channel;
mutex_lock(&ch->ch_lock);
diff --git a/drivers/platform/msm/mhi_dev/mhi_sm.c b/drivers/platform/msm/mhi_dev/mhi_sm.c
index 7843aa3e10d5..59bf441a827b 100644
--- a/drivers/platform/msm/mhi_dev/mhi_sm.c
+++ b/drivers/platform/msm/mhi_dev/mhi_sm.c
@@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/debugfs.h>
#include <linux/ipa_mhi.h>
+#include <linux/msm_ep_pcie.h>
#include "mhi_hwio.h"
#include "mhi_sm.h"
#include <linux/interrupt.h>
@@ -599,15 +600,22 @@ exit:
static int mhi_sm_wakeup_host(enum mhi_dev_event event)
{
int res = 0;
+ enum ep_pcie_event pcie_event;
MHI_SM_FUNC_ENTRY();
if (mhi_sm_ctx->mhi_state == MHI_DEV_M3_STATE) {
/*
- * ep_pcie driver is responsible to send the right wakeup
- * event, assert WAKE#, according to Link state
+ * Check and send D3_HOT to enable waking up the host
+ * using inband PME.
*/
- res = ep_pcie_wakeup_host(mhi_sm_ctx->mhi_dev->phandle);
+ if (mhi_sm_ctx->d_state == MHI_SM_EP_PCIE_D3_HOT_STATE)
+ pcie_event = EP_PCIE_EVENT_PM_D3_HOT;
+ else
+ pcie_event = EP_PCIE_EVENT_PM_D3_COLD;
+
+ res = ep_pcie_wakeup_host(mhi_sm_ctx->mhi_dev->phandle,
+ pcie_event);
if (res) {
MHI_SM_ERR("Failed to wakeup MHI host, returned %d\n",
res);
@@ -960,6 +968,8 @@ int mhi_dev_sm_exit(struct mhi_dev *mhi_dev)
mhi_sm_debugfs_destroy();
flush_workqueue(mhi_sm_ctx->mhi_sm_wq);
destroy_workqueue(mhi_sm_ctx->mhi_sm_wq);
+ /* Initiate MHI IPA reset */
+ ipa_mhi_destroy();
ipa_dma_destroy();
mutex_destroy(&mhi_sm_ctx->mhi_state_lock);
devm_kfree(mhi_dev->dev, mhi_sm_ctx);
diff --git a/drivers/platform/msm/mhi_dev/mhi_uci.c b/drivers/platform/msm/mhi_dev/mhi_uci.c
index 4e4c66b1e526..6cf93af9e4b1 100644
--- a/drivers/platform/msm/mhi_dev/mhi_uci.c
+++ b/drivers/platform/msm/mhi_dev/mhi_uci.c
@@ -151,7 +151,7 @@ static const struct chan_attr uci_chan_attr_table[] = {
TRB_MAX_DATA_SIZE,
MAX_NR_TRBS_PER_CHAN,
MHI_DIR_OUT,
- mhi_uci_generic_client_cb,
+ NULL,
NULL,
NULL,
false,
@@ -162,7 +162,7 @@ static const struct chan_attr uci_chan_attr_table[] = {
TRB_MAX_DATA_SIZE,
MAX_NR_TRBS_PER_CHAN,
MHI_DIR_IN,
- mhi_uci_generic_client_cb,
+ NULL,
NULL,
NULL,
false,
@@ -914,9 +914,14 @@ static int mhi_uci_client_open(struct inode *mhi_inode,
"Opening channels client %d\n",
iminor(mhi_inode));
rc = open_client_mhi_channels(uci_handle);
- if (rc) {
+ if (rc < 0) {
uci_log(UCI_DBG_INFO,
"Failed to open channels ret %d\n", rc);
+ if (atomic_sub_return(1, &uci_handle->ref_count)
+ == 0) {
+ uci_log(UCI_DBG_INFO,
+ "Closing failed channel\n");
+ }
return rc;
}
}
@@ -1148,7 +1153,16 @@ static ssize_t mhi_uci_client_read(struct file *file, char __user *ubuf,
ssize_t bytes_copied = 0;
u32 addr_offset = 0;
+ if (!file || !ubuf || !file->private_data) {
+ uci_log(UCI_DBG_DBG, "Invalid access to read\n");
+ return -EINVAL;
+ }
+
uci_handle = file->private_data;
+ if (!uci_handle->read || !uci_handle->in_handle) {
+ uci_log(UCI_DBG_DBG, "Invalid inhandle or read\n");
+ return -EINVAL;
+ }
mutex = &uci_handle->in_chan_lock;
mutex_lock(mutex);
@@ -1216,12 +1230,16 @@ static ssize_t mhi_uci_client_write(struct file *file,
unsigned long memcpy_result;
int rc;
- if (file == NULL || buf == NULL ||
- !count || file->private_data == NULL)
+ if (!file || !buf || !count || !file->private_data) {
+ uci_log(UCI_DBG_DBG, "Invalid access to write\n");
return -EINVAL;
+ }
uci_handle = file->private_data;
-
+ if (!uci_handle->send || !uci_handle->out_handle) {
+ uci_log(UCI_DBG_DBG, "Invalid handle or send\n");
+ return -EINVAL;
+ }
if (atomic_read(&uci_ctxt.mhi_disabled)) {
uci_log(UCI_DBG_ERROR,
"Client %d attempted to write while MHI is disabled\n",
diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c
index 835899e37842..89f461de952d 100644
--- a/drivers/platform/msm/qcom-geni-se.c
+++ b/drivers/platform/msm/qcom-geni-se.c
@@ -45,6 +45,8 @@
#define MAX_CLK_PERF_LEVEL 32
static unsigned long default_bus_bw_set[] = {0, 19200000, 50000000,
100000000, 150000000, 200000000, 236000000};
+/* SCM Call Id */
+#define SSR_SCM_CMD 0x1
struct bus_vectors {
int src;
@@ -89,6 +91,7 @@ struct bus_vectors {
* @update: Usecase index for icb voting.
* @vote_for_bw: To check if we have to vote for BW or BCM threashold
in ab/ib ICB voting.
+ * @ssr: SSC QUPv3 SSR context (struct ssc_qup_ssr).
*/
struct geni_se_device {
struct device *dev;
@@ -125,6 +128,7 @@ struct geni_se_device {
struct msm_bus_scale_pdata *pdata;
int update;
bool vote_for_bw;
+ struct ssc_qup_ssr ssr;
};
#define HW_VER_MAJOR_MASK GENMASK(31, 28)
@@ -351,6 +355,101 @@ static int geni_se_select_fifo_mode(void __iomem *base)
return 0;
}
+static ssize_t ssc_qup_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct geni_se_device *geni_se_dev = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ !geni_se_dev->ssr.is_ssr_down);
+}
+
+static DEVICE_ATTR_RO(ssc_qup_state);
+
+static void geni_se_ssc_qup_down(struct geni_se_device *dev)
+{
+ struct se_geni_rsc *rsc = NULL;
+
+ dev->ssr.is_ssr_down = true;
+ list_for_each_entry(rsc, &dev->ssr.active_list_head,
+ rsc_ssr.active_list) {
+ rsc->rsc_ssr.force_suspend(rsc->ctrl_dev);
+ }
+}
+
+static void geni_se_ssc_qup_up(struct geni_se_device *dev)
+{
+ int ret = 0;
+ struct scm_desc desc;
+ struct se_geni_rsc *rsc = NULL;
+
+ /* Pass a dummy argument, as required by the SCM call interface */
+ desc.args[0] = 0x0;
+ desc.arginfo = SCM_ARGS(1, SCM_VAL);
+
+ ret = scm_call2(SCM_SIP_FNID(TZ_SVC_QUP_FW_LOAD, SSR_SCM_CMD), &desc);
+ if (ret) {
+ dev_err(dev->dev, "Unable to load firmware after SSR\n");
+ return;
+ }
+
+ list_for_each_entry(rsc, &dev->ssr.active_list_head,
+ rsc_ssr.active_list) {
+ rsc->rsc_ssr.force_resume(rsc->ctrl_dev);
+ }
+
+ dev->ssr.is_ssr_down = false;
+}
+
+static int geni_se_ssr_notify_block(struct notifier_block *n,
+ unsigned long code, void *_cmd)
+{
+ struct ssc_qup_nb *ssc_qup_nb = container_of(n, struct ssc_qup_nb, nb);
+ struct ssc_qup_ssr *ssr = container_of(ssc_qup_nb, struct ssc_qup_ssr,
+ ssc_qup_nb);
+ struct geni_se_device *dev = container_of(ssr, struct geni_se_device,
+ ssr);
+
+ switch (code) {
+ case SUBSYS_BEFORE_SHUTDOWN:
+ geni_se_ssc_qup_down(dev);
+ GENI_SE_DBG(dev->log_ctx, false, NULL,
+ "SSR notification before power down\n");
+ break;
+ case SUBSYS_AFTER_POWERUP:
+ if (dev->ssr.probe_completed)
+ geni_se_ssc_qup_up(dev);
+ else
+ dev->ssr.probe_completed = true;
+
+ GENI_SE_DBG(dev->log_ctx, false, NULL,
+ "SSR notification after power up\n");
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int geni_se_ssc_qup_ssr_reg(struct geni_se_device *dev)
+{
+ dev->ssr.ssc_qup_nb.nb.notifier_call = geni_se_ssr_notify_block;
+ dev->ssr.ssc_qup_nb.next = subsys_notif_register_notifier(
+ dev->ssr.subsys_name, &dev->ssr.ssc_qup_nb.nb);
+
+ if (IS_ERR_OR_NULL(dev->ssr.ssc_qup_nb.next)) {
+ dev_err(dev->dev,
+ "subsys_notif_register_notifier failed %ld\n",
+ PTR_ERR(dev->ssr.ssc_qup_nb.next));
+ return PTR_ERR(dev->ssr.ssc_qup_nb.next);
+ }
+
+ GENI_SE_DBG(dev->log_ctx, false, NULL, "SSR registration done\n");
+
+ return 0;
+}
+
static int geni_se_select_dma_mode(void __iomem *base)
{
int proto = get_se_proto(base);
@@ -1096,6 +1195,12 @@ int geni_se_resources_init(struct se_geni_rsc *rsc,
INIT_LIST_HEAD(&rsc->ab_list);
INIT_LIST_HEAD(&rsc->ib_list);
+ if (geni_se_dev->ssr.subsys_name && rsc->rsc_ssr.ssr_enable) {
+ INIT_LIST_HEAD(&rsc->rsc_ssr.active_list);
+ list_add(&rsc->rsc_ssr.active_list,
+ &geni_se_dev->ssr.active_list_head);
+ }
+
ret = geni_se_iommu_map_and_attach(geni_se_dev);
if (ret)
GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
@@ -1836,13 +1941,27 @@ static int geni_se_probe(struct platform_device *pdev)
ret = of_platform_populate(dev->of_node, geni_se_dt_match, NULL, dev);
if (ret) {
dev_err(dev, "%s: Error populating children\n", __func__);
- devm_iounmap(dev, geni_se_dev->base);
- devm_kfree(dev, geni_se_dev);
+ return ret;
+ }
+
+ ret = of_property_read_string(geni_se_dev->dev->of_node,
+ "qcom,subsys-name", &geni_se_dev->ssr.subsys_name);
+ if (!ret) {
+ INIT_LIST_HEAD(&geni_se_dev->ssr.active_list_head);
+ geni_se_dev->ssr.probe_completed = false;
+ ret = geni_se_ssc_qup_ssr_reg(geni_se_dev);
+ if (ret) {
+ dev_err(dev, "Unable to register SSR notification\n");
+ return ret;
+ }
+
+ sysfs_create_file(&geni_se_dev->dev->kobj,
+ &dev_attr_ssc_qup_state.attr);
}
GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL,
"%s: Probe successful\n", __func__);
- return ret;
+ return 0;
}
static int geni_se_remove(struct platform_device *pdev)
@@ -1854,6 +1973,13 @@ static int geni_se_remove(struct platform_device *pdev)
arm_iommu_detach_device(geni_se_dev->cb_dev);
arm_iommu_release_mapping(geni_se_dev->iommu_map);
}
+ if (geni_se_dev->ssr.subsys_name) {
+ subsys_notif_unregister_notifier(
+ geni_se_dev->ssr.ssc_qup_nb.next,
+ &geni_se_dev->ssr.ssc_qup_nb.nb);
+ sysfs_remove_file(&geni_se_dev->dev->kobj,
+ &dev_attr_ssc_qup_state.attr);
+ }
ipc_log_context_destroy(geni_se_dev->log_ctx);
devm_iounmap(dev, geni_se_dev->base);
devm_kfree(dev, geni_se_dev);
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 89b28ae38d7d..36a29ad49289 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -197,6 +197,9 @@ static int cp_get_parallel_mode(struct pl_data *chip, int mode)
*/
static void cp_configure_ilim(struct pl_data *chip, const char *voter, int ilim)
{
+ int rc, fcc;
+ union power_supply_propval pval = {0, };
+
if (!is_cp_available(chip))
return;
@@ -204,11 +207,28 @@ static void cp_configure_ilim(struct pl_data *chip, const char *voter, int ilim)
== POWER_SUPPLY_PL_OUTPUT_VPH)
return;
+ rc = power_supply_get_property(chip->cp_master_psy,
+ POWER_SUPPLY_PROP_MIN_ICL, &pval);
+ if (rc < 0)
+ return;
+
if (!chip->cp_ilim_votable)
chip->cp_ilim_votable = find_votable("CP_ILIM");
- if (chip->cp_ilim_votable)
- vote(chip->cp_ilim_votable, voter, true, ilim);
+ if (chip->cp_ilim_votable) {
+ fcc = get_effective_result_locked(chip->fcc_votable);
+ /*
+ * If FCC >= (2 * MIN_ICL) then it is safe to enable CP
+ * with MIN_ICL.
+ * Configure ILIM as follows:
+ * if requested_ilim < MIN_ICL, configure ILIM to MIN_ICL;
+ * otherwise configure ILIM to requested_ilim.
+ */
+ if ((fcc >= (pval.intval * 2)) && (ilim < pval.intval))
+ vote(chip->cp_ilim_votable, voter, true, pval.intval);
+ else
+ vote(chip->cp_ilim_votable, voter, true, ilim);
+ }
}
/*******
@@ -622,7 +642,7 @@ static void get_fcc_stepper_params(struct pl_data *chip, int main_fcc_ua,
|| (total_fcc_ua != chip->total_fcc_ua)) {
chip->override_main_fcc_ua = main_set_fcc_ua;
chip->total_fcc_ua = total_fcc_ua;
- parallel_fcc_ua += (main_fcc_ua
+ parallel_fcc_ua = (total_fcc_ua
- chip->override_main_fcc_ua);
} else {
goto skip_fcc_step_update;
@@ -1367,6 +1387,13 @@ static int pl_disable_vote_callback(struct votable *votable,
if (chip->step_fcc) {
vote(chip->pl_awake_votable, FCC_STEPPER_VOTER,
true, 0);
+ /*
+ * Configure ILIM above min ILIM of CP to
+ * ensure CP is not disabled due to ILIM vote.
+ * Later the FCC stepper will take the ILIM to the
+ * target value.
+ */
+ cp_configure_ilim(chip, FCC_VOTER, 0);
schedule_delayed_work(&chip->fcc_stepper_work,
0);
}
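The ILIM selection rule described in the cp_configure_ilim() comment above boils down to a simple clamp. A minimal standalone sketch, using made-up currents and a hypothetical helper name (pick_cp_ilim) rather than the driver's votable API:

    #include <stdio.h>

    /*
     * Illustrative only: mirrors the clamping rule in cp_configure_ilim(),
     * not the actual votable plumbing. All values are in microamps.
     */
    static int pick_cp_ilim(int fcc_ua, int min_icl_ua, int requested_ua)
    {
            if (fcc_ua >= 2 * min_icl_ua && requested_ua < min_icl_ua)
                    return min_icl_ua;      /* safe to run the CP at its minimum ICL */
            return requested_ua;            /* otherwise honour the requested limit */
    }

    int main(void)
    {
            /* hypothetical numbers: FCC 6 A, MIN_ICL 2 A, request 1.5 A -> clamp to 2 A */
            printf("%d\n", pick_cp_ilim(6000000, 2000000, 1500000));
            /* a request above MIN_ICL passes through unchanged */
            printf("%d\n", pick_cp_ilim(6000000, 2000000, 2500000));
            return 0;
    }

Run as-is this prints 2000000 and then 2500000: a request below the charge pump's minimum ICL is raised to that minimum only when the FCC budget can cover twice the minimum.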
diff --git a/drivers/power/supply/qcom/smb1390-charger-psy.c b/drivers/power/supply/qcom/smb1390-charger-psy.c
index 009294c4f82f..2eff0891a32e 100644
--- a/drivers/power/supply/qcom/smb1390-charger-psy.c
+++ b/drivers/power/supply/qcom/smb1390-charger-psy.c
@@ -1042,9 +1042,13 @@ static void smb1390_status_change_work(struct work_struct *work)
if (!is_psy_voter_available(chip))
goto out;
- if (!smb1390_is_adapter_cc_mode(chip))
- vote(chip->disable_votable, SOC_LEVEL_VOTER,
- smb1390_is_batt_soc_valid(chip) ? false : true, 0);
+ /*
+ * If batt soc is not valid upon bootup, but becomes
+ * valid due to the battery discharging later, remove
+ * vote from SOC_LEVEL_VOTER.
+ */
+ if (smb1390_is_batt_soc_valid(chip))
+ vote(chip->disable_votable, SOC_LEVEL_VOTER, false, 0);
rc = power_supply_get_property(chip->usb_psy,
POWER_SUPPLY_PROP_SMB_EN_MODE, &pval);
@@ -1152,11 +1156,14 @@ static int smb1390_validate_slave_chg_taper(struct smb1390 *chip, int fcc_uA)
smb1390_dbg(chip, PR_INFO, "Set Master ILIM to MAX, post Slave disable in taper, fcc=%d\n",
fcc_uA);
vote_override(chip->ilim_votable, CC_MODE_VOTER,
- true, MAX_ILIM_DUAL_CP_UA);
+ smb1390_is_adapter_cc_mode(chip),
+ MAX_ILIM_DUAL_CP_UA);
+
if (chip->usb_icl_votable)
vote_override(chip->usb_icl_votable,
TAPER_MAIN_ICL_LIMIT_VOTER,
- true, chip->cc_mode_taper_main_icl_ua);
+ smb1390_is_adapter_cc_mode(chip),
+ chip->cc_mode_taper_main_icl_ua);
}
return rc;
@@ -1380,7 +1387,7 @@ static int smb1390_set_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_CP_ILIM:
if (chip->ilim_votable)
vote_override(chip->ilim_votable, CC_MODE_VOTER,
- true, val->intval);
+ (val->intval > 0), val->intval);
break;
default:
smb1390_dbg(chip, PR_MISC, "charge pump power supply set prop %d not supported\n",
diff --git a/drivers/scsi/ufs/ufs-qcom-ice.c b/drivers/scsi/ufs/ufs-qcom-ice.c
index c5fe4784fa21..8bb1f54455d1 100644
--- a/drivers/scsi/ufs/ufs-qcom-ice.c
+++ b/drivers/scsi/ufs/ufs-qcom-ice.c
@@ -179,7 +179,8 @@ static void ufs_qcom_ice_cfg_work(struct work_struct *work)
return;
spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
- if (!qcom_host->req_pending) {
+ if (!qcom_host->req_pending ||
+ ufshcd_is_shutdown_ongoing(qcom_host->hba)) {
qcom_host->work_pending = false;
spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
return;
@@ -227,7 +228,7 @@ int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host)
qcom_host->dbg_print_en |= UFS_QCOM_ICE_DEFAULT_DBG_PRINT_EN;
if (!ice_workqueue) {
ice_workqueue = alloc_workqueue("ice-set-key",
- WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 0);
if (!ice_workqueue) {
dev_err(ufs_dev, "%s: workqueue allocation failed.\n",
__func__);
@@ -610,6 +611,7 @@ out:
return err;
}
+
/**
* ufs_qcom_ice_resume() - resumes UFS-ICE interface and ICE device from power
* collapse
@@ -654,6 +656,28 @@ out:
}
/**
+ * ufs_qcom_is_ice_busy() - lets the caller know whether any ICE operation
+ * is still ongoing in workqueue context.
+ * @qcom_host: Pointer to a UFS QCom internal host structure.
+ * qcom_host should be a valid pointer.
+ *
+ * Return: 1 if ICE is busy, 0 if it is free.
+ * -EINVAL in case of error.
+ */
+int ufs_qcom_is_ice_busy(struct ufs_qcom_host *qcom_host)
+{
+ if (!qcom_host) {
+ pr_err("%s: invalid qcom_host %pK", __func__, qcom_host);
+ return -EINVAL;
+ }
+
+ if (qcom_host->req_pending)
+ return 1;
+ else
+ return 0;
+}
+
+/**
* ufs_qcom_ice_suspend() - suspends UFS-ICE interface and ICE device
* @qcom_host: Pointer to a UFS QCom internal host structure.
* qcom_host, qcom_host->hba and qcom_host->hba->dev should all
diff --git a/drivers/scsi/ufs/ufs-qcom-ice.h b/drivers/scsi/ufs/ufs-qcom-ice.h
index eb0291612049..88ffeb35f9f3 100644
--- a/drivers/scsi/ufs/ufs-qcom-ice.h
+++ b/drivers/scsi/ufs/ufs-qcom-ice.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -83,6 +83,7 @@ int ufs_qcom_ice_resume(struct ufs_qcom_host *qcom_host);
int ufs_qcom_ice_suspend(struct ufs_qcom_host *qcom_host);
int ufs_qcom_ice_get_status(struct ufs_qcom_host *qcom_host, int *ice_status);
void ufs_qcom_ice_print_regs(struct ufs_qcom_host *qcom_host);
+int ufs_qcom_is_ice_busy(struct ufs_qcom_host *qcom_host);
#else
inline int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host)
{
@@ -127,6 +128,10 @@ inline void ufs_qcom_ice_print_regs(struct ufs_qcom_host *qcom_host)
{
return;
}
+inline int ufs_qcom_is_ice_busy(struct ufs_qcom_host *qcom_host)
+{
+ return 0;
+}
#endif /* CONFIG_SCSI_UFS_QCOM_ICE */
#endif /* UFS_QCOM_ICE_H_ */
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 3587dbbd3a0a..a919d52cf97c 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1018,12 +1018,27 @@ static int ufs_qcom_crypto_engine_get_status(struct ufs_hba *hba, u32 *status)
return ufs_qcom_ice_get_status(host, status);
}
+
+static int ufs_qcom_crypto_get_pending_req_status(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ int err = 0;
+
+ if (!host->ice.pdev)
+ goto out;
+
+ err = ufs_qcom_is_ice_busy(host);
+out:
+ return err;
+}
+
#else /* !CONFIG_SCSI_UFS_QCOM_ICE */
#define ufs_qcom_crypto_req_setup NULL
#define ufs_qcom_crytpo_engine_cfg_start NULL
#define ufs_qcom_crytpo_engine_cfg_end NULL
#define ufs_qcom_crytpo_engine_reset NULL
#define ufs_qcom_crypto_engine_get_status NULL
+#define ufs_qcom_crypto_get_pending_req_status NULL
#endif /* CONFIG_SCSI_UFS_QCOM_ICE */
struct ufs_qcom_dev_params {
@@ -2807,6 +2822,7 @@ static struct ufs_hba_crypto_variant_ops ufs_hba_crypto_variant_ops = {
.crypto_engine_cfg_end = ufs_qcom_crytpo_engine_cfg_end,
.crypto_engine_reset = ufs_qcom_crytpo_engine_reset,
.crypto_engine_get_status = ufs_qcom_crypto_engine_get_status,
+ .crypto_get_req_status = ufs_qcom_crypto_get_pending_req_status,
};
static struct ufs_hba_pm_qos_variant_ops ufs_hba_pm_qos_variant_ops = {
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index ecfe924c1738..ab53e5ff072f 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -3699,9 +3699,13 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
err = ufshcd_get_read_lock(hba, cmd->device->lun);
if (unlikely(err < 0)) {
if (err == -EPERM) {
- set_host_byte(cmd, DID_ERROR);
- cmd->scsi_done(cmd);
- return 0;
+ if (!ufshcd_vops_crypto_engine_get_req_status(hba)) {
+ set_host_byte(cmd, DID_ERROR);
+ cmd->scsi_done(cmd);
+ return 0;
+ } else {
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
}
if (err == -EAGAIN)
return SCSI_MLQUEUE_HOST_BUSY;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 76d5f89de303..fa4b2e7883d9 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -379,6 +379,7 @@ struct ufs_hba_variant_ops {
* according to tag parameter
* @crypto_engine_reset: perform reset to the cryptographic engine
* @crypto_engine_get_status: get errors status of the cryptographic engine
+ * @crypto_get_req_status: Check whether the crypto driver still holds a request
*/
struct ufs_hba_crypto_variant_ops {
int (*crypto_req_setup)(struct ufs_hba *, struct ufshcd_lrb *lrbp,
@@ -388,6 +389,7 @@ struct ufs_hba_crypto_variant_ops {
struct request *);
int (*crypto_engine_reset)(struct ufs_hba *);
int (*crypto_engine_get_status)(struct ufs_hba *, u32 *);
+ int (*crypto_get_req_status)(struct ufs_hba *);
};
/**
@@ -1535,7 +1537,6 @@ static inline int ufshcd_vops_crypto_engine_reset(struct ufs_hba *hba)
static inline int ufshcd_vops_crypto_engine_get_status(struct ufs_hba *hba,
u32 *status)
-
{
if (hba->var && hba->var->crypto_vops &&
hba->var->crypto_vops->crypto_engine_get_status)
@@ -1559,4 +1560,13 @@ static inline void ufshcd_vops_pm_qos_req_end(struct ufs_hba *hba,
hba->var->pm_qos_vops->req_end(hba, req, lock);
}
+static inline int ufshcd_vops_crypto_engine_get_req_status(struct ufs_hba *hba)
+
+{
+ if (hba->var && hba->var->crypto_vops &&
+ hba->var->crypto_vops->crypto_get_req_status)
+ return hba->var->crypto_vops->crypto_get_req_status(hba);
+ return 0;
+}
+
#endif /* End of Header */
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 8365a9777544..7a85683c75ee 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -490,6 +490,16 @@ config MSM_GLADIATOR_ERP
If unsure, say N.
+config SDX_EXT_IPC
+ tristate "QCOM external ipc driver"
+ help
+ This enables the module that helps the modem communicate with an
+ external application processor connected to the Qualcomm
+ Technologies, Inc. modem chipset. The modem and the APQ can learn
+ each other's state by reading IPC GPIOs.
+
+ If unsure, say N.
+
config PANIC_ON_GLADIATOR_ERROR
depends on MSM_GLADIATOR_ERP
bool "Panic on GLADIATOR error report"
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 2109b80824e7..1cb52b67d1a0 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -64,6 +64,7 @@ obj-$(MSM_REMOTEQDSS) += remoteqdss.o
obj-$(CONFIG_MSM_JTAGV8) += jtagv8.o jtagv8-etm.o
obj-$(CONFIG_MSM_CDSP_LOADER) += qdsp6v2/
obj-$(CONFIG_QCOM_SMCINVOKE) += smcinvoke.o
+obj-$(CONFIG_SDX_EXT_IPC) += sdx_ext_ipc.o
ifdef CONFIG_MSM_SUBSYSTEM_RESTART
obj-y += subsystem_notif.o
diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c
index aa7715cf1815..33aab345d89d 100644
--- a/drivers/soc/qcom/dcc_v2.c
+++ b/drivers/soc/qcom/dcc_v2.c
@@ -576,13 +576,15 @@ static int dcc_valid_list(struct dcc_drvdata *drvdata, int curr_list)
return -EINVAL;
if (drvdata->enable[curr_list]) {
- dev_err(drvdata->dev, "DCC is already enabled\n");
+ dev_err(drvdata->dev, "List %d is already enabled\n",
+ curr_list);
return -EINVAL;
}
lock_reg = dcc_readl(drvdata, DCC_LL_LOCK(curr_list));
if (lock_reg & 0x1) {
- dev_err(drvdata->dev, "DCC is already enabled\n");
+ dev_err(drvdata->dev, "List %d is already locked\n",
+ curr_list);
return -EINVAL;
}
@@ -590,6 +592,21 @@ static int dcc_valid_list(struct dcc_drvdata *drvdata, int curr_list)
return 0;
}
+static bool is_dcc_enabled(struct dcc_drvdata *drvdata)
+{
+ bool dcc_enable = false;
+ int list;
+
+ for (list = 0; list < DCC_MAX_LINK_LIST; list++) {
+ if (drvdata->enable[list]) {
+ dcc_enable = true;
+ break;
+ }
+ }
+
+ return dcc_enable;
+}
+
static int dcc_enable(struct dcc_drvdata *drvdata)
{
int ret = 0;
@@ -598,7 +615,9 @@ static int dcc_enable(struct dcc_drvdata *drvdata)
mutex_lock(&drvdata->mutex);
- memset_io(drvdata->ram_base, 0xDE, drvdata->ram_size);
+ if (!is_dcc_enabled(drvdata)) {
+ memset_io(drvdata->ram_base, 0xDE, drvdata->ram_size);
+ }
for (list = 0; list < DCC_MAX_LINK_LIST; list++) {
@@ -680,21 +699,6 @@ static void dcc_disable(struct dcc_drvdata *drvdata)
mutex_unlock(&drvdata->mutex);
}
-static bool is_dcc_enabled(struct dcc_drvdata *drvdata)
-{
- bool dcc_enable = false;
- int list;
-
- for (list = 0; list < DCC_MAX_LINK_LIST; list++) {
- if (drvdata->enable[list]) {
- dcc_enable = true;
- break;
- }
- }
-
- return dcc_enable;
-}
-
static ssize_t dcc_show_curr_list(struct device *dev,
struct device_attribute *attr, char *buf)
{
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 126a31435df9..0ae9758d5008 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -131,6 +131,7 @@ static struct icnss_vreg_info icnss_vreg_info[] = {
{NULL, "vdd-cx-mx", 752000, 752000, 0, 0, false},
{NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false},
{NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false},
+ {NULL, "vdd-3.3-ch1", 3312000, 3312000, 0, 0, false},
{NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false},
};
diff --git a/drivers/soc/qcom/sdx_ext_ipc.c b/drivers/soc/qcom/sdx_ext_ipc.c
new file mode 100644
index 000000000000..2b1801799418
--- /dev/null
+++ b/drivers/soc/qcom/sdx_ext_ipc.c
@@ -0,0 +1,267 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+
+
+enum subsys_policies {
+ SUBSYS_PANIC = 0,
+ SUBSYS_NOP,
+};
+
+static const char * const policies[] = {
+ [SUBSYS_PANIC] = "PANIC",
+ [SUBSYS_NOP] = "NOP",
+};
+
+enum gpios {
+ AP2MDM_STATUS = 0,
+ MDM2AP_STATUS,
+ MDM2AP_STATUS2,
+ NUM_GPIOS,
+};
+
+static const char * const gpio_map[] = {
+ [AP2MDM_STATUS] = "qcom,ap2mdm-status-gpio",
+ [MDM2AP_STATUS] = "qcom,mdm2ap-status-gpio",
+ [MDM2AP_STATUS2] = "qcom,mdm2ap-status2-gpio",
+};
+
+struct gpio_cntrl {
+ unsigned int gpios[NUM_GPIOS];
+ int status_irq;
+ int policy;
+ struct device *dev;
+ struct mutex policy_lock;
+ struct notifier_block panic_blk;
+};
+
+static ssize_t policy_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ struct gpio_cntrl *mdm = dev_get_drvdata(dev);
+
+ mutex_lock(&mdm->policy_lock);
+ ret = scnprintf(buf, strlen(policies[mdm->policy]) + 1,
+ policies[mdm->policy]);
+ mutex_unlock(&mdm->policy_lock);
+
+ return ret;
+}
+
+static ssize_t policy_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct gpio_cntrl *mdm = dev_get_drvdata(dev);
+ const char *p;
+ int i, orig_count = count;
+
+ p = memchr(buf, '\n', count);
+ if (p)
+ count = p - buf;
+
+ for (i = 0; i < ARRAY_SIZE(policies); i++)
+ if (!strncasecmp(buf, policies[i], count)) {
+ mutex_lock(&mdm->policy_lock);
+ mdm->policy = i;
+ mutex_unlock(&mdm->policy_lock);
+ return orig_count;
+ }
+ return -EPERM;
+}
+static DEVICE_ATTR_RW(policy);
+
+static irqreturn_t ap_status_change(int irq, void *dev_id)
+{
+ struct gpio_cntrl *mdm = dev_id;
+ int state;
+ struct gpio_desc *gp_status = gpio_to_desc(mdm->gpios[AP2MDM_STATUS]);
+ int active_low = 0;
+
+ if (gp_status)
+ active_low = gpiod_is_active_low(gp_status);
+
+ state = gpio_get_value(mdm->gpios[AP2MDM_STATUS]);
+ if ((!active_low && !state) || (active_low && state)) {
+ if (mdm->policy)
+ dev_info(mdm->dev, "Host undergoing SSR, leaving SDX as it is\n");
+ else
+ panic("Host undergoing SSR, panicking SDX\n");
+ } else
+ dev_info(mdm->dev, "HOST booted\n");
+
+ return IRQ_HANDLED;
+}
+
+static void remove_ipc(struct gpio_cntrl *mdm)
+{
+ int i;
+
+ for (i = 0; i < NUM_GPIOS; ++i) {
+ if (gpio_is_valid(mdm->gpios[i]))
+ gpio_free(mdm->gpios[i]);
+ }
+}
+
+static int setup_ipc(struct gpio_cntrl *mdm)
+{
+ int i, val, ret, irq;
+ struct device_node *node;
+
+ node = mdm->dev->of_node;
+ for (i = 0; i < ARRAY_SIZE(gpio_map); i++) {
+ val = of_get_named_gpio(node, gpio_map[i], 0);
+ if (val >= 0)
+ mdm->gpios[i] = val;
+ }
+
+ ret = gpio_request(mdm->gpios[AP2MDM_STATUS], "AP2MDM_STATUS");
+ if (ret) {
+ dev_err(mdm->dev, "Failed to configure AP2MDM_STATUS gpio\n");
+ return ret;
+ }
+ gpio_direction_input(mdm->gpios[AP2MDM_STATUS]);
+
+ ret = gpio_request(mdm->gpios[MDM2AP_STATUS], "MDM2AP_STATUS");
+ if (ret) {
+ dev_err(mdm->dev, "Failed to configure MDM2AP_STATUS gpio\n");
+ return ret;
+ }
+ gpio_direction_output(mdm->gpios[MDM2AP_STATUS], 1);
+
+ ret = gpio_request(mdm->gpios[MDM2AP_STATUS2], "MDM2AP_STATUS2");
+ if (ret) {
+ dev_err(mdm->dev, "Failed to configure MDM2AP_STATUS2 gpio\n");
+ return ret;
+ }
+ gpio_direction_output(mdm->gpios[MDM2AP_STATUS2], 0);
+
+ irq = gpio_to_irq(mdm->gpios[AP2MDM_STATUS]);
+ if (irq < 0) {
+ dev_err(mdm->dev, "bad AP2MDM_STATUS IRQ resource\n");
+ return irq;
+ }
+ mdm->status_irq = irq;
+
+ return 0;
+}
+
+static int sdx_ext_ipc_panic(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct gpio_cntrl *mdm = container_of(this,
+ struct gpio_cntrl, panic_blk);
+
+ gpio_set_value(mdm->gpios[MDM2AP_STATUS], 0);
+
+ return NOTIFY_DONE;
+}
+
+static int sdx_ext_ipc_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct device_node *node;
+ struct gpio_cntrl *mdm;
+
+ node = pdev->dev.of_node;
+ mdm = devm_kzalloc(&pdev->dev, sizeof(*mdm), GFP_KERNEL);
+ if (!mdm)
+ return -ENOMEM;
+
+ mdm->dev = &pdev->dev;
+ ret = setup_ipc(mdm);
+ if (ret) {
+ dev_err(mdm->dev, "Error setting up gpios\n");
+ devm_kfree(&pdev->dev, mdm);
+ return ret;
+ }
+
+ mdm->panic_blk.notifier_call = sdx_ext_ipc_panic;
+ atomic_notifier_chain_register(&panic_notifier_list, &mdm->panic_blk);
+
+ mutex_init(&mdm->policy_lock);
+ mdm->policy = SUBSYS_PANIC;
+
+ ret = device_create_file(mdm->dev, &dev_attr_policy);
+ if (ret) {
+ dev_err(mdm->dev, "cannot create sysfs attribute\n");
+ goto sys_fail;
+ }
+
+ platform_set_drvdata(pdev, mdm);
+
+ ret = devm_request_irq(mdm->dev, mdm->status_irq, ap_status_change,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
+ "ap status", mdm);
+ if (ret < 0) {
+ dev_err(mdm->dev, "%s: AP2MDM_STATUS IRQ#%d request failed,\n",
+ __func__, mdm->status_irq);
+ goto irq_fail;
+ }
+ irq_set_irq_wake(mdm->status_irq, 1);
+ return 0;
+
+irq_fail:
+ device_remove_file(mdm->dev, &dev_attr_policy);
+sys_fail:
+ atomic_notifier_chain_unregister(&panic_notifier_list, &mdm->panic_blk);
+ remove_ipc(mdm);
+ devm_kfree(&pdev->dev, mdm);
+ return ret;
+}
+
+static int sdx_ext_ipc_remove(struct platform_device *pdev)
+{
+ struct gpio_cntrl *mdm;
+
+ mdm = dev_get_drvdata(&pdev->dev);
+ disable_irq_wake(mdm->status_irq);
+ atomic_notifier_chain_unregister(&panic_notifier_list, &mdm->panic_blk);
+ remove_ipc(mdm);
+ device_remove_file(mdm->dev, &dev_attr_policy);
+ return 0;
+}
+
+static const struct of_device_id sdx_ext_ipc_of_match[] = {
+ { .compatible = "qcom,sdx-ext-ipc"},
+ {},
+};
+
+static struct platform_driver sdx_ext_ipc_driver = {
+ .probe = sdx_ext_ipc_probe,
+ .remove = sdx_ext_ipc_remove,
+ .driver = {
+ .name = "sdx-ext-ipc",
+ .owner = THIS_MODULE,
+ .of_match_table = sdx_ext_ipc_of_match,
+ },
+};
+
+static int __init sdx_ext_ipc_register(void)
+{
+ return platform_driver_register(&sdx_ext_ipc_driver);
+}
+subsys_initcall(sdx_ext_ipc_register);
+
+static void __exit sdx_ext_ipc_unregister(void)
+{
+ platform_driver_unregister(&sdx_ext_ipc_driver);
+}
+module_exit(sdx_ext_ipc_unregister);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smcinvoke.c b/drivers/soc/qcom/smcinvoke.c
index 72bf30ad9d2d..ef5937dac63d 100644
--- a/drivers/soc/qcom/smcinvoke.c
+++ b/drivers/soc/qcom/smcinvoke.c
@@ -111,6 +111,9 @@
#define MEM_RGN_SRVR_ID 1
#define MEM_MAP_SRVR_ID 2
#define CBOBJ_SERVER_ID_START 0x10
+#define CBOBJ_SERVER_ID_END ((1<<16) - 1)
+/* local obj id is represented by 15 bits */
+#define MAX_LOCAL_OBJ_ID ((1<<15) - 1)
/* CBOBJs will be served by server id 0x10 onwards */
#define TZHANDLE_GET_SERVER(h) ((uint16_t)((h) & 0xFFFF))
#define TZHANDLE_GET_OBJID(h) (((h) >> 16) & 0x7FFF)
@@ -264,6 +267,9 @@ static struct smcinvoke_server_info *find_cb_server_locked(uint16_t server_id)
static uint16_t next_cb_server_id_locked(void)
{
+ if (g_last_cb_server_id == CBOBJ_SERVER_ID_END)
+ g_last_cb_server_id = CBOBJ_SERVER_ID_START;
+
while (find_cb_server_locked(++g_last_cb_server_id));
return g_last_cb_server_id;
@@ -301,6 +307,9 @@ static struct smcinvoke_mem_obj *find_mem_obj_locked(uint16_t mem_obj_id,
static uint32_t next_mem_region_obj_id_locked(void)
{
+ if (g_last_mem_rgn_id == MAX_LOCAL_OBJ_ID)
+ g_last_mem_rgn_id = 0;
+
while (find_mem_obj_locked(++g_last_mem_rgn_id, SMCINVOKE_MEM_RGN_OBJ));
return g_last_mem_rgn_id;
@@ -308,6 +317,9 @@ static uint32_t next_mem_region_obj_id_locked(void)
static uint32_t next_mem_map_obj_id_locked(void)
{
+ if (g_last_mem_map_obj_id == MAX_LOCAL_OBJ_ID)
+ g_last_mem_map_obj_id = 0;
+
while (find_mem_obj_locked(++g_last_mem_map_obj_id,
SMCINVOKE_MEM_MAP_OBJ));
@@ -391,10 +403,10 @@ static void free_pending_cbobj_locked(struct kref *kref)
static int get_pending_cbobj_locked(uint16_t srvr_id, int16_t obj_id)
{
- struct smcinvoke_server_info *server = find_cb_server_locked(srvr_id);
struct list_head *head = NULL;
struct smcinvoke_cbobj *cbobj = NULL;
struct smcinvoke_cbobj *obj = NULL;
+ struct smcinvoke_server_info *server = find_cb_server_locked(srvr_id);
if (!server)
return OBJECT_ERROR_BADOBJ;
@@ -462,7 +474,11 @@ static void delete_cb_txn(struct kref *kref)
struct smcinvoke_cb_txn *cb_txn = container_of(kref,
struct smcinvoke_cb_txn, ref_cnt);
+ if (OBJECT_OP_METHODID(cb_txn->cb_req->hdr.op) == OBJECT_OP_RELEASE)
+ release_tzhandle_locked(cb_txn->cb_req->hdr.tzhandle);
+
kfree(cb_txn->cb_req);
+ hash_del(&cb_txn->hash);
kfree(cb_txn);
}
@@ -839,20 +855,13 @@ static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp)
/* ret is going to TZ. Provide values from OBJECT_ERROR_<> */
int ret = OBJECT_ERROR_DEFUNCT;
struct smcinvoke_cb_txn *cb_txn = NULL;
- struct smcinvoke_tzcb_req *cb_req = NULL;
+ struct smcinvoke_tzcb_req *cb_req = NULL, *tmp_cb_req = NULL;
struct smcinvoke_server_info *srvr_info = NULL;
if (buf_len < sizeof(struct smcinvoke_tzcb_req))
return;
- cb_req = kzalloc(buf_len, GFP_KERNEL);
- if (!cb_req) {
- /* we need to return error to caller so fill up result */
- cb_req = buf;
- cb_req->result = OBJECT_ERROR_KMEM;
- return;
- }
- memcpy(cb_req, buf, buf_len);
+ cb_req = buf;
/* check whether it is to be served by kernel or userspace */
if (TZHANDLE_IS_KERNEL_OBJ(cb_req->hdr.tzhandle)) {
@@ -864,11 +873,26 @@ static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp)
return;
}
+ /*
+ * We need a copy of req that can be sent to the server. Otherwise, if
+ * someone kills the invoke caller, buf would go away and the server
+ * would be working on an already freed buffer, causing a device crash.
+ */
+ tmp_cb_req = kmemdup(buf, buf_len, GFP_KERNEL);
+ if (!tmp_cb_req) {
+ /* we need to return error to caller so fill up result */
+ cb_req->result = OBJECT_ERROR_KMEM;
+ return;
+ }
+
cb_txn = kzalloc(sizeof(*cb_txn), GFP_KERNEL);
if (!cb_txn) {
- ret = OBJECT_ERROR_KMEM;
- goto out;
+ cb_req->result = OBJECT_ERROR_KMEM;
+ kfree(tmp_cb_req);
+ return;
}
+ /* no need for memcpy as we did kmemdup() above */
+ cb_req = tmp_cb_req;
cb_txn->state = SMCINVOKE_REQ_PLACED;
cb_txn->cb_req = cb_req;
@@ -880,6 +904,7 @@ static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp)
srvr_info = find_cb_server_locked(
TZHANDLE_GET_SERVER(cb_req->hdr.tzhandle));
if (!srvr_info || srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT) {
+ /* ret equals OBJECT_ERROR_DEFUNCT at this point; go to out */
mutex_unlock(&g_smcinvoke_lock);
goto out;
}
@@ -887,33 +912,36 @@ static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp)
cb_txn->txn_id = ++srvr_info->txn_id;
hash_add(srvr_info->reqs_table, &cb_txn->hash, cb_txn->txn_id);
mutex_unlock(&g_smcinvoke_lock);
+ /*
+ * We need not worry that srvr_info will be deleted: as long as this
+ * CBObj is served by this server, srvr_info stays valid.
+ */
wake_up_interruptible(&srvr_info->req_wait_q);
ret = wait_event_interruptible(srvr_info->rsp_wait_q,
(cb_txn->state == SMCINVOKE_REQ_PROCESSED) ||
(srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT));
- if (ret)
- pr_err("%s wait_event interrupted: ret = %d\n", __func__, ret);
out:
/*
- * If we are here, either req is processed or not
- * if processed, result would have been set by txn processor
- * if not processed, we should set result with ret which should have
- * correct value that TZ/TA can understand
+ * We could be here for one of three reasons: a. the req was PROCESSED,
+ * b. the server was killed, or c. the invoke thread was killed;
+ * sometimes the invoke thread and the server are part of the same process.
*/
mutex_lock(&g_smcinvoke_lock);
- if (!cb_txn || (cb_txn->state != SMCINVOKE_REQ_PROCESSED)) {
- cb_req->result = ret;
- if (srvr_info &&
- srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT &&
- OBJECT_OP_METHODID(cb_req->hdr.op) == OBJECT_OP_RELEASE) {
- release_tzhandle_locked(cb_req->hdr.tzhandle);
- }
- }
- if (cb_txn) {
- hash_del(&cb_txn->hash);
- memcpy(buf, cb_req, buf_len);
- kref_put(&cb_txn->ref_cnt, delete_cb_txn);
+ hash_del(&cb_txn->hash);
+ if (cb_txn->state == SMCINVOKE_REQ_PROCESSED) {
+ /*
+ * it is possible that server was killed immediately
+ * after CB Req was processed but who cares now!
+ */
+ } else if (!srvr_info ||
+ srvr_info->state == SMCINVOKE_SERVER_STATE_DEFUNCT) {
+ cb_req->result = OBJECT_ERROR_DEFUNCT;
+ } else {
+ pr_debug("%s wait_event interrupted ret = %d\n", __func__, ret);
+ cb_req->result = OBJECT_ERROR_ABORT;
}
+ memcpy(buf, cb_req, buf_len);
+ kref_put(&cb_txn->ref_cnt, delete_cb_txn);
mutex_unlock(&g_smcinvoke_lock);
}
@@ -1439,24 +1467,26 @@ static long process_accept_req(struct file *filp, unsigned int cmd,
cb_txn = find_cbtxn_locked(server_info, user_args.txn_id,
SMCINVOKE_REQ_PROCESSING);
mutex_unlock(&g_smcinvoke_lock);
- /* cb_txn can be null if userspace provides wrong txn id. */
+ /*
+ * cb_txn can be NULL if userspace provides a wrong txn id OR the
+ * invoke thread died while the server was processing the cb req.
+ * If the invoke thread dies, it removes the req from the queue, so
+ * no matching cb_txn would be on the queue, hence a NULL cb_txn.
+ */
if (!cb_txn) {
- pr_err("%s: Invalid txn received = %d\n",
+ pr_err("%s txn %d either invalid or removed from Q\n",
__func__, user_args.txn_id);
goto out;
}
ret = marshal_out_tzcb_req(&user_args, cb_txn,
cb_txn->filp_to_release);
/*
- * if client did not set error and we get error locally
+ * if client did not set error and we get error locally,
* we return local error to TA
*/
if (ret && cb_txn->cb_req->result == 0)
cb_txn->cb_req->result = OBJECT_ERROR_UNAVAIL;
- if (OBJECT_OP_METHODID(user_args.op) == OBJECT_OP_RELEASE)
- release_tzhandles(&cb_txn->cb_req->hdr.tzhandle, 1);
-
cb_txn->state = SMCINVOKE_REQ_PROCESSED;
kref_put(&cb_txn->ref_cnt, delete_cb_txn);
wake_up(&server_info->rsp_wait_q);
@@ -1475,7 +1505,7 @@ static long process_accept_req(struct file *filp, unsigned int cmd,
ret = wait_event_interruptible(server_info->req_wait_q,
!hash_empty(server_info->reqs_table));
if (ret) {
- pr_err("%s wait_event interrupted: ret = %d\n",
+ pr_debug("%s wait_event interrupted: ret = %d\n",
__func__, ret);
goto out;
}
@@ -1530,34 +1560,26 @@ static long process_invoke_req(struct file *filp, unsigned int cmd,
int32_t tzhandles_to_release[OBJECT_COUNTS_MAX_OO] = {0};
bool tz_acked = false;
- if (_IOC_SIZE(cmd) != sizeof(req)) {
- ret = -EINVAL;
- goto out;
- }
- if (tzobj->context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ) {
- ret = -EPERM;
- goto out;
- }
+ if (_IOC_SIZE(cmd) != sizeof(req))
+ return -EINVAL;
+
+ if (tzobj->context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ)
+ return -EPERM;
+
ret = copy_from_user(&req, (void __user *)arg, sizeof(req));
- if (ret) {
- ret = -EFAULT;
- goto out;
- }
+ if (ret)
+ return -EFAULT;
+
+ if (req.argsize != sizeof(union smcinvoke_arg))
+ return -EINVAL;
nr_args = OBJECT_COUNTS_NUM_buffers(req.counts) +
OBJECT_COUNTS_NUM_objects(req.counts);
- if (req.argsize != sizeof(union smcinvoke_arg)) {
- ret = -EINVAL;
- goto out;
- }
-
if (nr_args) {
args_buf = kcalloc(nr_args, req.argsize, GFP_KERNEL);
- if (!args_buf) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!args_buf)
+ return -ENOMEM;
ret = copy_from_user(args_buf, u64_to_user_ptr(req.args),
nr_args * req.argsize);
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 721126fc9059..5577567eab5b 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -124,6 +124,12 @@ struct spi_geni_gsi {
struct gsi_desc_cb desc_cb;
};
+struct spi_geni_ssr {
+ struct mutex ssr_lock;
+ bool is_ssr_down;
+ bool xfer_prepared;
+};
+
struct spi_geni_master {
struct se_geni_rsc spi_rsc;
resource_size_t phys_addr;
@@ -163,9 +169,12 @@ struct spi_geni_master {
bool shared_se;
bool dis_autosuspend;
bool cmd_done;
+ struct spi_geni_ssr spi_ssr;
};
static void spi_slv_setup(struct spi_geni_master *mas);
+static int ssr_spi_force_suspend(struct device *dev);
+static int ssr_spi_force_resume(struct device *dev);
static ssize_t show_slave_state(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -788,6 +797,13 @@ static int spi_geni_prepare_message(struct spi_master *spi,
int ret = 0;
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ mutex_lock(&mas->spi_ssr.ssr_lock);
+ /* Bail out if prepare_transfer didn't happen due to SSR */
+ if (mas->spi_ssr.is_ssr_down || !mas->spi_ssr.xfer_prepared) {
+ mutex_unlock(&mas->spi_ssr.ssr_lock);
+ return -EINVAL;
+ }
+
mas->cur_xfer_mode = select_xfer_mode(spi, spi_msg);
if (mas->cur_xfer_mode < 0) {
@@ -803,6 +819,7 @@ static int spi_geni_prepare_message(struct spi_master *spi,
geni_se_select_mode(mas->base, mas->cur_xfer_mode);
ret = setup_fifo_params(spi_msg->spi, spi);
}
+ mutex_unlock(&mas->spi_ssr.ssr_lock);
return ret;
}
@@ -826,6 +843,19 @@ static int spi_geni_prepare_transfer_hardware(struct spi_master *spi)
u32 max_speed = spi->cur_msg->spi->max_speed_hz;
struct se_geni_rsc *rsc = &mas->spi_rsc;
+ mutex_lock(&mas->spi_ssr.ssr_lock);
+ if (mas->spi_ssr.is_ssr_down) {
+ /*
+ * xfer_prepared will be set to true once prepare_transfer_hardware
+ * is complete.
+ * It is used in prepare_message and transfer_one to bail out
+ * during SSR.
+ */
+ mas->spi_ssr.xfer_prepared = false;
+ mutex_unlock(&mas->spi_ssr.ssr_lock);
+ return 0;
+ }
+
/* Adjust the IB based on the max speed of the slave.*/
rsc->ib = max_speed * DEFAULT_BUS_WIDTH;
if (mas->shared_se) {
@@ -864,6 +894,7 @@ static int spi_geni_prepare_transfer_hardware(struct spi_master *spi)
if (unlikely(proto != SPI_SLAVE)) {
dev_err(mas->dev, "Invalid proto %d\n", proto);
+ mutex_unlock(&mas->spi_ssr.ssr_lock);
return -ENXIO;
}
}
@@ -877,6 +908,7 @@ static int spi_geni_prepare_transfer_hardware(struct spi_master *spi)
proto = get_se_proto(mas->base);
if ((unlikely(proto != SPI)) && (!spi->slave)) {
dev_err(mas->dev, "Invalid proto %d\n", proto);
+ mutex_unlock(&mas->spi_ssr.ssr_lock);
return -ENXIO;
}
@@ -968,6 +1000,8 @@ setup_ipc:
"Auto Suspend is disabled\n");
}
exit_prepare_transfer_hardware:
+ mas->spi_ssr.xfer_prepared = true;
+ mutex_unlock(&mas->spi_ssr.ssr_lock);
return ret;
}
@@ -975,6 +1009,16 @@ static int spi_geni_unprepare_transfer_hardware(struct spi_master *spi)
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
int count = 0;
+
+ mutex_lock(&mas->spi_ssr.ssr_lock);
+ if (mas->spi_ssr.is_ssr_down || !mas->spi_ssr.xfer_prepared) {
+ /* Call runtime_put to match get in prepare_transfer */
+ pm_runtime_put_noidle(mas->dev);
+ mutex_unlock(&mas->spi_ssr.ssr_lock);
+ return -EINVAL;
+ }
+ mutex_unlock(&mas->spi_ssr.ssr_lock);
+
if (mas->shared_se) {
struct se_geni_rsc *rsc;
int ret = 0;
@@ -1183,13 +1227,24 @@ static int spi_geni_transfer_one(struct spi_master *spi,
return -EINVAL;
}
+ mutex_lock(&mas->spi_ssr.ssr_lock);
+ if (mas->spi_ssr.is_ssr_down || !mas->spi_ssr.xfer_prepared) {
+ mutex_unlock(&mas->spi_ssr.ssr_lock);
+ return -EINVAL;
+ }
+
if (mas->cur_xfer_mode != GSI_DMA) {
reinit_completion(&mas->xfer_done);
setup_fifo_xfer(xfer, mas, slv->mode, spi);
if (spi->slave)
spi->slave_state = true;
+ mutex_unlock(&mas->spi_ssr.ssr_lock);
timeout = wait_for_completion_timeout(&mas->xfer_done,
msecs_to_jiffies(SPI_XFER_TIMEOUT_MS));
+ mutex_lock(&mas->spi_ssr.ssr_lock);
+ if (mas->spi_ssr.is_ssr_down)
+ goto err_ssr_transfer_one;
+
if (spi->slave)
spi->slave_state = false;
@@ -1256,16 +1311,20 @@ static int spi_geni_transfer_one(struct spi_master *spi,
}
}
}
+ mutex_unlock(&mas->spi_ssr.ssr_lock);
return ret;
err_gsi_geni_transfer_one:
geni_se_dump_dbg_regs(&mas->spi_rsc, mas->base, mas->ipc);
dmaengine_terminate_all(mas->tx);
+ mutex_unlock(&mas->spi_ssr.ssr_lock);
return ret;
err_fifo_geni_transfer_one:
if (!spi->slave)
handle_fifo_timeout(mas, xfer);
if (spi->slave)
geni_se_dump_dbg_regs(&mas->spi_rsc, mas->base, mas->ipc);
+err_ssr_transfer_one:
+ mutex_unlock(&mas->spi_ssr.ssr_lock);
return ret;
}
@@ -1302,6 +1361,8 @@ static void geni_spi_handle_tx(struct spi_geni_master *mas)
int bytes_per_fifo = tx_fifo_width;
int bytes_to_write = 0;
+ if (mas->spi_ssr.is_ssr_down)
+ break;
if ((mas->tx_fifo_width % mas->cur_word_len))
bytes_per_fifo =
(mas->cur_word_len / BITS_PER_BYTE) + 1;
@@ -1314,7 +1375,7 @@ static void geni_spi_handle_tx(struct spi_geni_master *mas)
mb();
}
mas->tx_rem_bytes -= max_bytes;
- if (!mas->tx_rem_bytes) {
+ if (!mas->tx_rem_bytes && !mas->spi_ssr.is_ssr_down) {
geni_write_reg(0, mas->base, SE_GENI_TX_WATERMARK_REG);
/* Barrier here before return to prevent further ISRs */
mb();
@@ -1325,14 +1386,18 @@ static void geni_spi_handle_rx(struct spi_geni_master *mas)
{
int i = 0;
int fifo_width = (mas->tx_fifo_width >> 3);
- u32 rx_fifo_status = geni_read_reg(mas->base, SE_GENI_RX_FIFO_STATUS);
+ u32 rx_fifo_status;
int rx_bytes = 0;
int rx_wc = 0;
u8 *rx_buf = NULL;
+ if (mas->spi_ssr.is_ssr_down)
+ return;
+
if (!mas->cur_xfer)
return;
+ rx_fifo_status = geni_read_reg(mas->base, SE_GENI_RX_FIFO_STATUS);
rx_buf = mas->cur_xfer->rx_buf;
rx_wc = (rx_fifo_status & RX_FIFO_WC_MSK);
if (rx_fifo_status & RX_LAST) {
@@ -1358,6 +1423,8 @@ static void geni_spi_handle_rx(struct spi_geni_master *mas)
int read_bytes = 0;
int j;
+ if (mas->spi_ssr.is_ssr_down)
+ break;
if ((mas->tx_fifo_width % mas->cur_word_len))
bytes_per_fifo =
(mas->cur_word_len / BITS_PER_BYTE) + 1;
@@ -1380,7 +1447,15 @@ static irqreturn_t geni_spi_irq(int irq, void *data)
"%s: device is suspended\n", __func__);
goto exit_geni_spi_irq;
}
+
+ if (mas->spi_ssr.is_ssr_down) {
+ mas->cmd_done = false;
+ complete(&mas->xfer_done);
+ dev_err(mas->dev, "IRQ at SSR down\n");
+ return IRQ_HANDLED;
+ }
m_irq = geni_read_reg(mas->base, SE_GENI_M_IRQ_STATUS);
+
if (mas->cur_xfer_mode == FIFO_MODE) {
if ((m_irq & M_RX_FIFO_WATERMARK_EN) ||
(m_irq & M_RX_FIFO_LAST_EN))
@@ -1436,7 +1511,8 @@ static irqreturn_t geni_spi_irq(int irq, void *data)
mas->cmd_done = true;
}
exit_geni_spi_irq:
- geni_write_reg(m_irq, mas->base, SE_GENI_M_IRQ_CLEAR);
+ if (!mas->spi_ssr.is_ssr_down)
+ geni_write_reg(m_irq, mas->base, SE_GENI_M_IRQ_CLEAR);
if (mas->cmd_done) {
mas->cmd_done = false;
complete(&mas->xfer_done);
@@ -1488,6 +1564,8 @@ static int spi_geni_probe(struct platform_device *pdev)
}
geni_mas->wrapper_dev = &wrapper_pdev->dev;
geni_mas->spi_rsc.wrapper_dev = &wrapper_pdev->dev;
+ rsc->rsc_ssr.ssr_enable = of_property_read_bool(pdev->dev.of_node,
+ "ssr-enable");
ret = geni_se_resources_init(rsc, SPI_CORE2X_VOTE,
(DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH));
if (ret) {
@@ -1624,10 +1702,12 @@ static int spi_geni_probe(struct platform_device *pdev)
spi->unprepare_transfer_hardware
= spi_geni_unprepare_transfer_hardware;
spi->auto_runtime_pm = false;
-
+ rsc->rsc_ssr.force_suspend = ssr_spi_force_suspend;
+ rsc->rsc_ssr.force_resume = ssr_spi_force_resume;
init_completion(&geni_mas->xfer_done);
init_completion(&geni_mas->tx_cb);
init_completion(&geni_mas->rx_cb);
+ mutex_init(&geni_mas->spi_ssr.ssr_lock);
pm_runtime_set_suspended(&pdev->dev);
if (!geni_mas->dis_autosuspend) {
pm_runtime_set_autosuspend_delay(&pdev->dev,
@@ -1691,6 +1771,12 @@ static int spi_geni_runtime_resume(struct device *dev)
struct spi_master *spi = get_spi_master(dev);
struct spi_geni_master *geni_mas = spi_master_get_devdata(spi);
+ if (geni_mas->spi_ssr.is_ssr_down) {
+ GENI_SE_ERR(geni_mas->ipc, false, NULL,
+ "%s: Error runtime resume in SSR down\n", __func__);
+ return -EAGAIN;
+ }
+
if (geni_mas->shared_se) {
ret = se_geni_clks_on(&geni_mas->spi_rsc);
if (ret)
@@ -1755,6 +1841,49 @@ static int spi_geni_suspend(struct device *dev)
}
#endif
+static int ssr_spi_force_suspend(struct device *dev)
+{
+ struct spi_master *spi = get_spi_master(dev);
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ int ret = 0;
+
+ mutex_lock(&mas->spi_ssr.ssr_lock);
+ mas->spi_ssr.xfer_prepared = false;
+ disable_irq(mas->irq);
+ mas->spi_ssr.is_ssr_down = true;
+ complete(&mas->xfer_done);
+
+ if (!pm_runtime_status_suspended(mas->dev)) {
+ ret = spi_geni_runtime_suspend(mas->dev);
+ if (ret) {
+ dev_err(mas->dev, "runtime suspend failed %d\n", ret);
+ } else {
+ pm_runtime_disable(mas->dev);
+ pm_runtime_set_suspended(mas->dev);
+ pm_runtime_enable(mas->dev);
+ }
+ }
+
+ GENI_SE_DBG(mas->ipc, false, mas->dev, "force suspend done\n");
+ mutex_unlock(&mas->spi_ssr.ssr_lock);
+
+ return ret;
+}
+
+static int ssr_spi_force_resume(struct device *dev)
+{
+ struct spi_master *spi = get_spi_master(dev);
+ struct spi_geni_master *mas = spi_master_get_devdata(spi);
+
+ mutex_lock(&mas->spi_ssr.ssr_lock);
+ mas->spi_ssr.is_ssr_down = false;
+ enable_irq(mas->irq);
+ GENI_SE_DBG(mas->ipc, false, mas->dev, "force resume done\n");
+ mutex_unlock(&mas->spi_ssr.ssr_lock);
+
+ return 0;
+}
+
static const struct dev_pm_ops spi_geni_pm_ops = {
SET_RUNTIME_PM_OPS(spi_geni_runtime_suspend,
spi_geni_runtime_resume, NULL)
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 8b29b71ad6ac..b01aaa097e9a 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -587,6 +587,46 @@ static int of_thermal_aggregate_trip_types(struct thermal_zone_device *tz,
return 0;
}
+static bool of_thermal_is_trips_triggered(struct thermal_zone_device *tz,
+ int temp)
+{
+ int tt, th, trip, last_temp;
+ struct __thermal_zone *data = tz->devdata;
+ bool triggered = false;
+
+ mutex_lock(&tz->lock);
+ last_temp = tz->last_temperature;
+ for (trip = 0; trip < data->ntrips; trip++) {
+
+ if (!tz->tzp->tracks_low) {
+ tt = data->trips[trip].temperature;
+ if (temp >= tt && last_temp < tt) {
+ triggered = true;
+ break;
+ }
+ th = tt - data->trips[trip].hysteresis;
+ if (temp <= th && last_temp > th) {
+ triggered = true;
+ break;
+ }
+ } else {
+ tt = data->trips[trip].temperature;
+ if (temp <= tt && last_temp > tt) {
+ triggered = true;
+ break;
+ }
+ th = tt + data->trips[trip].hysteresis;
+ if (temp >= th && last_temp < th) {
+ triggered = true;
+ break;
+ }
+ }
+ }
+ mutex_unlock(&tz->lock);
+
+ return triggered;
+}
+
/*
* of_thermal_aggregate_trip - aggregate trip temperatures across sibling
* thermal zones.
@@ -623,6 +663,8 @@ static void handle_thermal_trip(struct thermal_zone_device *tz,
thermal_zone_device_update(zone,
THERMAL_EVENT_UNSPECIFIED);
} else {
+ if (!of_thermal_is_trips_triggered(zone, trip_temp))
+ continue;
thermal_zone_device_update_temp(zone,
THERMAL_EVENT_UNSPECIFIED, trip_temp);
}
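To make the crossing check in of_thermal_is_trips_triggered() concrete, here is a rough userspace sketch of the non-tracks_low branch with invented trip numbers; the struct and helper names are simplified stand-ins, not the of-thermal API:

    #include <stdbool.h>
    #include <stdio.h>

    /* simplified stand-in for the of-thermal trip data (millidegrees C) */
    struct trip { int temperature; int hysteresis; };

    /* true when temp crosses the trip threshold or its clear (hysteresis) point */
    static bool trip_triggered(const struct trip *t, int last_temp, int temp)
    {
            int clear = t->temperature - t->hysteresis;

            if (temp >= t->temperature && last_temp < t->temperature)
                    return true;            /* rising across the trip point */
            if (temp <= clear && last_temp > clear)
                    return true;            /* falling back across the clear point */
            return false;
    }

    int main(void)
    {
            struct trip t = { .temperature = 95000, .hysteresis = 5000 };

            /* 94 degC -> 96 degC crosses the 95 degC trip: update the zone */
            printf("%d\n", trip_triggered(&t, 94000, 96000));
            /* 93 degC -> 94 degC crosses nothing: the sibling-zone update is skipped */
            printf("%d\n", trip_triggered(&t, 93000, 94000));
            return 0;
    }

With a 95 degC trip and 5 degC hysteresis, a 94 to 96 degC jump reports a crossing, while 93 to 94 degC does not, so handle_thermal_trip() can skip the redundant sibling-zone update for the second sample.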
diff --git a/drivers/thermal/qcom/adc-tm5.c b/drivers/thermal/qcom/adc-tm5.c
index 11baf9c21631..f260c4f42c12 100644
--- a/drivers/thermal/qcom/adc-tm5.c
+++ b/drivers/thermal/qcom/adc-tm5.c
@@ -218,9 +218,30 @@ static int adc_tm5_configure(struct adc_tm_sensor *sensor,
buf[7] |= ADC_TM_Mn_MEAS_EN;
ret = adc_tm5_write_reg(chip,
- ADC_TM_Mn_ADC_CH_SEL_CTL(btm_chan_idx), buf, 8);
+ ADC_TM_Mn_ADC_CH_SEL_CTL(btm_chan_idx), buf, 1);
if (ret < 0) {
- pr_err("adc-tm block write failed with %d\n", ret);
+ pr_err("adc-tm channel select failed\n");
+ return ret;
+ }
+
+ ret = adc_tm5_write_reg(chip,
+ ADC_TM_Mn_MEAS_INTERVAL_CTL(btm_chan_idx), &buf[5], 1);
+ if (ret < 0) {
+ pr_err("adc-tm timer select failed\n");
+ return ret;
+ }
+
+ ret = adc_tm5_write_reg(chip,
+ ADC_TM_Mn_CTL(btm_chan_idx), &buf[6], 1);
+ if (ret < 0) {
+ pr_err("adc-tm parameter select failed\n");
+ return ret;
+ }
+
+ ret = adc_tm5_write_reg(chip,
+ ADC_TM_Mn_EN(btm_chan_idx), &buf[7], 1);
+ if (ret < 0) {
+ pr_err("adc-tm monitoring enable failed\n");
return ret;
}
diff --git a/drivers/thermal/qcom/qmi_cooling.c b/drivers/thermal/qcom/qmi_cooling.c
index 96b3a0b6ed36..2309f029c859 100644
--- a/drivers/thermal/qcom/qmi_cooling.c
+++ b/drivers/thermal/qcom/qmi_cooling.c
@@ -70,6 +70,10 @@ static struct qmi_dev_info device_clients[] = {
.type = QMI_CDEV_MAX_LIMIT_TYPE,
},
{
+ .dev_name = "pa_fr1",
+ .type = QMI_CDEV_MAX_LIMIT_TYPE,
+ },
+ {
.dev_name = "cx_vdd_limit",
.type = QMI_CDEV_MAX_LIMIT_TYPE,
},
@@ -94,6 +98,10 @@ static struct qmi_dev_info device_clients[] = {
.type = QMI_CDEV_MAX_LIMIT_TYPE,
},
{
+ .dev_name = "charge_state",
+ .type = QMI_CDEV_MAX_LIMIT_TYPE,
+ },
+ {
.dev_name = "mmw0",
.type = QMI_CDEV_MAX_LIMIT_TYPE,
},
@@ -110,19 +118,19 @@ static struct qmi_dev_info device_clients[] = {
.type = QMI_CDEV_MAX_LIMIT_TYPE,
},
{
- .dev_name = "modem_skin0",
+ .dev_name = "mmw_skin0",
.type = QMI_CDEV_MAX_LIMIT_TYPE,
},
{
- .dev_name = "modem_skin1",
+ .dev_name = "mmw_skin1",
.type = QMI_CDEV_MAX_LIMIT_TYPE,
},
{
- .dev_name = "modem_skin2",
+ .dev_name = "mmw_skin2",
.type = QMI_CDEV_MAX_LIMIT_TYPE,
},
{
- .dev_name = "modem_skin3",
+ .dev_name = "mmw_skin3",
.type = QMI_CDEV_MAX_LIMIT_TYPE,
},
{
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index a3c4db7e5ebc..d43c2cd36854 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -1137,6 +1137,38 @@ static void msm_geni_serial_rx_fsm_rst(struct uart_port *uport)
geni_write_reg_nolog(rx_irq_en, uport->membase, SE_DMA_RX_IRQ_EN_SET);
}
+static void msm_geni_serial_set_manual_flow(bool enable,
+ struct msm_geni_serial_port *port)
+{
+ u32 uart_manual_rfr = 0;
+
+ if (!enable) {
+ uart_manual_rfr |= (UART_MANUAL_RFR_EN);
+ geni_write_reg_nolog(uart_manual_rfr, port->uport.membase,
+ SE_UART_MANUAL_RFR);
+ /* UART FW needs a delay, per HW experts' recommendation */
+ udelay(10);
+
+ uart_manual_rfr |= (UART_RFR_NOT_READY);
+ geni_write_reg_nolog(uart_manual_rfr, port->uport.membase,
+ SE_UART_MANUAL_RFR);
+ /*
+ * Ensure that the manual flow on writes go through before
+ * doing a stop_rx.
+ */
+ mb();
+ IPC_LOG_MSG(port->ipc_log_pwr,
+ "%s: Manual Flow Enabled, HW Flow OFF\n", __func__);
+ } else {
+ geni_write_reg_nolog(0, port->uport.membase,
+ SE_UART_MANUAL_RFR);
+ /* Ensure that the manual flow off writes go through */
+ mb();
+ IPC_LOG_MSG(port->ipc_log_pwr,
+ "%s: Manual Flow Disabled, HW Flow ON\n", __func__);
+ }
+}
+
static void stop_rx_sequencer(struct uart_port *uport)
{
unsigned int geni_s_irq_en;
@@ -1609,6 +1641,7 @@ static void msm_geni_serial_shutdown(struct uart_port *uport)
{
struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
unsigned long flags;
+ int ret;
if (!uart_console(uport)) {
msm_geni_serial_power_on(uport);
@@ -1634,7 +1667,13 @@ static void msm_geni_serial_shutdown(struct uart_port *uport)
}
msm_port->ioctl_count = 0;
}
- msm_geni_serial_power_off(uport);
+
+ ret = pm_runtime_put_sync_suspend(uport->dev);
+ if (ret) {
+ IPC_LOG_MSG(msm_port->ipc_log_pwr,
+ "%s: Failed to suspend:%d\n", __func__, ret);
+ }
+
if (msm_port->wakeup_irq > 0) {
irq_set_irq_wake(msm_port->wakeup_irq, 0);
disable_irq(msm_port->wakeup_irq);
@@ -1875,6 +1914,8 @@ static void msm_geni_serial_set_termios(struct uart_port *uport,
__func__, ret);
return;
}
+ disable_irq(uport->irq);
+ msm_geni_serial_set_manual_flow(false, port);
}
/* Take a spinlock else stop_rx causes a race with an ISR due to Cancel
* and FSM_RESET. This also has a potential race with the dma_map/unmap
@@ -1971,8 +2012,8 @@ static void msm_geni_serial_set_termios(struct uart_port *uport,
if (termios->c_cflag & CRTSCTS) {
geni_write_reg_nolog(0x0, uport->membase, SE_UART_MANUAL_RFR);
- IPC_LOG_MSG(port->ipc_log_misc, "%s: Manual flow off\n",
- __func__);
+ IPC_LOG_MSG(port->ipc_log_misc,
+ "%s: Manual flow Disabled, HW Flow ON\n", __func__);
}
IPC_LOG_MSG(port->ipc_log_misc, "%s: baud %d\n", __func__, baud);
@@ -1983,6 +2024,10 @@ static void msm_geni_serial_set_termios(struct uart_port *uport,
IPC_LOG_MSG(port->ipc_log_misc, "BitsChar%d stop bit%d\n",
bits_per_char, stop_bit_len);
exit_set_termios:
+ if (!uart_console(uport)) {
+ msm_geni_serial_set_manual_flow(true, port);
+ enable_irq(uport->irq);
+ }
msm_geni_serial_start_rx(uport);
if (!uart_console(uport))
msm_geni_serial_power_off(uport);
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 01fcdc7ff077..d1cdf5459361 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -846,8 +846,13 @@ int tty_ldisc_init(struct tty_struct *tty)
*/
void tty_ldisc_deinit(struct tty_struct *tty)
{
- if (tty->ldisc)
+ if (tty->ldisc) {
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+ if (tty->echo_delayed_work.work.func)
+ cancel_delayed_work_sync(&tty->echo_delayed_work);
+#endif
tty_ldisc_put(tty->ldisc);
+ }
tty->ldisc = NULL;
}
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index c74ea1209782..8e24a09ddbcf 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -3041,6 +3041,7 @@ static irqreturn_t msm_dwc3_pwr_irq(int irq, void *data)
}
static void dwc3_otg_sm_work(struct work_struct *w);
+static int get_psy_type(struct dwc3_msm *mdwc);
static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc)
{
@@ -3207,6 +3208,8 @@ static void check_for_sdp_connection(struct work_struct *w)
}
}
+#define DP_PULSE_WIDTH_MSEC 200
+
static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -3230,6 +3233,13 @@ static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);
mdwc->vbus_active = event;
+
+ if (get_psy_type(mdwc) == POWER_SUPPLY_TYPE_USB_CDP &&
+ mdwc->vbus_active) {
+ dev_dbg(mdwc->dev, "Connected to CDP, pull DP up\n");
+ usb_phy_drive_dp_pulse(mdwc->hs_phy, DP_PULSE_WIDTH_MSEC);
+ }
+
if ((dwc->dr_mode == USB_DR_MODE_OTG) && !mdwc->in_restart)
queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
@@ -3588,12 +3598,13 @@ static int dwc3_msm_probe(struct platform_device *pdev)
}
/*
- * Create freezable workqueue for sm_work so that it gets scheduled only
- * after pm_resume has happened completely. This helps in avoiding race
- * conditions between xhci_plat_resume and xhci_runtime_resume; and also
- * between hcd disconnect and xhci_resume.
+ * Create an ordered freezable workqueue for sm_work so that it gets
+ * scheduled only after pm_resume has happened completely. This helps
+ * in avoiding race conditions between xhci_plat_resume and
+ * xhci_runtime_resume and also between hcd disconnect and xhci_resume.
*/
- mdwc->sm_usb_wq = create_freezable_workqueue("k_sm_usb");
+ mdwc->sm_usb_wq = alloc_ordered_workqueue("k_sm_usb",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM);
if (!mdwc->sm_usb_wq) {
destroy_workqueue(mdwc->dwc3_wq);
return -ENOMEM;
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 879a0efcb5b7..752f745ecd6b 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -216,7 +216,8 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
u32 reg;
spin_lock_irqsave(&dwc->lock, flags);
- if (!dep->endpoint.desc || !dwc->pullups_connected) {
+ if (!dep->endpoint.desc || !dwc->softconnect ||
+ !dwc->vbus_active) {
dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
dep->name);
ret = -ESHUTDOWN;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index fa44aded7432..02819487146b 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1933,7 +1933,7 @@ static int dwc3_gadget_wakeup_int(struct dwc3 *dwc)
case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
break;
case DWC3_LINK_STATE_U1:
- if (dwc->gadget.speed != USB_SPEED_SUPER) {
+ if (dwc->gadget.speed < USB_SPEED_SUPER) {
link_recover_only = true;
break;
}
@@ -2042,7 +2042,7 @@ static int dwc_gadget_func_wakeup(struct usb_gadget *g, int interface_id)
int ret = 0;
struct dwc3 *dwc = gadget_to_dwc(g);
- if (!g || (g->speed != USB_SPEED_SUPER))
+ if (!g || (g->speed < USB_SPEED_SUPER))
return -ENOTSUPP;
if (dwc3_gadget_is_suspended(dwc)) {
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 54bf5ecb56e8..27c8ee0899a2 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -458,7 +458,7 @@ static int usb_func_wakeup_int(struct usb_function *func)
return -EINVAL;
gadget = func->config->cdev->gadget;
- if ((gadget->speed != USB_SPEED_SUPER) || !func->func_wakeup_allowed) {
+ if ((gadget->speed < USB_SPEED_SUPER) || !func->func_wakeup_allowed) {
DBG(func->config->cdev,
"Function Wakeup is not possible. speed=%u, func_wakeup_allowed=%u\n",
gadget->speed,
diff --git a/drivers/usb/gadget/function/f_cdev.c b/drivers/usb/gadget/function/f_cdev.c
index 8befe7bd54a8..c2926fafbbda 100644
--- a/drivers/usb/gadget/function/f_cdev.c
+++ b/drivers/usb/gadget/function/f_cdev.c
@@ -1649,7 +1649,7 @@ static ssize_t cser_rw_write(struct file *file, const char __user *ubuf,
port->debugfs_rw_enable = !!input;
if (port->debugfs_rw_enable) {
gadget = cser->func.config->cdev->gadget;
- if (gadget->speed == USB_SPEED_SUPER &&
+ if (gadget->speed >= USB_SPEED_SUPER &&
func->func_is_suspended) {
pr_debug("Calling usb_func_wakeup\n");
ret = usb_func_wakeup(func);
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 4a0b075af8b0..7af6f6ac8921 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -509,7 +509,7 @@ EXPORT_SYMBOL(usb_gsi_ep_op);
int usb_gadget_func_wakeup(struct usb_gadget *gadget,
int interface_id)
{
- if (gadget->speed != USB_SPEED_SUPER)
+ if (gadget->speed < USB_SPEED_SUPER)
return -EOPNOTSUPP;
if (!gadget->ops->func_wakeup)
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 3cbe8b8a7768..d344027e1406 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -448,7 +448,6 @@ struct usbpd {
struct regulator *vconn;
bool vbus_enabled;
bool vconn_enabled;
- bool vconn_is_external;
u8 tx_msgid[SOPII_MSG + 1];
u8 rx_msgid[SOPII_MSG + 1];
@@ -830,11 +829,6 @@ static int pd_select_pdo(struct usbpd *pd, int pdo_pos, int uv, int ua)
return -ENOTSUPP;
}
- /* Can't sink more than 5V if VCONN is sourced from the VBUS input */
- if (pd->vconn_enabled && !pd->vconn_is_external &&
- pd->requested_voltage > 5000000)
- return -ENOTSUPP;
-
pd->requested_current = curr;
pd->requested_pdo = pdo_pos;
@@ -3053,20 +3047,6 @@ static void usbpd_sm(struct work_struct *w)
usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
break;
} else if (IS_CTRL(rx_msg, MSG_VCONN_SWAP)) {
- /*
- * if VCONN is connected to VBUS, make sure we are
- * not in high voltage contract, otherwise reject.
- */
- if (!pd->vconn_is_external &&
- (pd->requested_voltage > 5000000)) {
- ret = pd_send_msg(pd, MSG_REJECT, NULL, 0,
- SOP_MSG);
- if (ret)
- usbpd_set_state(pd, PE_SEND_SOFT_RESET);
-
- break;
- }
-
ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
if (ret) {
usbpd_set_state(pd, PE_SEND_SOFT_RESET);
@@ -4573,9 +4553,6 @@ struct usbpd *usbpd_create(struct device *parent)
extcon_set_property_capability(pd->extcon, EXTCON_USB_HOST,
EXTCON_PROP_USB_SS);
- pd->vconn_is_external = device_property_present(parent,
- "qcom,vconn-uses-external-source");
-
pd->num_sink_caps = device_property_read_u32_array(parent,
"qcom,default-sink-caps", NULL, 0);
if (pd->num_sink_caps > 0) {
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
index d00e6a376950..288b51039e8f 100644
--- a/drivers/usb/phy/phy-msm-qusb.c
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -27,6 +27,17 @@
#include <linux/usb/phy.h>
#include <linux/reset.h>
+#define QUSB2PHY_PLL_PWR_CTL 0x18
+#define REF_BUF_EN BIT(0)
+#define REXT_EN BIT(1)
+#define PLL_BYPASSNL BIT(2)
+#define REXT_TRIM_0 BIT(4)
+
+#define QUSB2PHY_PLL_AUTOPGM_CTL1 0x1C
+#define PLL_RESET_N_CNT_5 0x5
+#define PLL_RESET_N BIT(4)
+#define PLL_AUTOPGM_EN BIT(7)
+
#define QUSB2PHY_PLL_STATUS 0x38
#define QUSB2PHY_PLL_LOCK BIT(5)
@@ -55,6 +66,7 @@
#define CORE_READY_STATUS BIT(0)
#define QUSB2PHY_PORT_UTMI_CTRL1 0xC0
+#define SUSPEND_N BIT(5)
#define TERM_SELECT BIT(4)
#define XCVR_SELECT_FS BIT(2)
#define OP_MODE_NON_DRIVE BIT(0)
@@ -781,6 +793,86 @@ static int qusb_phy_notify_disconnect(struct usb_phy *phy,
return 0;
}
+static int qusb_phy_drive_dp_pulse(struct usb_phy *phy,
+ unsigned int pulse_width)
+{
+ struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+ int ret;
+
+ dev_dbg(qphy->phy.dev, "connected to a CDP, drive DP up\n");
+ ret = qusb_phy_enable_power(qphy, true);
+ if (ret < 0) {
+ dev_dbg(qphy->phy.dev,
+ "dpdm regulator enable failed:%d\n", ret);
+ return ret;
+ }
+ qusb_phy_gdsc(qphy, true);
+ qusb_phy_enable_clocks(qphy, true);
+
+ ret = reset_control_assert(qphy->phy_reset);
+ if (ret)
+ dev_err(qphy->phy.dev, "phyassert failed\n");
+ usleep_range(100, 150);
+ ret = reset_control_deassert(qphy->phy_reset);
+ if (ret)
+ dev_err(qphy->phy.dev, "deassert failed\n");
+
+ /* Configure PHY to enable control on DP/DM lines */
+ writel_relaxed(CLAMP_N_EN | FREEZIO_N | POWER_DOWN,
+ qphy->base + QUSB2PHY_PORT_POWERDOWN);
+
+ writel_relaxed(TERM_SELECT | XCVR_SELECT_FS | OP_MODE_NON_DRIVE |
+ SUSPEND_N, qphy->base + QUSB2PHY_PORT_UTMI_CTRL1);
+
+ writel_relaxed(UTMI_ULPI_SEL | UTMI_TEST_MUX_SEL,
+ qphy->base + QUSB2PHY_PORT_UTMI_CTRL2);
+
+ writel_relaxed(PLL_RESET_N_CNT_5,
+ qphy->base + QUSB2PHY_PLL_AUTOPGM_CTL1);
+
+ writel_relaxed(CLAMP_N_EN | FREEZIO_N,
+ qphy->base + QUSB2PHY_PORT_POWERDOWN);
+
+ writel_relaxed(REF_BUF_EN | REXT_EN | PLL_BYPASSNL | REXT_TRIM_0,
+ qphy->base + QUSB2PHY_PLL_PWR_CTL);
+
+ usleep_range(5, 10);
+
+ writel_relaxed(0x15, qphy->base + QUSB2PHY_PLL_AUTOPGM_CTL1);
+ writel_relaxed(PLL_RESET_N | PLL_RESET_N_CNT_5,
+ qphy->base + QUSB2PHY_PLL_AUTOPGM_CTL1);
+
+ writel_relaxed(0x00, qphy->base + QUSB2PHY_PORT_QC1);
+ writel_relaxed(0x00, qphy->base + QUSB2PHY_PORT_QC2);
+
+ usleep_range(50, 60);
+ /* Enable Rdp_en to pull DP up to 3V */
+ writel_relaxed(RDP_UP_EN, qphy->base + QUSB2PHY_PORT_QC2);
+ msleep(pulse_width);
+
+ /* Put the PHY and DP back to normal state */
+ writel_relaxed(CLAMP_N_EN | FREEZIO_N | POWER_DOWN,
+ qphy->base + QUSB2PHY_PORT_POWERDOWN); /* 23 */
+
+ writel_relaxed(PLL_AUTOPGM_EN | PLL_RESET_N | PLL_RESET_N_CNT_5,
+ qphy->base + QUSB2PHY_PLL_AUTOPGM_CTL1);
+
+ writel_relaxed(UTMI_ULPI_SEL, qphy->base + QUSB2PHY_PORT_UTMI_CTRL2);
+
+ writel_relaxed(TERM_SELECT, qphy->base + QUSB2PHY_PORT_UTMI_CTRL1);
+
+ qusb_phy_enable_clocks(qphy, false);
+ qusb_phy_gdsc(qphy, false);
+
+ ret = qusb_phy_enable_power(qphy, false);
+ if (ret < 0) {
+ dev_dbg(qphy->phy.dev,
+ "dpdm regulator disable failed:%d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
static int qusb_phy_dpdm_regulator_enable(struct regulator_dev *rdev)
{
int ret = 0;
@@ -1240,6 +1332,7 @@ static int qusb_phy_probe(struct platform_device *pdev)
qphy->phy.type = USB_PHY_TYPE_USB2;
qphy->phy.notify_connect = qusb_phy_notify_connect;
qphy->phy.notify_disconnect = qusb_phy_notify_disconnect;
+ qphy->phy.drive_dp_pulse = qusb_phy_drive_dp_pulse;
/*
* On some platforms multiple QUSB PHYs are available. If QUSB PHY is
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 1ce9ceef94ca..ef9e211a4fbb 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -25,6 +25,7 @@ struct mhi_buf_info;
* enum MHI_CB - MHI callback
* @MHI_CB_IDLE: MHI entered idle state
* @MHI_CB_PENDING_DATA: New data available for client to process
+ * @MHI_CB_DTR_SIGNAL: DTR signaling update
* @MHI_CB_LPM_ENTER: MHI host entered low power mode
* @MHI_CB_LPM_EXIT: MHI host about to exit low power mode
* @MHI_CB_EE_RDDM: MHI device entered RDDM execution enviornment
@@ -35,6 +36,7 @@ struct mhi_buf_info;
enum MHI_CB {
MHI_CB_IDLE,
MHI_CB_PENDING_DATA,
+ MHI_CB_DTR_SIGNAL,
MHI_CB_LPM_ENTER,
MHI_CB_LPM_EXIT,
MHI_CB_EE_RDDM,
diff --git a/include/linux/msm_ep_pcie.h b/include/linux/msm_ep_pcie.h
index 720a0811f85c..4e2144bd2b1e 100644
--- a/include/linux/msm_ep_pcie.h
+++ b/include/linux/msm_ep_pcie.h
@@ -113,7 +113,7 @@ struct ep_pcie_hw {
u32 num_entries);
int (*get_msi_config)(struct ep_pcie_msi_config *cfg);
int (*trigger_msi)(u32 idx);
- int (*wakeup_host)(void);
+ int (*wakeup_host)(enum ep_pcie_event event);
int (*enable_endpoint)(enum ep_pcie_options opt);
int (*disable_endpoint)(void);
int (*config_db_routing)(struct ep_pcie_db_config chdb_cfg,
@@ -231,12 +231,13 @@ int ep_pcie_trigger_msi(struct ep_pcie_hw *phandle, u32 idx);
/*
* ep_pcie_wakeup_host - wake up the host.
* @phandle: PCIe endpoint HW driver handle
+ * @event: PCIe event of ep_pcie_event type
*
* This function asserts WAKE GPIO to wake up the host.
*
* Return: 0 on success, negative value on error
*/
-int ep_pcie_wakeup_host(struct ep_pcie_hw *phandle);
+int ep_pcie_wakeup_host(struct ep_pcie_hw *phandle, enum ep_pcie_event event);
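With the added argument, callers of ep_pcie_wakeup_host() now identify which event triggered the wake request. A minimal caller sketch, assuming the caller already holds a registered phandle; the wrapper name is hypothetical and the event value is simply passed through from whoever decided to wake the host.

/* Illustrative only: asserting WAKE# towards the host. The caller
 * supplies whichever ep_pcie_event value describes the wake reason,
 * which the endpoint driver can now log or act upon.
 */
static int example_wake_host(struct ep_pcie_hw *phandle,
			     enum ep_pcie_event event)
{
	int ret;

	ret = ep_pcie_wakeup_host(phandle, event);
	if (ret)
		pr_err("example: failed to assert WAKE#: %d\n", ret);

	return ret;
}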
/*
* ep_pcie_enable_endpoint - enable PCIe endpoint.
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index 85028238b94a..f7bc4c9ef69b 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -20,6 +20,9 @@
#include <linux/list.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
+/* SSC Qup SSR related */
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/scm.h>
/* Transfer mode supported by GENI Serial Engines */
enum se_xfer_mode {
@@ -39,8 +42,43 @@ enum se_protocol_types {
SPI_SLAVE
};
+/* Notifier block Structure */
+struct ssc_qup_nb {
+ struct notifier_block nb;
+	void *next; /* Pointer to the next notifier block structure */
+};
+
+/**
+ * struct ssc_qup_ssr - GENI Serial Engine SSC QUP SSR structure.
+ * @ssc_qup_nb: Notifier block used for SSR registration.
+ * @probe_completed: Set to ignore the "up" notification received during probe.
+ * @is_ssr_down: Tracks whether the serial engine is down due to SSR.
+ * @subsys_name: Subsystem name used for SSR registration.
+ * @active_list_head: List head of all clients on the SSC QUPv3.
+ */
+struct ssc_qup_ssr {
+ struct ssc_qup_nb ssc_qup_nb;
+ bool probe_completed;
+ bool is_ssr_down;
+ const char *subsys_name;
+ struct list_head active_list_head;
+};
+
+/**
+ * struct se_rsc_ssr - GENI resource SSR structure.
+ * @active_list: List entry linking this SE client on the SSC QUP active list.
+ * @force_suspend: Callback invoked on subsystem shutdown.
+ * @force_resume: Callback invoked on subsystem restart.
+ * @ssr_enable: True when SSC QUP SSR handling is enabled for this resource.
+ */
+struct se_rsc_ssr {
+ struct list_head active_list;
+ int (*force_suspend)(struct device *ctrl_dev);
+ int (*force_resume)(struct device *ctrl_dev);
+ bool ssr_enable;
+};
+
/**
- * struct geni_se_rsc - GENI Serial Engine Resource
+ * struct se_geni_rsc - GENI Serial Engine Resource
* @ctrl_dev Pointer to controller device.
* @wrapper_dev: Pointer to the parent QUPv3 core.
* @se_clk: Handle to the core serial engine clock.
@@ -78,7 +116,8 @@ struct se_geni_rsc {
struct pinctrl *geni_pinctrl;
struct pinctrl_state *geni_gpio_active;
struct pinctrl_state *geni_gpio_sleep;
- int clk_freq_out;
+ int clk_freq_out;
+ struct se_rsc_ssr rsc_ssr;
};
#define PINCTRL_DEFAULT "default"
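The new rsc_ssr member lets a serial-engine driver plug suspend/resume hooks into the common GENI layer, to be invoked around an SSC subsystem restart. A rough sketch of how a driver might populate it is below; the helper names are hypothetical, and the actual subsystem-notifier registration is assumed to live in the common GENI SE code rather than in each client driver.

/* Illustrative only: wiring SSR callbacks into se_geni_rsc.
 * my_se_force_suspend()/my_se_force_resume() are hypothetical driver
 * helpers that quiesce and re-initialize the serial engine.
 */
static int my_se_force_suspend(struct device *ctrl_dev)
{
	/* Stop queuing transfers and mark the SE as down */
	return 0;
}

static int my_se_force_resume(struct device *ctrl_dev)
{
	/* Re-initialize the SE after the subsystem comes back up */
	return 0;
}

static void my_se_setup_ssr(struct se_geni_rsc *rsc)
{
	rsc->rsc_ssr.force_suspend = my_se_force_suspend;
	rsc->rsc_ssr.force_resume = my_se_force_resume;
	rsc->rsc_ssr.ssr_enable = true;
	INIT_LIST_HEAD(&rsc->rsc_ssr.active_list);
}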
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index 5be685b724a9..b3153c7ee431 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -176,6 +176,8 @@ struct usb_phy {
/* reset the PHY clocks */
int (*reset)(struct usb_phy *x);
+
+ int (*drive_dp_pulse)(struct usb_phy *x, unsigned int pulse_width);
};
/**
@@ -259,6 +261,15 @@ usb_phy_reset(struct usb_phy *x)
return 0;
}
+static inline int
+usb_phy_drive_dp_pulse(struct usb_phy *x, unsigned int pulse_width)
+{
+ if (x && x->drive_dp_pulse)
+ return x->drive_dp_pulse(x, pulse_width);
+
+ return 0;
+}
+
/* for usb host and peripheral controller drivers */
#if IS_ENABLED(CONFIG_USB_PHY)
extern struct usb_phy *usb_get_phy(enum usb_phy_type type);
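usb_phy_drive_dp_pulse() is a thin wrapper that is safe to call even when no PHY, or no drive_dp_pulse op, is present; it then simply returns 0. A minimal caller sketch follows, assuming a driver that resolves its PHY from a "usb-phy" phandle and a 2 ms width (the QUSB2 implementation above feeds pulse_width straight into msleep(), so the unit is milliseconds); both the phandle name and the width are assumptions.

/* Illustrative only: requesting a D+ pulse from a controller or
 * charger driver. The phandle name and the 2 ms width are assumptions.
 */
static int example_drive_dp_pulse(struct device *dev)
{
	struct usb_phy *phy;

	phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	/* No-op (returns 0) if the PHY does not implement the op */
	return usb_phy_drive_dp_pulse(phy, 2);
}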
diff --git a/include/soc/qcom/scm.h b/include/soc/qcom/scm.h
index dcdc851e7e24..e4159a78ae99 100644
--- a/include/soc/qcom/scm.h
+++ b/include/soc/qcom/scm.h
@@ -31,6 +31,7 @@
#define SCM_SVC_QDSS 0x16
#define SCM_SVC_RTIC 0x19
#define SCM_SVC_TZSCHEDULER 0xFC
+#define TZ_SVC_QUP_FW_LOAD 0x1F
#define SCM_FUSE_READ 0x7
#define SCM_CMD_HDCP 0x01
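TZ_SVC_QUP_FW_LOAD identifies the TZ service used to reload QUP serial-engine firmware (for example after an SSC restart). The command id and argument layout are not defined by this patch; the sketch below only illustrates the general scm_call2() pattern against the new service id, with QUP_FW_LOAD_CMD and the single-argument layout being assumptions.

/* Illustrative only: a generic scm_call2() invocation against the new
 * TZ_SVC_QUP_FW_LOAD service. QUP_FW_LOAD_CMD and the argument layout
 * are hypothetical, not defined by this patch.
 */
#define QUP_FW_LOAD_CMD	0x1	/* hypothetical command id */

static int example_qup_fw_load(u32 qup_base)
{
	struct scm_desc desc = {
		.args[0] = qup_base,
		.arginfo = SCM_ARGS(1),
	};

	return scm_call2(SCM_SIP_FNID(TZ_SVC_QUP_FW_LOAD, QUP_FW_LOAD_CMD),
			 &desc);
}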
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0f3ae67ee192..e02dbc1c1363 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9684,10 +9684,6 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
{
unsigned long capacity = arch_scale_cpu_capacity(sd, cpu);
struct sched_group *sdg = sd->groups;
- struct max_cpu_capacity *mcc;
- unsigned long max_capacity;
- int max_cap_cpu;
- unsigned long flags;
capacity *= arch_scale_max_freq_capacity(sd, cpu);
capacity >>= SCHED_CAPACITY_SHIFT;
@@ -9695,26 +9691,6 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
capacity = min(capacity, thermal_cap(cpu));
cpu_rq(cpu)->cpu_capacity_orig = capacity;
- mcc = &cpu_rq(cpu)->rd->max_cpu_capacity;
-
- raw_spin_lock_irqsave(&mcc->lock, flags);
- max_capacity = mcc->val;
- max_cap_cpu = mcc->cpu;
-
- if ((max_capacity > capacity && max_cap_cpu == cpu) ||
- max_capacity < capacity) {
- mcc->val = capacity;
- mcc->cpu = cpu;
-#ifdef CONFIG_SCHED_DEBUG
- raw_spin_unlock_irqrestore(&mcc->lock, flags);
- printk_deferred(KERN_INFO "CPU%d: update max cpu_capacity %lu\n",
- cpu, capacity);
- goto skip_unlock;
-#endif
- }
- raw_spin_unlock_irqrestore(&mcc->lock, flags);
-
-skip_unlock: __attribute__ ((unused));
capacity *= scale_rt_capacity(cpu);
capacity >>= SCHED_CAPACITY_SHIFT;
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index b0374d77e1eb..33f6d81765a2 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -1880,12 +1880,12 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
sd = *per_cpu_ptr(d.sd, i);
- if ((max_cpu < 0) || (cpu_rq(i)->cpu_capacity_orig >
- cpu_rq(max_cpu)->cpu_capacity_orig))
+ if ((max_cpu < 0) || (arch_scale_cpu_capacity(NULL, i) >
+ arch_scale_cpu_capacity(NULL, max_cpu)))
WRITE_ONCE(d.rd->max_cap_orig_cpu, i);
- if ((min_cpu < 0) || (cpu_rq(i)->cpu_capacity_orig <
- cpu_rq(min_cpu)->cpu_capacity_orig))
+ if ((min_cpu < 0) || (arch_scale_cpu_capacity(NULL, i) <
+ arch_scale_cpu_capacity(NULL, min_cpu)))
WRITE_ONCE(d.rd->min_cap_orig_cpu, i);
cpu_attach_domain(sd, d.rd, i);
@@ -1896,14 +1896,26 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
int max_cpu = READ_ONCE(d.rd->max_cap_orig_cpu);
int min_cpu = READ_ONCE(d.rd->min_cap_orig_cpu);
- if ((cpu_rq(i)->cpu_capacity_orig
- != cpu_rq(min_cpu)->cpu_capacity_orig) &&
- (cpu_rq(i)->cpu_capacity_orig
- != cpu_rq(max_cpu)->cpu_capacity_orig)) {
+ if ((arch_scale_cpu_capacity(NULL, i)
+ != arch_scale_cpu_capacity(NULL, min_cpu)) &&
+ (arch_scale_cpu_capacity(NULL, i)
+ != arch_scale_cpu_capacity(NULL, max_cpu))) {
WRITE_ONCE(d.rd->mid_cap_orig_cpu, i);
break;
}
}
+
+ /*
+	 * The max_cpu_capacity reflects the original capacity, which does not
+ * change dynamically. So update the max cap CPU and its capacity
+ * here.
+ */
+ if (d.rd->max_cap_orig_cpu != -1) {
+ d.rd->max_cpu_capacity.cpu = d.rd->max_cap_orig_cpu;
+ d.rd->max_cpu_capacity.val = arch_scale_cpu_capacity(NULL,
+ d.rd->max_cap_orig_cpu);
+ }
+
rcu_read_unlock();
if (!cpumask_empty(cpu_map))
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index e3cbe33a043a..aea9412a8622 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -207,11 +207,14 @@ static void qrtr_log_tx_msg(struct qrtr_node *node, struct qrtr_hdr_v1 *hdr,
{
const struct qrtr_ctrl_pkt *pkt;
u64 pl_buf = 0;
+ u32 type;
if (!hdr || !skb || !skb->data)
return;
- if (hdr->type == QRTR_TYPE_DATA) {
+ type = le32_to_cpu(hdr->type);
+
+ if (type == QRTR_TYPE_DATA) {
pl_buf = *(u64 *)(skb->data + QRTR_HDR_MAX_SIZE);
QRTR_INFO(node->ilc,
"TX DATA: Len:0x%x CF:0x%x src[0x%x:0x%x] dst[0x%x:0x%x] [%08x %08x] [%s]\n",
@@ -222,29 +225,34 @@ static void qrtr_log_tx_msg(struct qrtr_node *node, struct qrtr_hdr_v1 *hdr,
current->comm);
} else {
pkt = (struct qrtr_ctrl_pkt *)(skb->data + QRTR_HDR_MAX_SIZE);
- if (hdr->type == QRTR_TYPE_NEW_SERVER ||
- hdr->type == QRTR_TYPE_DEL_SERVER)
+ if (type == QRTR_TYPE_NEW_SERVER ||
+ type == QRTR_TYPE_DEL_SERVER)
QRTR_INFO(node->ilc,
"TX CTRL: cmd:0x%x SVC[0x%x:0x%x] addr[0x%x:0x%x]\n",
- hdr->type, le32_to_cpu(pkt->server.service),
+ type, le32_to_cpu(pkt->server.service),
le32_to_cpu(pkt->server.instance),
le32_to_cpu(pkt->server.node),
le32_to_cpu(pkt->server.port));
- else if (hdr->type == QRTR_TYPE_DEL_CLIENT ||
- hdr->type == QRTR_TYPE_RESUME_TX)
+ else if (type == QRTR_TYPE_DEL_CLIENT ||
+ type == QRTR_TYPE_RESUME_TX)
QRTR_INFO(node->ilc,
"TX CTRL: cmd:0x%x addr[0x%x:0x%x]\n",
- hdr->type, le32_to_cpu(pkt->client.node),
+ type, le32_to_cpu(pkt->client.node),
le32_to_cpu(pkt->client.port));
- else if (hdr->type == QRTR_TYPE_HELLO ||
- hdr->type == QRTR_TYPE_BYE)
+ else if (type == QRTR_TYPE_HELLO ||
+ type == QRTR_TYPE_BYE) {
QRTR_INFO(node->ilc,
"TX CTRL: cmd:0x%x node[0x%x]\n",
- hdr->type, hdr->src_node_id);
- else if (hdr->type == QRTR_TYPE_DEL_PROC)
+ type, hdr->src_node_id);
+ if (le32_to_cpu(hdr->dst_node_id) == 0 ||
+ le32_to_cpu(hdr->dst_node_id) == 3)
+ pr_err("qrtr: Modem QMI Readiness TX cmd:0x%x node[0x%x]\n",
+ type, hdr->src_node_id);
+ }
+ else if (type == QRTR_TYPE_DEL_PROC)
QRTR_INFO(node->ilc,
"TX CTRL: cmd:0x%x node[0x%x]\n",
- hdr->type, pkt->proc.node);
+ type, pkt->proc.node);
}
}
@@ -283,10 +291,14 @@ static void qrtr_log_rx_msg(struct qrtr_node *node, struct sk_buff *skb)
cb->type, le32_to_cpu(pkt->client.node),
le32_to_cpu(pkt->client.port));
else if (cb->type == QRTR_TYPE_HELLO ||
- cb->type == QRTR_TYPE_BYE)
+ cb->type == QRTR_TYPE_BYE) {
QRTR_INFO(node->ilc,
"RX CTRL: cmd:0x%x node[0x%x]\n",
cb->type, cb->src_node);
+ if (cb->src_node == 0 || cb->src_node == 3)
+ pr_err("qrtr: Modem QMI Readiness RX cmd:0x%x node[0x%x]\n",
+ cb->type, cb->src_node);
+ }
}
}