aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Documentation/DMA-attributes.txt9
-rw-r--r--Documentation/devicetree/bindings/arm/cpus.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/msm/acc.txt19
-rw-r--r--Documentation/devicetree/bindings/arm/msm/ids.txt65
-rw-r--r--Documentation/devicetree/bindings/arm/msm/qcom,kpss-acc.txt7
-rw-r--r--Documentation/devicetree/bindings/arm/msm/qcom,kpss-gcc.txt28
-rw-r--r--Documentation/devicetree/bindings/arm/msm/qcom,pvs.txt38
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,a53cc25
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc.txt18
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,hfpll.txt40
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,krait-cc.txt22
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,rpmcc.txt37
-rw-r--r--Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt25
-rw-r--r--Documentation/devicetree/bindings/firmware/qcom,scm.txt25
-rw-r--r--Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt11
-rw-r--r--Documentation/devicetree/bindings/iommu/arm,smmu.txt38
-rw-r--r--Documentation/devicetree/bindings/pci/pci-iommu.txt171
-rw-r--r--Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt37
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,wcnss-pil.txt132
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt29
-rw-r--r--MAINTAINERS1
-rw-r--r--arch/arm/boot/dts/Makefile2
-rw-r--r--arch/arm/boot/dts/qcom-apq8064-arrow-db600c-pins.dtsi52
-rw-r--r--arch/arm/boot/dts/qcom-apq8064-arrow-db600c.dts376
-rw-r--r--arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts6
-rw-r--r--arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts15
-rw-r--r--arch/arm/boot/dts/qcom-apq8064-coresight.dtsi197
-rw-r--r--arch/arm/boot/dts/qcom-apq8064-eI_ERAGON600.dts427
-rw-r--r--arch/arm/boot/dts/qcom-apq8064-ifc6410.dts136
-rw-r--r--arch/arm/boot/dts/qcom-apq8064-pins.dtsi54
-rw-r--r--arch/arm/boot/dts/qcom-apq8064.dtsi1019
-rw-r--r--arch/arm/boot/dts/qcom-apq8084.dtsi105
-rw-r--r--arch/arm/boot/dts/qcom-msm8960.dtsi54
-rw-r--r--arch/arm/boot/dts/qcom-msm8974.dtsi419
-rw-r--r--arch/arm/common/Kconfig3
-rw-r--r--arch/arm/common/Makefile1
-rw-r--r--arch/arm/common/krait-l2-accessors.c58
-rw-r--r--arch/arm/configs/multi_v7_defconfig65
-rw-r--r--arch/arm/configs/qcom_defconfig158
-rw-r--r--arch/arm/include/asm/krait-l2-accessors.h20
-rw-r--r--arch/arm/mm/dma-mapping.c9
-rw-r--r--arch/arm64/Kconfig26
-rw-r--r--arch/arm64/boot/dts/qcom/Makefile5
-rw-r--r--arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi13
-rw-r--r--arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi61
-rw-r--r--arch/arm64/boot/dts/qcom/apq8016-sbc.dts3
-rw-r--r--arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi161
-rw-r--r--arch/arm64/boot/dts/qcom/apq8096-dragonboard.dts23
-rw-r--r--arch/arm64/boot/dts/qcom/apq8096-dragonboard.dtsi203
-rw-r--r--arch/arm64/boot/dts/qcom/apq8096-sbc.dts23
-rw-r--r--arch/arm64/boot/dts/qcom/msm-iommu-v2.dtsi238
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-bus.dtsi858
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-coresight.dtsi254
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-iommu.dtsi21
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-mdss.dtsi118
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-mtp.dts3
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-mtp.dtsi8
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-pins.dtsi25
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916.dtsi558
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996-mtp.dtsi1
-rw-r--r--arch/arm64/boot/dts/qcom/msm8996.dtsi370
-rw-r--r--arch/arm64/boot/dts/qcom/pm8916.dtsi135
-rw-r--r--arch/arm64/boot/dts/qcom/pm8994.dtsi14
-rw-r--r--arch/arm64/configs/defconfig139
-rw-r--r--arch/arm64/include/asm/cacheflush.h6
-rw-r--r--arch/arm64/include/asm/cpu_ops.h5
-rw-r--r--arch/arm64/include/asm/device.h9
-rw-r--r--arch/arm64/include/asm/dma-iommu.h36
-rw-r--r--arch/arm64/include/asm/smp_plat.h2
-rw-r--r--arch/arm64/kernel/cpu_ops.c14
-rw-r--r--arch/arm64/kernel/psci.c1
-rw-r--r--arch/arm64/kernel/smp.c1
-rw-r--r--arch/arm64/kernel/smp_spin_table.c4
-rw-r--r--arch/arm64/mm/cache.S4
-rw-r--r--arch/arm64/mm/dma-mapping.c1095
-rw-r--r--drivers/base/platform.c13
-rw-r--r--drivers/base/power/opp/core.c120
-rw-r--r--drivers/clk/clk-mux.c76
-rw-r--r--drivers/clk/clk.c105
-rw-r--r--drivers/clk/qcom/Kconfig65
-rw-r--r--drivers/clk/qcom/Makefile10
-rw-r--r--drivers/clk/qcom/clk-a53.c202
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c133
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.h11
-rw-r--r--drivers/clk/qcom/clk-cpu-8996.c566
-rw-r--r--drivers/clk/qcom/clk-hfpll.c253
-rw-r--r--drivers/clk/qcom/clk-hfpll.h54
-rw-r--r--drivers/clk/qcom/clk-krait.c167
-rw-r--r--drivers/clk/qcom/clk-krait.h49
-rw-r--r--drivers/clk/qcom/clk-pll.c96
-rw-r--r--drivers/clk/qcom/clk-pll.h13
-rw-r--r--drivers/clk/qcom/clk-regmap-mux-div.c288
-rw-r--r--drivers/clk/qcom/clk-regmap-mux-div.h63
-rw-r--r--drivers/clk/qcom/clk-rpm.c503
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c653
-rw-r--r--drivers/clk/qcom/gcc-ipq806x.c83
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c3
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c172
-rw-r--r--drivers/clk/qcom/gdsc.c144
-rw-r--r--drivers/clk/qcom/gdsc.h19
-rw-r--r--drivers/clk/qcom/hfpll.c109
-rw-r--r--drivers/clk/qcom/kpss-xcc.c95
-rw-r--r--drivers/clk/qcom/krait-cc.c363
-rw-r--r--drivers/clk/qcom/mmcc-msm8974.c13
-rw-r--r--drivers/clk/qcom/mmcc-msm8996.c41
-rw-r--r--drivers/cpufreq/Kconfig.arm9
-rw-r--r--drivers/cpufreq/Makefile1
-rw-r--r--drivers/cpufreq/cpufreq-dt.c112
-rw-r--r--drivers/cpufreq/qcom-cpufreq.c204
-rw-r--r--drivers/firmware/qcom_scm-32.c344
-rw-r--r--drivers/firmware/qcom_scm-64.c773
-rw-r--r--drivers/firmware/qcom_scm.c373
-rw-r--r--drivers/firmware/qcom_scm.h66
-rw-r--r--drivers/gpu/drm/drm_gem.c4
-rw-r--r--drivers/gpu/drm/i2c/Kconfig2
-rw-r--r--drivers/gpu/drm/i2c/Makefile3
-rw-r--r--drivers/gpu/drm/i2c/adv7511.c638
-rw-r--r--drivers/gpu/drm/i2c/adv7511.h60
-rw-r--r--drivers/gpu/drm/i2c/adv7511_audio.c312
-rw-r--r--drivers/gpu/drm/msm/Kconfig8
-rw-r--r--drivers/gpu/drm/msm/Makefile3
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c6
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c27
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll.c7
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll.h1
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c11
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c11
-rw-r--r--drivers/gpu/drm/msm/edp/edp_connector.c20
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c123
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h12
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c30
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c16
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c42
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c44
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c48
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h6
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c136
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c47
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h8
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hid/hid-ids.h4
-rw-r--r--drivers/hid/hid-multitouch.c5
-rw-r--r--drivers/input/misc/pm8941-pwrkey.c64
-rw-r--r--drivers/iommu/Kconfig2
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/arm-smmu.c806
-rw-r--r--drivers/iommu/dma-iommu.c4
-rw-r--r--drivers/iommu/iommu.c12
-rw-r--r--drivers/iommu/msm_iommu.c4
-rw-r--r--drivers/iommu/of_iommu.c59
-rw-r--r--drivers/iommu/qcom/Kconfig44
-rw-r--r--drivers/iommu/qcom/Makefile7
-rw-r--r--drivers/iommu/qcom/msm_iommu-v1.c1540
-rw-r--r--drivers/iommu/qcom/msm_iommu.c206
-rw-r--r--drivers/iommu/qcom/msm_iommu_dev-v1.c708
-rw-r--r--drivers/iommu/qcom/msm_iommu_hw-v1.h2320
-rw-r--r--drivers/iommu/qcom/msm_iommu_pagetable.c645
-rw-r--r--drivers/iommu/qcom/msm_iommu_pagetable.h33
-rw-r--r--drivers/iommu/qcom/msm_iommu_perfmon.h233
-rw-r--r--drivers/iommu/qcom/msm_iommu_priv.h71
-rw-r--r--drivers/iommu/qcom/msm_iommu_sec.c795
-rw-r--r--drivers/misc/eeprom/Kconfig2
-rw-r--r--drivers/misc/eeprom/at24.c103
-rw-r--r--drivers/misc/eeprom/at25.c89
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c90
-rw-r--r--drivers/mmc/core/Kconfig21
-rw-r--r--drivers/mmc/core/Makefile4
-rw-r--r--drivers/mmc/core/pwrseq.c108
-rw-r--r--drivers/mmc/core/pwrseq.h19
-rw-r--r--drivers/mmc/core/pwrseq_emmc.c81
-rw-r--r--drivers/mmc/core/pwrseq_simple.c91
-rw-r--r--drivers/mmc/host/mmci.c10
-rw-r--r--drivers/nvmem/Kconfig5
-rw-r--r--drivers/nvmem/core.c67
-rw-r--r--drivers/nvmem/imx-ocotp.c55
-rw-r--r--drivers/nvmem/lpc18xx_eeprom.c94
-rw-r--r--drivers/nvmem/mtk-efuse.c47
-rw-r--r--drivers/nvmem/mxs-ocotp.c83
-rw-r--r--drivers/nvmem/qfprom.c56
-rw-r--r--drivers/nvmem/rockchip-efuse.c49
-rw-r--r--drivers/nvmem/sunxi_sid.c54
-rw-r--r--drivers/nvmem/vf610-ocotp.c44
-rw-r--r--drivers/of/address.c20
-rw-r--r--drivers/of/device.c77
-rw-r--r--drivers/of/irq.c70
-rw-r--r--drivers/of/of_pci.c102
-rw-r--r--drivers/of/platform.c16
-rw-r--r--drivers/pci/probe.c3
-rw-r--r--drivers/phy/phy-qcom-ufs.c21
-rw-r--r--drivers/power/avs/Kconfig14
-rw-r--r--drivers/power/avs/Makefile1
-rw-r--r--drivers/power/avs/qcom-cpr.c1988
-rw-r--r--drivers/regulator/qcom_smd-regulator.c153
-rw-r--r--drivers/regulator/qcom_spmi-regulator.c284
-rw-r--r--drivers/remoteproc/Kconfig32
-rw-r--r--drivers/remoteproc/Makefile4
-rw-r--r--drivers/remoteproc/qcom_mdt_loader.c169
-rw-r--r--drivers/remoteproc/qcom_mdt_loader.h13
-rw-r--r--drivers/remoteproc/qcom_q6v5_pil.c772
-rw-r--r--drivers/remoteproc/qcom_venus.c205
-rw-r--r--drivers/remoteproc/qcom_wcnss.c604
-rw-r--r--drivers/remoteproc/qcom_wcnss.h22
-rw-r--r--drivers/remoteproc/qcom_wcnss_iris.c185
-rw-r--r--drivers/remoteproc/remoteproc_core.c44
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c3
-rw-r--r--drivers/scsi/ufs/ufshcd.h3
-rw-r--r--drivers/soc/qcom/Kconfig49
-rw-r--r--drivers/soc/qcom/Makefile6
-rw-r--r--drivers/soc/qcom/cpu_ops.c343
-rw-r--r--drivers/soc/qcom/glink.c5797
-rw-r--r--drivers/soc/qcom/glink_core_if.h213
-rw-r--r--drivers/soc/qcom/glink_debugfs.c783
-rw-r--r--drivers/soc/qcom/glink_private.h1044
-rw-r--r--drivers/soc/qcom/glink_smem_native_xprt.c2500
-rw-r--r--drivers/soc/qcom/glink_xprt_if.h201
-rw-r--r--drivers/soc/qcom/msm_bus/Makefile27
-rw-r--r--drivers/soc/qcom/msm_bus/msm-buspm-dev.c366
-rw-r--r--drivers/soc/qcom/msm_bus/msm_bus_adhoc.h147
-rw-r--r--drivers/soc/qcom/msm_bus/msm_bus_arb.c1137
-rw-r--r--drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c1120
-rw-r--r--drivers/soc/qcom/msm_bus/msm_bus_bimc.c2112
-rw-r--r--drivers/soc/qcom/msm_bus/msm_bus_bimc.h127
-rw-r--r--drivers/soc/qcom/msm_bus/msm_bus_board_8974.c2021
-rw-r--r--drivers/soc/qcom/msm_bus/msm_bus_client_api.c142
-rw-r--r--drivers/soc/qcom/msm_bus/msm_bus_config.c78
-rw-r--r--drivers/soc/qcom/msm_bus/msm_bus_core.c119
-rw-r--r--drivers/soc/qcom/msm_bus/msm_bus_core.h403
-rw-r--r--drivers/soc/qcom/msm_bus/msm_bus_dbg.c903
-rw-r--r--drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c589
-rw-r--r--drivers/soc/qcom/msm_bus/msm_bus_fabric.c970
-rw-r--r--drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c1262
-rw-r--r--drivers/soc/qcom/msm_bus/msm_bus_id.c94
-rw-r--r--drivers/soc/qcom/msm_bus/msm_bus_noc.c769
-rw-r--r--drivers/soc/qcom/msm_bus/msm_bus_noc.h76
-rw-r--r--drivers/soc/qcom/msm_bus/msm_bus_of.c703
-rw-r--r--drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c642
-rw-r--r--drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c257
-rw-r--r--drivers/soc/qcom/msm_bus/msm_bus_rules.c634
-rw-r--r--drivers/soc/qcom/msm_bus/msm_buspm_coresight.c158
-rw-r--r--drivers/soc/qcom/msm_bus/msm_buspm_coresight_adhoc.c189
-rw-r--r--drivers/soc/qcom/msm_bus/qcom_rpm_msm_bus.c67
-rw-r--r--drivers/soc/qcom/smd-rpm.c105
-rw-r--r--drivers/soc/qcom/smd.c402
-rw-r--r--drivers/soc/qcom/smem.c3
-rw-r--r--drivers/soc/qcom/spm.c172
-rw-r--r--drivers/soc/qcom/wcnss_ctrl.c5
-rw-r--r--include/dt-bindings/arm/qcom-ids.h33
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8960.h2
-rw-r--r--include/dt-bindings/clock/qcom,mmcc-msm8974.h1
-rw-r--r--include/dt-bindings/clock/qcom,mmcc-msm8996.h1
-rw-r--r--include/dt-bindings/clock/qcom,rpmcc.h105
-rw-r--r--include/dt-bindings/soc/msm-bus-ids.h661
-rw-r--r--include/dt-bindings/soc/msm-bus-rule-ops.h32
-rw-r--r--include/linux/clk-provider.h17
-rw-r--r--include/linux/cpufreq.h2
-rw-r--r--include/linux/dma-attrs.h2
-rw-r--r--include/linux/iommu.h1
-rw-r--r--include/linux/ipc_logging.h290
-rw-r--r--include/linux/msm-bus-board.h198
-rw-r--r--include/linux/msm-bus.h200
-rw-r--r--include/linux/msm_audio.h367
-rw-r--r--include/linux/msm_audio_acdb.h81
-rw-r--r--include/linux/msm_bus_rules.h77
-rw-r--r--include/linux/msm_iommu_domains.h239
-rw-r--r--include/linux/nvmem-provider.h10
-rw-r--r--include/linux/of_device.h14
-rw-r--r--include/linux/of_pci.h8
-rw-r--r--include/linux/pm_opp.h11
-rw-r--r--include/linux/qcom_iommu.h388
-rw-r--r--include/linux/qcom_scm.h31
-rw-r--r--include/linux/regulator/qcom_smd-regulator.h30
-rw-r--r--include/linux/remoteproc.h4
-rw-r--r--include/linux/soc/qcom/smd-rpm.h4
-rw-r--r--include/linux/soc/qcom/smd.h17
-rw-r--r--include/linux/swiotlb.h2
-rw-r--r--include/soc/qcom/glink.h443
-rw-r--r--include/soc/qcom/glink_rpm_xprt.h78
-rw-r--r--include/soc/qcom/rpm-notifier.h63
-rw-r--r--include/soc/qcom/rpm-smd.h309
-rw-r--r--include/soc/qcom/socinfo.h610
-rw-r--r--include/soc/qcom/tracer_pkt.h130
-rw-r--r--include/sound/apr_audio.h1675
-rw-r--r--include/sound/hdmi-codec.h104
-rw-r--r--include/sound/msm-dai-q6.h45
-rw-r--r--include/sound/msm_hdmi_audio.h54
-rw-r--r--include/sound/pcm_iec958.h2
-rw-r--r--include/sound/q6adm.h50
-rw-r--r--include/sound/q6afe.h110
-rw-r--r--include/sound/q6asm.h334
-rw-r--r--include/sound/qdsp6v2/apr.h169
-rw-r--r--include/sound/qdsp6v2/apr_tal.h52
-rw-r--r--include/sound/qdsp6v2/audio_acdb.h61
-rw-r--r--include/sound/qdsp6v2/audio_def.h35
-rw-r--r--include/sound/qdsp6v2/audio_dev_ctl.h221
-rw-r--r--include/sound/qdsp6v2/dsp_debug.h22
-rw-r--r--include/sound/qdsp6v2/q6voice.h778
-rw-r--r--include/sound/qdsp6v2/rtac.h39
-rw-r--r--include/trace/events/trace_msm_bus.h146
-rw-r--r--kernel/configs/distro.config234
-rw-r--r--sound/core/pcm_iec958.c52
-rw-r--r--sound/soc/codecs/Kconfig10
-rw-r--r--sound/soc/codecs/Makefile5
-rw-r--r--sound/soc/codecs/hdmi-codec.c404
-rw-r--r--sound/soc/codecs/msm8x16-wcd-tables.c742
-rw-r--r--sound/soc/codecs/msm8x16-wcd.c2266
-rw-r--r--sound/soc/codecs/msm8x16-wcd.h234
-rw-r--r--sound/soc/codecs/msm8x16_wcd_registers.h518
-rw-r--r--sound/soc/qcom/Kconfig62
-rw-r--r--sound/soc/qcom/Makefile9
-rw-r--r--sound/soc/qcom/apq8016_sbc.c22
-rw-r--r--sound/soc/qcom/apq8064.c170
-rw-r--r--sound/soc/qcom/msm-dai-fe.c101
-rw-r--r--sound/soc/qcom/msm-dai-q6-hdmi.c298
-rw-r--r--sound/soc/qcom/msm-pcm-q6.c851
-rw-r--r--sound/soc/qcom/msm-pcm-q6.h96
-rw-r--r--sound/soc/qcom/msm-pcm-routing.c894
-rw-r--r--sound/soc/qcom/msm-pcm-routing.h145
-rw-r--r--sound/soc/qcom/qdsp6/Makefile2
-rw-r--r--sound/soc/qcom/qdsp6/core/Makefile3
-rw-r--r--sound/soc/qcom/qdsp6/core/apr.c649
-rw-r--r--sound/soc/qcom/qdsp6/core/apr_tal.c180
-rw-r--r--sound/soc/qcom/qdsp6/core/apr_v1.c131
-rw-r--r--sound/soc/qcom/qdsp6/core/audio_acdb.c865
-rw-r--r--sound/soc/qcom/qdsp6/core/dsp_debug.c259
-rw-r--r--sound/soc/qcom/qdsp6/core/q6audio_common.h35
-rw-r--r--sound/soc/qcom/qdsp6/core/q6core.c406
-rw-r--r--sound/soc/qcom/qdsp6/core/q6core.h52
-rw-r--r--sound/soc/qcom/qdsp6/core/rtac.c1046
-rw-r--r--sound/soc/qcom/qdsp6/q6adm.c1241
-rw-r--r--sound/soc/qcom/qdsp6/q6afe.c1826
-rw-r--r--sound/soc/qcom/qdsp6/q6asm.c3841
331 files changed, 80602 insertions, 2062 deletions
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index e8cf9cf873b37..9d1683aa30c3d 100644
--- a/Documentation/DMA-attributes.txt
+++ b/Documentation/DMA-attributes.txt
@@ -126,3 +126,12 @@ means that we won't try quite as hard to get them.
NOTE: At the moment DMA_ATTR_ALLOC_SINGLE_PAGES is only implemented on ARM,
though ARM64 patches will likely be posted soon.
+
+DMA_ATTR_STRONGLY_ORDERED
+-------------------------
+
+DMA_ATTR_STRONGLY_ORDERED allocates memory with a very restrictive type
+of mapping (no unaligned accesses, no re-ordering, no write merging, no
+buffering, no pre-fetching). This has severe performance penalties and
+should not be used for general purpose DMA allocations. It should only
+be used if one of the restrictions on strongly ordered memory is required.
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index 3f0cbbb8395f8..8ad74de2aa56e 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -188,6 +188,8 @@ nodes to be present and contain the properties described below.
be one of:
"psci"
"spin-table"
+ "qcom,arm-cortex-acc"
+
# On ARM 32-bit systems this property is optional and
can be one of:
"allwinner,sun6i-a31"
diff --git a/Documentation/devicetree/bindings/arm/msm/acc.txt b/Documentation/devicetree/bindings/arm/msm/acc.txt
new file mode 100644
index 0000000000000..ae2d7253b363f
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/acc.txt
@@ -0,0 +1,19 @@
+Application Processor Sub-system (APSS) Application Clock Controller (ACC)
+
+The ACC provides clock, power domain, and reset control to a CPU. There is one ACC
+register region per CPU within the APSS remapped region as well as an alias register
+region that remaps accesses to the ACC associated with the CPU accessing the region.
+
+Required properties:
+- compatible: Must be "qcom,arm-cortex-acc"
+- reg: The first element specifies the base address and size of
+ the register region. An optional second element specifies
+ the base address and size of the alias register region.
+
+Example:
+
+ clock-controller@b088000 {
+ compatible = "qcom,arm-cortex-acc";
+ reg = <0x0b088000 0x1000>,
+ <0x0b008000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/msm/ids.txt b/Documentation/devicetree/bindings/arm/msm/ids.txt
new file mode 100644
index 0000000000000..9ee8428f4670e
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/ids.txt
@@ -0,0 +1,65 @@
+* MSM-ID
+
+The qcom,msm-id entry specifies the MSM chipset and hardware revision. It can
+optionally be an array of these to indicate multiple hardware that use the same
+device tree. It is expected that the bootloader will use this information at
+boot-up to decide which device tree to use when given multiple device trees,
+some of which may not be compatible with the actual hardware. It is the
+bootloader's responsibility to pass the correct device tree to the kernel.
+
+PROPERTIES
+
+- qcom,msm-id:
+ Usage: required
+ Value type: <prop-encoded-array> (<chipset_id, rev_id> [, <c2, r2> ..])
+ Definition:
+ The "chipset_id" consists of two fields as below:
+
+ bits 0-15 = The unique MSM chipset id.
+ bits 16-31 = Reserved. Should be 0
+
+ chipset_id is an exact match value
+
+ The "rev_id" is a chipset specific 32-bit id that represents
+ the version of the chipset.
+
+ The rev_id is a best match id. The bootloader will look for
+ the closest possible match.
+
+* BOARD-ID
+
+The qcom,board-id entry specifies the board type and revision information. It
+can optionally be an array of these to indicate multiple boards that use the
+same device tree. It is expected that the bootloader will use this information
+at boot-up to decide which device tree to use when given multiple device trees,
+some of which may not be compatible with the actual hardware. It is the
+bootloader's responsibility to pass the correct device tree to the kernel.
+
+PROPERTIES
+
+- qcom,board-id:
+ Usage: required
+ Value type: <prop-encoded-array> (<board_id, subtype_id> [, <b2, s2> ..])
+ Definition:
+ The "board_id" consists of four fields as below:
+
+ bits 31-24 = Unused.
+ bits 23-16 = Platform Version Major
+ bits 15-8 = Platform Version Minor
+ bits 7-0 = Platform Type
+
+ Platform Type field is an exact match value. The Platform
+ Major/Minor field is a best match. The bootloader will look
+ for the closest possible match.
+
+ The "subtype_id" is unique to a Platform Type/Chipset ID. For
+ a given Platform Type, there will typically only be a single
+ board and the subtype_id will be 0. However in some cases board
+ variants may need to be distinguished by different subtype_id
+ values.
+
+ subtype_id is an exact match value.
+
+EXAMPLE:
+ qcom,board-id = <15 2>;
+ qcom,msm-id = <0x1007e 0>;
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,kpss-acc.txt b/Documentation/devicetree/bindings/arm/msm/qcom,kpss-acc.txt
index 1333db9acfee1..382a574a5c557 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,kpss-acc.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,kpss-acc.txt
@@ -21,10 +21,17 @@ PROPERTIES
the register region. An optional second element specifies
the base address and size of the alias register region.
+- clock-output-names:
+ Usage: optional
+ Value type: <string>
+ Definition: Name of the output clock. Typically acpuX_aux where X is a
+ CPU number starting at 0.
+
Example:
clock-controller@2088000 {
compatible = "qcom,kpss-acc-v2";
reg = <0x02088000 0x1000>,
<0x02008000 0x1000>;
+ clock-output-names = "acpu0_aux";
};
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,kpss-gcc.txt b/Documentation/devicetree/bindings/arm/msm/qcom,kpss-gcc.txt
new file mode 100644
index 0000000000000..d1e12f16a28cb
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,kpss-gcc.txt
@@ -0,0 +1,28 @@
+Krait Processor Sub-system (KPSS) Global Clock Controller (GCC)
+
+PROPERTIES
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: should be one of:
+ "qcom,kpss-gcc"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: base address and size of the register region
+
+- clock-output-names:
+ Usage: required
+ Value type: <string>
+ Definition: Name of the output clock. Typically acpu_l2_aux indicating
+ an L2 cache auxiliary clock.
+
+Example:
+
+ l2cc: clock-controller@2011000 {
+ compatible = "qcom,kpss-gcc";
+ reg = <0x2011000 0x1000>;
+ clock-output-names = "acpu_l2_aux";
+ };
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,pvs.txt b/Documentation/devicetree/bindings/arm/msm/qcom,pvs.txt
new file mode 100644
index 0000000000000..e7cb10426a3ba
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,pvs.txt
@@ -0,0 +1,38 @@
+Qualcomm Process Voltage Scaling Tables
+
+The node name is required to be "qcom,pvs". There shall only be one
+such node present in the root of the tree.
+
+PROPERTIES
+
+- qcom,pvs-format-a or qcom,pvs-format-b:
+ Usage: required
+ Value type: <empty>
+ Definition: Indicates the format of qcom,speedX-pvsY-bin-vZ properties.
+ If qcom,pvs-format-a is used the table is two columns
+ (frequency and voltage in that order). If qcom,pvs-format-b
+ is used the table is three columns (frequency, voltage,
+ and current in that order).
+
+- qcom,speedX-pvsY-bin-vZ:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: The PVS table corresponding to the speed bin X, pvs bin Y,
+ and version Z.
+Example:
+
+ qcom,pvs {
+ qcom,pvs-format-a;
+ qcom,speed0-pvs0-bin-v0 =
+ < 384000000 950000 >,
+ < 486000000 975000 >,
+ < 594000000 1000000 >,
+ < 702000000 1025000 >,
+ < 810000000 1075000 >,
+ < 918000000 1100000 >,
+ < 1026000000 1125000 >,
+ < 1134000000 1175000 >,
+ < 1242000000 1200000 >,
+ < 1350000000 1225000 >,
+ < 1458000000 1237500 >,
+ < 1512000000 1250000 >;
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,a53cc b/Documentation/devicetree/bindings/clock/qcom,a53cc
new file mode 100644
index 0000000000000..34f6cf8dd6ca6
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,a53cc
@@ -0,0 +1,25 @@
+Qualcomm A53 Clock Controller Binding
+------------------------------------------------
+The A53 Clock Controller provides higher frequency clocks
+and allows CPU frequency scaling on msm8916 based platforms.
+
+Required properties :
+- compatible : shall contain:
+ "qcom,a53cc"
+- reg : shall contain base register location and length
+ of the A53 PLL
+- #clock-cells : shall contain 1
+- qcom,apcs : phandle of apcs syscon node
+
+Example:
+ apcs: syscon@b011000 {
+ compatible = "syscon";
+ reg = <0x0b011000 0x1000>;
+ };
+
+ a53cc: clock-controller@0b016000 {
+ compatible = "qcom,a53cc";
+ reg = <0x0b016000 0x40>;
+ #clock-cells = <1>;
+ qcom,apcs = <&apcs>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.txt b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
index 9a60fde32b02c..9a1c3d41e4fa9 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
@@ -21,6 +21,11 @@ Required properties :
- #reset-cells : shall contain 1
Optional properties :
+- Qualcomm TSENS (thermal sensor device) on some devices can
+be part of GCC and hence the TSENS properties can also be
+part of the GCC/clock-controller node.
+For more details on the TSENS properties please refer
+Documentation/devicetree/bindings/thermal/qcom-tsens.txt
- #power-domain-cells : shall contain 1
Example:
@@ -31,3 +36,16 @@ Example:
#reset-cells = <1>;
#power-domain-cells = <1>;
};
+
+Example of GCC with TSENS properties:
+ clock-controller@900000 {
+ compatible = "qcom,gcc-apq8064";
+ reg = <0x00900000 0x4000>;
+ nvmem-cells = <&tsens_calib>, <&tsens_backup>;
+ nvmem-cell-names = "calib", "calib_backup";
+ qcom,tsens-slopes = <1176 1176 1154 1176 1111
+ 1132 1132 1199 1132 1199 1132>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #thermal-sensor-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,hfpll.txt b/Documentation/devicetree/bindings/clock/qcom,hfpll.txt
new file mode 100644
index 0000000000000..fee92bb303445
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,hfpll.txt
@@ -0,0 +1,40 @@
+High-Frequency PLL (HFPLL)
+
+PROPERTIES
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be "qcom,hfpll"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: address and size of HPLL registers. An optional second
+ element specifies the address and size of the alias
+ register region.
+
+- clock-output-names:
+ Usage: required
+ Value type: <string>
+ Definition: Name of the PLL. Typically hfpllX where X is a CPU number
+ starting at 0. Otherwise hfpll_Y where Y is more specific
+ such as "l2".
+
+Example:
+
+1) An HFPLL for the L2 cache.
+
+ clock-controller@f9016000 {
+ compatible = "qcom,hfpll";
+ reg = <0xf9016000 0x30>;
+ clock-output-names = "hfpll_l2";
+ };
+
+2) An HFPLL for CPU0. This HFPLL has the alias register region.
+
+ clock-controller@f908a000 {
+ compatible = "qcom,hfpll";
+ reg = <0xf908a000 0x30>, <0xf900a000 0x30>;
+ clock-output-names = "hfpll0";
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,krait-cc.txt b/Documentation/devicetree/bindings/clock/qcom,krait-cc.txt
new file mode 100644
index 0000000000000..874138f88ec63
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,krait-cc.txt
@@ -0,0 +1,22 @@
+Krait Clock Controller
+
+PROPERTIES
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be one of:
+ "qcom,krait-cc-v1"
+ "qcom,krait-cc-v2"
+
+- #clock-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: must be 1
+
+Example:
+
+ kraitcc: clock-controller {
+ compatible = "qcom,krait-cc-v1";
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt
new file mode 100644
index 0000000000000..87d3714b956a2
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt
@@ -0,0 +1,37 @@
+Qualcomm RPM Clock Controller Binding
+------------------------------------------------
+The RPM is a dedicated hardware engine for managing the shared
+SoC resources in order to keep the lowest power profile. It
+communicates with other hardware subsystems via shared memory
+and accepts clock requests, aggregates the requests and turns
+the clocks on/off or scales them on demand.
+
+Required properties :
+- compatible : shall contain only one of the following. The generic
+ compatible "qcom,rpmcc" should be also included.
+
+ "qcom,rpmcc-msm8916", "qcom,rpmcc"
+ "qcom,rpmcc-apq8064", "qcom,rpmcc"
+
+- #clock-cells : shall contain 1
+
+Example:
+ smd {
+ compatible = "qcom,smd";
+
+ rpm {
+ interrupts = <0 168 1>;
+ qcom,ipc = <&apcs 8 0>;
+ qcom,smd-edge = <15>;
+
+ rpm_requests {
+ compatible = "qcom,rpm-msm8916";
+ qcom,smd-channels = "rpm_requests";
+
+ rpmcc: clock-controller {
+ compatible = "qcom,rpmcc-msm8916", "qcom,rpmcc";
+ #clock-cells = <1>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
index 96c25ee01501f..420da5aa1dc35 100644
--- a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
+++ b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
@@ -1,13 +1,19 @@
-Analog Device ADV7511(W)/13 HDMI Encoders
+Analog Device ADV7511(W)/13/33 HDMI Encoders
-----------------------------------------
-The ADV7511, ADV7511W and ADV7513 are HDMI audio and video transmitters
+The ADV7511, ADV7511W, ADV7513 and ADV7533 are HDMI audio and video transmitters
compatible with HDMI 1.4 and DVI 1.0. They support color space conversion,
-S/PDIF, CEC and HDCP.
+S/PDIF, CEC and HDCP. ADV7533 supports the DSI interface for input pixels, while
+the others support RGB interface.
Required properties:
-- compatible: Should be one of "adi,adv7511", "adi,adv7511w" or "adi,adv7513"
+- compatible: Should be one of:
+ "adi,adv7511"
+ "adi,adv7511w"
+ "adi,adv7513"
+ "adi,adv7533"
+
- reg: I2C slave address
The ADV7511 supports a large number of input data formats that differ by their
@@ -32,6 +38,11 @@ The following input format properties are required except in "rgb 1x" and
- adi,input-justification: The input bit justification ("left", "evenly",
"right").
+The following properties are required for ADV7533:
+
+- adi,dsi-lanes: Number of DSI data lanes connected to the DSI host. It should
+ be one of 1, 2, 3 or 4.
+
Optional properties:
- interrupts: Specifier for the ADV7511 interrupt
@@ -42,13 +53,17 @@ Optional properties:
- adi,embedded-sync: The input uses synchronization signals embedded in the
data stream (similar to BT.656). Defaults to separate H/V synchronization
signals.
+- adi,disable-timing-generator: Only for ADV7533. Disables the internal timing
+ generator. The chip will rely on the sync signals in the DSI data lanes,
+ rather than generate its own timings for HDMI output.
Required nodes:
The ADV7511 has two video ports. Their connections are modelled using the OF
graph bindings specified in Documentation/devicetree/bindings/graph.txt.
-- Video port 0 for the RGB or YUV input
+- Video port 0 for the RGB, YUV or DSI input. In the case of ADV7533, the
+ remote endpoint phandle should refer to a valid mipi_dsi_host device node.
- Video port 1 for the HDMI output
diff --git a/Documentation/devicetree/bindings/firmware/qcom,scm.txt b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
new file mode 100644
index 0000000000000..debcd3266c8af
--- /dev/null
+++ b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
@@ -0,0 +1,25 @@
+QCOM Secure Channel Manager (SCM)
+
+Qualcomm processors include an interface to communicate to the secure firmware.
+This interface allows for clients to request different types of actions. These
+can include CPU power up/down, HDCP requests, loading of firmware, and other
+assorted actions.
+
+Required properties:
+- compatible: must contain "qcom,scm"
+- clocks: Should contain the core, iface, and bus clocks.
+- clock-names: Must contain "core" for the core clock, "iface" for the interface
+ clock and "bus" for the bus clock.
+
+Example:
+
+ firmware {
+ compatible = "simple-bus";
+
+ scm {
+ compatible = "qcom,scm";
+ clocks = <&gcc GCC_CE1_CLK>, <&gcc GCC_CE1_AXI_CLK>, <&gcc GCC_CE1_AHB_CLK>;
+ clock-names = "core", "bus", "iface";
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt b/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt
index 07bf55f6e0b9a..f3ae70b50b1e1 100644
--- a/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt
+++ b/Documentation/devicetree/bindings/input/qcom,pm8941-pwrkey.txt
@@ -32,6 +32,17 @@ PROPERTIES
Definition: presence of this property indicates that the KPDPWR_N pin
should be configured for pull up.
+- resin-pull-up:
+ Usage: optional
+ Value type: <empty>
+ Definition: presence of this property indicates that the RESIN_N pin
+ should be configured for pull up.
+
+- linux,code:
+ Usage: optional
+ Value type: <u32>
+ Definition: Keycode to emit when the RESIN_N input changes its state.
+
EXAMPLE
pwrkey@800 {
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index 718074501fcbc..a213f74a03d8d 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -34,12 +34,11 @@ conditions.
interrupt per context bank. In the case of a single,
combined interrupt, it must be listed multiple times.
-- mmu-masters : A list of phandles to device nodes representing bus
- masters for which the SMMU can provide a translation
- and their corresponding StreamIDs (see example below).
- Each device node linked from this list must have a
- "#stream-id-cells" property, indicating the number of
- StreamIDs associated with it.
+- #iommu-cells : See Documentation/devicetree/bindings/iommu/iommu.txt
+ for details. Should be 1, where each "iommus" entry on
+ the device represents a distinct stream ID emitted by
+ that device into the relevant SMMU. Cells beyond 1 are
+ reserved for future use.
** System MMU optional properties:
@@ -55,9 +54,19 @@ conditions.
aliases of secure registers have to be used during
SMMU configuration.
-Example:
+** Deprecated properties:
+
+- mmu-masters (deprecated in favour of the generic "iommus" binding) :
+ A list of phandles to device nodes representing bus
+ masters for which the SMMU can provide a translation
+ and their corresponding StreamIDs (see example below).
+ Each device node linked from this list must have a
+ "#stream-id-cells" property, indicating the number of
+ StreamIDs associated with it.
+
+** Example:
- smmu {
+ smmu1: iommu {
compatible = "arm,smmu-v1";
reg = <0xba5e0000 0x10000>;
#global-interrupts = <2>;
@@ -67,11 +76,12 @@ Example:
<0 35 4>,
<0 36 4>,
<0 37 4>;
+ #iommu-cells = <1>;
+ };
- /*
- * Two DMA controllers, the first with two StreamIDs (0xd01d
- * and 0xd01e) and the second with only one (0xd11c).
- */
- mmu-masters = <&dma0 0xd01d 0xd01e>,
- <&dma1 0xd11c>;
+ /* device with two stream IDs, 0 and 7 */
+ master {
+ iommus = <&smmu1 0>,
+ <&smmu1 7>;
};
+
diff --git a/Documentation/devicetree/bindings/pci/pci-iommu.txt b/Documentation/devicetree/bindings/pci/pci-iommu.txt
new file mode 100644
index 0000000000000..56c829621b9a8
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/pci-iommu.txt
@@ -0,0 +1,171 @@
+This document describes the generic device tree binding for describing the
+relationship between PCI(e) devices and IOMMU(s).
+
+Each PCI(e) device under a root complex is uniquely identified by its Requester
+ID (AKA RID). A Requester ID is a triplet of a Bus number, Device number, and
+Function number.
+
+For the purpose of this document, when treated as a numeric value, a RID is
+formatted such that:
+
+* Bits [15:8] are the Bus number.
+* Bits [7:3] are the Device number.
+* Bits [2:0] are the Function number.
+* Any other bits required for padding must be zero.
+
+IOMMUs may distinguish PCI devices through sideband data derived from the
+Requester ID. While a given PCI device can only master through one IOMMU, a
+root complex may split masters across a set of IOMMUs (e.g. with one IOMMU per
+bus).
+
+The generic 'iommus' property is insufficient to describe this relationship,
+and a mechanism is required to map from a PCI device to its IOMMU and sideband
+data.
+
+For generic IOMMU bindings, see
+Documentation/devicetree/bindings/iommu/iommu.txt.
+
+
+PCI root complex
+================
+
+Optional properties
+-------------------
+
+- iommu-map: Maps a Requester ID to an IOMMU and associated iommu-specifier
+ data.
+
+ The property is an arbitrary number of tuples of
+ (rid-base,iommu,iommu-base,length).
+
+ Any RID r in the interval [rid-base, rid-base + length) is associated with
+ the listed IOMMU, with the iommu-specifier (r - rid-base + iommu-base).
+
+- iommu-map-mask: A mask to be applied to each Requester ID prior to being
+ mapped to an iommu-specifier per the iommu-map property.
+
+
+Example (1)
+===========
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ iommu: iommu@a {
+ reg = <0xa 0x1>;
+ compatible = "vendor,some-iommu";
+ #iommu-cells = <1>;
+ };
+
+ pci: pci@f {
+ reg = <0xf 0x1>;
+ compatible = "vendor,pcie-root-complex";
+ device_type = "pci";
+
+ /*
+ * The sideband data provided to the IOMMU is the RID,
+ * identity-mapped.
+ */
+ iommu-map = <0x0 &iommu 0x0 0x10000>;
+ };
+};
+
+
+Example (2)
+===========
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ iommu: iommu@a {
+ reg = <0xa 0x1>;
+ compatible = "vendor,some-iommu";
+ #iommu-cells = <1>;
+ };
+
+ pci: pci@f {
+ reg = <0xf 0x1>;
+ compatible = "vendor,pcie-root-complex";
+ device_type = "pci";
+
+ /*
+ * The sideband data provided to the IOMMU is the RID with the
+ * function bits masked out.
+ */
+ iommu-map = <0x0 &iommu 0x0 0x10000>;
+ iommu-map-mask = <0xfff8>;
+ };
+};
+
+
+Example (3)
+===========
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ iommu: iommu@a {
+ reg = <0xa 0x1>;
+ compatible = "vendor,some-iommu";
+ #iommu-cells = <1>;
+ };
+
+ pci: pci@f {
+ reg = <0xf 0x1>;
+ compatible = "vendor,pcie-root-complex";
+ device_type = "pci";
+
+ /*
+ * The sideband data provided to the IOMMU is the RID,
+ * but the high bits of the bus number are flipped.
+ */
+ iommu-map = <0x0000 &iommu 0x8000 0x8000>,
+ <0x8000 &iommu 0x0000 0x8000>;
+ };
+};
+
+
+Example (4)
+===========
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ iommu_a: iommu@a {
+ reg = <0xa 0x1>;
+ compatible = "vendor,some-iommu";
+ #iommu-cells = <1>;
+ };
+
+ iommu_b: iommu@b {
+ reg = <0xb 0x1>;
+ compatible = "vendor,some-iommu";
+ #iommu-cells = <1>;
+ };
+
+ iommu_c: iommu@c {
+ reg = <0xc 0x1>;
+ compatible = "vendor,some-iommu";
+ #iommu-cells = <1>;
+ };
+
+ pci: pci@f {
+ reg = <0xf 0x1>;
+ compatible = "vendor,pcie-root-complex";
+ device_type = "pci";
+
+ /*
+ * Devices with bus number 0-127 are mastered via IOMMU
+ * a, with sideband data being RID[14:0].
+ * Devices with bus number 128-255 are mastered via
+ * IOMMU b, with sideband data being RID[14:0].
+ * No devices master via IOMMU c.
+ */
+ iommu-map = <0x0000 &iommu_a 0x0000 0x8000>,
+ <0x8000 &iommu_b 0x0000 0x8000>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
index d00bfd8624a50..46c6f3ed1a1c5 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
@@ -7,6 +7,7 @@ Qualcomm SPMI Regulators
"qcom,pm8841-regulators"
"qcom,pm8916-regulators"
"qcom,pm8941-regulators"
+ "qcom,pm8994-regulators"
- interrupts:
Usage: optional
@@ -68,6 +69,37 @@ Qualcomm SPMI Regulators
Definition: Reference to regulator supplying the input pin, as
described in the data sheet.
+- vdd_s1-supply:
+- vdd_s2-supply:
+- vdd_s3-supply:
+- vdd_s4-supply:
+- vdd_s5-supply:
+- vdd_s6-supply:
+- vdd_s7-supply:
+- vdd_s8-supply:
+- vdd_s9-supply:
+- vdd_s10-supply:
+- vdd_s11-supply:
+- vdd_s12-supply:
+- vdd_l1-supply:
+- vdd_l2_l26_l28-supply:
+- vdd_l3_l11-supply:
+- vdd_l4_l27_l31-supply:
+- vdd_l5_l7-supply:
+- vdd_l6_l12_l32-supply:
+- vdd_l8_l16_l30-supply:
+- vdd_l9_l10_l18_l22-supply:
+- vdd_l13_l19_l23_l24-supply:
+- vdd_l14_l15-supply:
+- vdd_l17_l29-supply:
+- vdd_l20_l21-supply:
+- vdd_l25-supply:
+- vdd_lvs_1_2-supply:
+ Usage: optional (pm8994 only)
+ Value type: <phandle>
+ Definition: Reference to regulator supplying the input pin, as
+ described in the data sheet.
+
The regulator node houses sub-nodes for each regulator within the device. Each
sub-node is identified using the node's name, with valid values listed for each
@@ -85,6 +117,11 @@ pm8941:
l15, l16, l17, l18, l19, l20, l21, l22, l23, l24, lvs1, lvs2, lvs3,
mvs1, mvs2
+pm8994:
+ s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, l1, l2, l3, l4, l5,
+ l6, l7, l8, l9, l10, l11, l12, l13, l14, l15, l16, l17, l18, l19, l20,
+ l21, l22, l23, l24, l25, l26, l27, l28, l29, l30, l31, l32, lvs1, lvs2
+
The content of each sub-node is defined by the standard binding for regulators -
see regulator.txt - with additional custom properties described below:
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,wcnss-pil.txt b/Documentation/devicetree/bindings/remoteproc/qcom,wcnss-pil.txt
new file mode 100644
index 0000000000000..1c0edada6bf0a
--- /dev/null
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,wcnss-pil.txt
@@ -0,0 +1,132 @@
+Qualcomm WCNSS Peripheral Image Loader
+
+This document defines the binding for a component that loads and boots firmware
+on the Qualcomm WCNSS core.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be one of:
+ "qcom,riva-pil",
+ "qcom,pronto-v1-pil",
+ "qcom,pronto-v2-pil"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: must specify the base address and size of the CCU, DXE and
+ PMU register blocks
+
+- reg-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be "ccu", "dxe", "pmu"
+
+- interrupts-extended:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: must list the watchdog and fatal IRQs and may specify the
+ ready, handover and stop-ack IRQs
+
+- interrupt-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: should be "wdog", "fatal", optionally followed by "ready",
+ "handover", "stop-ack"
+
+- vddmx-supply:
+- vddcx-supply:
+- vddpx-supply:
+ Usage: required
+ Value type: <phandle>
+ Definition: reference to the regulators to be held on behalf of the
+ booting of the WCNSS core
+
+- qcom,state:
+ Usage: optional
+ Value type: <prop-encoded-array>
+ Definition: reference to the SMEM state used to indicate to WCNSS that
+ it should shut down
+
+- qcom,state-names:
+ Usage: optional
+ Value type: <stringlist>
+ Definition: should be "stop"
+
+- memory-region:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: reference to reserved-memory node for the remote processor
+ see ../reserved-memory/reserved-memory.txt
+
+= SUBNODES
+A single subnode of the WCNSS PIL describes the attached rf module and its
+resource dependencies.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be one of:
+ "qcom,wcn3620",
+ "qcom,wcn3660",
+ "qcom,wcn3680"
+
+- clocks:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: should specify the xo clock and optionally the rf clock
+
+- clock-names:
+ Usage: required
+ Value type: <stringlist>
+ Definition: should be "xo", optionally followed by "rf"
+
+- vddxo-supply:
+- vddrfa-supply:
+- vddpa-supply:
+- vdddig-supply:
+ Usage: required
+ Value type: <phandle>
+ Definition: reference to the regulators to be held on behalf of the
+ booting of the WCNSS core
+
+= EXAMPLE
+The following example describes the resources needed to boot and control the
+WCNSS, with attached WCN3680, as it is commonly found on MSM8974 boards.
+
+pronto@fb204000 {
+ compatible = "qcom,pronto-v2-pil";
+ reg = <0xfb204000 0x2000>, <0xfb202000 0x1000>, <0xfb21b000 0x3000>;
+ reg-names = "ccu", "dxe", "pmu";
+
+ interrupts-extended = <&intc 0 149 1>,
+ <&wcnss_smp2p_slave 0 0>,
+ <&wcnss_smp2p_slave 1 0>,
+ <&wcnss_smp2p_slave 2 0>,
+ <&wcnss_smp2p_slave 3 0>;
+ interrupt-names = "wdog", "fatal", "ready", "handover", "stop-ack";
+
+ vddmx-supply = <&pm8841_s1>;
+ vddcx-supply = <&pm8841_s2>;
+ vddpx-supply = <&pm8941_s3>;
+
+ qcom,state = <&wcnss_smp2p_out 0>;
+ qcom,state-names = "stop";
+
+ memory-region = <&wcnss_region>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&wcnss_pin_a>;
+
+ iris {
+ compatible = "qcom,wcn3680";
+
+ clocks = <&rpmcc RPM_CXO_CLK_SRC>, <&rpmcc RPM_CXO_A2>;
+ clock-names = "xo", "rf";
+
+ vddxo-supply = <&pm8941_l6>;
+ vddrfa-supply = <&pm8941_l11>;
+ vddpa-supply = <&pm8941_l19>;
+ vdddig-supply = <&pm8941_s3>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt b/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
index 48129368d4d97..593a12a67ba15 100644
--- a/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
@@ -16,6 +16,23 @@ Required properties:
* "spkr-iomux"
- qcom,model : Name of the sound card.
+- qcom,audio-routing : A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the
+ connection's sink, the second being the connection's
+ source. Valid names could be power supplies, MicBias
+ of msm8x16_wcd codec and the jacks on the board:
+
+ Power supplies:
+ * MIC BIAS External
+ * MIC BIAS Internal1
+ * MIC BIAS Internal2
+
+ Board connectors:
+ * Headset Mic
+ * Secondary Mic
+ * DMIC
+ * Ext Spk
+
Dai-link subnode properties and subnodes:
Required dai-link subnodes:
@@ -37,6 +54,18 @@ sound: sound {
reg-names = "mic-iomux", "spkr-iomux";
qcom,model = "DB410c";
+ qcom,audio-routing =
+ "MIC BIAS External", "Handset Mic",
+ "MIC BIAS Internal2", "Headset Mic",
+ "MIC BIAS External", "Secondary Mic",
+ "AMIC1", "MIC BIAS External",
+ "AMIC2", "MIC BIAS Internal2",
+ "AMIC3", "MIC BIAS External",
+ "DMIC1", "MIC BIAS Internal1",
+ "MIC BIAS Internal1", "Digital Mic1",
+ "DMIC2", "MIC BIAS Internal1",
+ "MIC BIAS Internal1", "Digital Mic2";
+
/* I2S - Internal codec */
internal-dai-link@0 {
cpu { /* PRIMARY */
diff --git a/MAINTAINERS b/MAINTAINERS
index 9c567a431d8d4..4ac8989463f32 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5916,6 +5916,7 @@ F: include/uapi/linux/ipmi*
QCOM AUDIO (ASoC) DRIVERS
M: Patrick Lai <plai@codeaurora.org>
M: Banajit Goswami <bgoswami@codeaurora.org>
+L: linux-arm-msm@vger.kernel.org
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
F: sound/soc/qcom/
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 95c1923ce6fa3..93987d626da2c 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -539,6 +539,8 @@ dtb-$(CONFIG_ARCH_ORION5X) += \
dtb-$(CONFIG_ARCH_PRIMA2) += \
prima2-evb.dtb
dtb-$(CONFIG_ARCH_QCOM) += \
+ qcom-apq8064-arrow-db600c.dtb \
+ qcom-apq8064-eI_ERAGON600.dtb \
qcom-apq8064-cm-qs600.dtb \
qcom-apq8064-ifc6410.dtb \
qcom-apq8064-sony-xperia-yuga.dtb \
diff --git a/arch/arm/boot/dts/qcom-apq8064-arrow-db600c-pins.dtsi b/arch/arm/boot/dts/qcom-apq8064-arrow-db600c-pins.dtsi
new file mode 100644
index 0000000000000..a3efb9704fcd9
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-apq8064-arrow-db600c-pins.dtsi
@@ -0,0 +1,52 @@
+&tlmm_pinmux {
+ card_detect: card-detect {
+ mux {
+ pins = "gpio26";
+ function = "gpio";
+ bias-disable;
+ };
+ };
+
+ pcie_pins: pcie-pinmux {
+ mux {
+ pins = "gpio27";
+ function = "gpio";
+ };
+ conf {
+ pins = "gpio27";
+ drive-strength = <12>;
+ bias-disable;
+ };
+ };
+
+ user_leds: user-leds {
+ mux {
+ pins = "gpio3", "gpio7", "gpio10", "gpio11";
+ function = "gpio";
+ };
+
+ conf {
+ pins = "gpio3", "gpio7", "gpio10", "gpio11";
+ function = "gpio";
+ output-low;
+ };
+ };
+
+ magneto_pins: magneto-pins {
+ mux {
+ pins = "gpio31", "gpio48";
+ function = "gpio";
+ bias-disable;
+ };
+ };
+};
+
+&pm8921_mpps {
+ mpp_leds: mpp-leds {
+ pinconf {
+ pins = "mpp7", "mpp8";
+ function = "digital";
+ output-low;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom-apq8064-arrow-db600c.dts b/arch/arm/boot/dts/qcom-apq8064-arrow-db600c.dts
new file mode 100644
index 0000000000000..e12d7f55f5cdd
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-apq8064-arrow-db600c.dts
@@ -0,0 +1,376 @@
+#include "qcom-apq8064-v2.0.dtsi"
+#include "qcom-apq8064-arrow-db600c-pins.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "Arrow Electronics, APQ8064 DB600c";
+ compatible = "arrow,db600c", "qcom,apq8064";
+
+ aliases {
+ serial0 = &gsbi7_serial;
+ serial1 = &gsbi1_serial;
+ i2c0 = &gsbi2_i2c;
+ i2c1 = &gsbi3_i2c;
+ i2c2 = &gsbi4_i2c;
+ i2c3 = &gsbi7_i2c;
+ spi0 = &gsbi5_spi;
+ };
+
+ regulators {
+ compatible = "simple-bus";
+ vph: regulator-fixed@1 {
+ compatible = "regulator-fixed";
+ regulator-min-microvolt = <4500000>;
+ regulator-max-microvolt = <4500000>;
+ regulator-name = "VPH";
+ regulator-type = "voltage";
+ regulator-boot-on;
+ };
+
+ /* on board fixed 3.3v supply */
+ vcc3v3: vcc3v3 {
+ compatible = "regulator-fixed";
+ regulator-name = "VCC3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ };
+
+ soc {
+ rpm@108000 {
+ regulators {
+ vdd_s1-supply = <&vph>;
+ vdd_s2-supply = <&vph>;
+ vdd_s3-supply = <&vph>;
+ vdd_s4-supply = <&vph>;
+ vdd_s5-supply = <&vph>;
+ vdd_s6-supply = <&vph>;
+ vdd_s7-supply = <&vph>;
+ vdd_l1_l2_l12_l18-supply = <&pm8921_s4>;
+ vdd_l3_l15_l17-supply = <&vph>;
+ vdd_l4_l14-supply = <&vph>;
+ vdd_l5_l8_l16-supply = <&vph>;
+ vdd_l6_l7-supply = <&vph>;
+ vdd_l9_l11-supply = <&vph>;
+ vdd_l10_l22-supply = <&vph>;
+ vdd_l21_l23_l29-supply = <&vph>;
+ vdd_l24-supply = <&pm8921_s1>;
+ vdd_l25-supply = <&pm8921_s1>;
+ vdd_l26-supply = <&pm8921_s7>;
+ vdd_l27-supply = <&pm8921_s7>;
+ vdd_l28-supply = <&pm8921_s7>;
+ vin_lvs1_3_6-supply = <&pm8921_s4>;
+ vin_lvs2-supply = <&pm8921_s1>;
+ vin_lvs4_5_7-supply = <&pm8921_s4>;
+
+ s1 {
+ regulator-always-on;
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ qcom,switch-mode-frequency = <3200000>;
+ bias-pull-down;
+ };
+
+ s3 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1400000>;
+ qcom,switch-mode-frequency = <4800000>;
+ };
+
+ s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,switch-mode-frequency = <3200000>;
+ bias-pull-down;
+ regulator-always-on;
+ };
+
+ s7 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ qcom,switch-mode-frequency = <3200000>;
+ };
+
+ l3 {
+ regulator-min-microvolt = <3050000>;
+ regulator-max-microvolt = <3300000>;
+ bias-pull-down;
+ };
+
+ l4 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1800000>;
+ bias-pull-down;
+ };
+
+ l5 {
+ regulator-min-microvolt = <2750000>;
+ regulator-max-microvolt = <3000000>;
+ bias-pull-down;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ l6 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ bias-pull-down;
+ };
+
+ l23 {
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <1900000>;
+ bias-pull-down;
+ };
+
+ lvs6 {
+ bias-pull-down;
+ };
+
+ lvs7 {
+ bias-pull-down;
+ };
+ };
+ };
+
+ gsbi@12440000 {
+ status = "okay";
+ qcom,mode = <GSBI_PROT_UART_W_FC>;
+ serial@12450000 {
+ label = "LS-UART1";
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gsbi1_uart_4pins>;
+ };
+ };
+
+ gsbi@12480000 {
+ status = "okay";
+ qcom,mode = <GSBI_PROT_I2C>;
+ i2c@124a0000 {
+ /* On Low speed expansion and Sensors */
+ label = "LS-I2C0";
+ status = "okay";
+ lis3mdl_mag@1e {
+ compatible = "st,lis3mdl-magn";
+ reg = <0x1e>;
+ vdd-supply = <&vcc3v3>;
+ vddio-supply = <&pm8921_s4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&magneto_pins>;
+ interrupt-parent = <&tlmm_pinmux>;
+
+ st,drdy-int-pin = <2>;
+ interrupts = <48 IRQ_TYPE_EDGE_RISING>, /* DRDY line */
+ <31 IRQ_TYPE_EDGE_RISING>; /* INT */
+ };
+ };
+ };
+
+ gsbi@16200000 {
+ status = "okay";
+ qcom,mode = <GSBI_PROT_I2C>;
+ i2c@16280000 {
+ /* On Low speed expansion */
+ status = "okay";
+ label = "LS-I2C1";
+ clock-frequency = <200000>;
+ eeprom@52 {
+ compatible = "atmel,24c128";
+ reg = <0x52>;
+ pagesize = <64>;
+ };
+ };
+ };
+
+ gsbi@16300000 {
+ status = "okay";
+ qcom,mode = <GSBI_PROT_I2C>;
+ i2c@16380000 {
+ /* On High speed expansion */
+ label = "HS-CAM-I2C3";
+ status = "okay";
+ };
+ };
+
+ gsbi@1a200000 {
+ status = "okay";
+ spi@1a280000 {
+ /* On Low speed expansion */
+ label = "LS-SPI0";
+ status = "okay";
+ };
+ };
+
+ /* DEBUG UART */
+ gsbi@16600000 {
+ status = "okay";
+ qcom,mode = <GSBI_PROT_I2C_UART>;
+ serial@16640000 {
+ label = "LS-UART0";
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&gsbi7_uart_2pins>;
+ };
+
+ i2c@16680000 {
+ /* On High speed expansion */
+ status = "okay";
+ label = "HS-CAM-I2C2";
+ };
+ };
+
+ leds {
+ pinctrl-names = "default";
+ pinctrl-0 = <&user_leds>, <&mpp_leds>;
+
+ compatible = "gpio-leds";
+
+ user-led0 {
+ label = "user0-led";
+ gpios = <&tlmm_pinmux 3 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ default-state = "off";
+ };
+
+ user-led1 {
+ label = "user1-led";
+ gpios = <&tlmm_pinmux 7 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "mmc0";
+ default-state = "off";
+ };
+
+ user-led2 {
+ label = "user2-led";
+ gpios = <&tlmm_pinmux 10 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "mmc1";
+ default-state = "off";
+ };
+
+ user-led3 {
+ label = "user3-led";
+ gpios = <&tlmm_pinmux 11 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "none";
+ default-state = "off";
+ };
+
+ wifi-led {
+ label = "WiFi-led";
+ gpios = <&pm8921_mpps 7 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ bt-led {
+ label = "BT-led";
+ gpios = <&pm8921_mpps 8 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+ };
+
+ pci@1b500000 {
+ status = "okay";
+ vdda-supply = <&pm8921_s3>;
+ vdda_phy-supply = <&pm8921_lvs6>;
+ vdda_refclk-supply = <&vcc3v3>;
+ pinctrl-0 = <&pcie_pins>;
+ pinctrl-names = "default";
+ perst-gpio = <&tlmm_pinmux 27 GPIO_ACTIVE_LOW>;
+ };
+
+ phy@1b400000 {
+ status = "okay";
+ };
+
+ sata@29000000 {
+ status = "okay";
+ target-supply = <&pm8921_lvs7>;
+ };
+
+ /* OTG */
+ phy@12500000 {
+ status = "okay";
+ dr_mode = "peripheral";
+ vddcx-supply = <&pm8921_s3>;
+ v3p3-supply = <&pm8921_l3>;
+ v1p8-supply = <&pm8921_l4>;
+ };
+
+ phy@12520000 {
+ status = "okay";
+ vddcx-supply = <&pm8921_s3>;
+ v3p3-supply = <&pm8921_l3>;
+ v1p8-supply = <&pm8921_l23>;
+ };
+
+ phy@12530000 {
+ status = "okay";
+ vddcx-supply = <&pm8921_s3>;
+ v3p3-supply = <&pm8921_l3>;
+ v1p8-supply = <&pm8921_l23>;
+ };
+
+ gadget@12500000 {
+ status = "okay";
+ };
+
+ /* OTG */
+ usb@12500000 {
+ status = "okay";
+ };
+
+ usb@12520000 {
+ status = "okay";
+ };
+
+ usb@12530000 {
+ status = "okay";
+ };
+
+ hdmi: qcom,hdmi-tx@4a00000 {
+ status = "okay";
+ core-vdda-supply = <&pm8921_hdmi_switch>;
+ hdmi-mux-supply = <&vcc3v3>;
+ };
+
+ mdp: qcom,mdp@5100000 {
+ status = "okay";
+ };
+
+ amba {
+ /* eMMC */
+ sdcc@12400000 {
+ status = "okay";
+ vmmc-supply = <&pm8921_l5>;
+ vqmmc-supply = <&pm8921_s4>;
+ };
+
+ /* External micro SD card */
+ sdcc@12180000 {
+ status = "okay";
+ vmmc-supply = <&pm8921_l6>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&card_detect>;
+ cd-gpios = <&tlmm_pinmux 26 GPIO_ACTIVE_HIGH>;
+ };
+ };
+ };
+};
+
+
+&CPU0 {
+ cpu-supply = <&saw0>;
+};
+
+&CPU1 {
+ cpu-supply = <&saw1>;
+};
+
+&CPU2 {
+ cpu-supply = <&saw2>;
+};
+
+&CPU3 {
+ cpu-supply = <&saw3>;
+};
diff --git a/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts b/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts
index c535b3f0e5cfd..32fedfa149d0f 100644
--- a/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts
+++ b/arch/arm/boot/dts/qcom-apq8064-asus-nexus7-flo.dts
@@ -224,6 +224,12 @@
reg = <0x52>;
pagesize = <32>;
};
+
+ bq27541@55 {
+ compatible = "ti,bq27541";
+ reg = <0x55>;
+ };
+
};
};
diff --git a/arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts b/arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts
index 35f1d46edded1..a4f6550340702 100644
--- a/arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts
+++ b/arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts
@@ -123,6 +123,21 @@
};
};
+ hdmi_phy: qcom,hdmi-phy@4a00400 {
+ status = "okay";
+ core-vdda-supply = <&pm8921_hdmi_switch>;
+ };
+
+ hdmi: qcom,hdmi-tx@4a00000 {
+ status = "okay";
+ core-vdda-supply = <&pm8921_hdmi_switch>;
+ hdmi-mux-supply = <&v3p3_fixed>;
+ };
+
+ mdp: qcom,mdp@5100000 {
+ status = "okay";
+ };
+
gsbi@12440000 {
status = "okay";
qcom,mode = <GSBI_PROT_I2C>;
diff --git a/arch/arm/boot/dts/qcom-apq8064-coresight.dtsi b/arch/arm/boot/dts/qcom-apq8064-coresight.dtsi
new file mode 100644
index 0000000000000..6a8c4f2222f54
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-apq8064-coresight.dtsi
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/clock/qcom,rpmcc.h>
+&soc {
+
+ etb@1a01000 {
+ compatible = "coresight-etb10", "arm,primecell";
+ reg = <0x1a01000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ etb_in: endpoint {
+ slave-mode;
+ remote-endpoint = <&replicator_out0>;
+ };
+ };
+ };
+
+ tpiu@1a03000 {
+ compatible = "arm,coresight-tpiu", "arm,primecell";
+ reg = <0x1a03000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ port {
+ tpiu_in: endpoint {
+ slave-mode;
+ remote-endpoint = <&replicator_out1>;
+ };
+ };
+ };
+
+ replicator {
+ compatible = "arm,coresight-replicator";
+
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ replicator_out0: endpoint {
+ remote-endpoint = <&etb_in>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ replicator_out1: endpoint {
+ remote-endpoint = <&tpiu_in>;
+ };
+ };
+ port@2 {
+ reg = <0>;
+ replicator_in: endpoint {
+ slave-mode;
+ remote-endpoint = <&funnel_out>;
+ };
+ };
+ };
+ };
+
+ funnel@1a04000 {
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0x1a04000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /*
+ * Not described input ports:
+ * 2 - connected to STM component
+ * 3 - not-connected
+ * 6 - not-connected
+ * 7 - not-connected
+ */
+ port@0 {
+ reg = <0>;
+ funnel_in0: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm0_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ funnel_in1: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm1_out>;
+ };
+ };
+ port@4 {
+ reg = <4>;
+ funnel_in4: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm2_out>;
+ };
+ };
+ port@5 {
+ reg = <5>;
+ funnel_in5: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm3_out>;
+ };
+ };
+ port@8 {
+ reg = <0>;
+ funnel_out: endpoint {
+ remote-endpoint = <&replicator_in>;
+ };
+ };
+ };
+ };
+
+ etm@1a1c000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0x1a1c000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ cpu = <&CPU0>;
+
+ port {
+ etm0_out: endpoint {
+ remote-endpoint = <&funnel_in0>;
+ };
+ };
+ };
+
+ etm@1a1d000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0x1a1d000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ cpu = <&CPU1>;
+
+ port {
+ etm1_out: endpoint {
+ remote-endpoint = <&funnel_in1>;
+ };
+ };
+ };
+
+ etm@1a1e000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0x1a1e000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ cpu = <&CPU2>;
+
+ port {
+ etm2_out: endpoint {
+ remote-endpoint = <&funnel_in4>;
+ };
+ };
+ };
+
+ etm@1a1f000 {
+ compatible = "arm,coresight-etm3x", "arm,primecell";
+ reg = <0x1a1f000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>;
+ clock-names = "apb_pclk";
+
+ cpu = <&CPU3>;
+
+ port {
+ etm3_out: endpoint {
+ remote-endpoint = <&funnel_in5>;
+ };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom-apq8064-eI_ERAGON600.dts b/arch/arm/boot/dts/qcom-apq8064-eI_ERAGON600.dts
new file mode 100644
index 0000000000000..e094b5be22639
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-apq8064-eI_ERAGON600.dts
@@ -0,0 +1,427 @@
+#include "qcom-apq8064-v2.0.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+
+/ {
+ model = "Qualcomm APQ8064/ERAGON600";
+ compatible = "qcom,apq8064-eragon600", "qcom,apq8064";
+
+ aliases {
+ serial0 = &gsbi7_serial;
+ serial1 = &gsbi6_serial;
+ };
+
+ soc {
+ pinctrl@800000 {
+ card_detect: card_detect {
+ mux {
+ pins = "gpio26";
+ function = "gpio";
+ bias-disable;
+ };
+ };
+ };
+
+ rpm@108000 {
+ regulators {
+ vin_lvs1_3_6-supply = <&pm8921_s4>;
+ vin_lvs2-supply = <&pm8921_s1>;
+ vin_lvs4_5_7-supply = <&pm8921_s4>;
+
+ vdd_l1_l2_l12_l18-supply = <&pm8921_s4>;
+ vdd_l24-supply = <&pm8921_s1>;
+ vdd_l25-supply = <&pm8921_s1>;
+ vdd_l26-supply = <&pm8921_s7>;
+ vdd_l27-supply = <&pm8921_s7>;
+ vdd_l28-supply = <&pm8921_s7>;
+
+
+ /* Buck SMPS */
+ pm8921_s1: s1 {
+ regulator-always-on;
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ qcom,switch-mode-frequency = <3200000>;
+ bias-pull-down;
+ };
+
+ pm8921_s3: s3 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1400000>;
+ qcom,switch-mode-frequency = <4800000>;
+ };
+
+ pm8921_s4: s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,switch-mode-frequency = <3200000>;
+ qcom,force-mode = <3>;
+ };
+
+ pm8921_s7: s7 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ qcom,switch-mode-frequency = <3200000>;
+ };
+
+ pm8921_l2: l2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ bias-pull-down;
+ };
+
+ pm8921_l3: l3 {
+ regulator-min-microvolt = <3050000>;
+ regulator-max-microvolt = <3300000>;
+ bias-pull-down;
+ };
+
+ pm8921_l4: l4 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1800000>;
+ bias-pull-down;
+ };
+
+ pm8921_l5: l5 {
+ regulator-min-microvolt = <2750000>;
+ regulator-max-microvolt = <3000000>;
+ bias-pull-down;
+ };
+
+ pm8921_l6: l6 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ bias-pull-down;
+ };
+
+ pm8921_l23: l23 {
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <1900000>;
+ bias-pull-down;
+ };
+
+ pm8921_lvs1: lvs1 {
+ bias-pull-down;
+ };
+
+ pm8921_lvs6: lvs6 {
+ bias-pull-down;
+ };
+
+ pm8921_lvs7: lvs7 {
+ bias-pull-down;
+ };
+ };
+ };
+
+ ext_3p3v: regulator-fixed@1 {
+ compatible = "regulator-fixed";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "ext_3p3v";
+ regulator-type = "voltage";
+ startup-delay-us = <0>;
+ gpio = <&tlmm_pinmux 77 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-boot-on;
+ };
+
+ hdmi: qcom,hdmi-tx@4a00000 {
+ status = "okay";
+ core-vdda-supply = <&pm8921_hdmi_switch>;
+ hdmi-mux-supply = <&ext_3p3v>;
+ };
+
+ mdp: qcom,mdp@5100000 {
+ status = "okay";
+ lvds-vccs-3p3v-supply = <&ext_3p3v>;
+ lvds-pll-vdda-supply = <&pm8921_l2>;
+ lvds-vdda-supply = <&pm8921_lvs7>;
+
+ port {
+ lvds_out: endpoint {
+ remote-endpoint = <&data_image_in>;
+ };
+ };
+ };
+
+ panel_3p3v: panel_3p3v {
+ pinctrl-0 = <&pwm_en_gpios>;
+ pinctrl-names = "default";
+ compatible = "regulator-fixed";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "panel_en_3p3v";
+ regulator-type = "voltage";
+ startup-delay-us = <0>;
+ gpio = <&pm8921_gpio 36 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-boot-on;
+ };
+
+ backlight: backlight{
+ pinctrl-0 = <&pwm_bl_gpios>;
+ pinctrl-names = "default";
+ compatible = "gpio-backlight";
+ gpios = <&pm8921_gpio 26 GPIO_ACTIVE_HIGH>;
+ default-on;
+ };
+
+ levelshifter: levelshifter{
+ pinctrl-0 = <&pwm_bl_gpios>;
+ pinctrl-names = "default";
+ compatible = "gpio-backlight";
+ gpios = <&tlmm_pinmux 85 GPIO_ACTIVE_HIGH>;
+ startup-delay-us = <2000>;
+ default-on;
+ };
+
+ panel: data_image,scf0700C48ggu21 {
+ status = "okay";
+ compatible = "data_image,scf0700C48ggu21";
+
+ ddc-i2c-bus = <&i2c3>;
+ backlight = <&backlight>;
+ power-supply = <&panel_3p3v>;
+ port {
+ data_image_in: endpoint {
+ remote-endpoint = <&lvds_out>;
+ };
+ };
+ };
+
+ gsbi3: gsbi@16200000 {
+ status = "okay";
+ qcom,mode = <GSBI_PROT_I2C>;
+ i2c3: i2c@16280000 {
+ status = "okay";
+ pinctrl-0 = <&i2c3_pins>;
+ pinctrl-names = "default";
+ };
+ };
+
+ gsbi@12440000 {
+ status = "okay";
+ qcom,mode = <GSBI_PROT_I2C>;
+
+ i2c@12460000 {
+ status = "okay";
+ clock-frequency = <200000>;
+ pinctrl-0 = <&i2c1_pins>;
+ pinctrl-names = "default";
+
+ eeprom: eeprom@52 {
+ compatible = "atmel,24c128";
+ reg = <0x52>;
+ pagesize = <32>;
+ };
+ };
+ };
+
+ gsbi@16500000 {
+ status = "ok";
+ qcom,mode = <GSBI_PROT_UART_W_FC>;
+
+ serial@16540000 {
+ status = "ok";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&gsbi6_uart_4pins>;
+ };
+ };
+
+ gsbi@16600000 {
+ status = "ok";
+ qcom,mode = <GSBI_PROT_I2C_UART>;
+ serial@16640000 {
+ status = "ok";
+ };
+ };
+
+ adm: dma@18320000 {
+ status = "okay";
+ };
+
+ sata_phy0: phy@1b400000 {
+ status = "okay";
+ };
+
+ sata0: sata@29000000 {
+ status = "okay";
+ target-supply = <&pm8921_s4>;
+ };
+
+ /* OTG */
+ usb1_phy: phy@12500000 {
+ status = "okay";
+ vddcx-supply = <&pm8921_s3>;
+ v3p3-supply = <&pm8921_l3>;
+ v1p8-supply = <&pm8921_l4>;
+ };
+
+ usb3_phy: phy@12520000 {
+ status = "okay";
+ vddcx-supply = <&pm8921_s3>;
+ v3p3-supply = <&pm8921_l3>;
+ v1p8-supply = <&pm8921_l23>;
+ };
+
+ usb4_phy: phy@12530000 {
+ status = "okay";
+ vddcx-supply = <&pm8921_s3>;
+ v3p3-supply = <&pm8921_l3>;
+ v1p8-supply = <&pm8921_l23>;
+ };
+
+ gadget1: gadget@12500000 {
+ status = "okay";
+ };
+
+ /* OTG */
+ usb1: usb@12500000 {
+ status = "okay";
+ };
+
+ usb3: usb@12520000 {
+ status = "okay";
+ };
+
+ usb4: usb@12530000 {
+ status = "okay";
+ };
+
+ /* On-board fixed 3.3 V supply */
+ v3p3_pcieclk: v3p3-pcieclk {
+ compatible = "regulator-fixed";
+ regulator-name = "PCIE V3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ pci@1b500000 {
+ status = "ok";
+ pcie-clk-supply = <&v3p3_pcieclk>;
+ avdd-supply = <&pm8921_s3>;
+ vdd-supply = <&pm8921_lvs6>;
+ ext-3p3v-supply = <&ext_3p3v>;
+ qcom,external-phy-refclk;
+ reset-gpio = <&tlmm_pinmux 27 GPIO_ACTIVE_LOW>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&notify_led>;
+
+ led@1 {
+ label = "apq8064:green:user1";
+ gpios = <&pm8921_gpio 18 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ default-state = "on";
+ };
+ };
+
+ qcom,ssbi@500000 {
+ pmicintc: pmic@0 {
+ pm8921_gpio: gpio@150 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_default_gpios &bt_gpios>;
+
+ pwm_bl_gpios: pwm-bl-gpios {
+ pios {
+ pins = "gpio26";
+ bias-disable;
+ function = "normal";
+ qcom,drive-strength = <1>;
+ power-source = <PM8921_GPIO_S4>;
+ };
+ };
+
+ pwm_en_gpios: pwm-en-gpios {
+ pios {
+ pins = "gpio36";
+ bias-disable;
+ function = "normal";
+ qcom,drive-strength = <1>;
+ power-source = <PM8921_GPIO_S4>;
+ };
+ };
+
+ wlan_default_gpios: wlan-gpios {
+ pios {
+ pins = "gpio43";
+ function = "normal";
+ bias-disable;
+ power-source = <PM8921_GPIO_S4>;
+ };
+ };
+
+ bt_gpios: bt-gpio {
+ pios {
+ pins = "gpio44";
+ function = "normal";
+ bias-disable;
+ power-source = <PM8921_GPIO_S4>;
+ };
+ };
+
+ notify_led: nled {
+ pios {
+ pins = "gpio18";
+ function = "normal";
+ bias-disable;
+ power-source = <PM8921_GPIO_S4>;
+ };
+ };
+ };
+ };
+ };
+ sdcc4_pwrseq:pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&pm8921_gpio 43 GPIO_ACTIVE_LOW>,
+ <&pm8921_gpio 44 GPIO_ACTIVE_LOW>;
+ };
+
+ amba {
+ /* eMMC */
+ sdcc1: sdcc@12400000 {
+ status = "okay";
+ vmmc-supply = <&pm8921_l5>;
+ vqmmc-supply = <&pm8921_s4>;
+ };
+
+ /* External micro SD card */
+ sdcc3: sdcc@12180000 {
+ status = "okay";
+ vmmc-supply = <&pm8921_l6>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&card_detect>;
+ cd-gpios = <&tlmm_pinmux 26 GPIO_ACTIVE_LOW>;
+ };
+ /* WLAN */
+ sdcc4: sdcc@121c0000 {
+ status = "okay";
+ vmmc-supply = <&ext_3p3v>;
+ vqmmc-supply = <&pm8921_lvs1>;
+ mmc-pwrseq = <&sdcc4_pwrseq>;
+ };
+ };
+ };
+};
+
+&CPU0 {
+ cpu-supply = <&saw0>;
+};
+
+&CPU1 {
+ cpu-supply = <&saw1>;
+};
+
+&CPU2 {
+ cpu-supply = <&saw2>;
+};
+
+&CPU3 {
+ cpu-supply = <&saw3>;
+};
diff --git a/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts b/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts
index 2eeb0904eaa79..8ddb5ce0e9d9f 100644
--- a/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts
+++ b/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts
@@ -25,9 +25,10 @@
sdcc4_pwrseq: sdcc4_pwrseq {
pinctrl-names = "default";
- pinctrl-0 = <&wlan_default_gpios>;
+ pinctrl-0 = <&wlan_default_gpios &bt_gpios>;
compatible = "mmc-pwrseq-simple";
- reset-gpios = <&pm8921_gpio 43 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&pm8921_gpio 43 GPIO_ACTIVE_LOW>,
+ <&pm8921_gpio 44 GPIO_ACTIVE_LOW>;
};
};
@@ -107,6 +108,12 @@
qcom,switch-mode-frequency = <3200000>;
};
+ l2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ bias-pull-down;
+ };
+
l3 {
regulator-min-microvolt = <3050000>;
regulator-max-microvolt = <3300000>;
@@ -137,6 +144,12 @@
bias-pull-down;
};
+ l26 {
+ regulator-min-microvolt = < 375000>;
+ regulator-max-microvolt = <1050000>;
+ bias-pull-down;
+ };
+
lvs1 {
bias-pull-down;
};
@@ -144,6 +157,10 @@
lvs6 {
bias-pull-down;
};
+
+ lvs7 {
+ bias-pull-down;
+ };
};
};
@@ -159,6 +176,71 @@
regulator-boot-on;
};
+ hdmi_phy: qcom,hdmi-phy@4a00400 {
+ status = "okay";
+ core-vdda-supply = <&pm8921_hdmi_switch>;
+ };
+
+ pil_q6v4: pil@28800000 {
+ qcom,pll-supply = <&pm8921_l26>;
+ qcom,pll-uV = <1050000>;
+ };
+
+ hdmi: qcom,hdmi-tx@4a00000 {
+ status = "okay";
+ core-vdda-supply = <&pm8921_hdmi_switch>;
+ hdmi-mux-supply = <&ext_3p3v>;
+ };
+
+ mdp: qcom,mdp@5100000 {
+ status = "okay";
+ lvds-vccs-3p3v-supply = <&ext_3p3v>;
+ lvds-pll-vdda-supply = <&pm8921_l2>;
+ lvds-vdda-supply = <&pm8921_lvs7>;
+
+ port {
+ lvds_out: endpoint {
+ remote-endpoint = <&auo_in>;
+ };
+ };
+ };
+
+ panel_3p3v: panel_3p3v {
+ compatible = "regulator-fixed";
+ pinctrl-0 = <&disp_en_gpios>;
+ pinctrl-names = "default";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "panel_en_3p3v";
+ regulator-type = "voltage";
+ startup-delay-us = <0>;
+ gpio = <&pm8921_gpio 36 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-boot-on;
+ };
+
+ backlight: backlight{
+ pinctrl-0 = <&pwm_bl_gpios>;
+ pinctrl-names = "default";
+ compatible = "gpio-backlight";
+ gpios = <&pm8921_gpio 26 GPIO_ACTIVE_HIGH>;
+ default-on;
+ };
+
+ panel: auo,b101xtn01 {
+ status = "okay";
+ compatible = "auo,b101xtn01";
+
+ ddc-i2c-bus = <&gsbi3_i2c>;
+ backlight = <&backlight>;
+ power-supply = <&panel_3p3v>;
+ port {
+ auo_in: endpoint {
+ remote-endpoint = <&lvds_out>;
+ };
+ };
+ };
+
gsbi3: gsbi@16200000 {
status = "okay";
qcom,mode = <GSBI_PROT_I2C>;
@@ -223,6 +305,10 @@
};
};
+ adm: dma@18320000 {
+ status = "okay";
+ };
+
sata_phy0: phy@1b400000 {
status = "okay";
};
@@ -284,6 +370,27 @@
qcom,ssbi@500000 {
pmic@0 {
gpio@150 {
+
+ pwm_bl_gpios: pwm-bl-gpios {
+ pios {
+ pins = "gpio26";
+ bias-disable;
+ function = "normal";
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_HIGH>;
+ power-source = <PM8921_GPIO_S4>;
+ };
+ };
+
+ disp_en_gpios: disp-en-gpios {
+ pios {
+ pins = "gpio36";
+ bias-disable;
+ function = "normal";
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_HIGH>;
+ power-source = <PM8921_GPIO_S4>;
+ };
+ };
+
wlan_default_gpios: wlan-gpios {
pios {
pins = "gpio43";
@@ -293,6 +400,15 @@
};
};
+ bt_gpios: bt-gpio {
+ pios {
+ pins = "gpio44";
+ function = "normal";
+ bias-disable;
+ power-source = <PM8921_GPIO_S4>;
+ };
+ };
+
notify_led: nled {
pios {
pins = "gpio18";
@@ -331,3 +447,19 @@
};
};
};
+
+&CPU0 {
+ cpu-supply = <&saw0>;
+};
+
+&CPU1 {
+ cpu-supply = <&saw1>;
+};
+
+&CPU2 {
+ cpu-supply = <&saw2>;
+};
+
+&CPU3 {
+ cpu-supply = <&saw3>;
+};
diff --git a/arch/arm/boot/dts/qcom-apq8064-pins.dtsi b/arch/arm/boot/dts/qcom-apq8064-pins.dtsi
index b57c59d5bc00b..2e9d633d06ad3 100644
--- a/arch/arm/boot/dts/qcom-apq8064-pins.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064-pins.dtsi
@@ -7,6 +7,21 @@
};
};
+ hdmi_pinctrl: hdmi-pinctrl {
+ mux1 {
+ pins = "gpio69", "gpio70", "gpio71";
+ function = "hdmi";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+ mux2 {
+ pins = "gpio72";
+ function = "hdmi";
+ bias-pull-down;
+ drive-strength = <16>;
+ };
+ };
+
ps_hold: ps_hold {
mux {
pins = "gpio78";
@@ -39,6 +54,20 @@
};
};
+ gsbi1_uart_2pins: gsbi1_uart_2pins {
+ mux {
+ pins = "gpio18", "gpio19";
+ function = "gsbi1";
+ };
+ };
+
+ gsbi1_uart_4pins: gsbi1_uart_4pins {
+ mux {
+ pins = "gpio18", "gpio19", "gpio20", "gpio21";
+ function = "gsbi1";
+ };
+ };
+
i2c2_pins: i2c2 {
mux {
pins = "gpio24", "gpio25";
@@ -205,4 +234,29 @@
function = "gsbi7";
};
};
+
+ i2c7_pins: i2c7 {
+ mux {
+ pins = "gpio84", "gpio85";
+ function = "gsbi7";
+ };
+
+ pinconf {
+ pins = "gpio84", "gpio85";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ i2c7_pins_sleep: i2c7_pins_sleep {
+ mux {
+ pins = "gpio84", "gpio85";
+ function = "gpio";
+ };
+ pinconf {
+ pins = "gpio84", "gpio85";
+ drive-strength = <2>;
+ bias-disable = <0>;
+ };
+ };
};
diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
index 04f541bffbdd5..3b72b9ed31d75 100644
--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
@@ -1,11 +1,14 @@
/dts-v1/;
#include "skeleton.dtsi"
+#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/clock/qcom,gcc-msm8960.h>
#include <dt-bindings/reset/qcom,gcc-msm8960.h>
#include <dt-bindings/clock/qcom,mmcc-msm8960.h>
#include <dt-bindings/soc/qcom,gsbi.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/thermal/thermal.h>
+
/ {
model = "Qualcomm APQ8064";
compatible = "qcom,apq8064";
@@ -26,7 +29,7 @@
#address-cells = <1>;
#size-cells = <0>;
- cpu@0 {
+ CPU0: cpu@0 {
compatible = "qcom,krait";
enable-method = "qcom,kpss-acc-v1";
device_type = "cpu";
@@ -35,9 +38,15 @@
qcom,acc = <&acc0>;
qcom,saw = <&saw0>;
cpu-idle-states = <&CPU_SPC>;
+ clocks = <&kraitcc 0>, <&kraitcc 4>;
+ clock-names = "cpu", "l2";
+ clock-latency = <100000>;
+ cooling-min-level = <0>;
+ cooling-max-level = <7>;
+ #cooling-cells = <2>;
};
- cpu@1 {
+ CPU1: cpu@1 {
compatible = "qcom,krait";
enable-method = "qcom,kpss-acc-v1";
device_type = "cpu";
@@ -46,9 +55,15 @@
qcom,acc = <&acc1>;
qcom,saw = <&saw1>;
cpu-idle-states = <&CPU_SPC>;
+ clocks = <&kraitcc 1>, <&kraitcc 4>;
+ clock-names = "cpu", "l2";
+ clock-latency = <100000>;
+ cooling-min-level = <0>;
+ cooling-max-level = <7>;
+ #cooling-cells = <2>;
};
- cpu@2 {
+ CPU2: cpu@2 {
compatible = "qcom,krait";
enable-method = "qcom,kpss-acc-v1";
device_type = "cpu";
@@ -57,9 +72,15 @@
qcom,acc = <&acc2>;
qcom,saw = <&saw2>;
cpu-idle-states = <&CPU_SPC>;
+ clocks = <&kraitcc 2>, <&kraitcc 4>;
+ clock-names = "cpu", "l2";
+ clock-latency = <100000>;
+ cooling-min-level = <0>;
+ cooling-max-level = <7>;
+ #cooling-cells = <2>;
};
- cpu@3 {
+ CPU3: cpu@3 {
compatible = "qcom,krait";
enable-method = "qcom,kpss-acc-v1";
device_type = "cpu";
@@ -68,6 +89,12 @@
qcom,acc = <&acc3>;
qcom,saw = <&saw3>;
cpu-idle-states = <&CPU_SPC>;
+ clocks = <&kraitcc 3>, <&kraitcc 4>;
+ clock-names = "cpu", "l2";
+ clock-latency = <100000>;
+ cooling-min-level = <0>;
+ cooling-max-level = <7>;
+ #cooling-cells = <2>;
};
L2: l2-cache {
@@ -75,6 +102,10 @@
cache-level = <2>;
};
+ qcom,l2 {
+ qcom,l2-rates = <384000000 972000000 1188000000>;
+ };
+
idle-states {
CPU_SPC: spc {
compatible = "qcom,idle-state-spc",
@@ -86,6 +117,116 @@
};
};
+ thermal-zones {
+ cpu-thermal0 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&gcc 7>;
+
+ trips {
+ cpu_alert0: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit0: trip@1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert0>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu-thermal1 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&gcc 8>;
+
+ trips {
+ cpu_alert1: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit1: trip@1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert1>;
+ cooling-device = <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu-thermal2 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&gcc 9>;
+
+ trips {
+ cpu_alert2: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit2: trip@1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert2>;
+ cooling-device = <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu-thermal3 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&gcc 10>;
+
+ trips {
+ cpu_alert3: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit3: trip@1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert3>;
+ cooling-device = <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+ };
+
cpu-pmu {
compatible = "qcom,krait-pmu";
interrupts = <1 10 0x304>;
@@ -124,6 +265,543 @@
hwlocks = <&sfpb_mutex 3>;
};
+ smd {
+ compatible = "qcom,smd";
+
+ modem@0 {
+ interrupts = <0 37 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&l2cc 8 3>;
+ qcom,smd-edge = <0>;
+
+ status = "disabled";
+ };
+
+ q6@1 {
+ interrupts = <0 90 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&l2cc 8 15>;
+ qcom,smd-edge = <1>;
+
+ status = "disabled";
+ };
+
+ dsps@3 {
+ interrupts = <0 138 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&sps_sic_non_secure 0x4080 0>;
+ qcom,smd-edge = <3>;
+
+ status = "disabled";
+ };
+
+ riva@6 {
+ interrupts = <0 198 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,ipc = <&l2cc 8 25>;
+ qcom,smd-edge = <6>;
+
+ status = "disabled";
+ };
+ };
+
+ smsm {
+ compatible = "qcom,smsm";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,ipc-1 = <&l2cc 8 4>;
+ qcom,ipc-2 = <&l2cc 8 14>;
+ qcom,ipc-3 = <&l2cc 8 23>;
+ qcom,ipc-4 = <&sps_sic_non_secure 0x4094 0>;
+
+ apps_smsm: apps@0 {
+ reg = <0>;
+ #qcom,state-cells = <1>;
+ };
+
+ modem_smsm: modem@1 {
+ reg = <1>;
+ interrupts = <0 38 IRQ_TYPE_EDGE_RISING>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ q6_smsm: q6@2 {
+ reg = <2>;
+ interrupts = <0 89 IRQ_TYPE_EDGE_RISING>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ wcnss_smsm: wcnss@3 {
+ reg = <3>;
+ interrupts = <0 204 IRQ_TYPE_EDGE_RISING>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ dsps_smsm: dsps@4 {
+ reg = <4>;
+ interrupts = <0 137 IRQ_TYPE_EDGE_RISING>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ qcom,pvs {
+ qcom,pvs-format-a;
+ qcom,speed0-pvs0-bin-v0 =
+ < 384000000 950000 >,
+ < 486000000 975000 >,
+ < 594000000 1000000 >,
+ < 702000000 1025000 >,
+ < 810000000 1075000 >,
+ < 918000000 1100000 >,
+ < 1026000000 1125000 >,
+ < 1080000000 1175000 >,
+ < 1134000000 1175000 >,
+ < 1188000000 1200000 >,
+ < 1242000000 1200000 >,
+ < 1296000000 1225000 >,
+ < 1350000000 1225000 >,
+ < 1404000000 1237500 >,
+ < 1458000000 1237500 >,
+ < 1512000000 1250000 >;
+
+ qcom,speed0-pvs1-bin-v0 =
+ < 384000000 900000 >,
+ < 486000000 925000 >,
+ < 594000000 950000 >,
+ < 702000000 975000 >,
+ < 810000000 1025000 >,
+ < 918000000 1050000 >,
+ < 1026000000 1075000 >,
+ < 1080000000 1125000 >,
+ < 1134000000 1125000 >,
+ < 1188000000 1150000 >,
+ < 1242000000 1150000 >,
+ < 1296000000 1175000 >,
+ < 1350000000 1175000 >,
+ < 1404000000 1187500 >,
+ < 1458000000 1187500 >,
+ < 1512000000 1200000 >;
+
+ qcom,speed0-pvs3-bin-v0 =
+ < 384000000 850000 >,
+ < 486000000 875000 >,
+ < 594000000 900000 >,
+ < 702000000 925000 >,
+ < 810000000 975000 >,
+ < 918000000 1000000 >,
+ < 1026000000 1025000 >,
+ < 1080000000 1075000 >,
+ < 1134000000 1075000 >,
+ < 1188000000 1100000 >,
+ < 1242000000 1100000 >,
+ < 1296000000 1125000 >,
+ < 1350000000 1125000 >,
+ < 1404000000 1137500 >,
+ < 1458000000 1137500 >,
+ < 1512000000 1150000 >;
+
+ qcom,speed0-pvs4-bin-v0 =
+ < 384000000 850000 >,
+ < 486000000 875000 >,
+ < 594000000 900000 >,
+ < 702000000 925000 >,
+ < 810000000 962500 >,
+ < 918000000 975000 >,
+ < 1026000000 1000000 >,
+ < 1080000000 1050000 >,
+ < 1134000000 1050000 >,
+ < 1188000000 1075000 >,
+ < 1242000000 1075000 >,
+ < 1296000000 1100000 >,
+ < 1350000000 1100000 >,
+ < 1404000000 1112500 >,
+ < 1458000000 1112500 >,
+ < 1512000000 1125000 >;
+
+ qcom,speed1-pvs0-bin-v0 =
+ < 384000000 950000 >,
+ < 486000000 950000 >,
+ < 594000000 950000 >,
+ < 702000000 962500 >,
+ < 810000000 1000000 >,
+ < 918000000 1025000 >,
+ < 1026000000 1037500 >,
+ < 1134000000 1075000 >,
+ < 1242000000 1087500 >,
+ < 1350000000 1125000 >,
+ < 1458000000 1150000 >,
+ < 1566000000 1175000 >,
+ < 1674000000 1225000 >,
+ < 1728000000 1250000 >;
+
+ qcom,speed1-pvs1-bin-v0 =
+ < 384000000 950000 >,
+ < 486000000 950000 >,
+ < 594000000 950000 >,
+ < 702000000 962500 >,
+ < 810000000 975000 >,
+ < 918000000 1000000 >,
+ < 1026000000 1012500 >,
+ < 1134000000 1037500 >,
+ < 1242000000 1050000 >,
+ < 1350000000 1087500 >,
+ < 1458000000 1112500 >,
+ < 1566000000 1150000 >,
+ < 1674000000 1187500 >,
+ < 1728000000 1200000 >;
+
+ qcom,speed1-pvs2-bin-v0 =
+ < 384000000 925000 >,
+ < 486000000 925000 >,
+ < 594000000 925000 >,
+ < 702000000 925000 >,
+ < 810000000 937500 >,
+ < 918000000 950000 >,
+ < 1026000000 975000 >,
+ < 1134000000 1000000 >,
+ < 1242000000 1012500 >,
+ < 1350000000 1037500 >,
+ < 1458000000 1075000 >,
+ < 1566000000 1100000 >,
+ < 1674000000 1137500 >,
+ < 1728000000 1162500 >;
+
+ qcom,speed1-pvs3-bin-v0 =
+ < 384000000 900000 >,
+ < 486000000 900000 >,
+ < 594000000 900000 >,
+ < 702000000 900000 >,
+ < 810000000 900000 >,
+ < 918000000 925000 >,
+ < 1026000000 950000 >,
+ < 1134000000 975000 >,
+ < 1242000000 987500 >,
+ < 1350000000 1000000 >,
+ < 1458000000 1037500 >,
+ < 1566000000 1062500 >,
+ < 1674000000 1100000 >,
+ < 1728000000 1125000 >;
+
+ qcom,speed1-pvs4-bin-v0 =
+ < 384000000 875000 >,
+ < 486000000 875000 >,
+ < 594000000 875000 >,
+ < 702000000 875000 >,
+ < 810000000 887500 >,
+ < 918000000 900000 >,
+ < 1026000000 925000 >,
+ < 1134000000 950000 >,
+ < 1242000000 962500 >,
+ < 1350000000 975000 >,
+ < 1458000000 1000000 >,
+ < 1566000000 1037500 >,
+ < 1674000000 1075000 >,
+ < 1728000000 1100000 >;
+
+ qcom,speed1-pvs5-bin-v0 =
+ < 384000000 875000 >,
+ < 486000000 875000 >,
+ < 594000000 875000 >,
+ < 702000000 875000 >,
+ < 810000000 887500 >,
+ < 918000000 900000 >,
+ < 1026000000 925000 >,
+ < 1134000000 937500 >,
+ < 1242000000 950000 >,
+ < 1350000000 962500 >,
+ < 1458000000 987500 >,
+ < 1566000000 1012500 >,
+ < 1674000000 1050000 >,
+ < 1728000000 1075000 >;
+
+ qcom,speed1-pvs6-bin-v0 =
+ < 384000000 875000 >,
+ < 486000000 875000 >,
+ < 594000000 875000 >,
+ < 702000000 875000 >,
+ < 810000000 887500 >,
+ < 918000000 900000 >,
+ < 1026000000 925000 >,
+ < 1134000000 937500 >,
+ < 1242000000 950000 >,
+ < 1350000000 962500 >,
+ < 1458000000 975000 >,
+ < 1566000000 1000000 >,
+ < 1674000000 1025000 >,
+ < 1728000000 1050000 >;
+
+ qcom,speed2-pvs0-bin-v0 =
+ < 384000000 950000 >,
+ < 486000000 950000 >,
+ < 594000000 950000 >,
+ < 702000000 950000 >,
+ < 810000000 962500 >,
+ < 918000000 975000 >,
+ < 1026000000 1000000 >,
+ < 1134000000 1025000 >,
+ < 1242000000 1037500 >,
+ < 1350000000 1062500 >,
+ < 1458000000 1100000 >,
+ < 1566000000 1125000 >,
+ < 1674000000 1175000 >,
+ < 1782000000 1225000 >,
+ < 1890000000 1287500 >;
+
+ qcom,speed2-pvs1-bin-v0 =
+ < 384000000 925000 >,
+ < 486000000 925000 >,
+ < 594000000 925000 >,
+ < 702000000 925000 >,
+ < 810000000 937500 >,
+ < 918000000 950000 >,
+ < 1026000000 975000 >,
+ < 1134000000 1000000 >,
+ < 1242000000 1012500 >,
+ < 1350000000 1037500 >,
+ < 1458000000 1075000 >,
+ < 1566000000 1100000 >,
+ < 1674000000 1137500 >,
+ < 1782000000 1187500 >,
+ < 1890000000 1250000 >;
+
+ qcom,speed2-pvs2-bin-v0 =
+ < 384000000 900000 >,
+ < 486000000 900000 >,
+ < 594000000 900000 >,
+ < 702000000 900000 >,
+ < 810000000 912500 >,
+ < 918000000 925000 >,
+ < 1026000000 950000 >,
+ < 1134000000 975000 >,
+ < 1242000000 987500 >,
+ < 1350000000 1012500 >,
+ < 1458000000 1050000 >,
+ < 1566000000 1075000 >,
+ < 1674000000 1112500 >,
+ < 1782000000 1162500 >,
+ < 1890000000 1212500 >;
+
+ qcom,speed2-pvs3-bin-v0 =
+ < 384000000 900000 >,
+ < 486000000 900000 >,
+ < 594000000 900000 >,
+ < 702000000 900000 >,
+ < 810000000 900000 >,
+ < 918000000 912500 >,
+ < 1026000000 937500 >,
+ < 1134000000 962500 >,
+ < 1242000000 975000 >,
+ < 1350000000 1000000 >,
+ < 1458000000 1025000 >,
+ < 1566000000 1050000 >,
+ < 1674000000 1087500 >,
+ < 1782000000 1137500 >,
+ < 1890000000 1175000 >;
+
+ qcom,speed2-pvs4-bin-v0 =
+ < 384000000 875000 >,
+ < 486000000 875000 >,
+ < 594000000 875000 >,
+ < 702000000 875000 >,
+ < 810000000 887500 >,
+ < 918000000 900000 >,
+ < 1026000000 925000 >,
+ < 1134000000 950000 >,
+ < 1242000000 962500 >,
+ < 1350000000 975000 >,
+ < 1458000000 1000000 >,
+ < 1566000000 1037500 >,
+ < 1674000000 1075000 >,
+ < 1782000000 1112500 >,
+ < 1890000000 1150000 >;
+
+ qcom,speed2-pvs5-bin-v0 =
+ < 384000000 875000 >,
+ < 486000000 875000 >,
+ < 594000000 875000 >,
+ < 702000000 875000 >,
+ < 810000000 887500 >,
+ < 918000000 900000 >,
+ < 1026000000 925000 >,
+ < 1134000000 937500 >,
+ < 1242000000 950000 >,
+ < 1350000000 962500 >,
+ < 1458000000 987500 >,
+ < 1566000000 1012500 >,
+ < 1674000000 1050000 >,
+ < 1782000000 1087500 >,
+ < 1890000000 1125000 >;
+
+ qcom,speed2-pvs6-bin-v0 =
+ < 384000000 875000 >,
+ < 486000000 875000 >,
+ < 594000000 875000 >,
+ < 702000000 875000 >,
+ < 810000000 887500 >,
+ < 918000000 900000 >,
+ < 1026000000 925000 >,
+ < 1134000000 937500 >,
+ < 1242000000 950000 >,
+ < 1350000000 962500 >,
+ < 1458000000 975000 >,
+ < 1566000000 1000000 >,
+ < 1674000000 1025000 >,
+ < 1782000000 1062500 >,
+ < 1890000000 1100000 >;
+
+ qcom,speed14-pvs0-bin-v0 =
+ < 384000000 950000 >,
+ < 486000000 950000 >,
+ < 594000000 950000 >,
+ < 702000000 962500 >,
+ < 810000000 1000000 >,
+ < 918000000 1025000 >,
+ < 1026000000 1037500 >,
+ < 1134000000 1075000 >,
+ < 1242000000 1087500 >,
+ < 1350000000 1125000 >,
+ < 1458000000 1150000 >,
+ < 1512000000 1162500 >;
+
+ qcom,speed14-pvs1-bin-v0 =
+ < 384000000 950000 >,
+ < 486000000 950000 >,
+ < 594000000 950000 >,
+ < 702000000 962500 >,
+ < 810000000 975000 >,
+ < 918000000 1000000 >,
+ < 1026000000 1012500 >,
+ < 1134000000 1037500 >,
+ < 1242000000 1050000 >,
+ < 1350000000 1087500 >,
+ < 1458000000 1112500 >,
+ < 1512000000 1125000 >;
+
+ qcom,speed14-pvs2-bin-v0 =
+ < 384000000 925000 >,
+ < 486000000 925000 >,
+ < 594000000 925000 >,
+ < 702000000 925000 >,
+ < 810000000 937500 >,
+ < 918000000 950000 >,
+ < 1026000000 975000 >,
+ < 1134000000 1000000 >,
+ < 1242000000 1012500 >,
+ < 1350000000 1037500 >,
+ < 1458000000 1075000 >,
+ < 1512000000 1087500 >;
+
+ qcom,speed14-pvs3-bin-v0 =
+ < 384000000 900000 >,
+ < 486000000 900000 >,
+ < 594000000 900000 >,
+ < 702000000 900000 >,
+ < 810000000 900000 >,
+ < 918000000 925000 >,
+ < 1026000000 950000 >,
+ < 1134000000 975000 >,
+ < 1242000000 987500 >,
+ < 1350000000 1000000 >,
+ < 1458000000 1037500 >,
+ < 1512000000 1050000 >;
+
+ qcom,speed14-pvs4-bin-v0 =
+ < 384000000 875000 >,
+ < 486000000 875000 >,
+ < 594000000 875000 >,
+ < 702000000 875000 >,
+ < 810000000 887500 >,
+ < 918000000 900000 >,
+ < 1026000000 925000 >,
+ < 1134000000 950000 >,
+ < 1242000000 962500 >,
+ < 1350000000 975000 >,
+ < 1458000000 1000000 >,
+ < 1512000000 1012500 >;
+
+ qcom,speed14-pvs5-bin-v0 =
+ < 384000000 875000 >,
+ < 486000000 875000 >,
+ < 594000000 875000 >,
+ < 702000000 875000 >,
+ < 810000000 887500 >,
+ < 918000000 900000 >,
+ < 1026000000 925000 >,
+ < 1134000000 937500 >,
+ < 1242000000 950000 >,
+ < 1350000000 962500 >,
+ < 1458000000 987500 >,
+ < 1512000000 1000000 >;
+
+ qcom,speed14-pvs6-bin-v0 =
+ < 384000000 875000 >,
+ < 486000000 875000 >,
+ < 594000000 875000 >,
+ < 702000000 875000 >,
+ < 810000000 887500 >,
+ < 918000000 900000 >,
+ < 1026000000 925000 >,
+ < 1134000000 937500 >,
+ < 1242000000 950000 >,
+ < 1350000000 962500 >,
+ < 1458000000 975000 >,
+ < 1512000000 987500 >;
+ };
+
+ kraitcc: clock-controller {
+ compatible = "qcom,krait-cc-v1";
+ #clock-cells = <1>;
+ };
+
+ clocks {
+ sleep_clk: sleep_clk {
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ #clock-cells = <0>;
+ };
+ };
+
+ clocks {
+ cxo_board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <19200000>;
+ };
+
+ pxo_board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <27000000>;
+ };
+
+ sleep_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ };
+ };
+
+ firmware {
+ compatible = "simple-bus";
+
+ scm {
+ compatible = "qcom,scm";
+ clocks = <&gcc CE3_CORE_CLK> , <&gcc CE3_H_CLK>;
+ clock-names = "core", "iface";
+ };
+ };
+
+
soc: soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -168,48 +846,80 @@
cpu-offset = <0x80000>;
};
+ watchdog@208a038 {
+ compatible = "qcom,kpss-wdt-apq8064";
+ reg = <0x0208a038 0x40>;
+ clocks = <&sleep_clk>;
+ timeout-sec = <10>;
+ };
+
acc0: clock-controller@2088000 {
compatible = "qcom,kpss-acc-v1";
reg = <0x02088000 0x1000>, <0x02008000 0x1000>;
+ clock-output-names = "acpu0_aux";
};
acc1: clock-controller@2098000 {
compatible = "qcom,kpss-acc-v1";
reg = <0x02098000 0x1000>, <0x02008000 0x1000>;
+ clock-output-names = "acpu1_aux";
};
acc2: clock-controller@20a8000 {
compatible = "qcom,kpss-acc-v1";
reg = <0x020a8000 0x1000>, <0x02008000 0x1000>;
+ clock-output-names = "acpu2_aux";
};
acc3: clock-controller@20b8000 {
compatible = "qcom,kpss-acc-v1";
reg = <0x020b8000 0x1000>, <0x02008000 0x1000>;
+ clock-output-names = "acpu3_aux";
};
saw0: power-controller@2089000 {
compatible = "qcom,apq8064-saw2-v1.1-cpu", "qcom,saw2";
reg = <0x02089000 0x1000>, <0x02009000 0x1000>;
regulator;
+ regulator-name = "krait0";
+ regulator-always-on;
+ regulator-min-microvolt = <825000>;
+ regulator-max-microvolt = <1250000>;
};
saw1: power-controller@2099000 {
compatible = "qcom,apq8064-saw2-v1.1-cpu", "qcom,saw2";
reg = <0x02099000 0x1000>, <0x02009000 0x1000>;
regulator;
+ regulator-name = "krait1";
+ regulator-always-on;
+ regulator-min-microvolt = <825000>;
+ regulator-max-microvolt = <1250000>;
};
saw2: power-controller@20a9000 {
compatible = "qcom,apq8064-saw2-v1.1-cpu", "qcom,saw2";
reg = <0x020a9000 0x1000>, <0x02009000 0x1000>;
regulator;
+ regulator-name = "krait2";
+ regulator-always-on;
+ regulator-min-microvolt = <825000>;
+ regulator-max-microvolt = <1250000>;
};
saw3: power-controller@20b9000 {
compatible = "qcom,apq8064-saw2-v1.1-cpu", "qcom,saw2";
reg = <0x020b9000 0x1000>, <0x02009000 0x1000>;
regulator;
+ regulator-name = "krait3";
+ regulator-always-on;
+ regulator-min-microvolt = <825000>;
+ regulator-max-microvolt = <1250000>;
+ };
+
+ sps_sic_non_secure: sps-sic-non-secure@12100000 {
+ compatible = "syscon";
+ reg = <0x12100000 0x10000>;
};
gsbi1: gsbi@12440000 {
@@ -225,9 +935,20 @@
syscon-tcsr = <&tcsr>;
+ gsbi1_serial: serial@12450000 {
+ compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
+ reg = <0x12450000 0x100>,
+ <0x12400000 0x03>;
+ interrupts = <0 193 0x0>;
+ clocks = <&gcc GSBI1_UART_CLK>, <&gcc GSBI1_H_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
+
gsbi1_i2c: i2c@12460000 {
compatible = "qcom,i2c-qup-v1.1.1";
- pinctrl-0 = <&i2c1_pins &i2c1_pins_sleep>;
+ pinctrl-0 = <&i2c1_pins>;
+ pinctrl-1 = <&i2c1_pins_sleep>;
pinctrl-names = "default", "sleep";
reg = <0x12460000 0x1000>;
interrupts = <0 194 IRQ_TYPE_NONE>;
@@ -255,7 +976,8 @@
gsbi2_i2c: i2c@124a0000 {
compatible = "qcom,i2c-qup-v1.1.1";
reg = <0x124a0000 0x1000>;
- pinctrl-0 = <&i2c2_pins &i2c2_pins_sleep>;
+ pinctrl-0 = <&i2c2_pins>;
+ pinctrl-1 = <&i2c2_pins_sleep>;
pinctrl-names = "default", "sleep";
interrupts = <0 196 IRQ_TYPE_NONE>;
clocks = <&gcc GSBI2_QUP_CLK>, <&gcc GSBI2_H_CLK>;
@@ -277,7 +999,8 @@
ranges;
gsbi3_i2c: i2c@16280000 {
compatible = "qcom,i2c-qup-v1.1.1";
- pinctrl-0 = <&i2c3_pins &i2c3_pins_sleep>;
+ pinctrl-0 = <&i2c3_pins>;
+ pinctrl-1 = <&i2c3_pins_sleep>;
pinctrl-names = "default", "sleep";
reg = <0x16280000 0x1000>;
interrupts = <GIC_SPI 151 IRQ_TYPE_NONE>;
@@ -302,7 +1025,8 @@
gsbi4_i2c: i2c@16380000 {
compatible = "qcom,i2c-qup-v1.1.1";
- pinctrl-0 = <&i2c4_pins &i2c4_pins_sleep>;
+ pinctrl-0 = <&i2c4_pins>;
+ pinctrl-1 = <&i2c4_pins_sleep>;
pinctrl-names = "default", "sleep";
reg = <0x16380000 0x1000>;
interrupts = <GIC_SPI 153 IRQ_TYPE_NONE>;
@@ -337,7 +1061,8 @@
compatible = "qcom,spi-qup-v1.1.1";
reg = <0x1a280000 0x1000>;
interrupts = <0 155 0>;
- pinctrl-0 = <&spi5_default &spi5_sleep>;
+ pinctrl-0 = <&spi5_default>;
+ pinctrl-1 = <&spi5_sleep>;
pinctrl-names = "default", "sleep";
clocks = <&gcc GSBI5_QUP_CLK>, <&gcc GSBI5_H_CLK>;
clock-names = "core", "iface";
@@ -357,6 +1082,7 @@
#address-cells = <1>;
#size-cells = <1>;
ranges;
+ syscon-tcsr = <&tcsr>;
gsbi6_serial: serial@16540000 {
compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
@@ -365,12 +1091,20 @@
interrupts = <0 156 0x0>;
clocks = <&gcc GSBI6_UART_CLK>, <&gcc GSBI6_H_CLK>;
clock-names = "core", "iface";
+
+ qcom,rx-crci = <11>;
+ qcom,tx-crci = <6>;
+
+ dmas = <&adm 6>, <&adm 7>;
+ dma-names = "rx", "tx";
+
status = "disabled";
};
gsbi6_i2c: i2c@16580000 {
compatible = "qcom,i2c-qup-v1.1.1";
- pinctrl-0 = <&i2c6_pins &i2c6_pins_sleep>;
+ pinctrl-0 = <&i2c6_pins>;
+ pinctrl-1 = <&i2c6_pins_sleep>;
pinctrl-names = "default", "sleep";
reg = <0x16580000 0x1000>;
interrupts = <GIC_SPI 157 IRQ_TYPE_NONE>;
@@ -401,6 +1135,19 @@
clock-names = "core", "iface";
status = "disabled";
};
+
+ gsbi7_i2c: i2c@16680000 {
+ compatible = "qcom,i2c-qup-v1.1.1";
+ pinctrl-0 = <&i2c7_pins>;
+ pinctrl-1 = <&i2c7_pins_sleep>;
+ pinctrl-names = "default", "sleep";
+ reg = <0x16680000 0x1000>;
+ interrupts = <GIC_SPI 159 IRQ_TYPE_NONE>;
+ clocks = <&gcc GSBI7_QUP_CLK>,
+ <&gcc GSBI7_H_CLK>;
+ clock-names = "core", "iface";
+ status = "disabled";
+ };
};
rng@1a500000 {
@@ -481,11 +1228,30 @@
};
};
+ qfprom: qfprom@00700000 {
+ compatible = "qcom,qfprom";
+ reg = <0x00700000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ tsens_calib: calib {
+ reg = <0x404 0x10>;
+ };
+ tsens_backup: backup_calib {
+ reg = <0x414 0x10>;
+ };
+ };
+
gcc: clock-controller@900000 {
compatible = "qcom,gcc-apq8064";
reg = <0x00900000 0x4000>;
+ nvmem-cells = <&tsens_calib>, <&tsens_backup>;
+ nvmem-cell-names = "calib", "calib_backup";
+ qcom,tsens-slopes = <1176 1176 1154 1176 1111
+ 1132 1132 1199 1132 1199 1132>;
#clock-cells = <1>;
#reset-cells = <1>;
+ #thermal-sensor-cells = <1>;
};
lcc: clock-controller@28000000 {
@@ -783,6 +1549,26 @@
};
};
+ adm: dma@18320000 {
+ compatible = "qcom,adm";
+ reg = <0x18320000 0xE0000>;
+ interrupts = <GIC_SPI 171 IRQ_TYPE_NONE>;
+ #dma-cells = <1>;
+
+ clocks = <&gcc ADM0_CLK>, <&gcc ADM0_PBUS_CLK>;
+ clock-names = "core", "iface";
+
+ resets = <&gcc ADM0_RESET>,
+ <&gcc ADM0_PBUS_RESET>,
+ <&gcc ADM0_C0_RESET>,
+ <&gcc ADM0_C1_RESET>,
+ <&gcc ADM0_C2_RESET>;
+ reset-names = "clk", "pbus", "c0", "c1", "c2";
+ qcom,ee = <1>;
+
+ status = "disabled";
+ };
+
tcsr: syscon@1a400000 {
compatible = "qcom,tcsr-apq8064", "syscon";
reg = <0x1a400000 0x100>;
@@ -823,6 +1609,219 @@
reset-names = "axi", "ahb", "por", "pci", "phy";
status = "disabled";
};
+
+ hdmi_phy: qcom,hdmi-phy@4a00400 {
+ compatible = "qcom,hdmi-phy-8960";
+ reg-names = "hdmi_phy",
+ "hdmi_pll";
+ reg = <0x4a00400 0x60>,
+ <0x4a00500 0x100>;
+
+ clock-names = "slave_iface_clk";
+ clocks = <&mmcc HDMI_S_AHB_CLK>;
+ };
+
+ pil_q6v4: pil@28800000 {
+ compatible = "qcom,tz-pil", "qcom,apq8064-tz-pil";
+ qcom,firmware-name = "q6";
+ reg = <0x28800000 0x100>;
+ reg-names = "qdsp6_base";
+ qcom,pas-id = <1>; /* PAS_Q6 */
+ };
+
+ smd {
+ compatible = "qcom,smd";
+ adsp_a11 {
+ interrupts = <0 90 IRQ_TYPE_EDGE_RISING>;
+ qcom,ipc = <&l2cc 8 15>;
+ qcom,smd-edge = <1>;
+ qcom,remote-pid = <0x2>;
+ q6_requests {
+ compatible = "qcom,apr";
+ qcom,smd-channels = "apr_audio_svc";
+ rproc = <&pil_q6v4>;
+ };
+ };
+ };
+
+ dai_fe: dai_fe {
+ compatible = "qcom,msm-dai-fe";
+ #sound-dai-cells = <0>;
+ };
+
+ hdmi_dai: dai_hdmi {
+ compatible = "qcom,msm-dai-q6-hdmi";
+ #sound-dai-cells = <0>;
+ };
+
+ hdmi_codec: codec_hdmi {
+ compatible = "linux,hdmi-audio";
+ #sound-dai-cells = <0>;
+ };
+
+ q6_pcm: msm_pcm {
+ compatible = "qcom,msm-pcm-dsp";
+ #sound-dai-cells = <0>;
+ };
+
+ q6_route: msm_pcm_routing {
+ compatible = "qcom,msm-pcm-routing";
+ #sound-dai-cells = <0>;
+ };
+
+ snd {
+ compatible = "qcom,snd-apq8064";
+ };
+
+
+ hdmi: qcom,hdmi-tx@4a00000 {
+ compatible = "qcom,hdmi-tx-8960";
+ reg-names = "core_physical";
+ reg = <0x04a00000 0x2f0>;
+ interrupts = <GIC_SPI 79 0>;
+ clock-names =
+ "core_clk",
+ "master_iface_clk",
+ "slave_iface_clk";
+ clocks =
+ <&mmcc HDMI_APP_CLK>,
+ <&mmcc HDMI_M_AHB_CLK>,
+ <&mmcc HDMI_S_AHB_CLK>;
+ qcom,hdmi-tx-ddc-clk = <&tlmm_pinmux 70 GPIO_ACTIVE_HIGH>;
+ qcom,hdmi-tx-ddc-data = <&tlmm_pinmux 71 GPIO_ACTIVE_HIGH>;
+ qcom,hdmi-tx-hpd = <&tlmm_pinmux 72 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdmi_pinctrl>;
+
+ phys = <&hdmi_phy>;
+ phy-names = "hdmi_phy";
+ };
+
+ gpu: qcom,adreno-3xx@4300000 {
+ compatible = "qcom,adreno-3xx";
+ reg = <0x04300000 0x20000>;
+ reg-names = "kgsl_3d0_reg_memory";
+ interrupts = <GIC_SPI 80 0>;
+ interrupt-names = "kgsl_3d0_irq";
+ clock-names =
+ "core_clk",
+ "iface_clk",
+ "mem_clk",
+ "mem_iface_clk";
+ clocks =
+ <&mmcc GFX3D_CLK>,
+ <&mmcc GFX3D_AHB_CLK>,
+ <&mmcc GFX3D_AXI_CLK>,
+ <&mmcc MMSS_IMEM_AHB_CLK>;
+ qcom,chipid = <0x03020002>;
+
+ iommus = <&gfx3d 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ &gfx3d 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ &gfx3d1 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ &gfx3d1 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31>;
+
+ qcom,gpu-pwrlevels {
+ compatible = "qcom,gpu-pwrlevels";
+ qcom,gpu-pwrlevel@0 {
+ qcom,gpu-freq = <450000000>;
+ };
+ qcom,gpu-pwrlevel@1 {
+ qcom,gpu-freq = <27000000>;
+ };
+ };
+ };
+
+ mdp: qcom,mdp@5100000 {
+ compatible = "qcom,mdp4";
+ reg = <0x05100000 0xf0000>;
+ interrupts = <GIC_SPI 75 0>;
+ connectors = <&hdmi>;
+ gpus = <&gpu>;
+ clock-names =
+ "core_clk",
+ "iface_clk",
+ "lut_clk",
+ "src_clk",
+ "hdmi_clk",
+ "mdp_clk",
+ "mdp_axi_clk";
+ clocks =
+ <&mmcc MDP_CLK>,
+ <&mmcc MDP_AHB_CLK>,
+ <&mmcc MDP_LUT_CLK>,
+ <&mmcc TV_SRC>,
+ <&mmcc HDMI_TV_CLK>,
+ <&mmcc MDP_TV_CLK>,
+ <&mmcc MDP_AXI_CLK>;
+
+ iommus = <&mdp_port0 0 2
+ &mdp_port1 0 2>;
+ };
+
+ mdp_port0: qcom,iommu@7500000 {
+ compatible = "qcom,iommu-v0";
+ #iommu-cells = <2>;
+ clock-names =
+ "smmu_pclk",
+ "iommu_clk";
+ clocks =
+ <&mmcc SMMU_AHB_CLK>,
+ <&mmcc MDP_AXI_CLK>;
+ reg = <0x07500000 0x100000>;
+ interrupts =
+ <GIC_SPI 63 0>,
+ <GIC_SPI 64 0>;
+ ncb = <2>;
+ };
+
+ mdp_port1: qcom,iommu@7600000 {
+ compatible = "qcom,iommu";
+ #iommu-cells = <2>;
+ clock-names =
+ "smmu_pclk",
+ "iommu_clk";
+ clocks =
+ <&mmcc SMMU_AHB_CLK>,
+ <&mmcc MDP_AXI_CLK>;
+ reg = <0x07600000 0x100000>;
+ interrupts =
+ <GIC_SPI 61 0>,
+ <GIC_SPI 62 0>;
+ ncb = <2>;
+ };
+
+ gfx3d: qcom,iommu@7c00000 {
+ compatible = "qcom,iommu-v0";
+ #iommu-cells = <16>;
+ clock-names =
+ "smmu_pclk",
+ "iommu_clk";
+ clocks =
+ <&mmcc SMMU_AHB_CLK>,
+ <&mmcc GFX3D_AXI_CLK>;
+ reg = <0x07c00000 0x100000>;
+ interrupts =
+ <GIC_SPI 69 0>,
+ <GIC_SPI 70 0>;
+ ncb = <3>;
+ };
+
+ gfx3d1: qcom,iommu@7d00000 {
+ compatible = "qcom,iommu-v0";
+ #iommu-cells = <16>;
+ clock-names =
+ "smmu_pclk",
+ "iommu_clk";
+ clocks =
+ <&mmcc SMMU_AHB_CLK>,
+ <&mmcc GFX3D_AXI_CLK>;
+ reg = <0x07d00000 0x100000>;
+ interrupts =
+ <GIC_SPI 210 0>,
+ <GIC_SPI 211 0>;
+ ncb = <3>;
+ };
};
};
#include "qcom-apq8064-pins.dtsi"
+#include "qcom-apq8064-coresight.dtsi"
diff --git a/arch/arm/boot/dts/qcom-apq8084.dtsi b/arch/arm/boot/dts/qcom-apq8084.dtsi
index a33a09f6821ed..cd8a9f2ad653d 100644
--- a/arch/arm/boot/dts/qcom-apq8084.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8084.dtsi
@@ -86,6 +86,88 @@
};
};
+ thermal-zones {
+ cpu-thermal0 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 5>;
+
+ trips {
+ cpu_alert0: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit0: trip@1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal1 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 6>;
+
+ trips {
+ cpu_alert1: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit1: trip@1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal2 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 7>;
+
+ trips {
+ cpu_alert2: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit2: trip@1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal3 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 8>;
+
+ trips {
+ cpu_alert3: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit3: trip@1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
cpu-pmu {
compatible = "qcom,krait-pmu";
interrupts = <1 7 0xf04>;
@@ -142,6 +224,29 @@
reg = <0xf9011000 0x1000>;
};
+ qfprom: qfprom@fc4bc000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "qcom,qfprom";
+ reg = <0xfc4bc000 0x1000>;
+ tsens_calib: calib@d0 {
+ reg = <0xd0 0x18>;
+ };
+ tsens_backup: backup@440 {
+ reg = <0x440 0x10>;
+ };
+ };
+
+ tsens: thermal-sensor@fc4a8000 {
+ compatible = "qcom,msm8974-tsens";
+ reg = <0xfc4a8000 0x2000>;
+ nvmem-cells = <&tsens_calib>, <&tsens_backup>;
+ nvmem-cell-names = "calib", "calib_backup";
+ qcom,tsens-slopes = <3200 3200 3200 3200 3200 3200
+ 3200 3200 3200 3200 3200>;
+ #thermal-sensor-cells = <1>;
+ };
+
timer@f9020000 {
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/qcom-msm8960.dtsi b/arch/arm/boot/dts/qcom-msm8960.dtsi
index da05e28a81a76..d3b7e3a608920 100644
--- a/arch/arm/boot/dts/qcom-msm8960.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8960.dtsi
@@ -25,6 +25,10 @@
next-level-cache = <&L2>;
qcom,acc = <&acc0>;
qcom,saw = <&saw0>;
+ clocks = <&kraitcc 0>;
+ clock-names = "cpu";
+ clock-latency = <100000>;
+
};
cpu@1 {
@@ -35,6 +39,10 @@
next-level-cache = <&L2>;
qcom,acc = <&acc1>;
qcom,saw = <&saw1>;
+ clocks = <&kraitcc 1>;
+ clock-names = "cpu";
+ clock-latency = <100000>;
+
};
L2: l2-cache {
@@ -72,6 +80,39 @@
};
};
+ qcom,pvs {
+ qcom,pvs-format-a;
+ /* Hz uV */
+ qcom,speed0-pvs0-bin-v0 =
+ < 384000000 950000 >,
+ < 486000000 975000 >,
+ < 594000000 1000000 >,
+ < 702000000 1025000 >,
+ < 810000000 1075000 >,
+ < 918000000 1100000 >;
+
+ qcom,speed0-pvs1-bin-v0 =
+ < 384000000 900000 >,
+ < 486000000 925000 >,
+ < 594000000 950000 >,
+ < 702000000 975000 >,
+ < 810000000 1025000 >,
+ < 918000000 1050000 >;
+
+ qcom,speed0-pvs3-bin-v0 =
+ < 384000000 850000 >,
+ < 486000000 875000 >,
+ < 594000000 900000 >,
+ < 702000000 925000 >,
+ < 810000000 975000 >,
+ < 918000000 1000000 >;
+ };
+
+ kraitcc: clock-controller {
+ compatible = "qcom,krait-cc-v1";
+ #clock-cells = <1>;
+ };
+
soc: soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -128,11 +169,6 @@
#reset-cells = <1>;
};
- l2cc: clock-controller@2011000 {
- compatible = "syscon";
- reg = <0x2011000 0x1000>;
- };
-
rpm@108000 {
compatible = "qcom,rpm-msm8960";
reg = <0x108000 0x1000>;
@@ -149,11 +185,19 @@
acc0: clock-controller@2088000 {
compatible = "qcom,kpss-acc-v1";
reg = <0x02088000 0x1000>, <0x02008000 0x1000>;
+ clock-output-names = "acpu0_aux";
};
acc1: clock-controller@2098000 {
compatible = "qcom,kpss-acc-v1";
reg = <0x02098000 0x1000>, <0x02008000 0x1000>;
+ clock-output-names = "acpu1_aux";
+ };
+
+ l2cc: clock-controller@2011000 {
+ compatible = "qcom,kpss-gcc";
+ reg = <0x2011000 0x1000>;
+ clock-output-names = "acpu_l2_aux";
};
saw0: regulator@2089000 {
diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi
index 8193139d0d870..749221fc3ab64 100644
--- a/arch/arm/boot/dts/qcom-msm8974.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8974.dtsi
@@ -65,7 +65,7 @@
#size-cells = <0>;
interrupts = <1 9 0xf04>;
- cpu@0 {
+ cpu0: cpu@0 {
compatible = "qcom,krait";
enable-method = "qcom,kpss-acc-v2";
device_type = "cpu";
@@ -74,9 +74,12 @@
qcom,acc = <&acc0>;
qcom,saw = <&saw0>;
cpu-idle-states = <&CPU_SPC>;
+ clocks = <&kraitcc 0>;
+ clock-names = "cpu";
+ clock-latency = <100000>;
};
- cpu@1 {
+ cpu1: cpu@1 {
compatible = "qcom,krait";
enable-method = "qcom,kpss-acc-v2";
device_type = "cpu";
@@ -85,9 +88,12 @@
qcom,acc = <&acc1>;
qcom,saw = <&saw1>;
cpu-idle-states = <&CPU_SPC>;
+ clocks = <&kraitcc 1>;
+ clock-names = "cpu";
+ clock-latency = <100000>;
};
- cpu@2 {
+ cpu2: cpu@2 {
compatible = "qcom,krait";
enable-method = "qcom,kpss-acc-v2";
device_type = "cpu";
@@ -96,9 +102,12 @@
qcom,acc = <&acc2>;
qcom,saw = <&saw2>;
cpu-idle-states = <&CPU_SPC>;
+ clocks = <&kraitcc 2>;
+ clock-names = "cpu";
+ clock-latency = <100000>;
};
- cpu@3 {
+ cpu3: cpu@3 {
compatible = "qcom,krait";
enable-method = "qcom,kpss-acc-v2";
device_type = "cpu";
@@ -107,6 +116,9 @@
qcom,acc = <&acc3>;
qcom,saw = <&saw3>;
cpu-idle-states = <&CPU_SPC>;
+ clocks = <&kraitcc 3>;
+ clock-names = "cpu";
+ clock-latency = <100000>;
};
L2: l2-cache {
@@ -126,6 +138,88 @@
};
};
+ thermal-zones {
+ cpu-thermal0 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 5>;
+
+ trips {
+ cpu_alert0: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit0: trip@1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal1 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 6>;
+
+ trips {
+ cpu_alert1: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit1: trip@1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal2 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 7>;
+
+ trips {
+ cpu_alert2: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit2: trip@1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal3 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 8>;
+
+ trips {
+ cpu_alert3: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit3: trip@1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
cpu-pmu {
compatible = "qcom,krait-pmu";
interrupts = <1 7 0xf04>;
@@ -230,6 +324,267 @@
};
};
+ qcom,pvs {
+ qcom,pvs-format-b;
+ /* Hz uV ua */
+ qcom,speed0-pvs0-bin-v0 =
+ < 300000000 815000 73 >,
+ < 345600000 825000 85 >,
+ < 422400000 835000 104 >,
+ < 499200000 845000 124 >,
+ < 576000000 855000 144 >,
+ < 652800000 865000 165 >,
+ < 729600000 875000 186 >,
+ < 806400000 890000 208 >,
+ < 883200000 900000 229 >,
+ < 960000000 915000 252 >;
+
+ qcom,speed0-pvs1-bin-v0 =
+ < 300000000 800000 73 >,
+ < 345600000 810000 85 >,
+ < 422400000 820000 104 >,
+ < 499200000 830000 124 >,
+ < 576000000 840000 144 >,
+ < 652800000 850000 165 >,
+ < 729600000 860000 186 >,
+ < 806400000 875000 208 >,
+ < 883200000 885000 229 >,
+ < 960000000 895000 252 >;
+
+ qcom,speed0-pvs2-bin-v0 =
+ < 300000000 785000 73 >,
+ < 345600000 795000 85 >,
+ < 422400000 805000 104 >,
+ < 499200000 815000 124 >,
+ < 576000000 825000 144 >,
+ < 652800000 835000 165 >,
+ < 729600000 845000 186 >,
+ < 806400000 855000 208 >,
+ < 883200000 865000 229 >,
+ < 960000000 875000 252 >;
+
+ qcom,speed0-pvs3-bin-v0 =
+ < 300000000 775000 73 >,
+ < 345600000 780000 85 >,
+ < 422400000 790000 104 >,
+ < 499200000 800000 124 >,
+ < 576000000 810000 144 >,
+ < 652800000 820000 165 >,
+ < 729600000 830000 186 >,
+ < 806400000 840000 208 >,
+ < 883200000 850000 229 >,
+ < 960000000 860000 252 >;
+
+ qcom,speed0-pvs4-bin-v0 =
+ < 300000000 775000 73 >,
+ < 345600000 775000 85 >,
+ < 422400000 780000 104 >,
+ < 499200000 790000 124 >,
+ < 576000000 800000 144 >,
+ < 652800000 810000 165 >,
+ < 729600000 820000 186 >,
+ < 806400000 830000 208 >,
+ < 883200000 840000 229 >,
+ < 960000000 850000 252 >;
+
+ qcom,speed0-pvs5-bin-v0 =
+ < 300000000 750000 73 >,
+ < 345600000 760000 85 >,
+ < 422400000 770000 104 >,
+ < 499200000 780000 124 >,
+ < 576000000 790000 144 >,
+ < 652800000 800000 165 >,
+ < 729600000 810000 186 >,
+ < 806400000 820000 208 >,
+ < 883200000 830000 229 >,
+ < 960000000 840000 252 >;
+
+ qcom,speed0-pvs6-bin-v0 =
+ < 300000000 750000 73 >,
+ < 345600000 750000 85 >,
+ < 422400000 760000 104 >,
+ < 499200000 770000 124 >,
+ < 576000000 780000 144 >,
+ < 652800000 790000 165 >,
+ < 729600000 800000 186 >,
+ < 806400000 810000 208 >,
+ < 883200000 820000 229 >,
+ < 960000000 830000 252 >;
+
+ qcom,speed2-pvs0-bin-v0 =
+ < 300000000 800000 72 >,
+ < 345600000 800000 83 >,
+ < 422400000 805000 102 >,
+ < 499200000 815000 121 >,
+ < 576000000 825000 141 >,
+ < 652800000 835000 161 >,
+ < 729600000 845000 181 >,
+ < 806400000 855000 202 >,
+ < 883200000 865000 223 >,
+ < 960000000 875000 245 >;
+
+ qcom,speed2-pvs1-bin-v0 =
+ < 300000000 800000 72 >,
+ < 345600000 800000 83 >,
+ < 422400000 800000 102 >,
+ < 499200000 800000 121 >,
+ < 576000000 810000 141 >,
+ < 652800000 820000 161 >,
+ < 729600000 830000 181 >,
+ < 806400000 840000 202 >,
+ < 883200000 850000 223 >,
+ < 960000000 860000 245 >;
+
+ qcom,speed2-pvs2-bin-v0 =
+ < 300000000 775000 72 >,
+ < 345600000 775000 83 >,
+ < 422400000 775000 102 >,
+ < 499200000 785000 121 >,
+ < 576000000 795000 141 >,
+ < 652800000 805000 161 >,
+ < 729600000 815000 181 >,
+ < 806400000 825000 202 >,
+ < 883200000 835000 223 >,
+ < 960000000 845000 245 >;
+
+ qcom,speed2-pvs3-bin-v0 =
+ < 300000000 775000 72 >,
+ < 345600000 775000 83 >,
+ < 422400000 775000 102 >,
+ < 499200000 775000 121 >,
+ < 576000000 780000 141 >,
+ < 652800000 790000 161 >,
+ < 729600000 800000 181 >,
+ < 806400000 810000 202 >,
+ < 883200000 820000 223 >,
+ < 960000000 830000 245 >;
+
+ qcom,speed2-pvs4-bin-v0 =
+ < 300000000 775000 72 >,
+ < 345600000 775000 83 >,
+ < 422400000 775000 102 >,
+ < 499200000 775000 121 >,
+ < 576000000 775000 141 >,
+ < 652800000 780000 161 >,
+ < 729600000 790000 181 >,
+ < 806400000 800000 202 >,
+ < 883200000 810000 223 >,
+ < 960000000 820000 245 >;
+
+ qcom,speed2-pvs5-bin-v0 =
+ < 300000000 750000 72 >,
+ < 345600000 750000 83 >,
+ < 422400000 750000 102 >,
+ < 499200000 750000 121 >,
+ < 576000000 760000 141 >,
+ < 652800000 770000 161 >,
+ < 729600000 780000 181 >,
+ < 806400000 790000 202 >,
+ < 883200000 800000 223 >,
+ < 960000000 810000 245 >;
+
+ qcom,speed2-pvs6-bin-v0 =
+ < 300000000 750000 72 >,
+ < 345600000 750000 83 >,
+ < 422400000 750000 102 >,
+ < 499200000 750000 121 >,
+ < 576000000 750000 141 >,
+ < 652800000 760000 161 >,
+ < 729600000 770000 181 >,
+ < 806400000 780000 202 >,
+ < 883200000 790000 223 >,
+ < 960000000 800000 245 >;
+
+ qcom,speed1-pvs0-bin-v0 =
+ < 300000000 775000 72 >,
+ < 345600000 775000 83 >,
+ < 422400000 775000 101 >,
+ < 499200000 780000 120 >,
+ < 576000000 790000 139 >,
+ < 652800000 800000 159 >,
+ < 729600000 810000 180 >,
+ < 806400000 820000 200 >,
+ < 883200000 830000 221 >,
+ < 960000000 840000 242 >;
+
+ qcom,speed1-pvs1-bin-v0 =
+ < 300000000 775000 72 >,
+ < 345600000 775000 83 >,
+ < 422400000 775000 101 >,
+ < 499200000 775000 120 >,
+ < 576000000 775000 139 >,
+ < 652800000 785000 159 >,
+ < 729600000 795000 180 >,
+ < 806400000 805000 200 >,
+ < 883200000 815000 221 >,
+ < 960000000 825000 242 >;
+
+ qcom,speed1-pvs2-bin-v0 =
+ < 300000000 750000 72 >,
+ < 345600000 750000 83 >,
+ < 422400000 750000 101 >,
+ < 499200000 750000 120 >,
+ < 576000000 760000 139 >,
+ < 652800000 770000 159 >,
+ < 729600000 780000 180 >,
+ < 806400000 790000 200 >,
+ < 883200000 800000 221 >,
+ < 960000000 810000 242 >;
+
+ qcom,speed1-pvs3-bin-v0 =
+ < 300000000 750000 72 >,
+ < 345600000 750000 83 >,
+ < 422400000 750000 101 >,
+ < 499200000 750000 120 >,
+ < 576000000 750000 139 >,
+ < 652800000 755000 159 >,
+ < 729600000 765000 180 >,
+ < 806400000 775000 200 >,
+ < 883200000 785000 221 >,
+ < 960000000 795000 242 >;
+
+ qcom,speed1-pvs4-bin-v0 =
+ < 300000000 750000 72 >,
+ < 345600000 750000 83 >,
+ < 422400000 750000 101 >,
+ < 499200000 750000 120 >,
+ < 576000000 750000 139 >,
+ < 652800000 750000 159 >,
+ < 729600000 755000 180 >,
+ < 806400000 765000 200 >,
+ < 883200000 775000 221 >,
+ < 960000000 785000 242 >;
+
+ qcom,speed1-pvs5-bin-v0 =
+ < 300000000 725000 72 >,
+ < 345600000 725000 83 >,
+ < 422400000 725000 101 >,
+ < 499200000 725000 120 >,
+ < 576000000 725000 139 >,
+ < 652800000 735000 159 >,
+ < 729600000 745000 180 >,
+ < 806400000 755000 200 >,
+ < 883200000 765000 221 >,
+ < 960000000 775000 242 >;
+
+ qcom,speed1-pvs6-bin-v0 =
+ < 300000000 725000 72 >,
+ < 345600000 725000 83 >,
+ < 422400000 725000 101 >,
+ < 499200000 725000 120 >,
+ < 576000000 725000 139 >,
+ < 652800000 725000 159 >,
+ < 729600000 735000 180 >,
+ < 806400000 745000 200 >,
+ < 883200000 755000 221 >,
+ < 960000000 765000 242 >;
+ };
+
+ kraitcc: clock-controller {
+ compatible = "qcom,krait-cc-v2";
+ #clock-cells = <1>;
+ };
+
soc: soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -249,6 +604,29 @@
reg = <0xf9011000 0x1000>;
};
+ qfprom: qfprom@fc4bc000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "qcom,qfprom";
+ reg = <0xfc4bc000 0x1000>;
+ tsens_calib: calib@d0 {
+ reg = <0xd0 0x18>;
+ };
+ tsens_backup: backup@440 {
+ reg = <0x440 0x10>;
+ };
+ };
+
+ tsens: thermal-sensor@fc4a8000 {
+ compatible = "qcom,msm8974-tsens";
+ reg = <0xfc4a8000 0x2000>;
+ nvmem-cells = <&tsens_calib>, <&tsens_backup>;
+ nvmem-cell-names = "calib", "calib_backup";
+ qcom,tsens-slopes = <3200 3200 3200 3200 3200 3200
+ 3200 3200 3200 3200 3200>;
+ #thermal-sensor-cells = <1>;
+ };
+
timer@f9020000 {
#address-cells = <1>;
#size-cells = <1>;
@@ -328,7 +706,37 @@
reg = <0xf90b9000 0x1000>, <0xf9009000 0x1000>;
};
- saw_l2: power-controller@f9012000 {
+ clock-controller@f9016000 {
+ compatible = "qcom,hfpll";
+ reg = <0xf9016000 0x30>;
+ clock-output-names = "hfpll_l2";
+ };
+
+ clock-controller@f908a000 {
+ compatible = "qcom,hfpll";
+ reg = <0xf908a000 0x30>, <0xf900a000 0x30>;
+ clock-output-names = "hfpll0";
+ };
+
+ clock-controller@f909a000 {
+ compatible = "qcom,hfpll";
+ reg = <0xf909a000 0x30>, <0xf900a000 0x30>;
+ clock-output-names = "hfpll1";
+ };
+
+ clock-controller@f90aa000 {
+ compatible = "qcom,hfpll";
+ reg = <0xf90aa000 0x30>, <0xf900a000 0x30>;
+ clock-output-names = "hfpll2";
+ };
+
+ clock-controller@f90ba000 {
+ compatible = "qcom,hfpll";
+ reg = <0xf90ba000 0x30>, <0xf900a000 0x30>;
+ clock-output-names = "hfpll3";
+ };
+
+ saw_l2: regulator@f9012000 {
compatible = "qcom,saw2";
reg = <0xf9012000 0x1000>;
regulator;
@@ -548,3 +956,4 @@
};
};
};
+
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 9353184d730df..aea77fdce19bf 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -9,6 +9,9 @@ config DMABOUNCE
bool
select ZONE_DMA
+config KRAIT_L2_ACCESSORS
+ bool
+
config SHARP_LOCOMO
bool
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 27f23b15b1ea2..3137e2ce44cee 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -7,6 +7,7 @@ obj-y += firmware.o
obj-$(CONFIG_ICST) += icst.o
obj-$(CONFIG_SA1111) += sa1111.o
obj-$(CONFIG_DMABOUNCE) += dmabounce.o
+obj-$(CONFIG_KRAIT_L2_ACCESSORS) += krait-l2-accessors.o
obj-$(CONFIG_SHARP_LOCOMO) += locomo.o
obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o
obj-$(CONFIG_SHARP_SCOOP) += scoop.o
diff --git a/arch/arm/common/krait-l2-accessors.c b/arch/arm/common/krait-l2-accessors.c
new file mode 100644
index 0000000000000..5d514bbc88a63
--- /dev/null
+++ b/arch/arm/common/krait-l2-accessors.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/export.h>
+
+#include <asm/barrier.h>
+#include <asm/krait-l2-accessors.h>
+
+static DEFINE_RAW_SPINLOCK(krait_l2_lock);
+
+void krait_set_l2_indirect_reg(u32 addr, u32 val)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&krait_l2_lock, flags);
+ /*
+ * Select the L2 window by poking l2cpselr, then write to the window
+ * via l2cpdr.
+ */
+ asm volatile ("mcr p15, 3, %0, c15, c0, 6 @ l2cpselr" : : "r" (addr));
+ isb();
+ asm volatile ("mcr p15, 3, %0, c15, c0, 7 @ l2cpdr" : : "r" (val));
+ isb();
+
+ raw_spin_unlock_irqrestore(&krait_l2_lock, flags);
+}
+EXPORT_SYMBOL(krait_set_l2_indirect_reg);
+
+u32 krait_get_l2_indirect_reg(u32 addr)
+{
+ u32 val;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&krait_l2_lock, flags);
+ /*
+ * Select the L2 window by poking l2cpselr, then read from the window
+ * via l2cpdr.
+ */
+ asm volatile ("mcr p15, 3, %0, c15, c0, 6 @ l2cpselr" : : "r" (addr));
+ isb();
+ asm volatile ("mrc p15, 3, %0, c15, c0, 7 @ l2cpdr" : "=r" (val));
+
+ raw_spin_unlock_irqrestore(&krait_l2_lock, flags);
+
+ return val;
+}
+EXPORT_SYMBOL(krait_get_l2_indirect_reg);
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 28234906a0646..f300ce7b40d31 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -132,8 +132,14 @@ CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_STAT_DETAILS=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_QORIQ_CPUFREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_ARM_QCOM_CPUFREQ=y
CONFIG_CPU_IDLE=y
+CONFIG_ARM_QCOM_CPUIDLE=y
CONFIG_NEON=y
CONFIG_KERNEL_MODE_NEON=y
CONFIG_ARM_ZYNQ_CPUIDLE=y
@@ -163,10 +170,20 @@ CONFIG_CAN_RCAR=m
CONFIG_CAN_XILINXCAN=y
CONFIG_CAN_MCP251X=y
CONFIG_CAN_SUN4I=y
-CONFIG_BT=m
+CONFIG_BT=y
CONFIG_BT_MRVL=m
CONFIG_BT_MRVL_SDIO=m
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_ATH3K=y
CONFIG_CFG80211=m
+CONFIG_CFG80211_WEXT=y
CONFIG_MAC80211=m
CONFIG_RFKILL=y
CONFIG_RFKILL_INPUT=y
@@ -219,6 +236,7 @@ CONFIG_NETDEVICES=y
CONFIG_VIRTIO_NET=y
CONFIG_HIX5HD2_GMAC=y
CONFIG_SUN4I_EMAC=y
+CONFIG_ATL1C=y
CONFIG_MACB=y
CONFIG_NET_CALXEDA_XGMAC=y
CONFIG_GIANFAR=y
@@ -247,6 +265,10 @@ CONFIG_USB_RTL8152=m
CONFIG_USB_USBNET=y
CONFIG_USB_NET_SMSC75XX=y
CONFIG_USB_NET_SMSC95XX=y
+CONFIG_ATH_CARDS=m
+CONFIG_ATH_DEBUG=y
+CONFIG_ATH6KL=m
+CONFIG_ATH6KL_SDIO=m
CONFIG_BRCMFMAC=m
CONFIG_RT2X00=m
CONFIG_RT2800USB=m
@@ -332,6 +354,7 @@ CONFIG_I2C_GPIO=m
CONFIG_I2C_EXYNOS5=y
CONFIG_I2C_IMX=m
CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_QUP=y
CONFIG_I2C_RIIC=y
CONFIG_I2C_RK3X=y
CONFIG_I2C_S3C2410=y
@@ -426,6 +449,7 @@ CONFIG_DAVINCI_WATCHDOG=m
CONFIG_EXYNOS_THERMAL=m
CONFIG_ST_THERMAL_SYSCFG=y
CONFIG_ST_THERMAL_MEMMAP=y
+CONFIG_QCOM_TSENS=y
CONFIG_WATCHDOG=y
CONFIG_DA9063_WATCHDOG=m
CONFIG_XILINX_WATCHDOG=y
@@ -550,10 +574,11 @@ CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_LCD_PLATFORM=m
CONFIG_BACKLIGHT_PWM=y
CONFIG_BACKLIGHT_AS3711=y
+CONFIG_BACKLIGHT_GPIO=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
-CONFIG_SOUND=m
-CONFIG_SND=m
+CONFIG_SOUND=y
+CONFIG_SND=y
CONFIG_SND_DYNAMIC_MINORS=y
CONFIG_SND_HDA_TEGRA=m
CONFIG_SND_HDA_INPUT_BEEP=y
@@ -569,6 +594,11 @@ CONFIG_SND_SOC_ROCKCHIP=m
CONFIG_SND_SOC_ROCKCHIP_SPDIF=m
CONFIG_SND_SOC_ROCKCHIP_MAX98090=m
CONFIG_SND_SOC_ROCKCHIP_RT5645=m
+CONFIG_SND_SOC_QCOM=y
+CONFIG_SND_MSM_SOC=y
+CONFIG_SND_SOC_MSM_QDSP6_HDMI_AUDIO=y
+CONFIG_SND_SOC_QDSP6V2=y
+CONFIG_SND_SOC_MSM8960=y
CONFIG_SND_SOC_SH4_FSI=m
CONFIG_SND_SOC_RCAR=m
CONFIG_SND_SOC_RSRC_CARD=m
@@ -587,13 +617,14 @@ CONFIG_SND_SOC_TEGRA_MAX98090=m
CONFIG_SND_SOC_AK4642=m
CONFIG_SND_SOC_SGTL5000=m
CONFIG_SND_SOC_SPDIF=m
+CONFIG_SND_SOC_HDMI_CODEC=y
CONFIG_SND_SOC_WM8978=m
CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_MVEBU=y
CONFIG_USB_XHCI_RCAR=m
CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_MSM=m
+CONFIG_USB_EHCI_MSM=y
CONFIG_USB_EHCI_EXYNOS=y
CONFIG_USB_EHCI_TEGRA=y
CONFIG_USB_EHCI_HCD_STI=y
@@ -618,14 +649,14 @@ CONFIG_KEYSTONE_USB_PHY=y
CONFIG_OMAP_USB3=y
CONFIG_USB_GPIO_VBUS=y
CONFIG_USB_ISP1301=y
-CONFIG_USB_MSM_OTG=m
+CONFIG_USB_MSM_OTG=y
CONFIG_USB_MXS_PHY=y
CONFIG_USB_GADGET=y
CONFIG_USB_FSL_USB2=y
CONFIG_USB_RENESAS_USBHS_UDC=m
CONFIG_USB_ETH=m
CONFIG_MMC=y
-CONFIG_MMC_BLOCK_MINORS=16
+CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_ARMMMCI=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
@@ -698,6 +729,7 @@ CONFIG_RTC_DRV_DIGICOLOR=m
CONFIG_RTC_DRV_S5M=m
CONFIG_RTC_DRV_S3C=m
CONFIG_RTC_DRV_PL031=y
+CONFIG_RTC_DRV_PM8XXX=y
CONFIG_RTC_DRV_AT91RM9200=m
CONFIG_RTC_DRV_AT91SAM9=m
CONFIG_RTC_DRV_VT8500=y
@@ -725,6 +757,7 @@ CONFIG_MXS_DMA=y
CONFIG_DMA_BCM2835=y
CONFIG_DMA_OMAP=y
CONFIG_QCOM_BAM_DMA=y
+CONFIG_QCOM_ADM=y
CONFIG_XILINX_VDMA=y
CONFIG_DMA_SUN6I=y
CONFIG_STAGING=y
@@ -758,8 +791,20 @@ CONFIG_MSM_MMCC_8960=y
CONFIG_MSM_MMCC_8974=y
CONFIG_HWSPINLOCK_QCOM=y
CONFIG_ROCKCHIP_IOMMU=y
+CONFIG_QCOM_RPMCC=y
+CONFIG_QCOM_HFPLL=y
+CONFIG_KPSS_XCC=y
+CONFIG_KRAITCC=y
+CONFIG_QCOM_CLK_RPM=y
+CONFIG_MSM_IOMMU=y
CONFIG_TEGRA_IOMMU_GART=y
CONFIG_TEGRA_IOMMU_SMMU=y
+CONFIG_QCOM_Q6V5_PIL=y
+CONFIG_QCOM_TZ_PIL=y
+CONFIG_QCOM_GSBI=y
+CONFIG_QCOM_PM=y
+CONFIG_QCOM_SMD=y
+CONFIG_QCOM_SMEM=y
CONFIG_PM_DEVFREQ=y
CONFIG_ARM_TEGRA_DEVFREQ=m
CONFIG_MEMORY=y
@@ -791,7 +836,7 @@ CONFIG_TI_PIPE3=y
CONFIG_PHY_BERLIN_USB=y
CONFIG_PHY_BERLIN_SATA=y
CONFIG_PHY_ROCKCHIP_USB=m
-CONFIG_PHY_QCOM_APQ8064_SATA=m
+CONFIG_PHY_QCOM_APQ8064_SATA=y
CONFIG_PHY_MIPHY28LP=y
CONFIG_PHY_MIPHY365X=y
CONFIG_PHY_RCAR_GEN2=m
@@ -804,6 +849,7 @@ CONFIG_NVMEM=y
CONFIG_NVMEM_SUNXI_SID=y
CONFIG_BCM2835_MBOX=y
CONFIG_RASPBERRYPI_FIRMWARE=y
+CONFIG_QCOM_QFPROM=y
CONFIG_EXT4_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_MSDOS_FS=y
@@ -827,7 +873,6 @@ CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_LOCKUP_DETECTOR=y
CONFIG_CRYPTO_DEV_TEGRA_AES=y
-CONFIG_CPUFREQ_DT=y
CONFIG_KEYSTONE_IRQ=y
CONFIG_CRYPTO_DEV_SUN4I_SS=m
CONFIG_CRYPTO_DEV_ROCKCHIP=m
@@ -849,3 +894,7 @@ CONFIG_VIRTIO=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_PCI_LEGACY=y
CONFIG_VIRTIO_MMIO=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_SCHED_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_TRACEPOINT_BENCHMARK=y
diff --git a/arch/arm/configs/qcom_defconfig b/arch/arm/configs/qcom_defconfig
index 7bff7bf24a851..f83c97392d5d4 100644
--- a/arch/arm/configs/qcom_defconfig
+++ b/arch/arm/configs/qcom_defconfig
@@ -23,19 +23,37 @@ CONFIG_ARCH_QCOM=y
CONFIG_ARCH_MSM8X60=y
CONFIG_ARCH_MSM8960=y
CONFIG_ARCH_MSM8974=y
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_STUB=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCIE_QCOM=y
CONFIG_SMP=y
CONFIG_HAVE_ARM_ARCH_TIMER=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
CONFIG_HIGHMEM=y
CONFIG_CLEANCACHE=y
+CONFIG_CMA=y
+CONFIG_SECCOMP=y
CONFIG_ARM_APPENDED_DTB=y
CONFIG_ARM_ATAG_DTB_COMPAT=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_ARM_QCOM_CPUFREQ=y
CONFIG_CPU_IDLE=y
CONFIG_ARM_CPUIDLE=y
+CONFIG_ARM_QCOM_CPUIDLE=y
CONFIG_VFP=y
CONFIG_NEON=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_PM_RUNTIME=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -50,25 +68,44 @@ CONFIG_IP_PNP_DHCP=y
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
-CONFIG_CFG80211=y
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_ATH3K=y
+CONFIG_CFG80211=m
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=m
CONFIG_RFKILL=y
+CONFIG_RFKILL_INPUT=y
+CONFIG_RFKILL_GPIO=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=64
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_M25P80=y
CONFIG_MTD_SPI_NOR=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
-CONFIG_SCSI=y
+CONFIG_EEPROM_AT24=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI_PLATFORM=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
+CONFIG_ATL1C=y
CONFIG_KS8851=y
CONFIG_MDIO_BITBANG=y
CONFIG_MDIO_GPIO=y
@@ -78,6 +115,9 @@ CONFIG_SLIP_MODE_SLIP6=y
CONFIG_USB_USBNET=y
# CONFIG_USB_NET_AX8817X is not set
# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_ATH_CARDS=m
+CONFIG_ATH6KL=m
+CONFIG_ATH6KL_SDIO=m
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO=y
@@ -94,7 +134,6 @@ CONFIG_SERIO_LIBPS2=y
CONFIG_SERIAL_MSM=y
CONFIG_SERIAL_MSM_CONSOLE=y
CONFIG_HW_RANDOM=y
-CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_QUP=y
CONFIG_SPI=y
@@ -108,7 +147,6 @@ CONFIG_PINCTRL_MSM8X74=y
CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
CONFIG_PINCTRL_QCOM_SSBI_PMIC=y
CONFIG_GPIOLIB=y
-CONFIG_DEBUG_GPIO=y
CONFIG_GPIO_SYSFS=y
CONFIG_CHARGER_QCOM_SMBB=y
CONFIG_POWER_RESET=y
@@ -117,12 +155,78 @@ CONFIG_THERMAL=y
CONFIG_MFD_PM8921_CORE=y
CONFIG_MFD_QCOM_RPM=y
CONFIG_MFD_SPMI_PMIC=y
+CONFIG_CPU_THERMAL=y
+CONFIG_QCOM_TSENS=y
+CONFIG_WATCHDOG=y
+CONFIG_QCOM_WDT=y
+CONFIG_MFD_SYSCON=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_QCOM_RPM=y
CONFIG_REGULATOR_QCOM_SMD_RPM=y
CONFIG_MEDIA_SUPPORT=y
-CONFIG_FB=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=y
+CONFIG_USB_GSPCA=y
+CONFIG_USB_M5602=m
+CONFIG_USB_STV06XX=m
+CONFIG_USB_GL860=m
+CONFIG_USB_GSPCA_BENQ=m
+CONFIG_USB_GSPCA_CONEX=m
+CONFIG_USB_GSPCA_CPIA1=m
+CONFIG_USB_GSPCA_DTCS033=m
+CONFIG_USB_GSPCA_ETOMS=m
+CONFIG_USB_GSPCA_FINEPIX=m
+CONFIG_USB_GSPCA_JEILINJ=m
+CONFIG_USB_GSPCA_JL2005BCD=m
+CONFIG_USB_GSPCA_KINECT=m
+CONFIG_USB_GSPCA_KONICA=m
+CONFIG_USB_GSPCA_MARS=m
+CONFIG_USB_GSPCA_MR97310A=m
+CONFIG_USB_GSPCA_NW80X=m
+CONFIG_USB_GSPCA_OV519=m
+CONFIG_USB_GSPCA_OV534=m
+CONFIG_USB_GSPCA_OV534_9=m
+CONFIG_USB_GSPCA_PAC207=m
+CONFIG_USB_GSPCA_PAC7302=m
+CONFIG_USB_GSPCA_PAC7311=m
+CONFIG_USB_GSPCA_SE401=m
+CONFIG_USB_GSPCA_SN9C2028=m
+CONFIG_USB_GSPCA_SN9C20X=m
+CONFIG_USB_GSPCA_SONIXB=m
+CONFIG_USB_GSPCA_SONIXJ=m
+CONFIG_USB_GSPCA_SPCA500=m
+CONFIG_USB_GSPCA_SPCA501=m
+CONFIG_USB_GSPCA_SPCA505=m
+CONFIG_USB_GSPCA_SPCA506=m
+CONFIG_USB_GSPCA_SPCA508=m
+CONFIG_USB_GSPCA_SPCA561=m
+CONFIG_USB_GSPCA_SPCA1528=m
+CONFIG_USB_GSPCA_SQ905=m
+CONFIG_USB_GSPCA_SQ905C=m
+CONFIG_USB_GSPCA_SQ930X=m
+CONFIG_USB_GSPCA_STK014=m
+CONFIG_USB_GSPCA_STK1135=m
+CONFIG_USB_GSPCA_STV0680=m
+CONFIG_USB_GSPCA_SUNPLUS=m
+CONFIG_USB_GSPCA_T613=m
+CONFIG_USB_GSPCA_TOPRO=m
+CONFIG_USB_GSPCA_TV8532=m
+CONFIG_USB_GSPCA_VC032X=m
+CONFIG_USB_GSPCA_VICAM=m
+CONFIG_USB_GSPCA_XIRLINK_CIT=m
+CONFIG_USB_GSPCA_ZC3XX=m
+CONFIG_USB_PWC=m
+CONFIG_USB_ZR364XX=m
+CONFIG_DRM=y
+CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GPIO=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_DYNAMIC_MINORS=y
@@ -130,16 +234,39 @@ CONFIG_SND_DYNAMIC_MINORS=y
# CONFIG_SND_SPI is not set
# CONFIG_SND_USB is not set
CONFIG_SND_SOC=y
+CONFIG_SND_SOC_QCOM=y
+CONFIG_SND_MSM_SOC=y
+CONFIG_SND_SOC_MSM_QDSP6_HDMI_AUDIO=y
+CONFIG_SND_SOC_QDSP6V2=y
+CONFIG_SND_SOC_MSM8960=y
+CONFIG_SND_SOC_HDMI_CODEC=y
CONFIG_HID_BATTERY_STRENGTH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_LOGITECH=m
+CONFIG_HID_MAGICMOUSE=m
+CONFIG_HID_MICROSOFT=m
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_OTG_FSM=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_MSM=y
CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_CHIPIDEA_HOST=y
CONFIG_USB_SERIAL=y
+CONFIG_USB_MSM_OTG=y
CONFIG_USB_GADGET=y
CONFIG_USB_GADGET_DEBUG_FILES=y
CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_ETH=m
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_FUNCTIONFS=m
+CONFIG_USB_FUNCTIONFS_RNDIS=y
+CONFIG_USB_MASS_STORAGE=m
CONFIG_MMC=y
CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_ARMMMCI=y
@@ -150,6 +277,7 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_PM8XXX=y
CONFIG_DMADEVICES=y
CONFIG_QCOM_BAM_DMA=y
+CONFIG_QCOM_ADM=y
CONFIG_STAGING=y
CONFIG_COMMON_CLK_QCOM=y
CONFIG_APQ_MMCC_8084=y
@@ -159,6 +287,12 @@ CONFIG_MSM_LCC_8960=y
CONFIG_MSM_MMCC_8960=y
CONFIG_MSM_MMCC_8974=y
CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_QCOM_HFPLL=y
+CONFIG_KPSS_XCC=y
+CONFIG_KRAITCC=y
+CONFIG_QCOM_CLK_RPM=y
+CONFIG_MSM_IOMMU=y
+CONFIG_QCOM_TZ_PIL=y
CONFIG_QCOM_GSBI=y
CONFIG_QCOM_PM=y
CONFIG_QCOM_SMEM=y
@@ -166,16 +300,26 @@ CONFIG_QCOM_SMD=y
CONFIG_QCOM_SMD_RPM=y
CONFIG_PHY_QCOM_APQ8064_SATA=y
CONFIG_PHY_QCOM_IPQ806X_SATA=y
+CONFIG_NVMEM=y
+CONFIG_QCOM_QFPROM=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_AUTOFS4_FS=y
CONFIG_FUSE_FS=y
CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V3_ACL=y
CONFIG_CIFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
@@ -187,5 +331,5 @@ CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_LOCKUP_DETECTOR=y
# CONFIG_DETECT_HUNG_TASK is not set
-# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y
diff --git a/arch/arm/include/asm/krait-l2-accessors.h b/arch/arm/include/asm/krait-l2-accessors.h
new file mode 100644
index 0000000000000..48fe5527bc014
--- /dev/null
+++ b/arch/arm/include/asm/krait-l2-accessors.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASMARM_KRAIT_L2_ACCESSORS_H
+#define __ASMARM_KRAIT_L2_ACCESSORS_H
+
+extern void krait_set_l2_indirect_reg(u32 addr, u32 val);
+extern u32 krait_get_l2_indirect_reg(u32 addr);
+
+#endif
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c941e93048ad4..9d43edb8cc0d1 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2275,6 +2275,15 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
struct dma_map_ops *dma_ops;
dev->archdata.dma_coherent = coherent;
+
+ /*
+ * Don't override the dma_ops if they have already been set. Ideally
+ * this should be the only location where dma_ops are set, remove this
+ * check when all other callers of set_dma_ops will have disappeared.
+ */
+ if (dev->archdata.dma_ops)
+ return;
+
if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
dma_ops = arm_get_iommu_dma_map_ops(coherent);
else
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 4f436220384f8..611459a633c1b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -188,6 +188,32 @@ config NEED_DMA_MAP_STATE
config NEED_SG_DMA_LENGTH
def_bool y
+config ARM64_DMA_USE_IOMMU
+ bool
+ select ARM_HAS_SG_CHAIN
+ select NEED_SG_DMA_LENGTH
+
+if ARM64_DMA_USE_IOMMU
+
+config ARM64_DMA_IOMMU_ALIGNMENT
+ int "Maximum PAGE_SIZE order of alignment for DMA IOMMU buffers"
+ range 4 9
+ default 8
+ help
+ DMA mapping framework by default aligns all buffers to the smallest
+ PAGE_SIZE order which is greater than or equal to the requested buffer
+ size. This works well for buffers up to a few hundreds kilobytes, but
+ for larger buffers it just a waste of address space. Drivers which has
+ relatively small addressing window (like 64Mib) might run out of
+ virtual space with just a few allocations.
+
+ With this parameter you can specify the maximum PAGE_SIZE order for
+ DMA IOMMU buffers. Larger buffers will be aligned only to this
+ specified order. The order is expressed as a power of two multiplied
+ by the PAGE_SIZE.
+
+endif
+
config SMP
def_bool y
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index fa1f661ccccfb..2d24bda17ef84 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -1,5 +1,8 @@
-dtb-$(CONFIG_ARCH_QCOM) += apq8016-sbc.dtb msm8916-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM) += apq8016-sbc.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msm8916-mtp.dtb
dtb-$(CONFIG_ARCH_QCOM) += msm8996-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM) += apq8096-dragonboard.dtb
+dtb-$(CONFIG_ARCH_QCOM) += apq8096-sbc.dtb
always := $(dtb-y)
subdir-y := $(dts-dirs)
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi
index f881437d53c5f..d946408121946 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi
@@ -1,4 +1,5 @@
#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+#include <dt-bindings/pinctrl/qcom,pmic-mpp.h>
&pm8916_gpios {
@@ -30,6 +31,18 @@
&pm8916_mpps {
+ pinctrl-names = "default";
+ pinctrl-0 = <&ls_exp_gpio_f>;
+
+ ls_exp_gpio_f: pm8916_mpp4 {
+ pinconf {
+ pins = "mpp4";
+ function = "digital";
+ output-low;
+ power-source = <PM8916_MPP_L5>; // 1.8V
+ };
+ };
+
pm8916_mpps_leds: pm8916_mpps_leds {
pinconf {
pins = "mpp2", "mpp3";
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi
index ee828a8a82361..185388de914c6 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi
@@ -24,4 +24,65 @@
bias-pull-up;
};
};
+
+ adv7533_int_active: adv533_int_active {
+ pinmux {
+ function = "gpio";
+ pins = "gpio31";
+ };
+ pinconf {
+ pins = "gpio31";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ adv7533_int_suspend: adv7533_int_suspend {
+ pinmux {
+ function = "gpio";
+ pins = "gpio31";
+ };
+ pinconf {
+ pins = "gpio31";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ adv7533_switch_active: adv7533_switch_active {
+ pinmux {
+ function = "gpio";
+ pins = "gpio32";
+ };
+ pinconf {
+ pins = "gpio32";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ adv7533_switch_suspend: adv7533_switch_suspend {
+ pinmux {
+ function = "gpio";
+ pins = "gpio32";
+ };
+ pinconf {
+ pins = "gpio32";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ msm_key_volp_n_default: msm_key_volp_n_default {
+ pinmux {
+ function = "gpio";
+ pins = "gpio107";
+ };
+ pinconf {
+ pins = "gpio107";
+ drive-strength = <8>;
+ input-enable;
+ bias-pull-up;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
index 825f489a2af7e..b0cac5337fcca 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
@@ -12,10 +12,11 @@
*/
/dts-v1/;
-
+#include <dt-bindings/arm/qcom-ids.h>
#include "apq8016-sbc.dtsi"
/ {
model = "Qualcomm Technologies, Inc. APQ 8016 SBC";
compatible = "qcom,apq8016-sbc", "qcom,apq8016", "qcom,sbc";
+ qcom,board-id = <QCOM_BRD_ID(SBC, 1, 0) QCOM_BRD_SUBTYPE_DEFAULT>;
};
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
index 205ef89b8ca0b..1613a3013b2ca 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
@@ -11,14 +11,17 @@
* GNU General Public License for more details.
*/
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/sound/apq8016-lpass.h>
#include "msm8916.dtsi"
#include "pm8916.dtsi"
#include "apq8016-sbc-soc-pins.dtsi"
#include "apq8016-sbc-pmic-pins.dtsi"
+#include "msm8916-mdss.dtsi"
/ {
aliases {
- serial0 = &blsp1_uart2;
serial1 = &blsp1_uart1;
usid0 = &pm8916_0;
i2c0 = &blsp_i2c2;
@@ -28,11 +31,11 @@
spi1 = &blsp_spi3;
};
- chosen {
- stdout-path = "serial0";
- };
-
soc {
+ dma@7884000 {
+ status = "okay";
+ };
+
serial@78af000 {
label = "LS-UART0";
status = "okay";
@@ -59,6 +62,29 @@
/* On High speed expansion */
label = "HS-I2C2";
status = "okay";
+
+ adv_bridge: bridge@39 {
+ status = "okay";
+ compatible = "adi,adv7533";
+ reg = <0x39>;
+ interrupt-parent = <&msmgpio>;
+ interrupts = <31 2>;
+ avdd-supply = <&pm8916_l6>;
+ v3p3-supply = <&pm8916_l17>;
+ adi,dsi-lanes = <4>;
+ pd-gpios = <&msmgpio 32 0>;
+ pinctrl-names = "default","sleep";
+ pinctrl-0 = <&adv7533_int_active &adv7533_switch_active>;
+ pinctrl-1 = <&adv7533_int_suspend &adv7533_switch_suspend>;
+ #sound-dai-cells = <1>;
+
+ port {
+ adv_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ };
+ };
+
+ };
};
i2c@78ba000 {
@@ -140,6 +166,71 @@
status = "okay";
};
+ /*
+ Internal Codec
+ playback - Primary MI2S
+ capture - Ter MI2S
+
+ External Primary:
+ playback - secondary MI2S
+ capture - Quat MI2S
+
+ External Secondary:
+ playback - Quat MI2S
+ capture - Quat MI2S
+
+ */
+ sound {
+ status = "okay";
+ pinctrl-0 = <&cdc_pdm_lines_act &ext_sec_tlmm_lines_act &ext_mclk_tlmm_lines_act>;
+ pinctrl-1 = <&cdc_pdm_lines_sus &ext_sec_tlmm_lines_sus &ext_mclk_tlmm_lines_sus>;
+ pinctrl-names = "default", "sleep";
+ qcom,model = "DB410c";
+ qcom,audio-routing =
+ "MIC BIAS External", "Handset Mic",
+ "MIC BIAS Internal2", "Headset Mic",
+ "MIC BIAS External", "Secondary Mic",
+ "AMIC1", "MIC BIAS External",
+ "AMIC2", "MIC BIAS Internal2",
+ "AMIC3", "MIC BIAS External",
+ "DMIC1", "MIC BIAS Internal1",
+ "MIC BIAS Internal1", "Digital Mic1",
+ "DMIC2", "MIC BIAS Internal1",
+ "MIC BIAS Internal1", "Digital Mic2";
+
+ /* External Primary or External Secondary -ADV7533 HDMI */
+ external-dai-link@0 {
+ link-name = "ADV7533";
+
+ cpu { /* QUAT */
+ sound-dai = <&lpass MI2S_QUATERNARY>;
+ };
+ codec {
+ sound-dai = <&adv_bridge 0>;
+ };
+ };
+
+ internal-codec-playback-dai-link@0 { /* I2S - Internal codec */
+ link-name = "WCD";
+ cpu { /* PRIMARY */
+ sound-dai = <&lpass MI2S_PRIMARY>;
+ };
+ codec {
+ sound-dai = <&wcd_codec 0>;
+ };
+ };
+
+ internal-codec-capture-dai-link@0 { /* I2S - Internal codec */
+ link-name = "WCD-Capture";
+ cpu { /* PRIMARY */
+ sound-dai = <&lpass MI2S_TERTIARY>;
+ };
+ codec {
+ sound-dai = <&wcd_codec 1>;
+ };
+ };
+ };
+
usb@78d9000 {
extcon = <&usb_id>, <&usb_id>;
status = "okay";
@@ -164,6 +255,26 @@
lpass@07708000 {
status = "okay";
};
+
+ dsi@1a98000 {
+ status = "okay";
+
+ vdda-supply = <&pm8916_l2>;
+ vddio-supply = <&pm8916_l6>;
+
+ port {
+ dsi_out: endpoint {
+ remote-endpoint = <&adv_in>;
+ qcom,data-lane-map = <0 1 2 3>;
+ };
+ };
+ };
+
+ dsi-phy@1a98300 {
+ status = "okay";
+
+ vddio-supply = <&pm8916_l6>;
+ };
};
usb2513 {
@@ -178,6 +289,27 @@
pinctrl-names = "default";
pinctrl-0 = <&usb_id_default>;
};
+
+ gpio_keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ autorepeat;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&msm_key_volp_n_default>;
+
+ button@0 {
+ label = "Volume Up";
+ linux,code = <KEY_VOLUMEUP>;
+ gpios = <&msmgpio 107 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&spmi_pon {
+ // Overwrite RESETIN_N keyboard scan code
+ linux,code = <KEY_VOLUMEDOWN>;
};
&smd_rpm_regulators {
@@ -299,3 +431,22 @@
regulator-max-microvolt = <3337000>;
};
};
+
+&wcd_codec {
+ status = "okay";
+ clocks = <&gcc GCC_CODEC_DIGCODEC_CLK>;
+ clock-names = "mclk";
+ digital = <&wcd_digital>;
+};
+/* default regulators required for mezzanine boards */
+&pm8916_l15 {
+ regulator-always-on;
+};
+
+&vidc_rproc {
+ status = "okay";
+};
+
+&vidc {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8096-dragonboard.dts b/arch/arm64/boot/dts/qcom/apq8096-dragonboard.dts
new file mode 100644
index 0000000000000..6722b3f6b40d2
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8096-dragonboard.dts
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "apq8096-dragonboard.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. APQ 8096 DragonBoard";
+ compatible = "qcom,apq8096-dragonboard";
+ qcom,pmic-id = <0x20009 0x2000A 0x0 0x0>;
+ qcom,board-id = <10 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8096-dragonboard.dtsi b/arch/arm64/boot/dts/qcom/apq8096-dragonboard.dtsi
new file mode 100644
index 0000000000000..d8a415212c9e2
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8096-dragonboard.dtsi
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8996.dtsi"
+#include "pm8994.dtsi"
+
+/ {
+ aliases {
+ serial0 = &blsp2_uart1;
+ };
+
+ chosen {
+ stdout-path = "serial0";
+ };
+
+ soc {
+ serial@75b0000 {
+ status = "okay";
+ };
+
+ ufsphy@627000 {
+ status = "okay";
+ };
+
+ ufshc@624000 {
+ status = "okay";
+ };
+
+ hdmi-phy@9a0600 {
+ status = "okay";
+
+ vddio-supply = <&pm8994_l12>;
+ vcca-supply = <&pm8994_l28>;
+ };
+
+ hdmi-tx@9a0000 {
+ status = "okay";
+
+ core-vdda-supply = <&pm8994_l12>;
+ core-vcc-supply = <&pm8994_s4>;
+ };
+ };
+
+ glink {
+ rpm {
+ rpm-requests {
+ pm8994-regulators {
+ vdd_l1-supply = <&pm8994_s3>;
+ vdd_l2_l26_l28-supply = <&pm8994_s3>;
+ vdd_l3_l11-supply = <&pm8994_s3>;
+ vdd_l4_l27_l31-supply = <&pm8994_s3>;
+ vdd_l5_l7-supply = <&pm8994_s5>;
+ vdd_l14_l15-supply = <&pm8994_s5>;
+ vdd_l20_l21-supply = <&pm8994_s5>;
+ vdd_l25-supply = <&pm8994_s3>;
+
+ s3 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ };
+ s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ s5 {
+ regulator-min-microvolt = <2150000>;
+ regulator-max-microvolt = <2150000>;
+ };
+ s7 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <800000>;
+ };
+
+ l1 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+ l2 {
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1250000>;
+ };
+ l3 {
+ regulator-min-microvolt = <850000>;
+ regulator-max-microvolt = <850000>;
+ };
+ l4 {
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ };
+ l6 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ };
+ l8 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l9 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l10 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l11 {
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
+ };
+ l12 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l13 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2950000>;
+ };
+ l14 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l15 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l16 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2700000>;
+ };
+ l17 {
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
+ };
+ l18 {
+ regulator-min-microvolt = <2700000>;
+ regulator-max-microvolt = <2900000>;
+ };
+ l19 {
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ };
+ l20 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ regulator-allow-set-load;
+ };
+ l21 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ };
+ l22 {
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ l23 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+ l24 {
+ regulator-min-microvolt = <3075000>;
+ regulator-max-microvolt = <3075000>;
+ };
+ l25 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-allow-set-load;
+ };
+ l27 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ };
+ l28 {
+ regulator-min-microvolt = <925000>;
+ regulator-max-microvolt = <925000>;
+ regulator-allow-set-load;
+ };
+ l29 {
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ };
+ l30 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ l32 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ };
+ };
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/apq8096-sbc.dts b/arch/arm64/boot/dts/qcom/apq8096-sbc.dts
new file mode 100644
index 0000000000000..3311e633194be
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/apq8096-sbc.dts
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "apq8096-dragonboard.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. APQ 8096 SBC";
+ compatible = "qcom,apq8096-sbc";
+ qcom,pmic-id = <0x20009 0x10013 0x0 0x0>;
+ qcom,board-id = <24 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msm-iommu-v2.dtsi b/arch/arm64/boot/dts/qcom/msm-iommu-v2.dtsi
new file mode 100644
index 0000000000000..6f73eb26aaecc
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm-iommu-v2.dtsi
@@ -0,0 +1,238 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ gfx_iommu: qcom,iommu@1f00000 {
+ compatible = "qcom,msm-smmu-v2", "qcom,msm-mmu-500";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ reg = <0x1f00000 0x10000>;
+ reg-names = "iommu_base";
+ interrupts = <0 43 0>, <0 42 0>;
+ interrupt-names = "global_cfg_NS_irq", "global_cfg_S_irq";
+ label = "gfx_iommu";
+ qcom,iommu-secure-id = <18>;
+ clocks = <&gcc GCC_SMMU_CFG_CLK>,
+ <&gcc GCC_GFX_TCU_CLK>;
+ clock-names = "iface_clk", "core_clk";
+ status = "disabled";
+
+ qcom,iommu-ctx@1f09000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1f09000 0x1000>;
+ interrupts = <0 241 0>;
+ qcom,iommu-ctx-sids = <0>;
+ label = "gfx3d_user";
+ };
+
+ qcom,iommu-ctx@1f0a000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1f0a000 0x1000>;
+ interrupts = <0 242 0>;
+ qcom,iommu-ctx-sids = <1>;
+ label = "gfx3d_priv";
+ };
+ };
+
+ apps_iommu: qcom,iommu@1e00000 {
+ compatible = "qcom,msm-smmu-v2", "qcom,msm-mmu-500";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ reg = <0x1e00000 0x40000
+ 0x1ef0000 0x3000>;
+ reg-names = "iommu_base", "smmu_local_base";
+ interrupts = <0 43 0>, <0 42 0>;
+ interrupt-names = "global_cfg_NS_irq", "global_cfg_S_irq";
+ label = "apps_iommu";
+ qcom,iommu-secure-id = <17>;
+ clocks = <&gcc GCC_SMMU_CFG_CLK>,
+ <&gcc GCC_APSS_TCU_CLK>;
+ clock-names = "iface_clk", "core_clk";
+ qcom,cb-base-offset = <0x20000>;
+ status = "disabled";
+
+ qcom,iommu-ctx@1e22000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e22000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0x2000>;
+ label = "jpeg_enc0";
+ };
+
+ qcom,iommu-ctx@1e23000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e23000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0x400>;
+ label = "vfe";
+ };
+
+ qcom,iommu-ctx@1e24000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e24000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0xc00>;
+ label = "mdp_0";
+ };
+
+ venus_ns: qcom,iommu-ctx@1e25000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e25000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0x800 0x801 0x802 0x803
+ 0x804 0x805 0x807>;
+ label = "venus_ns";
+ };
+
+ qcom,iommu-ctx@1e26000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e26000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0x402>;
+ label = "cpp";
+ };
+
+ qcom,iommu-ctx@1e27000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e27000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0x1000>;
+ label = "mDSP";
+ };
+
+ qcom,iommu-ctx@1e28000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e28000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0x1400>;
+ label = "gss";
+ };
+
+ qcom,iommu-ctx@1e29000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e29000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0x1800>;
+ label = "a2";
+ };
+
+ qcom,iommu-ctx@1e32000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ qcom,secure-context;
+ reg = <0x1e32000 0x1000>;
+ interrupts = <0 70 0>, <0 70 0>;
+ qcom,iommu-ctx-sids = <0xc01>;
+ label = "mdp_1";
+ };
+
+ venus_sec_pixel: qcom,iommu-ctx@1e33000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ qcom,secure-context;
+ reg = <0x1e33000 0x1000>;
+ interrupts = <0 70 0>, <0 70 0>;
+ qcom,iommu-ctx-sids = <0x885>;
+ label = "venus_sec_pixel";
+ };
+
+ venus_sec_bitstream: qcom,iommu-ctx@1e34000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ qcom,secure-context;
+ reg = <0x1e34000 0x1000>;
+ interrupts = <0 70 0>, <0 70 0>;
+ qcom,iommu-ctx-sids = <0x880 0x881 0x882 0x883 0x884>;
+ label = "venus_sec_bitstream";
+ };
+
+ venus_sec_non_pixel: qcom,iommu-ctx@1e35000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ qcom,secure-context;
+ reg = <0x1e35000 0x1000>;
+ interrupts = <0 70 0>, <0 70 0>;
+ qcom,iommu-ctx-sids = <0x887 0x8a0>;
+ label = "venus_sec_non_pixel";
+ };
+
+ venus_fw: qcom,iommu-ctx@1e36000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ qcom,secure-context;
+ reg = <0x1e36000 0x1000>;
+ interrupts = <0 70 0>, <0 70 0>;
+ qcom,iommu-ctx-sids = <0x8c0 0x8c6>;
+ label = "venus_fw";
+ };
+
+ periph_rpm: qcom,iommu-ctx@1e37000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ qcom,secure-context;
+ reg = <0x1e37000 0x1000>;
+ interrupts = <0 70 0>, <0 70 0>;
+ qcom,iommu-ctx-sids = <0x40>;
+ label = "periph_rpm";
+ };
+
+ qcom,iommu-ctx@1e38000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e38000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0xC0 0xC4 0xC8 0xCC 0xD0 0xD3
+ 0xD4 0xD7 0xD8 0xDB 0xDC 0xDF
+ 0xF0 0xF3 0xF4 0xF7 0xF8 0xFB
+ 0xFC 0xFF>;
+ label = "periph_CE";
+ };
+
+ qcom,iommu-ctx@1e39000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e39000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0x280 0x283 0x284 0x287 0x288
+ 0x28B 0x28C 0x28F 0x290 0x293
+ 0x294 0x297 0x298 0x29B 0x29C
+ 0x29F>;
+ label = "periph_BLSP";
+ };
+
+ qcom,iommu-ctx@1e3a000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e3a000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0x100>;
+ label = "periph_SDC1";
+ };
+
+ qcom,iommu-ctx@1e3b000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e3b000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0x140>;
+ label = "periph_SDC2";
+ };
+
+ qcom,iommu-ctx@1e3c000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e3c000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0x1c0>;
+ label = "periph_audio";
+ };
+
+ qcom,iommu-ctx@1e3d000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e3d000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0x2c0>;
+ label = "periph_USB_HS1";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-bus.dtsi b/arch/arm64/boot/dts/qcom/msm8916-bus.dtsi
new file mode 100644
index 0000000000000..11e707cef476f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916-bus.dtsi
@@ -0,0 +1,858 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/soc/msm-bus-rule-ops.h>
+
+&soc {
+ ad_hoc_bus: ad-hoc-bus { };
+
+ static-rules {
+ compatible = "qcom,msm-bus-static-bw-rules";
+
+ rule0 {
+ qcom,src-nodes = <&mas_apss>;
+ qcom,src-field = <FLD_IB>;
+ qcom,src-op = <OP_LE>;
+ qcom,thresh = <1600000>;
+ qcom,mode = <THROTTLE_ON>;
+ qcom,dest-node = <&mas_apss>;
+ qcom,dest-bw = <600000>;
+ };
+
+
+ rule1 {
+ qcom,src-nodes = <&mas_apss>;
+ qcom,src-field = <FLD_IB>;
+ qcom,src-op = <OP_LE>;
+ qcom,thresh = <3200000>;
+ qcom,mode = <THROTTLE_ON>;
+ qcom,dest-node = <&mas_apss>;
+ qcom,dest-bw = <1200000>;
+ };
+
+ rule2 {
+ qcom,src-nodes = <&mas_apss>;
+ qcom,src-field = <FLD_IB>;
+ qcom,src-op = <OP_GT>;
+ qcom,thresh = <3200000>;
+ qcom,mode = <THROTTLE_OFF>;
+ qcom,dest-node = <&mas_apss>;
+ };
+
+ rule3 {
+ qcom,src-nodes = <&mas_gfx>;
+ qcom,src-field = <FLD_IB>;
+ qcom,src-op = <OP_LE>;
+ qcom,thresh = <1600000>;
+ qcom,mode = <THROTTLE_ON>;
+ qcom,dest-node = <&mas_gfx>;
+ qcom,dest-bw = <600000>;
+ };
+
+ rule4 {
+ qcom,src-nodes = <&mas_gfx>;
+ qcom,src-field = <FLD_IB>;
+ qcom,src-op = <OP_LE>;
+ qcom,thresh = <3200000>;
+ qcom,mode = <THROTTLE_ON>;
+ qcom,dest-node = <&mas_gfx>;
+ qcom,dest-bw = <1200000>;
+ };
+
+ rule5 {
+ qcom,src-nodes = <&mas_gfx>;
+ qcom,src-field = <FLD_IB>;
+ qcom,src-op = <OP_GT>;
+ qcom,thresh = <3200000>;
+ qcom,mode = <THROTTLE_OFF>;
+ qcom,dest-node = <&mas_gfx>;
+ };
+ };
+};
+
+&ad_hoc_bus {
+ compatible = "qcom,msm-bus-device";
+ reg = <0x580000 0x14000>,
+ <0x400000 0x62000>,
+ <0x500000 0x11000>;
+ reg-names = "snoc-base", "bimc-base", "pnoc-base";
+
+ fab_snoc: fab-snoc {
+ cell-id = <1024>;
+ label = "fab-snoc";
+ qcom,fab-dev;
+ qcom,base-name = "snoc-base";
+ qcom,base-offset = <0x7000>;
+ qcom,qos-off = <0x1000>;
+ qcom,bus-type = <1>;
+ clock-names = "bus_clk", "bus_a_clk";
+ clocks = <&rpmcc RPM_SMD_SNOC_CLK>,
+ <&rpmcc RPM_SMD_SNOC_A_CLK>;
+ };
+
+ fab_bimc: fab-bimc {
+ cell-id = <0>;
+ label = "fab-bimc";
+ qcom,fab-dev;
+ qcom,base-name = "bimc-base";
+ qcom,bus-type = <2>;
+ clock-names = "bus_clk", "bus_a_clk";
+ clocks = <&rpmcc RPM_SMD_BIMC_CLK>,
+ <&rpmcc RPM_SMD_BIMC_A_CLK>;
+ };
+
+ fab_pnoc: fab-pnoc {
+ cell-id = <4096>;
+ label = "fab-pnoc";
+ qcom,fab-dev;
+ qcom,base-name = "pnoc-base";
+ qcom,base-offset = <0x7000>;
+ qcom,qos-delta = <0x1000>; /* NOTE(review): fab-snoc expresses the same offset as "qcom,qos-off" — confirm which property name the msm-bus driver actually parses */
+ qcom,bus-type = <1>;
+ clock-names = "bus_clk", "bus_a_clk";
+ clocks = <&rpmcc RPM_SMD_PCNOC_CLK>,
+ <&rpmcc RPM_SMD_PCNOC_A_CLK>;
+ };
+
+ /* SNOC Devices */
+ mas_video: mas-video {
+ cell-id = <63>;
+ label = "mas-video";
+ qcom,qport = <8>;
+ qcom,ap-owned;
+ qcom,connections = <&mm_int_0 &mm_int_2>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,qos-mode = "bypass";
+ qcom,buswidth = <16>;
+ };
+
+ mas_jpeg: mas-jpeg {
+ cell-id = <62>;
+ label = "mas-jpeg";
+ qcom,ap-owned;
+ qcom,qport = <6>;
+ qcom,connections = <&mm_int_0 &mm_int_2>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,qos-mode = "bypass";
+ qcom,buswidth = <16>;
+ };
+
+ mas_vfe: mas-vfe {
+ cell-id = <29>;
+ label = "mas-vfe";
+ qcom,ap-owned;
+ qcom,qport = <9>;
+ qcom,connections = <&mm_int_1 &mm_int_2>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,qos-mode = "bypass";
+ qcom,buswidth = <16>;
+ };
+
+ mas_mdp: mas-mdp {
+ cell-id = <22>;
+ label = "mas-mdp";
+ qcom,ap-owned;
+ qcom,connections = <&mm_int_0 &mm_int_2>;
+ qcom,qport = <7>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,qos-mode = "bypass";
+ qcom,buswidth = <16>;
+ };
+
+ mas_qdss_bam: mas-qdss-bam {
+ cell-id = <53>;
+ label = "mas-qdss-bam";
+ qcom,connections = <&qdss_int>;
+ qcom,qport = <11>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,buswidth = <4>;
+ qcom,ap-owned;
+ qcom,qos-mode = "fixed";
+ qcom,prio1 = <1>;
+ qcom,prio0 = <1>;
+ };
+
+ mas_snoc_cfg: mas-snoc-cfg {
+ cell-id = <54>;
+ label = "mas-snoc-cfg";
+ qcom,connections = <&qdss_int>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,qos-mode = "bypass";
+ qcom,buswidth = <4>;
+ qcom,mas-rpm-id = <20>;
+ };
+
+ mas_qdss_etr: mas-qdss-etr {
+ cell-id = <60>;
+ label = "mas-qdss-etr";
+ qcom,connections = <&qdss_int>;
+ qcom,qport = <10>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,qos-mode = "fixed";
+ qcom,prio1 = <1>;
+ qcom,prio0 = <1>;
+ qcom,buswidth = <8>;
+ qcom,ap-owned;
+ };
+
+ mm_int_0: mm-int-0 {
+ cell-id = <10000>;
+ label = "mm-int-0";
+ qcom,ap-owned;
+ qcom,connections = <&mm_int_bimc>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,buswidth = <16>;
+ };
+
+ mm_int_1: mm-int-1 {
+ cell-id = <10001>;
+ label = "mm-int-1";
+ qcom,ap-owned;
+ qcom,connections = <&mm_int_bimc>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,buswidth = <16>;
+ };
+
+ mm_int_2: mm-int-2 {
+ cell-id = <10002>;
+ label = "mm-int-2";
+ qcom,ap-owned;
+ qcom,connections = <&snoc_int_0>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,buswidth = <16>;
+ };
+
+ mm_int_bimc: mm-int-bimc {
+ cell-id = <10003>;
+ label = "mm-int-bimc";
+ qcom,ap-owned;
+ qcom,connections = <&snoc_bimc_1_mas>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,buswidth = <16>;
+ };
+
+ snoc_int_0: snoc-int-0 {
+ cell-id = <10004>;
+ label = "snoc-int-0";
+ qcom,connections = <&slv_qdss_stm &slv_imem &snoc_pnoc_mas>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,mas-rpm-id = <99>;
+ qcom,slv-rpm-id = <130>;
+ qcom,buswidth = <8>;
+ };
+
+ snoc_int_1: snoc-int-1 {
+ cell-id = <10005>;
+ label = "snoc-int-1";
+ qcom,connections = <&slv_apss &slv_cats_0 &slv_cats_1>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,mas-rpm-id = <100>;
+ qcom,slv-rpm-id = <131>;
+ qcom,buswidth = <8>;
+ };
+
+ snoc_int_bimc: snoc-int-bimc {
+ cell-id = <10006>;
+ label = "snoc-bimc";
+ qcom,connections = <&snoc_bimc_0_mas>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,mas-rpm-id = <101>;
+ qcom,slv-rpm-id = <132>;
+ qcom,buswidth = <8>;
+ };
+
+ snoc_bimc_0_mas: snoc-bimc-0-mas {
+ cell-id = <10007>;
+ label = "snoc-bimc-0-mas";
+ qcom,connections = <&snoc_bimc_0_slv>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,mas-rpm-id = <3>;
+ qcom,buswidth = <8>;
+ };
+
+ snoc_bimc_1_mas: snoc-bimc-1-mas {
+ cell-id = <10008>;
+ label = "snoc-bimc-1-mas";
+ qcom,connections = <&snoc_bimc_1_slv>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,ap-owned;
+ qcom,buswidth = <16>;
+ };
+
+ qdss_int: qdss-int {
+ cell-id = <10009>;
+ label = "qdss-int";
+ qcom,ap-owned;
+ qcom,connections = <&snoc_int_0 &snoc_int_bimc>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,buswidth = <8>;
+ };
+
+ bimc_snoc_slv: bimc-snoc-slv {
+ cell-id = <10017>;
+ label = "bimc_snoc_slv";
+ qcom,ap-owned;
+ qcom,connections = <&snoc_int_0 &snoc_int_1>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,buswidth = <8>;
+ };
+
+ snoc_pnoc_mas: snoc-pnoc-mas {
+ cell-id = <10027>;
+ label = "snoc-pnoc-mas";
+ qcom,connections = <&snoc_pnoc_slv>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,buswidth = <8>;
+ };
+
+ pnoc_snoc_slv: pnoc-snoc-slv {
+ cell-id = <10011>;
+ label = "snoc-pnoc";
+ qcom,connections = <&snoc_int_0 &snoc_int_bimc &snoc_int_1>;
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,slv-rpm-id = <45>;
+ qcom,buswidth = <8>;
+ };
+
+ slv_srvc_snoc: slv-srvc-snoc {
+ cell-id = <587>;
+ label = "snoc-srvc-snoc";
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,slv-rpm-id = <29>;
+ qcom,buswidth = <8>;
+ };
+
+ slv_qdss_stm: slv-qdss-stm {
+ cell-id = <588>;
+ label = "snoc-qdss-stm";
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,buswidth = <4>;
+ qcom,slv-rpm-id = <30>;
+ };
+
+ slv_imem: slv-imem {
+ cell-id = <519>;
+ label = "slv_imem";
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,buswidth = <8>;
+ qcom,slv-rpm-id = <26>;
+ };
+
+ slv_apss: slv-apss {
+ cell-id = <517>;
+ label = "slv_apss";
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,slv-rpm-id = <20>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_cats_0: slv-cats-0 {
+ cell-id = <663>;
+ label = "slv-cats-0";
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,slv-rpm-id = <106>;
+ qcom,buswidth = <16>;
+ };
+
+ slv_cats_1: slv-cats-1 {
+ cell-id = <664>;
+ label = "slv-cats-1";
+ qcom,bus-dev = <&fab_snoc>;
+ qcom,slv-rpm-id = <107>;
+ qcom,buswidth = <8>;
+ };
+
+ /* BIMC nodes */
+ mas_apss: mas-apss {
+ cell-id = <1>;
+ label = "mas-apss";
+ qcom,ap-owned;
+ qcom,connections = <&slv_ebi_ch0 &bimc_snoc_mas &slv_apps_l2>;
+ qcom,qport = <0>;
+ qcom,bus-dev = <&fab_bimc>;
+ qcom,qos-mode = "fixed";
+ qcom,prio-lvl = <0>;
+ qcom,prio-rd = <0>;
+ qcom,prio-wr = <0>;
+ qcom,ws = <10000>;
+ qcom,gp = <5000>;
+ qcom,thmp = <50>;
+ qcom,buswidth = <8>;
+ };
+
+ mas_tcu0: mas-tcu0 {
+ cell-id = <104>;
+ label = "mas-tcu0";
+ qcom,ap-owned;
+ qcom,connections = <&slv_ebi_ch0 &bimc_snoc_mas &slv_apps_l2>;
+ qcom,qport = <5>;
+ qcom,bus-dev = <&fab_bimc>;
+ qcom,qos-mode = "fixed";
+ qcom,prio-lvl = <2>;
+ qcom,prio-rd = <2>;
+ qcom,prio-wr = <2>;
+ qcom,buswidth = <8>;
+ };
+
+ mas_tcu1: mas-tcu1 {
+ cell-id = <105>;
+ label = "mas-tcu1";
+ qcom,ap-owned;
+ qcom,connections = <&slv_ebi_ch0 &bimc_snoc_mas &slv_apps_l2>;
+ qcom,qport = <6>;
+ qcom,bus-dev = <&fab_bimc>;
+ qcom,qos-mode = "fixed";
+ qcom,prio-lvl = <2>;
+ qcom,prio-rd = <2>;
+ qcom,prio-wr = <2>;
+ qcom,buswidth = <8>;
+ };
+
+ mas_gfx: mas-gfx {
+ cell-id = <26>;
+ label = "mas-gfx";
+ qcom,ap-owned;
+ qcom,connections = <&slv_ebi_ch0 &bimc_snoc_mas &slv_apps_l2>;
+ qcom,qport = <2>;
+ qcom,bus-dev = <&fab_bimc>;
+ qcom,qos-mode = "fixed";
+ qcom,prio-lvl = <0>;
+ qcom,prio-rd = <0>;
+ qcom,prio-wr = <0>;
+ qcom,buswidth = <8>;
+ qcom,ws = <10000>;
+ qcom,gp = <5000>;
+ qcom,thmp = <50>;
+ };
+
+ bimc_snoc_mas: bimc-snoc-mas {
+ cell-id = <10016>;
+ label = "bimc_snoc_mas";
+ qcom,ap-owned;
+ qcom,bus-dev = <&fab_bimc>;
+ qcom,connections = <&bimc_snoc_slv>;
+ qcom,buswidth = <8>;
+ };
+
+ snoc_bimc_0_slv: snoc-bimc-0-slv {
+ cell-id = <10025>;
+ label = "snoc_bimc_0_slv";
+ qcom,connections = <&slv_ebi_ch0>;
+ qcom,bus-dev = <&fab_bimc>;
+ qcom,slv-rpm-id = <24>;
+ qcom,buswidth = <8>;
+ };
+
+ snoc_bimc_1_slv: snoc-bimc-1-slv {
+ cell-id = <10026>;
+ label = "snoc_bimc_1_slv";
+ qcom,connections = <&slv_ebi_ch0>;
+ qcom,ap-owned;
+ qcom,bus-dev = <&fab_bimc>;
+ qcom,buswidth = <8>;
+ };
+
+ slv_ebi_ch0: slv-ebi-ch0 {
+ cell-id = <512>;
+ label = "slv-ebi-ch0";
+ qcom,bus-dev = <&fab_bimc>;
+ qcom,slv-rpm-id = <0>;
+ qcom,buswidth = <8>;
+ };
+
+ slv_apps_l2: slv-apps-l2 {
+ cell-id = <514>;
+ label = "slv-apps-l2";
+ qcom,bus-dev = <&fab_bimc>;
+ qcom,buswidth = <8>;
+ };
+
+ /* PNOC nodes */
+ snoc_pnoc_slv: snoc-pnoc-slv {
+ cell-id = <10028>;
+ label = "snoc-pnoc-slv";
+ qcom,connections = <&pnoc_int_0>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <8>;
+ };
+
+ pnoc_int_0: pnoc-int-0 {
+ cell-id = <10012>;
+ label = "pnoc-int-0";
+ qcom,connections = <&pnoc_snoc_mas &pnoc_s_0 &pnoc_s_1 &pnoc_s_2
+ &pnoc_s_3 &pnoc_s_4 &pnoc_s_8 &pnoc_s_9>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <8>;
+ };
+
+ pnoc_int_1: pnoc-int-1 {
+ cell-id = <10013>;
+ label = "pnoc-int-1";
+ qcom,connections = <&pnoc_snoc_mas>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <8>;
+ };
+
+ pnoc_m_0: pnoc-m-0 {
+ cell-id = <10014>;
+ label = "pnoc-m-0";
+ qcom,connections = <&pnoc_int_0>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <8>;
+ };
+
+ pnoc_m_1: pnoc-m-1 {
+ cell-id = <10015>;
+ label = "pnoc-m-1";
+ qcom,connections = <&pnoc_snoc_mas>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <8>;
+ };
+
+ pnoc_s_0: pnoc-s-0 {
+ cell-id = <10018>;
+ label = "pnoc-s-0";
+ qcom,connections = <&slv_clk_ctl &slv_tlmm &slv_tcsr
+ &slv_security &slv_mss>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ pnoc_s_1: pnoc-s-1 {
+ cell-id = <10019>;
+ label = "pnoc-s-1";
+ qcom,connections = <&slv_imem_cfg &slv_crypto_0_cfg
+ &slv_msg_ram &slv_pdm &slv_prng>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ pnoc_s_2: pnoc-s-2 {
+ cell-id = <10020>;
+ label = "pnoc-s-2";
+ qcom,connections = <&slv_spdm &slv_boot_rom &slv_bimc_cfg
+ &slv_pnoc_cfg &slv_pmic_arb>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ pnoc_s_3: pnoc-s-3 {
+ cell-id = <10021>;
+ label = "pnoc-s-3";
+ qcom,connections = <&slv_mpm &slv_snoc_cfg &slv_rbcpr_cfg
+ &slv_qdss_cfg &slv_dehr_cfg>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ pnoc_s_4: pnoc-s-4 {
+ cell-id = <10022>;
+ label = "pnoc-s-4";
+ qcom,connections = <&slv_venus_cfg &slv_camera_cfg
+ &slv_display_cfg>;
+ qcom,bus-dev = <&fab_pnoc>;
+ };
+
+ pnoc_s_8: pnoc-s-8 {
+ cell-id = <10023>;
+ label = "pnoc-s-8";
+ qcom,connections = <&slv_usb_hs &slv_sdcc_1 &slv_blsp_1>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ pnoc_s_9: pnoc-s-9 {
+ cell-id = <10024>;
+ label = "pnoc-s-9";
+ qcom,connections = <&slv_sdcc_2 &slv_audio &slv_gfx_cfg>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_imem_cfg: slv-imem-cfg {
+ cell-id = <627>;
+ label = "slv-imem-cfg";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_crypto_0_cfg: slv-crypto-0-cfg {
+ cell-id = <625>;
+ label = "slv-crypto-0-cfg";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_msg_ram: slv-msg-ram {
+ cell-id = <535>;
+ label = "slv-msg-ram";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_pdm: slv-pdm {
+ cell-id = <577>;
+ label = "slv-pdm";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_prng: slv-prng {
+ cell-id = <618>;
+ label = "slv-prng";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_clk_ctl: slv-clk-ctl {
+ cell-id = <620>;
+ label = "slv-clk-ctl";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_mss: slv-mss {
+ cell-id = <521>;
+ label = "slv-mss";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_tlmm: slv-tlmm {
+ cell-id = <624>;
+ label = "slv-tlmm";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_tcsr: slv-tcsr {
+ cell-id = <579>;
+ label = "slv-tcsr";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_security: slv-security {
+ cell-id = <622>;
+ label = "slv-security";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_spdm: slv-spdm {
+ cell-id = <533>;
+ label = "slv-spdm";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_pnoc_cfg: slv-pnoc-cfg {
+ cell-id = <641>;
+ label = "slv-pnoc-cfg";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_pmic_arb: slv-pmic-arb {
+ cell-id = <632>;
+ label = "slv-pmic-arb";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_bimc_cfg: slv-bimc-cfg {
+ cell-id = <629>;
+ label = "slv-bimc-cfg";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_boot_rom: slv-boot-rom {
+ cell-id = <630>;
+ label = "slv-boot-rom";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_mpm: slv-mpm {
+ cell-id = <536>;
+ label = "slv-mpm";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_qdss_cfg: slv-qdss-cfg {
+ cell-id = <635>;
+ label = "slv-qdss-cfg";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_rbcpr_cfg: slv-rbcpr-cfg {
+ cell-id = <636>;
+ label = "slv-rbcpr-cfg";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_snoc_cfg: slv-snoc-cfg {
+ cell-id = <647>;
+ label = "slv-snoc-cfg";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_dehr_cfg: slv-dehr-cfg {
+ cell-id = <634>;
+ label = "slv-dehr-cfg";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_venus_cfg: slv-venus-cfg {
+ cell-id = <596>;
+ label = "slv-venus-cfg";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_display_cfg: slv-display-cfg {
+ cell-id = <590>;
+ label = "slv-display-cfg";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_camera_cfg: slv-camera-cfg {
+ cell-id = <589>;
+ label = "slv-camera-cfg";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_usb_hs: slv-usb-hs {
+ cell-id = <614>;
+ label = "slv-usb-hs";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_sdcc_1: slv-sdcc-1 {
+ cell-id = <606>;
+ label = "slv-sdcc-1";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_blsp_1: slv-blsp-1 {
+ cell-id = <613>;
+ label = "slv-blsp-1";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_sdcc_2: slv-sdcc-2 {
+ cell-id = <609>;
+ label = "slv-sdcc-2";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_gfx_cfg: slv-gfx-cfg {
+ cell-id = <598>;
+ label = "slv-gfx-cfg";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ slv_audio: slv-audio {
+ cell-id = <522>;
+ label = "slv-audio";
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ mas_blsp_1: mas-blsp-1 {
+ cell-id = <86>;
+ label = "mas-blsp-1";
+ qcom,connections = <&pnoc_m_1>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ mas_spdm: mas-spdm {
+ cell-id = <36>;
+ label = "mas-spdm";
+ qcom,connections = <&pnoc_m_0>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ mas_dehr: mas-dehr {
+ cell-id = <75>;
+ label = "mas-dehr";
+ qcom,connections = <&pnoc_m_0>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ mas_audio: mas-audio {
+ cell-id = <15>;
+ label = "mas-audio";
+ qcom,connections = <&pnoc_m_0>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ mas_usb_hs: mas-usb-hs {
+ cell-id = <87>;
+ label = "mas-usb-hs";
+ qcom,connections = <&pnoc_m_1>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <4>;
+ };
+
+ mas_pnoc_crypto_0: mas-pnoc-crypto-0 {
+ cell-id = <55>;
+ label = "mas-pnoc-crypto-0";
+ qcom,connections = <&pnoc_int_1>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <8>;
+ };
+
+ mas_pnoc_sdcc_1: mas-pnoc-sdcc-1 {
+ cell-id = <78>;
+ label = "mas-pnoc-sdcc-1";
+ qcom,qport = <7>;
+ qcom,connections = <&pnoc_int_1>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <8>;
+ };
+
+ mas_pnoc_sdcc_2: mas-pnoc-sdcc-2 {
+ cell-id = <81>;
+ label = "mas-pnoc-sdcc-2";
+ qcom,qport = <8>;
+ qcom,connections = <&pnoc_int_1>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,buswidth = <8>;
+ };
+
+ pnoc_snoc_mas: pnoc-snoc-mas {
+ cell-id = <10010>;
+ label = "pnoc-snoc-mas";
+ qcom,connections = <&pnoc_snoc_slv>;
+ qcom,bus-dev = <&fab_pnoc>;
+ qcom,mas-rpm-id = <29>;
+ qcom,buswidth = <8>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-coresight.dtsi b/arch/arm64/boot/dts/qcom/msm8916-coresight.dtsi
new file mode 100644
index 0000000000000..c008dc7a32bb9
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916-coresight.dtsi
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2013 - 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+
+ tpiu@820000 {
+ compatible = "arm,coresight-tpiu", "arm,primecell";
+ reg = <0x820000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ port {
+ tpiu_in: endpoint {
+ slave-mode;
+ remote-endpoint = <&replicator_out1>;
+ };
+ };
+ };
+
+ funnel@821000 {
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0x821000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /*
+ * Input ports not described here:
+ * 0 - connected to Resource and Power Manager CPU ETM
+ * 1 - not-connected
+ * 2 - connected to Modem CPU ETM
+ * 3 - not-connected
+ * 5 - not-connected
+ * 6 - connected through funnel to Wireless CPU ETM
+ * 7 - connected to STM component
+ */
+ port@4 {
+ reg = <4>;
+ funnel0_in4: endpoint {
+ slave-mode;
+ remote-endpoint = <&funnel1_out>;
+ };
+ };
+ port@8 { /* output port; reg restarts at 0 for outputs — presumably legacy CoreSight graph numbering, confirm against binding */
+ reg = <0>;
+ funnel0_out: endpoint {
+ remote-endpoint = <&etf_in>;
+ };
+ };
+ };
+ };
+
+ replicator@824000 {
+ compatible = "qcom,coresight-replicator1x", "arm,primecell";
+ reg = <0x824000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ replicator_out0: endpoint {
+ remote-endpoint = <&etr_in>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ replicator_out1: endpoint {
+ remote-endpoint = <&tpiu_in>;
+ };
+ };
+ port@2 {
+ reg = <0>;
+ replicator_in: endpoint {
+ slave-mode;
+ remote-endpoint = <&etf_out>;
+ };
+ };
+ };
+ };
+
+ etf@825000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0x825000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ etf_out: endpoint {
+ remote-endpoint = <&replicator_in>;
+ };
+ };
+ port@1 {
+ reg = <0>;
+ etf_in: endpoint {
+ slave-mode;
+ remote-endpoint = <&funnel0_out>;
+ };
+ };
+ };
+ };
+
+ etr@826000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0x826000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ port {
+ etr_in: endpoint {
+ slave-mode;
+ remote-endpoint = <&replicator_out0>;
+ };
+ };
+ };
+
+ funnel@841000 { /* APSS funnel only 4 inputs are used */
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0x841000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel1_in0: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm0_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ funnel1_in1: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm1_out>;
+ };
+ };
+ port@2 {
+ reg = <2>;
+ funnel1_in2: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm2_out>;
+ };
+ };
+ port@3 {
+ reg = <3>;
+ funnel1_in3: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm3_out>;
+ };
+ };
+ port@4 {
+ reg = <0>;
+ funnel1_out: endpoint {
+ remote-endpoint = <&funnel0_in4>;
+ };
+ };
+ };
+ };
+
+ etm@85c000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0x85c000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ cpu = <&CPU0>;
+
+ port {
+ etm0_out: endpoint {
+ remote-endpoint = <&funnel1_in0>;
+ };
+ };
+ };
+
+ etm@85d000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0x85d000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ cpu = <&CPU1>;
+
+ port {
+ etm1_out: endpoint {
+ remote-endpoint = <&funnel1_in1>;
+ };
+ };
+ };
+
+ etm@85e000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0x85e000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ cpu = <&CPU2>;
+
+ port {
+ etm2_out: endpoint {
+ remote-endpoint = <&funnel1_in2>;
+ };
+ };
+ };
+
+ etm@85f000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0x85f000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ cpu = <&CPU3>;
+
+ port {
+ etm3_out: endpoint {
+ remote-endpoint = <&funnel1_in3>;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-iommu.dtsi b/arch/arm64/boot/dts/qcom/msm8916-iommu.dtsi
new file mode 100644
index 0000000000000..82acb8df2a8a7
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916-iommu.dtsi
@@ -0,0 +1,21 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm-iommu-v2.dtsi"
+
+&gfx_iommu {
+ status = "ok";
+};
+
+&apps_iommu {
+ status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-mdss.dtsi b/arch/arm64/boot/dts/qcom/msm8916-mdss.dtsi
new file mode 100644
index 0000000000000..ffed9ffd0ecd0
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916-mdss.dtsi
@@ -0,0 +1,118 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ gpu: adreno-3xx@01c00000 {
+ compatible = "qcom,adreno-3xx";
+ #stream-id-cells = <16>;
+ reg = <0x01c00000 0x20000>;
+ reg-names = "kgsl_3d0_reg_memory";
+ interrupts = <0 33 0>;
+ interrupt-names = "kgsl_3d0_irq";
+ clock-names =
+ "core_clk",
+ "iface_clk",
+ "mem_clk",
+ "mem_iface_clk",
+ "alt_mem_iface_clk",
+ "gfx3d_clk_src";
+ clocks =
+ <&gcc GCC_OXILI_GFX3D_CLK>,
+ <&gcc GCC_OXILI_AHB_CLK>,
+ <&gcc GCC_OXILI_GMEM_CLK>,
+ <&gcc GCC_BIMC_GFX_CLK>,
+ <&gcc GCC_BIMC_GPU_CLK>,
+ <&gcc GFX3D_CLK_SRC>;
+ power-domains = <&gcc OXILI_GDSC>;
+ qcom,chipid = <0x03000600>;
+ qcom,gpu-pwrlevels {
+ compatible = "qcom,gpu-pwrlevels";
+ qcom,gpu-pwrlevel@0 {
+ qcom,gpu-freq = <400000000>;
+ };
+ qcom,gpu-pwrlevel@1 {
+ qcom,gpu-freq = <19200000>;
+ };
+ };
+ };
+
+ mdp: mdp@1a00000 {
+ compatible = "qcom,mdp5";
+ reg = <0x1a00000 0x90000>,
+ <0x1ac8000 0x3000>;
+ reg-names = "mdp_phys", "vbif_phys";
+ interrupts = <0 72 0>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ power-domains = <&gcc MDSS_GDSC>;
+
+ connectors = <&dsi0>;
+ gpus = <&gpu>;
+
+ qcom,msm-bus,name = "mdss_mdp";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <22 512 0 0>,
+ <22 512 0 6400000>,
+ <22 512 0 6400000>;
+
+ clocks = <&gcc GCC_MDSS_AHB_CLK>,
+ <&gcc GCC_MDSS_AXI_CLK>,
+ <&gcc MDP_CLK_SRC>,
+ <&gcc GCC_MDSS_MDP_CLK>,
+ <&gcc GCC_MDSS_VSYNC_CLK>;
+ clock-names = "iface_clk", "bus_clk", "core_clk_src",
+ "core_clk", "vsync_clk";
+ };
+
+ dsi0: dsi@1a98000 {
+ compatible = "qcom,mdss-dsi-ctrl";
+ qcom,dsi-host-index = <0>;
+ reg = <0x1a98000 0x25c>;
+ reg-names = "dsi_ctrl";
+
+ interrupt-parent = <&mdp>;
+ interrupts = <4 0>;
+
+ clocks = <&gcc GCC_MDSS_MDP_CLK>,
+ <&gcc GCC_MDSS_AHB_CLK>,
+ <&gcc GCC_MDSS_AXI_CLK>,
+ <&gcc GCC_MDSS_AHB_CLK>,
+ <&gcc GCC_MDSS_BYTE0_CLK>,
+ <&gcc GCC_MDSS_PCLK0_CLK>,
+ <&gcc GCC_MDSS_ESC0_CLK>,
+ <&gcc BYTE0_CLK_SRC>,
+ <&gcc PCLK0_CLK_SRC>;
+ clock-names = "mdp_core_clk", "iface_clk", "bus_clk",
+ "core_mmss_clk", "byte_clk", "pixel_clk",
+ "core_clk", "byte_clk_src", "pixel_clk_src";
+ qcom,dsi-phy = <&dsi_phy0>;
+ };
+
+ dsi_phy0: dsi-phy@1a98300 {
+ compatible = "qcom,dsi-phy-28nm-lp";
+ qcom,dsi-phy-index = <0>;
+
+ power-domains = <&gcc MDSS_GDSC>;
+
+ reg-names = "dsi_pll", "dsi_phy", "dsi_phy_regulator";
+ reg = <0x1a98300 0xd4>,
+ <0x1a98500 0x280>,
+ <0x1a98780 0x30>;
+
+ clocks = <&gcc GCC_MDSS_AHB_CLK>;
+ clock-names = "iface_clk";
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-mtp.dts b/arch/arm64/boot/dts/qcom/msm8916-mtp.dts
index b0a064d3806b3..2cdaa651c3357 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-mtp.dts
@@ -13,10 +13,13 @@
/dts-v1/;
+#include <dt-bindings/arm/qcom-ids.h>
#include "msm8916-mtp.dtsi"
/ {
model = "Qualcomm Technologies, Inc. MSM 8916 MTP";
compatible = "qcom,msm8916-mtp", "qcom,msm8916-mtp/1",
"qcom,msm8916", "qcom,mtp";
+ qcom,board-id = <QCOM_BRD_ID(MTP, 1, 0) QCOM_BRD_SUBTYPE_DEFAULT>,
+ <QCOM_BRD_ID(MTP, 1, 0) QCOM_BRD_SUBTYPE_MTP8916_SMB1360>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8916-mtp.dtsi
index ceeb8a6feed65..529fe53422cd0 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-mtp.dtsi
@@ -33,3 +33,11 @@
};
};
};
+
+&blsp_dma {
+ status = "okay";
+};
+
+&blsp_spi3 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
index 10c83e11c272f..7a0fc73b85d32 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
@@ -720,4 +720,29 @@
};
};
};
+
+ wcnss_default: wcnss_default {
+ pinmux2 {
+ function = "wcss_wlan";
+ pins = "gpio40";
+ };
+ pinmux1 {
+ function = "wcss_wlan";
+ pins = "gpio41";
+ };
+ pinmux0 {
+ function = "wcss_wlan";
+ pins = "gpio42";
+ };
+ pinmux {
+ function = "wcss_wlan";
+ pins = "gpio43", "gpio44";
+ };
+ pinconf {
+ pins = "gpio40", "gpio41", "gpio42", "gpio43",
+ "gpio44";
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index 96812007850ec..a91091e6cf699 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -14,10 +14,19 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/qcom,gcc-msm8916.h>
#include <dt-bindings/reset/qcom,gcc-msm8916.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
+#include <dt-bindings/thermal/thermal.h>
+#include <dt-bindings/arm/qcom-ids.h>
/ {
model = "Qualcomm Technologies, Inc. MSM8916";
compatible = "qcom,msm8916";
+ qcom,msm-id = <QCOM_ID_MSM8916 0>,
+ <QCOM_ID_MSM8216 0>,
+ <QCOM_ID_MSM8116 0>,
+ <QCOM_ID_MSM8616 0>,
+ <QCOM_ID_APQ8016 0>;
+
interrupt-parent = <&intc>;
@@ -51,6 +60,26 @@
reg = <0x0 0x86300000 0x0 0x0100000>;
no-map;
};
+
+ hypervisor_mem: hypervisor_region@86400000 {
+ no-map;
+ reg = <0x0 0x86400000 0x0 0x0400000>;
+ };
+
+ modem_adsp_mem: modem_adsp_region@86800000 {
+ no-map;
+ reg = <0x0 0x86800000 0x0 0x04800000>;
+ };
+
+ peripheral_mem: peripheral_region@8b600000 {
+ no-map;
+ reg = <0x0 0x8b600000 0x0 0x0600000>;
+ };
+
+ vidc_mem: vidc_region@8f800000 {
+ no-map;
+ reg = <0 0x8f800000 0 0x800000>;
+ };
};
cpus {
@@ -62,32 +91,130 @@
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0>;
next-level-cache = <&L2_0>;
+ enable-method = "qcom,arm-cortex-acc";
+ qcom,acc = <&acc0>;
+ clocks = <&a53cc 1>;
+ clock-latency = <200000>;
+ cpu-supply = <&pm8916_s2>;
+ /* cooling options */
+ cooling-min-level = <0>;
+ cooling-max-level = <7>;
+ #cooling-cells = <2>;
};
CPU1: cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x1>;
+ enable-method = "qcom,arm-cortex-acc";
+ qcom,acc = <&acc1>;
next-level-cache = <&L2_0>;
+ clocks = <&a53cc 1>;
+ clock-latency = <200000>;
+ cpu-supply = <&pm8916_s2>;
+ /* cooling options */
+ cooling-min-level = <0>;
+ cooling-max-level = <7>;
+ #cooling-cells = <2>;
};
CPU2: cpu@2 {
device_type = "cpu";
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x2>;
+ enable-method = "qcom,arm-cortex-acc";
+ qcom,acc = <&acc2>;
next-level-cache = <&L2_0>;
+ clocks = <&a53cc 1>;
+ clock-latency = <200000>;
+ cpu-supply = <&pm8916_s2>;
+ /* cooling options */
+ cooling-min-level = <0>;
+ cooling-max-level = <7>;
+ #cooling-cells = <2>;
};
CPU3: cpu@3 {
device_type = "cpu";
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x3>;
+ enable-method = "qcom,arm-cortex-acc";
+ qcom,acc = <&acc3>;
next-level-cache = <&L2_0>;
+ clocks = <&a53cc 1>;
+ clock-latency = <200000>;
+ cpu-supply = <&pm8916_s2>;
+ /* cooling options */
+ cooling-min-level = <0>;
+ cooling-max-level = <7>;
+ #cooling-cells = <2>;
};
L2_0: l2-cache {
compatible = "cache";
cache-level = <2>;
+ power-domain = <&l2ccc_0>;
+ };
+ };
+
+ cpu-pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <GIC_PPI 7 GIC_CPU_MASK_SIMPLE(4)>;
+ };
+
+ thermal-zones {
+ cpu-thermal0 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 4>;
+
+ trips {
+ cpu_alert0: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit0: trip@1 {
+ temperature = <100000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert0>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu-thermal1 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 3>;
+
+ trips {
+ cpu_alert1: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit1: trip@1 {
+ temperature = <100000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert1>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
};
};
@@ -100,12 +227,6 @@
};
clocks {
- xo_board: xo_board {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <19200000>;
- };
-
sleep_clk: sleep_clk {
compatible = "fixed-clock";
#clock-cells = <0>;
@@ -113,15 +234,6 @@
};
};
- smem {
- compatible = "qcom,smem";
-
- memory-region = <&smem_mem>;
- qcom,rpm-msg-ram = <&rpm_msg_ram>;
-
- hwlocks = <&tcsr_mutex 3>;
- };
-
soc: soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -183,6 +295,13 @@
reg = <0x0b011000 0x1000>;
};
+ a53cc: qcom,a53cc@0b016000 {
+ compatible = "qcom,clock-a53-msm8916";
+ reg = <0x0b016000 0x40>;
+ #clock-cells = <1>;
+ qcom,apcs = <&apcs>;
+ };
+
blsp1_uart2: serial@78b0000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
reg = <0x78b0000 0x200>;
@@ -352,6 +471,11 @@
status = "disabled";
};
+ wcd_digital: codec-digital {
+ compatible = "qcom,apq8016-wcd-digital-codec", "syscon";
+ reg = <0x0771c000 0x400>;
+ };
+
lpass: lpass@07708000 {
status = "disabled";
compatible = "qcom,lpass-cpu-apq8016";
@@ -378,6 +502,14 @@
reg-names = "lpass-lpaif";
};
+ sound: sound {
+ status = "disabled";
+ compatible = "qcom,apq8016-sbc-sndcard";
+ reg = <0x07702000 0x4>, <0x07702004 0x4>;
+ reg-names = "mic-iomux", "spkr-iomux";
+ };
+
sdhc_1: sdhci@07824000 {
compatible = "qcom,sdhci-msm-v4";
reg = <0x07824900 0x11c>, <0x07824000 0x800>;
@@ -436,6 +568,14 @@
qcom,otg-control = <2>; // PMIC
qcom,manual-pullup;
+ qcom,msm-bus,name = "usb2";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <87 512 0 0>,
+ <87 512 80000 0>,
+ <87 512 6000 6000>;
+
clocks = <&gcc GCC_USB_HS_AHB_CLK>,
<&gcc GCC_USB_HS_SYSTEM_CLK>,
<&gcc GCC_USB2A_PHY_SLEEP_CLK>;
@@ -454,6 +594,11 @@
reg = <0x0b000000 0x1000>, <0x0b002000 0x1000>;
};
+ l2ccc_0: clock-controller@b011000 {
+ compatible = "qcom,8916-l2ccc";
+ reg = <0x0b011000 0x1000>;
+ };
+
timer@b020000 {
#address-cells = <1>;
#size-cells = <1>;
@@ -537,6 +682,329 @@
clocks = <&gcc GCC_PRNG_AHB_CLK>;
clock-names = "core";
};
+ acc0: clock-controller@b088000 {
+ compatible = "qcom,arm-cortex-acc";
+ reg = <0x0b088000 0x1000>,
+ <0x0b008000 0x1000>;
+ };
+
+ acc1: clock-controller@b098000 {
+ compatible = "qcom,arm-cortex-acc";
+ reg = <0x0b098000 0x1000>,
+ <0x0b008000 0x1000>;
+ };
+
+ acc2: clock-controller@b0a8000 {
+ compatible = "qcom,arm-cortex-acc";
+ reg = <0x0b0a8000 0x1000>,
+ <0x0b008000 0x1000>;
+ };
+
+ acc3: clock-controller@b0b8000 {
+ compatible = "qcom,arm-cortex-acc";
+ reg = <0x0b0b8000 0x1000>,
+ <0x0b008000 0x1000>;
+ };
+
+ tcsr: syscon@1937000 {
+ compatible = "qcom,tcsr-msm8916", "syscon";
+ reg = <0x1937000 0x30000>;
+ };
+
+ uqfprom: eeprom@58000 {
+ compatible = "qcom,qfprom-msm8916";
+ reg = <0x58000 0x7000>;
+ };
+
+ cpr@b018000 {
+ compatible = "qcom,cpr";
+ reg = <0xb018000 0x1000>;
+ interrupts = <0 15 1>, <0 16 1>, <0 17 1>;
+ vdd-mx-supply = <&pm8916_l3>;
+ acc-syscon = <&tcsr>;
+ eeprom = <&uqfprom>;
+
+ qcom,cpr-ref-clk = <19200>;
+ qcom,cpr-timer-delay-us = <5000>;
+ qcom,cpr-timer-cons-up = <0>;
+ qcom,cpr-timer-cons-down = <2>;
+ qcom,cpr-up-threshold = <0>;
+ qcom,cpr-down-threshold = <2>;
+ qcom,cpr-idle-clocks = <15>;
+ qcom,cpr-gcnt-us = <1>;
+ qcom,vdd-apc-step-up-limit = <1>;
+ qcom,vdd-apc-step-down-limit = <1>;
+ qcom,cpr-cpus = <&CPU0 &CPU1 &CPU2 &CPU3>;
+ };
+
+ qfprom: qfprom@5c000 {
+ compatible = "qcom,qfprom";
+ reg = <0x5c000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ tsens_caldata: caldata@d0 {
+ reg = <0xd0 0x8>;
+ };
+ tsens_calsel: calsel@ec {
+ reg = <0xec 0x4>;
+ };
+ };
+
+ tsens: thermal-sensor@4a8000 {
+ compatible = "qcom,msm8916-tsens";
+ reg = <0x4a8000 0x2000>;
+ nvmem-cells = <&tsens_caldata>, <&tsens_calsel>;
+ nvmem-cell-names = "calib", "calib_sel";
+ qcom,tsens-slopes = <3200 3200 3200 3200 3200>;
+ qcom,sensor-id = <0 1 2 4 5>;
+ #thermal-sensor-cells = <1>;
+ };
+
+ q6-smp2p {
+ compatible = "qcom,smp2p";
+ qcom,smem = <435>, <428>;
+ interrupts = <0 27 1>;
+ qcom,ipc = <&apcs 8 14>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <1>;
+
+ q6_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ qcom,outbound;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ q6_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ qcom,inbound;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ wcnss-smp2p {
+ compatible = "qcom,smp2p";
+ qcom,smem = <451>, <431>;
+
+ interrupts = <0 143 1>;
+
+ qcom,ipc = <&apcs 8 18>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <4>;
+
+ wcnss_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ qcom,outbound;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ wcnss_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ qcom,inbound;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+ qcom,mss@4080000 {
+ compatible = "qcom,pil-q6v56-mss", "qcom,q6v5-pil";
+ reg = <0x04080000 0x100>,
+ <0x04020000 0x040>,
+ <0x01810000 0x004>,
+ <0x01810000 0x004>,
+ <0x0194f000 0x010>,
+ <0x01950000 0x008>,
+ <0x01951000 0x008>;
+
+ reg-names = "qdsp6_base", "rmb_base", "restart_reg_sec",
+ "halt_q6", "halt_modem", "halt_nc";
+
+ interrupts-extended = <&intc 0 24 1>,
+ <&q6_smp2p_in 0 0>,
+ <&q6_smp2p_in 1 0>,
+ <&q6_smp2p_in 2 0>,
+ <&q6_smp2p_in 3 0>;
+ interrupt-names = "wdog", "fatal", "ready", "handover", "stop-ack";
+
+ clocks = <&gcc GCC_MSS_CFG_AHB_CLK>, <&gcc GCC_MSS_Q6_BIMC_AXI_CLK>, <&gcc GCC_BOOT_ROM_AHB_CLK>;
+
+ clock-names = "iface", "bus", "mem";
+
+ qcom,mx-supply = <&pm8916_l3>;
+ qcom,mx-uV = <1050000>;
+ qcom,pll-supply = <&pm8916_l7>;
+ qcom,pll-uV = <1800000>;
+ qcom,proxy-clock-names = "xo";
+ qcom,active-clock-names = "iface_clk", "bus_clk", "mem_clk";
+ qcom,is-loadable;
+ qcom,firmware-name = "modem";
+ qcom,pil-self-auth;
+
+
+ /* GPIO inputs from mss */
+ qcom,gpio-err-fatal = <&q6_smp2p_in 0 0>;
+ qcom,gpio-err-ready = <&q6_smp2p_in 1 0>;
+ qcom,gpio-proxy-unvote = <&q6_smp2p_in 2 0>;
+ qcom,gpio-stop-ack = <&q6_smp2p_in 3 0>;
+ qcom,gpio-ramdump-disable = <&q6_smp2p_in 15 0>;
+ /* GPIO output to mss */
+ qcom,gpio-force-stop = <&q6_smp2p_out 0 0>;
+ qcom,stop-gpio = <&q6_smp2p_out 0 0>;
+ memory-region = <&modem_adsp_mem>;
+ };
+
+ pronto_rproc: pronto_rproc {
+ compatible = "qcom,tz-pil";
+
+ interrupts-extended = <&intc 0 149 1>,
+ <&wcnss_smp2p_in 0 0>,
+ <&wcnss_smp2p_in 1 0>,
+ <&wcnss_smp2p_in 2 0>,
+ <&wcnss_smp2p_in 3 0>;
+ interrupt-names = "wdog", "fatal", "ready", "handover", "stop-ack";
+
+ clocks = <&gcc GCC_CRYPTO_CLK>,
+ <&gcc GCC_CRYPTO_AHB_CLK>,
+ <&gcc GCC_CRYPTO_AXI_CLK>,
+ <&gcc CRYPTO_CLK_SRC>;
+ clock-names = "scm_core_clk", "scm_iface_clk", "scm_bus_clk", "scm_src_clk";
+
+ qcom,firmware-name = "wcnss";
+ qcom,pas-id = <6>;
+
+ qcom,crash-reason = <422>;
+ qcom,smd-edges = <&pronto_smd_edge>;
+
+ qcom,pll-supply = <&pm8916_l7>;
+ qcom,pll-uV = <1800000>;
+ qcom,pll-uA = <18000>;
+
+ qcom,stop-gpio = <&wcnss_smp2p_out 0 0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&wcnss_default>;
+
+ memory-region = <&peripheral_mem>;
+ };
+
+ qcom,wcn36xx@0a000000 {
+ compatible = "qcom,wcn3620";
+ reg = <0x0a000000 0x280000>,
+ <0xb011008 0x04>,
+ <0x0a21b000 0x3000>,
+ <0x03204000 0x00000100>,
+ <0x03200800 0x00000200>,
+ <0x0A100400 0x00000200>,
+ <0x0A205050 0x00000200>,
+ <0x0A219000 0x00000020>,
+ <0x0A080488 0x00000008>,
+ <0x0A080fb0 0x00000008>,
+ <0x0A08040c 0x00000008>,
+ <0x0A0120a8 0x00000008>,
+ <0x0A012448 0x00000008>,
+ <0x0A080c00 0x00000001>;
+
+ reg-names = "wcnss_mmio", "wcnss_fiq",
+ "pronto_phy_base", "riva_phy_base",
+ "riva_ccu_base", "pronto_a2xb_base",
+ "pronto_ccpu_base", "pronto_saw2_base",
+ "wlan_tx_phy_aborts", "wlan_brdg_err_source",
+ "wlan_tx_status", "alarms_txctl",
+ "alarms_tactl", "pronto_mcu_base";
+
+ interrupts = <0 145 0 0 146 0>;
+ interrupt-names = "wcnss_wlantx_irq", "wcnss_wlanrx_irq";
+
+ // qcom,pronto-vddmx-supply = <&pm8916_l3>;
+ // qcom,pronto-vddcx-supply = <&pm8916_s1_corner>;
+ // qcom,pronto-vddpx-supply = <&pm8916_l7>;
+ // qcom,iris-vddxo-supply = <&pm8916_l7>;
+ // qcom,iris-vddrfa-supply = <&pm8916_s3>;
+ // qcom,iris-vddpa-supply = <&pm8916_l9>;
+ // qcom,iris-vdddig-supply = <&pm8916_l5>;
+
+ pinctrl-names = "wcnss_default";
+ // pinctrl-names = "wcnss_default", "wcnss_sleep",
+ // "wcnss_gpio_default";
+ pinctrl-0 = <&wcnss_default>;
+ // pinctrl-1 = <&wcnss_sleep>;
+ // pinctrl-2 = <&wcnss_gpio_default>;
+
+ // clocks = <&rpmcc RPM_XO_CLK_SRC>,
+ // <&rpmcc RPM_RF_CLK2>;
+ //clock-names = "xo", "rf_clk";
+
+ rproc = <&pronto_rproc>;
+ qcom,has-autodetect-xo;
+ qcom,wlan-rx-buff-count = <512>;
+ qcom,is-pronto-vt;
+ qcom,has-pronto-hw;
+ // qcom,wcnss-adc_tm = <&pm8916_adc_tm>;
+ };
+
+
+
+ qcom,rpm-log@29dc00 {
+ compatible = "qcom,rpm-log";
+ reg = <0x29dc00 0x4000>;
+ qcom,rpm-addr-phys = <0x200000>;
+ qcom,offset-version = <4>;
+ qcom,offset-page-buffer-addr = <36>;
+ qcom,offset-log-len = <40>;
+ qcom,offset-log-len-mask = <44>;
+ qcom,offset-page-indices = <56>;
+ };
+
+ vidc_rproc: vidc_tzpil@0 {
+ compatible = "qcom,tz-pil";
+ clocks = <&gcc GCC_CRYPTO_CLK>,
+ <&gcc GCC_CRYPTO_AHB_CLK>,
+ <&gcc GCC_CRYPTO_AXI_CLK>,
+ <&gcc CRYPTO_CLK_SRC>;
+ clock-names = "scm_core_clk", "scm_iface_clk",
+ "scm_bus_clk", "scm_src_clk";
+ qcom,firmware-name = "venus";
+ qcom,pas-id = <9>;
+ memory-region = <&vidc_mem>;
+ status = "disabled";
+ };
+
+ vidc: qcom,vidc@1d00000 {
+ compatible = "qcom,msm-vidc";
+ reg = <0x01d00000 0xff000>;
+ interrupts = <GIC_SPI 44 0>;
+ power-domains = <&gcc VENUS_GDSC>;
+ clocks = <&gcc GCC_VENUS0_VCODEC0_CLK>,
+ <&gcc GCC_VENUS0_AHB_CLK>,
+ <&gcc GCC_VENUS0_AXI_CLK>;
+ clock-names = "core_clk", "iface_clk", "bus_clk";
+ qcom,hfi = "venus";
+ qcom,max-hw-load = <352800>; /* 720p @ 30 + 1080p @ 30 */
+ qcom,enable-idle-indicator;
+ rproc = <&vidc_rproc>;
+ qcom,iommu-cb = <&venus_ns>,
+ <&venus_sec_bitstream>,
+ <&venus_sec_pixel>,
+ <&venus_sec_non_pixel>;
+ status = "disabled";
+ };
+ };
+
+ smem {
+ compatible = "qcom,smem";
+
+ memory-region = <&smem_mem>;
+ qcom,rpm-msg-ram = <&rpm_msg_ram>;
+
+ hwlocks = <&tcsr_mutex 3>;
};
smd {
@@ -546,16 +1014,21 @@
interrupts = <GIC_SPI 168 IRQ_TYPE_EDGE_RISING>;
qcom,ipc = <&apcs 8 0>;
qcom,smd-edge = <15>;
+ qcom,remote-pid = <0xffffffff>;
rpm_requests {
compatible = "qcom,rpm-msm8916";
qcom,smd-channels = "rpm_requests";
rpmcc: qcom,rpmcc {
- compatible = "qcom,rpmcc-msm8916", "qcom,rpmcc";
+ compatible = "qcom,rpmcc-msm8916";
#clock-cells = <1>;
};
+ msm-bus {
+ compatible = "qcom,rpm-msm-bus";
+ };
+
smd_rpm_regulators: pm8916-regulators {
compatible = "qcom,rpm-pm8916-regulators";
@@ -584,7 +1057,60 @@
};
};
};
+
+ qcom,smd-modem {
+ interrupts = <0 25 1>;
+ qcom,smd-edge = <0>;
+ qcom,ipc = <&apcs 8 12>;
+ qcom,remote-pid = <1>;
+ ipcrtr_requests {
+ compatible = "qcom,ipcrtr";
+ qcom,smd-channels = "IPCRTR";
+ };
+ };
+
+ pronto_smd_edge: pronto {
+ interrupts = <0 142 1>;
+
+ qcom,ipc = <&apcs 8 17>;
+ qcom,smd-edge = <6>;
+ qcom,remote-pid = <4>;
+
+ bt {
+ compatible = "qcom,hci-smd";
+ qcom,smd-channels = "APPS_RIVA_BT_CMD", "APPS_RIVA_BT_ACL";
+ qcom,smd-channel-names = "event", "data";
+ };
+
+ ipcrtr {
+ compatible = "qcom,ipcrtr";
+ qcom,smd-channels = "IPCRTR";
+ };
+
+ wifi {
+ compatible = "qcom,wlan-ctrl";
+ qcom,smd-channels = "WLAN_CTRL";
+
+ interrupts = <0 145 0>, <0 146 0>;
+ interrupt-names = "wcnss_wlantx_irq", "wcnss_wlanrx_irq";
+
+ qcom,wcnss_mmio = <0xfb000000 0x21b000>;
+
+ // qcom,tx-enable-gpios = <&apps_smsm 10 0>;
+ // qcom,tx-rings-empty-gpios = <&apps_smsm 9 0>;
+ };
+
+ wcnss_ctrl {
+ compatible = "qcom,wcnss-ctrl";
+ qcom,smd-channels = "WCNSS_CTRL";
+
+ qcom,wcnss_mmio = <0xfb21b000 0x3000>;
+ };
+ };
};
};
#include "msm8916-pins.dtsi"
+#include "msm8916-iommu.dtsi"
+#include "msm8916-coresight.dtsi"
+#include "msm8916-bus.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/msm8996-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8996-mtp.dtsi
index 9bab5c011c070..363fbc67c76a6 100644
--- a/arch/arm64/boot/dts/qcom/msm8996-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996-mtp.dtsi
@@ -12,6 +12,7 @@
*/
#include "msm8996.dtsi"
+#include "pm8994.dtsi"
/ {
aliases {
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index 0506fb808c560..4d6663561df1e 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -13,10 +13,12 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/qcom,gcc-msm8996.h>
#include <dt-bindings/clock/qcom,mmcc-msm8996.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
/ {
model = "Qualcomm Technologies, Inc. MSM8996";
+ qcom,msm-id = <291 0x30001>;
interrupt-parent = <&intc>;
#address-cells = <2>;
@@ -39,6 +41,8 @@
compatible = "qcom,kryo";
reg = <0x0 0x0>;
enable-method = "psci";
+ clocks = <&kryocc 0>;
+ cpu-supply = <&pm8994_s11>;
next-level-cache = <&L2_0>;
L2_0: l2-cache {
compatible = "cache";
@@ -51,6 +55,8 @@
compatible = "qcom,kryo";
reg = <0x0 0x1>;
enable-method = "psci";
+ clocks = <&kryocc 0>;
+ cpu-supply = <&pm8994_s11>;
next-level-cache = <&L2_0>;
};
@@ -59,6 +65,8 @@
compatible = "qcom,kryo";
reg = <0x0 0x100>;
enable-method = "psci";
+ clocks = <&kryocc 1>;
+ cpu-supply = <&pm8994_s11>;
next-level-cache = <&L2_1>;
L2_1: l2-cache {
compatible = "cache";
@@ -71,6 +79,8 @@
compatible = "qcom,kryo";
reg = <0x0 0x101>;
enable-method = "psci";
+ clocks = <&kryocc 1>;
+ cpu-supply = <&pm8994_s11>;
next-level-cache = <&L2_1>;
};
@@ -119,6 +129,7 @@
clock-frequency = <32764>;
clock-output-names = "sleep_clk";
};
+
};
psci {
@@ -126,6 +137,14 @@
method = "smc";
};
+ firmware {
+ compatible = "simple-bus";
+
+ scm {
+ compatible = "qcom,scm";
+ };
+ };
+
soc: soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -151,6 +170,24 @@
reg = <0x300000 0x90000>;
};
+ cbfcc: syscon@9a20000 {
+ compatible = "syscon";
+ reg = <0x9a20000 0x10000>;
+ };
+
+ kryocc: clock-controller@6400000 {
+ compatible = "qcom,cpu-clk-msm8996";
+ reg = <0x6400000 0x90000>;
+ #clock-cells = <1>;
+ qcom,cbf = <&cbfcc>;
+ };
+
+ tsens0: thermal-sensor@4a8000 {
+ compatible = "qcom,msm8996-tsens";
+ reg = <0x4a8000 0x2000>;
+ #thermal-sensor-cells = <1>;
+ };
+
blsp2_uart1: serial@75b0000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
reg = <0x75b0000 0x1000>;
@@ -169,6 +206,58 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+
+ hdmi_hpd_active: hdmi_hpd_active {
+ mux {
+ pins = "gpio34";
+ function = "hdmi_hot";
+ };
+
+ config {
+ pins = "gpio34";
+ bias-pull-down;
+ drive-strength = <16>;
+ };
+ };
+
+ hdmi_hpd_suspend: hdmi_hpd_suspend {
+ mux {
+ pins = "gpio34";
+ function = "hdmi_hot";
+ };
+
+ config {
+ pins = "gpio34";
+ bias-pull-down;
+ drive-strength = <2>;
+ };
+ };
+
+ hdmi_ddc_active: hdmi_ddc_active {
+ mux {
+ pins = "gpio32", "gpio33";
+ function = "hdmi_ddc";
+ };
+
+ config {
+ pins = "gpio32", "gpio33";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+
+ hdmi_ddc_suspend: hdmi_ddc_suspend {
+ mux {
+ pins = "gpio32", "gpio33";
+ function = "hdmi_ddc";
+ };
+
+ config {
+ pins = "gpio32", "gpio33";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
};
timer@09840000 {
@@ -265,5 +354,286 @@
<960000000>,
<825000000>;
};
+
+ mdp_smmu: arm,smmu@d00000 {
+ compatible = "arm,smmu-v2";
+ reg = <0xd00000 0x10000>;
+
+ #global-interrupts = <1>;
+ interrupts = <0 73 0>,
+ <0 320 0>,
+ <0 321 0>;
+ #iommu-cells = <1>;
+
+ clocks = <&mmcc MMSS_MMAGIC_AHB_CLK>,
+ <&mmcc MMSS_MMAGIC_CFG_AHB_CLK>,
+ <&mmcc SMMU_MDP_AHB_CLK>,
+ <&mmcc SMMU_MDP_AXI_CLK>,
+ <&mmcc MMAGIC_MDSS_AXI_CLK>,
+ <&mmcc MMSS_MISC_AHB_CLK>;
+ clock-names = "mmagic_iface_clk",
+ "mmagic_cfg_iface_clk",
+ "smmu_iface_clk",
+ "smmu_bus_clk",
+ "mmagic_mdss_bus_clk",
+ "mmss_misc_bus_clk";
+
+ status = "okay";
+ };
+
+ mdp: mdp@900000 {
+ compatible = "qcom,mdp5";
+ reg = <0x900000 0x90000>,
+ <0x9b0000 0x1040>,
+ <0x9b8000 0x1040>;
+ reg-names = "mdp_phys",
+ "vbif_phys",
+ "vbif_nrt_phys";
+
+ interrupt-parent = <&intc>;
+ interrupts = <0 83 0>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ power-domains = <&mmcc MDSS_GDSC>;
+
+ connectors = <&hdmi>;
+
+ clocks = <&mmcc MDSS_AHB_CLK>,
+ <&mmcc MMSS_MMAGIC_AHB_CLK>,
+ <&mmcc MDSS_AXI_CLK>,
+ <&mmcc MDP_CLK_SRC>,
+ <&mmcc MDSS_MDP_CLK>,
+ <&mmcc SMMU_MDP_AXI_CLK>,
+ <&mmcc MDSS_VSYNC_CLK>,
+ <&mmcc MMAGIC_MDSS_AXI_CLK>,
+ <&rpmcc RPM_MMAXI_CLK>;
+ clock-names = "iface_clk",
+ "mmagic_iface_clk",
+ "bus_clk",
+ "core_clk_src",
+ "core_clk",
+ "iommu_clk",
+ "vsync_clk",
+ "mmagic_mdss_bus_clk",
+ "rpm_mmaxi_clk";
+ iommus = <&mdp_smmu 0>;
+ };
+
+ hdmi: hdmi-tx@9a0000 {
+ compatible = "qcom,hdmi-tx-8996";
+ reg = <0x009a0000 0x50c>,
+ <0x00070000 0x6158>,
+ <0x009e0000 0xfff>;
+ reg-names = "core_physical",
+ "qfprom_physical",
+ "hdcp_physical";
+
+ interrupt-parent = <&mdp>;
+ interrupts = <8 0>;
+
+ power-domains = <&mmcc MDSS_GDSC>;
+ clocks = <&mmcc MDSS_MDP_CLK>,
+ <&mmcc MDSS_AHB_CLK>,
+ <&mmcc MDSS_HDMI_CLK>,
+ <&mmcc MDSS_HDMI_AHB_CLK>,
+ <&mmcc MDSS_EXTPCLK_CLK>;
+ clock-names =
+ "mdp_core_clk",
+ "iface_clk",
+ "core_clk",
+ "alt_iface_clk",
+ "extp_clk";
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&hdmi_hpd_active &hdmi_ddc_active>;
+ pinctrl-1 = <&hdmi_hpd_suspend &hdmi_ddc_suspend>;
+
+ phys = <&hdmi_phy>;
+ phy-names = "hdmi_phy";
+ };
+
+ hdmi_phy: hdmi-phy@9a0600 {
+ compatible = "qcom,hdmi-phy-8996";
+ reg = <0x9a0600 0x1c4>,
+ <0x9a0a00 0x124>,
+ <0x9a0c00 0x124>,
+ <0x9a0e00 0x124>,
+ <0x9a1000 0x124>,
+ <0x9a1200 0x0c8>;
+ reg-names = "hdmi_pll",
+ "hdmi_tx_l0",
+ "hdmi_tx_l1",
+ "hdmi_tx_l2",
+ "hdmi_tx_l3",
+ "hdmi_phy";
+
+ power-domains = <&mmcc MDSS_GDSC>;
+
+ clocks = <&mmcc MDSS_AHB_CLK>,
+ <&mmcc MMSS_MMAGIC_AHB_CLK>,
+ <&gcc GCC_HDMI_CLKREF_CLK>;
+ clock-names = "iface_clk",
+ "mmagic_iface_clk",
+ "ref_clk";
+ };
+
+ ufsphy1: ufsphy@627000 {
+ compatible = "qcom,ufs-phy-qmp-14nm";
+ reg = <0x627000 0xda8>;
+ reg-names = "phy_mem";
+ #phy-cells = <0>;
+
+ vdda-phy-supply = <&pm8994_l28>;
+ vdda-pll-supply = <&pm8994_l12>;
+
+ vdda-phy-max-microamp = <18380>;
+ vdda-pll-max-microamp = <9440>;
+
+ vddp-ref-clk-supply = <&pm8994_l25>;
+ vddp-ref-clk-max-microamp = <100>;
+ vddp-ref-clk-always-on;
+ clock-names = "ref_clk_src", "ref_clk";
+ clocks = <&rpmcc RPM_LN_BB_CLK>,
+ <&gcc GCC_UFS_CLKREF_CLK>;
+ power-domains = <&gcc UFS_GDSC>;
+ status = "disabled";
+ };
+
+ ufs1: ufshc@624000 {
+ compatible = "qcom,ufshc";
+ reg = <0x624000 0x2500>;
+ interrupts = <0 265 0>;
+ phys = <&ufsphy1>;
+ phy-names = "ufsphy";
+
+ vcc-supply = <&pm8994_l20>;
+ vccq-supply = <&pm8994_l25>;
+ vccq2-supply = <&pm8994_s4>;
+
+ vcc-max-microamp = <600000>;
+ vccq-max-microamp = <450000>;
+ vccq2-max-microamp = <450000>;
+
+ clock-names =
+ "core_clk_src",
+ "core_clk",
+ "bus_clk",
+ "bus_aggr_clk",
+ "iface_clk",
+ "core_clk_unipro_src",
+ "core_clk_unipro",
+ "core_clk_ice",
+ "ref_clk",
+ "tx_lane0_sync_clk",
+ "rx_lane0_sync_clk";
+ clocks =
+ <&gcc UFS_AXI_CLK_SRC>,
+ <&gcc GCC_UFS_AXI_CLK>,
+ <&gcc GCC_SYS_NOC_UFS_AXI_CLK>,
+ <&gcc GCC_AGGRE2_UFS_AXI_CLK>,
+ <&gcc GCC_UFS_AHB_CLK>,
+ <&gcc UFS_ICE_CORE_CLK_SRC>,
+ <&gcc GCC_UFS_UNIPRO_CORE_CLK>,
+ <&gcc GCC_UFS_ICE_CORE_CLK>,
+ <&rpmcc RPM_LN_BB_CLK>,
+ <&gcc GCC_UFS_TX_SYMBOL_0_CLK>,
+ <&gcc GCC_UFS_RX_SYMBOL_0_CLK>;
+ freq-table-hz =
+ <100000000 200000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <150000000 300000000>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>,
+ <0 0>;
+
+ lanes-per-direction = <1>;
+ status = "disabled";
+
+ ufs_variant {
+ compatible = "qcom,ufs_variant";
+ };
+ };
+ };
+
+ glink {
+ compatible = "qcom,glink";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0xffffffff>;
+
+ rpm {
+ qcom,glink-edge = "rpm";
+ interrupts = <0 168 1>;
+ qcom,irq-mask = <0x1>;
+ reg = <0x00068000 0x6000>,
+ <0x09820010 0x4>;
+ reg-names = "msgram", "irq-reg-base";
+
+ rpm-requests {
+ compatible = "qcom,rpm-msm8996";
+ qcom,glink-channels = "rpm_requests";
+
+ rpmcc: qcom,rpmcc {
+ compatible = "qcom,rpmcc-msm8996", "qcom,rpmcc";
+ #clock-cells = <1>;
+ };
+
+ pm8994-regulators {
+ compatible = "qcom,rpm-pm8994-regulators";
+
+ pm8994_s1: s1 {};
+ pm8994_s2: s2 {};
+ pm8994_s3: s3 {};
+ pm8994_s4: s4 {};
+ pm8994_s5: s5 {};
+ pm8994_s6: s6 {};
+ pm8994_s7: s7 {};
+ pm8994_s8: s8 {};
+ pm8994_s9: s9 {};
+ pm8994_s10: s10 {};
+ pm8994_s12: s12 {};
+
+ pm8994_l1: l1 {};
+ pm8994_l2: l2 {};
+ pm8994_l3: l3 {};
+ pm8994_l4: l4 {};
+ pm8994_l5: l5 {};
+ pm8994_l6: l6 {};
+ pm8994_l7: l7 {};
+ pm8994_l8: l8 {};
+ pm8994_l9: l9 {};
+ pm8994_l10: l10 {};
+ pm8994_l11: l11 {};
+ pm8994_l12: l12 {};
+ pm8994_l13: l13 {};
+ pm8994_l14: l14 {};
+ pm8994_l15: l15 {};
+ pm8994_l16: l16 {};
+ pm8994_l17: l17 {};
+ pm8994_l18: l18 {};
+ pm8994_l19: l19 {};
+ pm8994_l20: l20 {};
+ pm8994_l21: l21 {};
+ pm8994_l22: l22 {};
+ pm8994_l23: l23 {};
+ pm8994_l24: l24 {};
+ pm8994_l25: l25 {};
+ pm8994_l26: l26 {};
+ pm8994_l27: l27 {};
+ pm8994_l28: l28 {};
+ pm8994_l29: l29 {};
+ pm8994_l30: l30 {};
+ pm8994_l31: l31 {};
+ pm8994_l32: l32 {};
+ };
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/pm8916.dtsi b/arch/arm64/boot/dts/qcom/pm8916.dtsi
index f71679b15d544..75520bc94c9c9 100644
--- a/arch/arm64/boot/dts/qcom/pm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8916.dtsi
@@ -1,4 +1,5 @@
#include <dt-bindings/iio/qcom,spmi-vadc.h>
+#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/spmi/spmi.h>
@@ -17,12 +18,15 @@
interrupts = <0x0 0x61 0x1 IRQ_TYPE_EDGE_RISING>;
};
- pwrkey@800 {
+ spmi_pon: pwrkey@800 {
compatible = "qcom,pm8941-pwrkey";
reg = <0x800>;
- interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>;
+ interrupts = <0x0 0x8 0 IRQ_TYPE_EDGE_BOTH>,
+ <0x0 0x8 1 IRQ_TYPE_EDGE_BOTH>;
debounce = <15625>;
bias-pull-up;
+ resin-pull-up;
+ linux,code = <KEY_RESTART>;
};
pm8916_gpios: gpios@c000 {
@@ -95,5 +99,132 @@
reg = <0x1 SPMI_USID>;
#address-cells = <1>;
#size-cells = <0>;
+
+ regulators {
+ compatible = "qcom,pm8916-regulators";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ s1@1400 {
+ reg = <0x1400 0x300>;
+ status = "disabled";
+ };
+
+ pm8916_s2: s2@1700 {
+ reg = <0x1700 0x300>;
+ status = "okay";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1350000>;
+ };
+
+ s3@1a00 {
+ reg = <0x1a00 0x300>;
+ status = "disabled";
+ };
+
+ s4@1d00 {
+ reg = <0x1d00 0x300>;
+ status = "disabled";
+ };
+
+ l1@4000 {
+ reg = <0x4000 0x100>;
+ status = "disabled";
+ };
+
+ l2@4100 {
+ reg = <0x4100 0x100>;
+ status = "disabled";
+ };
+
+ l3@4200 {
+ reg = <0x4200 0x100>;
+ status = "disabled";
+ };
+
+ l4@4300 {
+ reg = <0x4300 0x100>;
+ status = "disabled";
+ };
+
+ l5@4400 {
+ reg = <0x4400 0x100>;
+ status = "disabled";
+ };
+
+ l6@4500 {
+ reg = <0x4500 0x100>;
+ status = "disabled";
+ };
+
+ l7@4600 {
+ reg = <0x4600 0x100>;
+ status = "disabled";
+ };
+
+ l8@4700 {
+ reg = <0x4700 0x100>;
+ status = "disabled";
+ };
+
+ l9@4800 {
+ reg = <0x4800 0x100>;
+ status = "disabled";
+ };
+
+ l10@4900 {
+ reg = <0x4900 0x100>;
+ status = "disabled";
+ };
+
+ l11@4a00 {
+ reg = <0x4a00 0x100>;
+ status = "disabled";
+ };
+
+ l12@4b00 {
+ reg = <0x4b00 0x100>;
+ status = "disabled";
+ };
+
+ l13@4c00 {
+ reg = <0x4c00 0x100>;
+ status = "disabled";
+ };
+
+ l14@4d00 {
+ reg = <0x4d00 0x100>;
+ status = "disabled";
+ };
+
+ l15@4e00 {
+ reg = <0x4e00 0x100>;
+ status = "disabled";
+ };
+
+ l16@4f00 {
+ reg = <0x4f00 0x100>;
+ status = "disabled";
+ };
+
+ l17@5000 {
+ reg = <0x5000 0x100>;
+ status = "disabled";
+ };
+
+ l18@5100 {
+ reg = <0x5100 0x100>;
+ status = "disabled";
+ };
+ };
+
+ wcd_codec: codec@f000 {
+ compatible = "qcom,apq8016-wcd-codec";
+ reg = <0xf000 0x200>;
+ #sound-dai-cells = <1>;
+ vddio-supply = <&pm8916_l5>;
+ vdd-pa-supply = <&pm8916_s4>;
+ vdd-mic-bias-supply = <&pm8916_l13>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/pm8994.dtsi b/arch/arm64/boot/dts/qcom/pm8994.dtsi
index 1222d2e904f6b..0fa57fedf9688 100644
--- a/arch/arm64/boot/dts/qcom/pm8994.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8994.dtsi
@@ -58,5 +58,19 @@
reg = <0x1 SPMI_USID>;
#address-cells = <1>;
#size-cells = <0>;
+
+ regulators {
+ compatible = "qcom,pm8994-regulators";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ pm8994_s11: s11@3200 {
+ reg = <0x3200 0x100>;
+ status = "okay";
+ regulator-min-microvolt = <520000>;
+ regulator-max-microvolt = <1140000>;
+ regulator-always-on;
+ };
+ };
};
};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index a44ef995d8ae8..3cb98a4df23f0 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,7 +1,6 @@
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
@@ -22,7 +21,6 @@ CONFIG_CGROUP_HUGETLB=y
# CONFIG_NET_NS is not set
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_ALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_JUMP_LABEL=y
@@ -41,6 +39,7 @@ CONFIG_ARCH_MEDIATEK=y
CONFIG_ARCH_MVEBU=y
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_ROCKCHIP=y
CONFIG_ARCH_SEATTLE=y
CONFIG_ARCH_RENESAS=y
CONFIG_ARCH_R8A7795=y
@@ -75,6 +74,13 @@ CONFIG_ARM_CPUIDLE=y
CONFIG_CPU_FREQ=y
CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
CONFIG_ARM_SCPI_CPUFREQ=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPUFREQ_DT=y
+# CONFIG_ARM_TEGRA_CPUFREQ is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -84,18 +90,34 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
# CONFIG_IPV6 is not set
CONFIG_BPF_JIT=y
-# CONFIG_WIRELESS is not set
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCIBTUSB=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_3WIRE=y
+CONFIG_BT_QCOMSMD=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=y
CONFIG_NET_9P=y
CONFIG_NET_9P_VIRTIO=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=64
CONFIG_BLK_DEV_LOOP=y
CONFIG_VIRTIO_BLK=y
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
-# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PCI=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_AHCI_PLATFORM=y
@@ -119,6 +141,12 @@ CONFIG_SMC91X=y
CONFIG_SMSC911X=y
CONFIG_MICREL_PHY=y
# CONFIG_WLAN is not set
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_DM9601=y
+CONFIG_USB_NET_PLUSB=y
+CONFIG_ATH_CARDS=y
+CONFIG_WCN36XX=m
+CONFIG_WCN36XX_DEBUGFS=y
CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
# CONFIG_SERIO_SERPORT is not set
@@ -158,29 +186,51 @@ CONFIG_SPI_QUP=y
CONFIG_SPMI=y
CONFIG_PINCTRL_SINGLE=y
CONFIG_PINCTRL_MSM8916=y
+CONFIG_PINCTRL_MSM8996=y
CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_DWAPB=y
CONFIG_GPIO_PL061=y
CONFIG_GPIO_RCAR=y
+CONFIG_GPIO_QCOM_SMSM=y
+CONFIG_GPIO_QCOM_SMP2P=y
CONFIG_GPIO_XGENE=y
CONFIG_POWER_RESET_MSM=y
CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_AVS=y
+CONFIG_QCOM_CPR=y
# CONFIG_HWMON is not set
CONFIG_THERMAL=y
CONFIG_THERMAL_EMULATION=y
CONFIG_EXYNOS_THERMAL=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_MFD_SEC_CORE=y
-CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_QCOM_SMD_RPM=y
CONFIG_REGULATOR_QCOM_SPMI=y
CONFIG_REGULATOR_S2MPS11=y
+CONFIG_CPU_THERMAL=y
+CONFIG_QCOM_TSENS=y
+CONFIG_MFD_QCOM_RPM=y
+CONFIG_REGULATOR_QCOM_RPM=y
CONFIG_FB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_DRM=y
+CONFIG_DRM_I2C_ADV7511=y
+# CONFIG_DRM_I2C_ADV7511_SLAVE_ENCODER is not set
CONFIG_FB_ARMCLCD=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
@@ -189,15 +239,24 @@ CONFIG_SND=y
CONFIG_SND_SOC=y
CONFIG_SND_SOC_RCAR=y
CONFIG_SND_SOC_AK4613=y
+CONFIG_SND_SOC_QCOM=y
+CONFIG_SND_SOC_APQ8016_SBC=y
+CONFIG_SND_SOC_MSM8x16_WCD=y
CONFIG_USB=y
CONFIG_USB_OTG=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_PLATFORM=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_DYNAMIC_MINORS=y
+CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_MSM=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_ACM=y
+CONFIG_USB_PRINTER=y
+CONFIG_USB_WDM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_DWC2=y
CONFIG_USB_CHIPIDEA=y
@@ -208,6 +267,27 @@ CONFIG_USB_HSIC_USB3503=y
CONFIG_USB_MSM_OTG=y
CONFIG_USB_ULPI=y
CONFIG_USB_GADGET=y
+CONFIG_USB_UAS=y
+CONFIG_USBIP_CORE=y
+CONFIG_USBIP_HOST=y
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_TEST=m
+CONFIG_USB_GADGET_DEBUG=y
+CONFIG_USB_GADGET_VERBOSE=y
+CONFIG_USB_GADGET_DEBUG_FILES=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=m
+CONFIG_USB_ZERO=m
+CONFIG_USB_ETH=m
+CONFIG_USB_MASS_STORAGE=m
+CONFIG_USB_G_SERIAL=m
CONFIG_MMC=y
CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_ARMMMCI=y
@@ -224,6 +304,7 @@ CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_GPIO=y
CONFIG_LEDS_SYSCON=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_CPU=y
@@ -239,6 +320,7 @@ CONFIG_QCOM_BAM_DMA=y
CONFIG_RCAR_DMAC=y
CONFIG_VFIO=y
CONFIG_VFIO_PCI=y
+CONFIG_QCOM_ADM=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_MMIO=y
@@ -247,7 +329,10 @@ CONFIG_XEN_GRANT_DEV_ALLOC=y
CONFIG_COMMON_CLK_SCPI=y
CONFIG_COMMON_CLK_CS2000_CP=y
CONFIG_COMMON_CLK_QCOM=y
+CONFIG_QCOM_CLK_SMD_RPM=y
+CONFIG_QCOM_RPMCC=y
CONFIG_MSM_GCC_8916=y
+CONFIG_MSM_MMCC_8996=y
CONFIG_HWSPINLOCK_QCOM=y
CONFIG_MAILBOX=y
CONFIG_ARM_MHU=y
@@ -256,6 +341,8 @@ CONFIG_ARM_SMMU=y
CONFIG_QCOM_SMEM=y
CONFIG_QCOM_SMD=y
CONFIG_QCOM_SMD_RPM=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
CONFIG_ARCH_TEGRA_132_SOC=y
CONFIG_ARCH_TEGRA_210_SOC=y
CONFIG_EXTCON_USB_GPIO=y
@@ -264,6 +351,23 @@ CONFIG_PHY_RCAR_GEN3_USB2=y
CONFIG_PHY_HI6220_USB=y
CONFIG_PHY_XGENE=y
CONFIG_ARM_SCPI_PROTOCOL=y
+CONFIG_HISILICON_IRQ_MBIGEN=y
+CONFIG_QCOM_A53=y
+CONFIG_IOMMU_API=y
+CONFIG_OF_IOMMU=y
+CONFIG_IOMMU_SUPPORT=y
+CONFIG_QCOM_IOMMU_V1=y
+CONFIG_QCOM_GSBI=y
+CONFIG_EXTCON=y
+CONFIG_MSM_BUS_SCALING=y
+# CONFIG_MSM_BUSPM_DEV is not set
+CONFIG_BUS_TOPOLOGY_ADHOC=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_QCOM_Q6V5_PIL=y
+CONFIG_QCOM_TZ_PIL=y
+CONFIG_PHY_XGENE=y
+CONFIG_NVMEM=y
+CONFIG_QCOM_QFPROM=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_FANOTIFY=y
@@ -273,18 +377,18 @@ CONFIG_AUTOFS4_FS=y
CONFIG_FUSE_FS=y
CONFIG_CUSE=y
CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
CONFIG_CONFIGFS_FS=y
CONFIG_EFIVAR_FS=y
CONFIG_SQUASHFS=y
+CONFIG_UFS_FS=y
+CONFIG_UFS_DEBUG=y
CONFIG_NFS_FS=y
CONFIG_NFS_V4=y
CONFIG_ROOT_NFS=y
CONFIG_9P_FS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_VIRTUALIZATION=y
CONFIG_KVM=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
@@ -292,10 +396,18 @@ CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_LOCKUP_DETECTOR=y
+# CONFIG_DETECT_HUNG_TASK is not set
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_PROVE_LOCKING=y
# CONFIG_FTRACE is not set
CONFIG_MEMTEST=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SINK_TPIU=y
+CONFIG_CORESIGHT_SINK_ETBV10=y
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
CONFIG_SECURITY=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_ANSI_CPRNG=y
@@ -307,3 +419,14 @@ CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
CONFIG_CRYPTO_CRC32_ARM64=y
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+CONFIG_SCSI_MOD=y
+CONFIG_SCSI_LOWLEVEL=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PCI=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_PHY_QCOM_UFS=y
+CONFIG_UFS_FS=y
+CONFIG_UFS_DEBUG=y
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index c64268dbff64c..32a2d9b98effb 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -86,6 +86,12 @@ static inline void flush_cache_page(struct vm_area_struct *vma,
extern void __dma_map_area(const void *, size_t, int);
extern void __dma_unmap_area(const void *, size_t, int);
extern void __dma_flush_range(const void *, const void *);
+extern void __dma_inv_range(const void *, const void *);
+extern void __dma_clean_range(const void *, const void *);
+
+#define dmac_flush_range __dma_flush_range
+#define dmac_inv_range __dma_inv_range
+#define dmac_clean_range __dma_clean_range
/*
* Copy user data from/to a page which is mapped into a different
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index 8f03446cf89f1..41bf609206ed6 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -71,4 +71,9 @@ static inline void __init cpu_read_bootcpu_ops(void)
cpu_read_ops(0);
}
+#define CPU_METHOD_OF_DECLARE(name, __ops) \
+ static const struct cpu_operations *__cpu_method_table_##name \
+ __used __section(__cpu_method_of_table) \
+ = __ops;
+
#endif /* ifndef __ASM_CPU_OPS_H */
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
index 243ef256b8c9e..908a6a53ed49d 100644
--- a/arch/arm64/include/asm/device.h
+++ b/arch/arm64/include/asm/device.h
@@ -22,9 +22,18 @@ struct dev_archdata {
void *iommu; /* private IOMMU data */
#endif
bool dma_coherent;
+#ifdef CONFIG_ARM64_DMA_USE_IOMMU
+ struct dma_iommu_mapping *mapping;
+#endif
};
struct pdev_archdata {
};
+#ifdef CONFIG_ARM64_DMA_USE_IOMMU
+#define to_dma_iommu_mapping(dev) ((dev)->archdata.mapping)
+#else
+#define to_dma_iommu_mapping(dev) NULL
+#endif
+
#endif
diff --git a/arch/arm64/include/asm/dma-iommu.h b/arch/arm64/include/asm/dma-iommu.h
new file mode 100644
index 0000000000000..a8c56acc8c985
--- /dev/null
+++ b/arch/arm64/include/asm/dma-iommu.h
@@ -0,0 +1,36 @@
+#ifndef ASMARM_DMA_IOMMU_H
+#define ASMARM_DMA_IOMMU_H
+
+#ifdef __KERNEL__
+
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/kmemcheck.h>
+#include <linux/kref.h>
+
+struct dma_iommu_mapping {
+ /* iommu specific data */
+ struct iommu_domain *domain;
+
+ void *bitmap;
+ size_t bits;
+ unsigned int order;
+ dma_addr_t base;
+
+ spinlock_t lock;
+ struct kref kref;
+};
+
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
+ int order);
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
+
+int arm_iommu_attach_device(struct device *dev,
+ struct dma_iommu_mapping *mapping);
+void arm_iommu_detach_device(struct device *dev);
+
+#endif /* __KERNEL__ */
+#endif
diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
index af58dcdefb21a..63bea4f70a744 100644
--- a/arch/arm64/include/asm/smp_plat.h
+++ b/arch/arm64/include/asm/smp_plat.h
@@ -36,6 +36,7 @@ static inline u32 mpidr_hash_size(void)
return 1 << mpidr_hash.bits;
}
+extern void secondary_holding_pen(void);
/*
* Logical CPU mapping.
*/
@@ -55,5 +56,6 @@ static inline int get_logical_index(u64 mpidr)
return cpu;
return -EINVAL;
}
+extern volatile unsigned long secondary_holding_pen_release;
#endif /* __ASM_SMP_PLAT_H */
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index c7cfb8fe06f94..db6e58aab6671 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -27,6 +27,9 @@
extern const struct cpu_operations smp_spin_table_ops;
extern const struct cpu_operations acpi_parking_protocol_ops;
extern const struct cpu_operations cpu_psci_ops;
+extern struct cpu_operations __cpu_method_of_table[];
+static const struct cpu_operations *__cpu_method_of_table_sentinel
+ __used __section(__cpu_method_of_table_end);
const struct cpu_operations *cpu_ops[NR_CPUS];
@@ -47,7 +50,7 @@ static const struct cpu_operations *acpi_supported_cpu_ops[] __initconst = {
static const struct cpu_operations * __init cpu_get_ops(const char *name)
{
const struct cpu_operations **ops;
-
+ pr_emerg("CPU %s \n", name);
ops = acpi_disabled ? dt_supported_cpu_ops : acpi_supported_cpu_ops;
while (*ops) {
@@ -57,6 +60,14 @@ static const struct cpu_operations * __init cpu_get_ops(const char *name)
ops++;
}
+ ops = (void *)__cpu_method_of_table;
+
+ while (*ops) {
+ if (!strcmp(name, (*ops)->name))
+ return *ops;
+ ops++;
+ }
+
return NULL;
}
@@ -85,6 +96,7 @@ static const char *__init cpu_read_enable_method(int cpu)
dn->full_name);
}
} else {
+ pr_emerg("CPU enable method %s \n",__func__);
enable_method = acpi_get_enable_method(cpu);
if (!enable_method) {
/*
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 42816bebb1e0f..106c5920819b2 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -126,3 +126,4 @@ const struct cpu_operations cpu_psci_ops = {
#endif
};
+CPU_METHOD_OF_DECLARE(psci, &cpu_psci_ops);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index b2d5f4ee9a1cd..28cb5f95e2194 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -65,6 +65,7 @@
struct secondary_data secondary_data;
/* Number of CPUs which aren't online, but looping in kernel text. */
int cpus_stuck_in_kernel;
+volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
enum ipi_msg_type {
IPI_RESCHEDULE,
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 18a71bcd26ee4..adce604a94a0f 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -28,9 +28,6 @@
#include <asm/io.h>
#include <asm/smp_plat.h>
-extern void secondary_holding_pen(void);
-volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
-
static phys_addr_t cpu_release_addr[NR_CPUS];
/*
@@ -132,3 +129,4 @@ const struct cpu_operations smp_spin_table_ops = {
.cpu_prepare = smp_spin_table_cpu_prepare,
.cpu_boot = smp_spin_table_cpu_boot,
};
+CPU_METHOD_OF_DECLARE(spin_table, &smp_spin_table_ops);
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 6df07069a0253..69aeb3aaeca36 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -119,7 +119,7 @@ ENTRY(__inval_cache_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-__dma_inv_range:
+ENTRY(__dma_inv_range)
dcache_line_size x2, x3
sub x3, x2, #1
tst x1, x3 // end cache line aligned?
@@ -145,7 +145,7 @@ ENDPROC(__dma_inv_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-__dma_clean_range:
+ENTRY(__dma_clean_range)
dcache_line_size x2, x3
sub x3, x2, #1
bic x0, x0, x3
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index a6e757cbab778..1f1552d605f9d 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -24,15 +24,24 @@
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
+#include <linux/mm.h>
+#include <linux/iommu.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <asm/cacheflush.h>
+#include <asm/dma-iommu.h>
+
+#include "mm.h"
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
bool coherent)
{
- if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+ if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+ return pgprot_writecombine(prot);
+ else if (dma_get_attr(DMA_ATTR_STRONGLY_ORDERED, attrs))
+ prot = pgprot_noncached(prot);
+ else if (!coherent)
return pgprot_writecombine(prot);
return prot;
}
@@ -804,57 +813,24 @@ struct iommu_dma_notifier_data {
static LIST_HEAD(iommu_dma_masters);
static DEFINE_MUTEX(iommu_dma_notifier_lock);
-/*
- * Temporarily "borrow" a domain feature flag to to tell if we had to resort
- * to creating our own domain here, in case we need to clean it up again.
- */
-#define __IOMMU_DOMAIN_FAKE_DEFAULT (1U << 31)
-
static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
u64 dma_base, u64 size)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
/*
- * Best case: The device is either part of a group which was
- * already attached to a domain in a previous call, or it's
- * been put in a default DMA domain by the IOMMU core.
+ * If the IOMMU driver has the DMA domain support that we require,
+ * then the IOMMU core will have already configured a group for this
+ * device, and allocated the default domain for that group.
*/
- if (!domain) {
- /*
- * Urgh. The IOMMU core isn't going to do default domains
- * for non-PCI devices anyway, until it has some means of
- * abstracting the entirely implementation-specific
- * sideband data/SoC topology/unicorn dust that may or
- * may not differentiate upstream masters.
- * So until then, HORRIBLE HACKS!
- */
- domain = ops->domain_alloc(IOMMU_DOMAIN_DMA);
- if (!domain)
- goto out_no_domain;
-
- domain->ops = ops;
- domain->type = IOMMU_DOMAIN_DMA | __IOMMU_DOMAIN_FAKE_DEFAULT;
-
- if (iommu_attach_device(domain, dev))
- goto out_put_domain;
+ if (!domain || iommu_dma_init_domain(domain, dma_base, size)) {
+ pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
+ dev_name(dev));
+ return false;
}
- if (iommu_dma_init_domain(domain, dma_base, size))
- goto out_detach;
-
dev->archdata.dma_ops = &iommu_dma_ops;
return true;
-
-out_detach:
- iommu_detach_device(domain, dev);
-out_put_domain:
- if (domain->type & __IOMMU_DOMAIN_FAKE_DEFAULT)
- iommu_domain_free(domain);
-out_no_domain:
- pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
- dev_name(dev));
- return false;
}
static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
@@ -933,6 +909,10 @@ static int __init __iommu_dma_init(void)
ret = register_iommu_dma_ops_notifier(&platform_bus_type);
if (!ret)
ret = register_iommu_dma_ops_notifier(&amba_bustype);
+#ifdef CONFIG_PCI
+ if (!ret)
+ ret = register_iommu_dma_ops_notifier(&pci_bus_type);
+#endif
/* handle devices queued before this arch_initcall */
if (!ret)
@@ -967,11 +947,8 @@ void arch_teardown_dma_ops(struct device *dev)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- if (domain) {
+ if (WARN_ON(domain))
iommu_detach_device(domain, dev);
- if (domain->type & __IOMMU_DOMAIN_FAKE_DEFAULT)
- iommu_domain_free(domain);
- }
dev->archdata.dma_ops = NULL;
}
@@ -993,3 +970,1031 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
dev->archdata.dma_coherent = coherent;
__iommu_setup_dma_ops(dev, dma_base, size, iommu);
}
+
+#ifdef CONFIG_ARM64_DMA_USE_IOMMU
+
+/*
+ * Make an area consistent for devices.
+ * Note: Drivers should NOT use this function directly, as it will break
+ * platforms with CONFIG_DMABOUNCE.
+ * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ */
+static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
+ size_t size, enum dma_data_direction dir)
+{
+ __dma_map_area(page_address(page) + off, size, dir);
+}
+
+static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+ size_t size, enum dma_data_direction dir)
+{
+ __dma_unmap_area(page_address(page) + off, size, dir);
+
+ /*
+ * Mark the D-cache clean for this page to avoid extra flushing.
+ */
+ if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
+ set_bit(PG_dcache_clean, &page->flags);
+}
+
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ return -EIO;
+
+ *dev->dma_mask = dma_mask;
+
+ return 0;
+}
+
+/* IOMMU */
+
+static void __dma_clear_buffer(struct page *page, size_t size,
+ struct dma_attrs *attrs)
+{
+ /*
+ * Ensure that the allocated pages are zeroed, and that any data
+ * lurking in the kernel direct-mapped region is invalidated.
+ */
+ void *ptr = page_address(page);
+ if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs))
+ memset(ptr, 0, size);
+ dmac_flush_range(ptr, ptr + size);
+}
+
+static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
+ size_t size)
+{
+ unsigned int order = get_order(size);
+ unsigned int align = 0;
+ unsigned int count, start;
+ unsigned long flags;
+
+ if (order > CONFIG_ARM64_DMA_IOMMU_ALIGNMENT)
+ order = CONFIG_ARM64_DMA_IOMMU_ALIGNMENT;
+
+ count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
+ (1 << mapping->order) - 1) >> mapping->order;
+
+ if (order > mapping->order)
+ align = (1 << (order - mapping->order)) - 1;
+
+ spin_lock_irqsave(&mapping->lock, flags);
+ start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
+ count, align);
+ if (start > mapping->bits) {
+ spin_unlock_irqrestore(&mapping->lock, flags);
+ return DMA_ERROR_CODE;
+ }
+
+ bitmap_set(mapping->bitmap, start, count);
+ spin_unlock_irqrestore(&mapping->lock, flags);
+
+ return mapping->base + (start << (mapping->order + PAGE_SHIFT));
+}
+
+static inline void __free_iova(struct dma_iommu_mapping *mapping,
+ dma_addr_t addr, size_t size)
+{
+ unsigned int start = (addr - mapping->base) >>
+ (mapping->order + PAGE_SHIFT);
+ unsigned int count = ((size >> PAGE_SHIFT) +
+ (1 << mapping->order) - 1) >> mapping->order;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mapping->lock, flags);
+ bitmap_clear(mapping->bitmap, start, count);
+ spin_unlock_irqrestore(&mapping->lock, flags);
+}
+
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
+ gfp_t gfp, struct dma_attrs *attrs)
+{
+ struct page **pages;
+ int count = size >> PAGE_SHIFT;
+ int array_size = count * sizeof(struct page *);
+ int i = 0;
+
+ if (array_size <= PAGE_SIZE)
+ pages = kzalloc(array_size, gfp);
+ else
+ pages = vzalloc(array_size);
+ if (!pages)
+ return NULL;
+
+ if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+ unsigned long order = get_order(size);
+ struct page *page;
+
+ page = dma_alloc_from_contiguous(dev, count, order);
+ if (!page)
+ goto error;
+
+ __dma_clear_buffer(page, size, attrs);
+
+ for (i = 0; i < count; i++)
+ pages[i] = page + i;
+
+ return pages;
+ }
+
+ /*
+	 * IOMMU can map any pages, so highmem can also be used here
+ */
+ gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
+
+ while (count) {
+ int j, order = __fls(count);
+
+ pages[i] = alloc_pages(gfp, order);
+ while (!pages[i] && order)
+ pages[i] = alloc_pages(gfp, --order);
+ if (!pages[i])
+ goto error;
+
+ if (order) {
+ split_page(pages[i], order);
+ j = 1 << order;
+ while (--j)
+ pages[i + j] = pages[i] + j;
+ }
+
+ __dma_clear_buffer(pages[i], PAGE_SIZE << order, attrs);
+ i += 1 << order;
+ count -= 1 << order;
+ }
+
+ return pages;
+error:
+ while (i--)
+ if (pages[i])
+ __free_pages(pages[i], 0);
+ if (array_size <= PAGE_SIZE)
+ kfree(pages);
+ else
+ vfree(pages);
+ return NULL;
+}
+
+static int __iommu_free_buffer(struct device *dev, struct page **pages,
+ size_t size, struct dma_attrs *attrs)
+{
+ int count = size >> PAGE_SHIFT;
+ int array_size = count * sizeof(struct page *);
+ int i;
+
+ if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+ dma_release_from_contiguous(dev, pages[0], count);
+ } else {
+ for (i = 0; i < count; i++)
+ if (pages[i])
+ __free_pages(pages[i], 0);
+ }
+
+ if (array_size <= PAGE_SIZE)
+ kfree(pages);
+ else
+ vfree(pages);
+ return 0;
+}
+
+/*
+ * Create a CPU mapping for the specified pages
+ */
+static void *
+__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
+ const void *caller)
+{
+ return dma_common_pages_remap(pages, size, VM_USERMAP, prot, caller);
+}
+
+/*
+ * Create a mapping in device IO address space for specified pages
+ */
+static dma_addr_t
+__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ dma_addr_t dma_addr, iova;
+ int i, ret;
+
+ dma_addr = __alloc_iova(mapping, size);
+ if (dma_addr == DMA_ERROR_CODE)
+ return dma_addr;
+
+ iova = dma_addr;
+ for (i = 0; i < count; ) {
+ unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
+ phys_addr_t phys = page_to_phys(pages[i]);
+ unsigned int len, j;
+
+ for (j = i + 1; j < count; j++, next_pfn++)
+ if (page_to_pfn(pages[j]) != next_pfn)
+ break;
+
+ len = (j - i) << PAGE_SHIFT;
+ ret = iommu_map(mapping->domain, iova, phys, len,
+ IOMMU_READ|IOMMU_WRITE);
+ if (ret < 0)
+ goto fail;
+ iova += len;
+ i = j;
+ }
+ return dma_addr;
+fail:
+ iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
+ __free_iova(mapping, dma_addr, size);
+ return DMA_ERROR_CODE;
+}
+
+static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova,
+ size_t size)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+ /*
+ * add optional in-page offset from iova to size and align
+ * result to page size
+ */
+ size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
+ iova &= PAGE_MASK;
+
+ iommu_unmap(mapping->domain, iova, size);
+ __free_iova(mapping, iova, size);
+ return 0;
+}
+
+static struct page **__atomic_get_pages(void *addr)
+{
+ struct page *page;
+ phys_addr_t phys;
+
+ phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
+ page = phys_to_page(phys);
+
+ return (struct page **)page;
+}
+
+static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
+{
+ struct vm_struct *area;
+
+ if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
+ return __atomic_get_pages(cpu_addr);
+
+ if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+ return cpu_addr;
+
+ area = find_vm_area(cpu_addr);
+ if (area)
+ return area->pages;
+ return NULL;
+}
+
+static void *__iommu_alloc_atomic(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t flags)
+{
+ struct page *page;
+ void *addr;
+
+ addr = __alloc_from_pool(size, &page, flags);
+ if (!addr)
+ return NULL;
+
+ *handle = __iommu_create_mapping(dev, &page, size);
+ if (*handle == DMA_ERROR_CODE)
+ goto err_mapping;
+
+ return addr;
+
+err_mapping:
+ __free_from_pool(addr, size);
+ return NULL;
+}
+
+static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
+ dma_addr_t handle, size_t size)
+{
+ __iommu_remove_mapping(dev, handle, size);
+ __free_from_pool(cpu_addr, size);
+}
+
+static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+ pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL,
+ is_device_dma_coherent(dev));
+ struct page **pages;
+ void *addr = NULL;
+
+ *handle = DMA_ERROR_CODE;
+ size = PAGE_ALIGN(size);
+
+ if (!gfpflags_allow_blocking(gfp))
+ return __iommu_alloc_atomic(dev, size, handle, gfp);
+
+ /*
+ * Following is a work-around (a.k.a. hack) to prevent pages
+ * with __GFP_COMP being passed to split_page() which cannot
+ * handle them. The real problem is that this flag probably
+ * should be 0 on ARM as it is not supported on this
+ * platform; see CONFIG_HUGETLBFS.
+ */
+ gfp &= ~(__GFP_COMP);
+
+ pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
+ if (!pages)
+ return NULL;
+
+ *handle = __iommu_create_mapping(dev, pages, size);
+ if (*handle == DMA_ERROR_CODE)
+ goto err_buffer;
+
+ if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+ return pages;
+
+ addr = __iommu_alloc_remap(pages, size, gfp, prot,
+ __builtin_return_address(0));
+ if (!addr)
+ goto err_mapping;
+
+ return addr;
+
+err_mapping:
+ __iommu_remove_mapping(dev, *handle, size);
+err_buffer:
+ __iommu_free_buffer(dev, pages, size, attrs);
+ return NULL;
+}
+
+static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
+{
+ unsigned long uaddr = vma->vm_start;
+ unsigned long usize = vma->vm_end - vma->vm_start;
+ struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+ is_device_dma_coherent(dev));
+
+ if (!pages)
+ return -ENXIO;
+
+ do {
+ int ret = vm_insert_page(vma, uaddr, *pages++);
+ if (ret) {
+ pr_err("Remapping memory failed: %d\n", ret);
+ return ret;
+ }
+ uaddr += PAGE_SIZE;
+ usize -= PAGE_SIZE;
+ } while (usize > 0);
+
+ return 0;
+}
+
+/*
+ * free a page as defined by the above mapping.
+ * Must not be called with IRQs disabled.
+ */
+void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, struct dma_attrs *attrs)
+{
+ struct page **pages;
+ size = PAGE_ALIGN(size);
+
+ if (__in_atomic_pool(cpu_addr, size)) {
+ __iommu_free_atomic(dev, cpu_addr, handle, size);
+ return;
+ }
+
+ pages = __iommu_get_pages(cpu_addr, attrs);
+ if (!pages) {
+ WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+ return;
+ }
+
+ if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+ dma_common_free_remap(cpu_addr, size, VM_USERMAP);
+
+ __iommu_remove_mapping(dev, handle, size);
+ __iommu_free_buffer(dev, pages, size, attrs);
+}
+
+int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size, struct dma_attrs *attrs)
+{
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+
+ if (!pages)
+ return -ENXIO;
+
+ return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
+ GFP_KERNEL);
+}
+
+static int __dma_direction_to_prot(enum dma_data_direction dir)
+{
+ int prot;
+
+ switch (dir) {
+ case DMA_BIDIRECTIONAL:
+ prot = IOMMU_READ | IOMMU_WRITE;
+ break;
+ case DMA_TO_DEVICE:
+ prot = IOMMU_READ;
+ break;
+ case DMA_FROM_DEVICE:
+ prot = IOMMU_WRITE;
+ break;
+ default:
+ prot = 0;
+ }
+
+ return prot;
+}
+
+/*
+ * Map a part of the scatter-gather list into contiguous io address space
+ */
+static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
+ size_t size, dma_addr_t *handle,
+ enum dma_data_direction dir, struct dma_attrs *attrs,
+ bool is_coherent)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova, iova_base;
+ int ret = 0;
+ unsigned int count;
+ struct scatterlist *s;
+ int prot;
+
+ size = PAGE_ALIGN(size);
+ *handle = DMA_ERROR_CODE;
+
+ iova_base = iova = __alloc_iova(mapping, size);
+ if (iova == DMA_ERROR_CODE)
+ return -ENOMEM;
+
+ for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
+ phys_addr_t phys = page_to_phys(sg_page(s));
+ unsigned int len = PAGE_ALIGN(s->offset + s->length);
+
+ if (!is_coherent &&
+ !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length,
+ dir);
+
+ prot = __dma_direction_to_prot(dir);
+
+ ret = iommu_map(mapping->domain, iova, phys, len, prot);
+ if (ret < 0)
+ goto fail;
+ count += len >> PAGE_SHIFT;
+ iova += len;
+ }
+ *handle = iova_base;
+
+ return 0;
+fail:
+ iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
+ __free_iova(mapping, iova_base, size);
+ return ret;
+}
+
+static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, struct dma_attrs *attrs,
+ bool is_coherent)
+{
+ struct scatterlist *s = sg, *dma = sg, *start = sg;
+ int i, count = 0;
+ unsigned int offset = s->offset;
+ unsigned int size = s->offset + s->length;
+ unsigned int max = dma_get_max_seg_size(dev);
+
+ for (i = 1; i < nents; i++) {
+ s = sg_next(s);
+
+ s->dma_address = DMA_ERROR_CODE;
+ s->dma_length = 0;
+
+ if (s->offset || (size & ~PAGE_MASK)
+ || size + s->length > max) {
+ if (__map_sg_chunk(dev, start, size, &dma->dma_address,
+ dir, attrs, is_coherent) < 0)
+ goto bad_mapping;
+
+ dma->dma_address += offset;
+ dma->dma_length = size - offset;
+
+ size = offset = s->offset;
+ start = s;
+ dma = sg_next(dma);
+ count += 1;
+ }
+ size += s->length;
+ }
+ if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
+ is_coherent) < 0)
+ goto bad_mapping;
+
+ dma->dma_address += offset;
+ dma->dma_length = size - offset;
+
+ return count+1;
+
+bad_mapping:
+ for_each_sg(sg, s, count, i)
+ __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
+ return 0;
+}
+
+/**
+ * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of i/o coherent buffers described by scatterlist in streaming
+ * mode for DMA. The scatter gather list elements are merged together (if
+ * possible) and tagged with the appropriate dma address and length. They are
+ * obtained via sg_dma_{address,length}.
+ */
+int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
+}
+
+static int iommu_map_range(struct iommu_domain *domain, unsigned int iova,
+ struct scatterlist *sg, unsigned int len, int opt)
+{
+ s32 ret = 0;
+ u32 offset = 0;
+ u32 start_iova = iova;
+
+ BUG_ON(iova & (~PAGE_MASK));
+
+ while (offset < len) {
+ phys_addr_t phys = page_to_phys(sg_page(sg));
+ u32 page_len = PAGE_ALIGN(sg->offset + sg->length);
+
+ ret = iommu_map(domain, iova, phys, page_len, opt);
+ if (ret)
+ goto fail;
+
+ iova += page_len;
+ offset += page_len;
+ if (offset < len)
+ sg = sg_next(sg);
+ }
+
+ goto out;
+
+fail:
+ /* undo mappings already done in case of error */
+ iommu_unmap(domain, start_iova, offset);
+out:
+
+ return ret;
+}
+
+/**
+ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * The scatter gather list elements are merged together (if possible) and
+ * tagged with the appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ */
+int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ struct scatterlist *s;
+ int ret, i;
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ unsigned int iova, total_length = 0, current_offset = 0;
+ int prot = __dma_direction_to_prot(dir);
+
+ for_each_sg(sg, s, nents, i)
+ total_length += s->length;
+
+ iova = __alloc_iova(mapping, total_length);
+ ret = iommu_map_range(mapping->domain, iova, sg, total_length, prot);
+ if (ret) {
+ __free_iova(mapping, iova, total_length);
+ return 0;
+ }
+
+ for_each_sg(sg, s, nents, i) {
+ s->dma_address = iova + current_offset;
+ s->dma_length = total_length - current_offset;
+ current_offset += s->length;
+ }
+
+ return nents;
+}
+
+static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
+ bool is_coherent)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i) {
+ if (sg_dma_len(s))
+ __iommu_remove_mapping(dev, sg_dma_address(s),
+ sg_dma_len(s));
+ if (!is_coherent &&
+ !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ __dma_page_dev_to_cpu(sg_page(s), s->offset,
+ s->length, dir);
+ }
+}
+
+/**
+ * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations. Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ __iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
+}
+
+/**
+ * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations. Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ unsigned int total_length = sg_dma_len(sg);
+ unsigned int iova = sg_dma_address(sg);
+
+ total_length = PAGE_ALIGN((iova & ~PAGE_MASK) + total_length);
+ iova &= PAGE_MASK;
+
+ iommu_unmap(mapping->domain, iova, total_length);
+ __free_iova(mapping, iova, total_length);
+}
+
+/**
+ * arm_iommu_sync_sg_for_cpu
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
+
+}
+
+/**
+ * arm_iommu_sync_sg_for_device
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+}
+
+
+/**
+ * arm_coherent_iommu_map_page
+ * @dev: valid struct device pointer
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Coherent IOMMU aware version of arm_dma_map_page()
+ */
+static dma_addr_t arm_coherent_iommu_map_page(struct device *dev,
+ struct page *page, unsigned long offset, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t dma_addr;
+ int ret, prot, len = PAGE_ALIGN(size + offset);
+
+ dma_addr = __alloc_iova(mapping, len);
+ if (dma_addr == DMA_ERROR_CODE)
+ return dma_addr;
+
+ prot = __dma_direction_to_prot(dir);
+
+ ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
+ prot);
+ if (ret < 0)
+ goto fail;
+
+ return dma_addr + offset;
+fail:
+ __free_iova(mapping, dma_addr, len);
+ return DMA_ERROR_CODE;
+}
+
+/**
+ * arm_iommu_map_page
+ * @dev: valid struct device pointer
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * IOMMU aware version of arm_dma_map_page()
+ */
+static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+
+ return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
+}
+
+/**
+ * arm_coherent_iommu_unmap_page
+ * @dev: valid struct device pointer
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * Coherent IOMMU aware version of arm_dma_unmap_page()
+ */
+static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova = handle & PAGE_MASK;
+ int offset = handle & ~PAGE_MASK;
+ int len = PAGE_ALIGN(size + offset);
+
+ if (!iova)
+ return;
+
+ iommu_unmap(mapping->domain, iova, len);
+ __free_iova(mapping, iova, len);
+}
+
+/**
+ * arm_iommu_unmap_page
+ * @dev: valid struct device pointer
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * IOMMU aware version of arm_dma_unmap_page()
+ */
+static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova = handle & PAGE_MASK;
+ struct page *page = phys_to_page(iommu_iova_to_phys(
+ mapping->domain, iova));
+ int offset = handle & ~PAGE_MASK;
+ int len = PAGE_ALIGN(size + offset);
+
+ if (!iova)
+ return;
+
+ if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ __dma_page_dev_to_cpu(page, offset, size, dir);
+
+ iommu_unmap(mapping->domain, iova, len);
+ __free_iova(mapping, iova, len);
+}
+
+static void arm_iommu_sync_single_for_cpu(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova = handle & PAGE_MASK;
+ struct page *page = phys_to_page(iommu_iova_to_phys(
+ mapping->domain, iova));
+ unsigned int offset = handle & ~PAGE_MASK;
+
+ if (!iova)
+ return;
+
+ __dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void arm_iommu_sync_single_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova = handle & PAGE_MASK;
+ struct page *page = phys_to_page(iommu_iova_to_phys(
+ mapping->domain, iova));
+ unsigned int offset = handle & ~PAGE_MASK;
+
+ if (!iova)
+ return;
+
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+const struct dma_map_ops iommu_ops = {
+ .alloc = arm_iommu_alloc_attrs,
+ .free = arm_iommu_free_attrs,
+ .mmap = arm_iommu_mmap_attrs,
+ .get_sgtable = arm_iommu_get_sgtable,
+
+ .map_page = arm_iommu_map_page,
+ .unmap_page = arm_iommu_unmap_page,
+ .sync_single_for_cpu = arm_iommu_sync_single_for_cpu,
+ .sync_single_for_device = arm_iommu_sync_single_for_device,
+
+ .map_sg = arm_iommu_map_sg,
+ .unmap_sg = arm_iommu_unmap_sg,
+ .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
+ .sync_sg_for_device = arm_iommu_sync_sg_for_device,
+
+ .set_dma_mask = arm_dma_set_mask,
+};
+
+const struct dma_map_ops iommu_coherent_ops = {
+ .alloc = arm_iommu_alloc_attrs,
+ .free = arm_iommu_free_attrs,
+ .mmap = arm_iommu_mmap_attrs,
+ .get_sgtable = arm_iommu_get_sgtable,
+
+ .map_page = arm_coherent_iommu_map_page,
+ .unmap_page = arm_coherent_iommu_unmap_page,
+
+ .map_sg = arm_coherent_iommu_map_sg,
+ .unmap_sg = arm_coherent_iommu_unmap_sg,
+
+ .set_dma_mask = arm_dma_set_mask,
+};
+
+/**
+ * arm_iommu_create_mapping
+ * @bus: pointer to the bus holding the client device (for IOMMU calls)
+ * @base: start address of the valid IO address space
+ * @size: size of the valid IO address space
+ * @order: accuracy of the IO addresses allocations
+ *
+ * Creates a mapping structure which holds information about used/unused
+ * IO address ranges, which is required to perform memory allocation and
+ * mapping with IOMMU aware functions.
+ *
+ * The client device need to be attached to the mapping with
+ * arm_iommu_attach_device function.
+ */
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
+ int order)
+{
+ unsigned int count = size >> (PAGE_SHIFT + order);
+ unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+ struct dma_iommu_mapping *mapping;
+ int err = -ENOMEM;
+
+ if (!count)
+ return ERR_PTR(-EINVAL);
+
+ mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
+ if (!mapping)
+ goto err;
+
+ mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!mapping->bitmap)
+ goto err2;
+
+ mapping->base = base;
+ mapping->bits = BITS_PER_BYTE * bitmap_size;
+ mapping->order = order;
+ spin_lock_init(&mapping->lock);
+
+ mapping->domain = iommu_domain_alloc(bus);
+ if (!mapping->domain)
+ goto err3;
+
+ kref_init(&mapping->kref);
+ return mapping;
+err3:
+ kfree(mapping->bitmap);
+err2:
+ kfree(mapping);
+err:
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(arm_iommu_create_mapping);
+
+static void release_iommu_mapping(struct kref *kref)
+{
+ struct dma_iommu_mapping *mapping =
+ container_of(kref, struct dma_iommu_mapping, kref);
+
+ iommu_domain_free(mapping->domain);
+ kfree(mapping->bitmap);
+ kfree(mapping);
+}
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
+{
+ if (mapping)
+ kref_put(&mapping->kref, release_iommu_mapping);
+}
+EXPORT_SYMBOL(arm_iommu_release_mapping);
+
+/**
+ * arm_iommu_attach_device
+ * @dev: valid struct device pointer
+ * @mapping: io address space mapping structure (returned from
+ * arm_iommu_create_mapping)
+ *
+ * Attaches specified io address space mapping to the provided device,
+ * this replaces the dma operations (dma_map_ops pointer) with the
+ * IOMMU aware version. More than one client might be attached to
+ * the same io address space mapping.
+ */
+int arm_iommu_attach_device(struct device *dev,
+ struct dma_iommu_mapping *mapping)
+{
+ int err;
+
+ err = iommu_attach_device(mapping->domain, dev);
+ if (err)
+ return err;
+
+ kref_get(&mapping->kref);
+ dev->archdata.mapping = mapping;
+ dev->archdata.dma_ops = &iommu_ops;
+ pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
+ return 0;
+}
+EXPORT_SYMBOL(arm_iommu_attach_device);
+
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached map.
+ * This voids the dma operations (dma_map_ops pointer)
+ */
+void arm_iommu_detach_device(struct device *dev)
+{
+ struct dma_iommu_mapping *mapping;
+
+ mapping = to_dma_iommu_mapping(dev);
+ if (!mapping) {
+ dev_warn(dev, "Not attached\n");
+ return;
+ }
+
+ iommu_detach_device(mapping->domain, dev);
+ kref_put(&mapping->kref, release_iommu_mapping);
+ dev->archdata.mapping = NULL;
+ dev->archdata.dma_ops = NULL;
+
+ pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
+}
+EXPORT_SYMBOL(arm_iommu_detach_device);
+
+#endif
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index f437afa17f2b1..17e64a4b929fc 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -557,6 +557,12 @@ static int platform_drv_probe(struct device *_dev)
if (ret < 0)
return ret;
+ if (of_have_populated_dt()) {
+ ret = of_dma_configure_ops(_dev, _dev->of_node);
+ if (ret < 0)
+ goto done;
+ }
+
ret = dev_pm_domain_attach(_dev, true);
if (ret != -EPROBE_DEFER) {
if (drv->probe) {
@@ -569,6 +575,11 @@ static int platform_drv_probe(struct device *_dev)
}
}
+ if (of_have_populated_dt()) {
+ if (ret)
+ of_dma_deconfigure(_dev);
+ }
+done:
if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
dev_warn(_dev, "probe deferral not supported\n");
ret = -ENXIO;
@@ -591,6 +602,8 @@ static int platform_drv_remove(struct device *_dev)
if (drv->remove)
ret = drv->remove(dev);
dev_pm_domain_detach(_dev, true);
+ if (of_have_populated_dt())
+ of_dma_deconfigure(_dev);
return ret;
}
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index d8f4cc22856c9..23c12a9b67c90 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -33,9 +33,10 @@ static LIST_HEAD(opp_tables);
/* Lock to allow exclusive modification to the device and opp lists */
DEFINE_MUTEX(opp_table_lock);
-#define opp_rcu_lockdep_assert() \
+#define opp_rcu_lockdep_assert(s) \
do { \
RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
+ !(s && srcu_read_lock_held(s)) && \
!lockdep_is_held(&opp_table_lock), \
"Missing rcu_read_lock() or " \
"opp_table_lock protection"); \
@@ -93,7 +94,7 @@ struct opp_table *_find_opp_table(struct device *dev)
{
struct opp_table *opp_table;
- opp_rcu_lockdep_assert();
+ opp_rcu_lockdep_assert(NULL);
if (IS_ERR_OR_NULL(dev)) {
pr_err("%s: Invalid parameters\n", __func__);
@@ -127,7 +128,7 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
struct dev_pm_opp *tmp_opp;
unsigned long v = 0;
- opp_rcu_lockdep_assert();
+ opp_rcu_lockdep_assert(NULL);
tmp_opp = rcu_dereference(opp);
if (IS_ERR_OR_NULL(tmp_opp))
@@ -159,7 +160,7 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
struct dev_pm_opp *tmp_opp;
unsigned long f = 0;
- opp_rcu_lockdep_assert();
+ opp_rcu_lockdep_assert(NULL);
tmp_opp = rcu_dereference(opp);
if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
@@ -171,6 +172,27 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
+struct regulator *dev_pm_opp_get_regulator(struct device *dev)
+{
+ struct opp_table *opp_table;
+ struct regulator *reg;
+
+ rcu_read_lock();
+
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ rcu_read_unlock();
+ return ERR_CAST(opp_table);
+ }
+
+ reg = opp_table->regulator;
+
+ rcu_read_unlock();
+
+ return reg;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_regulator);
+
/**
* dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
* @opp: opp for which turbo mode is being verified
@@ -193,7 +215,7 @@ bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
struct dev_pm_opp *tmp_opp;
- opp_rcu_lockdep_assert();
+ opp_rcu_lockdep_assert(NULL);
tmp_opp = rcu_dereference(opp);
if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
@@ -321,7 +343,7 @@ struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
{
struct opp_table *opp_table;
- opp_rcu_lockdep_assert();
+ opp_rcu_lockdep_assert(NULL);
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table) || !opp_table->suspend_opp ||
@@ -401,7 +423,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
struct opp_table *opp_table;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
- opp_rcu_lockdep_assert();
+ opp_rcu_lockdep_assert(NULL);
opp_table = _find_opp_table(dev);
if (IS_ERR(opp_table)) {
@@ -450,7 +472,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
struct opp_table *opp_table;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
- opp_rcu_lockdep_assert();
+ opp_rcu_lockdep_assert(NULL);
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -500,7 +522,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
struct opp_table *opp_table;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
- opp_rcu_lockdep_assert();
+ opp_rcu_lockdep_assert(NULL);
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -1438,12 +1460,13 @@ int dev_pm_opp_set_regulator(struct device *dev, const char *name)
ret = -ENOMEM;
goto unlock;
}
-
+#if 0
/* This should be called before OPPs are initialized */
if (WARN_ON(!list_empty(&opp_table->opp_list))) {
ret = -EBUSY;
goto err;
}
+#endif
/* Already have a regulator set */
if (WARN_ON(!IS_ERR(opp_table->regulator))) {
@@ -1767,6 +1790,83 @@ unlock:
}
/**
+ * dev_pm_opp_adjust_voltage() - helper to change the voltage of an OPP
+ * @dev: device for which we do this operation
+ * @freq: OPP frequency to adjust voltage of
+ * @u_volt: new OPP voltage
+ *
+ * Change the voltage of an OPP with an RCU operation.
+ *
+ * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
+ * copy operation, returns 0 if no modifcation was done OR modification was
+ * successful.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks to
+ * keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex locking or synchronize_rcu() blocking calls cannot be used.
+ */
+int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
+ unsigned long u_volt)
+{
+ struct opp_table *opp_table;
+ struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
+ int r = 0;
+
+ /* keep the node allocated */
+ new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
+ if (!new_opp)
+ return -ENOMEM;
+
+ mutex_lock(&opp_table_lock);
+
+ /* Find the opp_table */
+ opp_table = _find_opp_table(dev);
+ if (IS_ERR(opp_table)) {
+ r = PTR_ERR(opp_table);
+ dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
+ goto unlock;
+ }
+
+ /* Do we have the frequency? */
+ list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
+ if (tmp_opp->rate == freq) {
+ opp = tmp_opp;
+ break;
+ }
+ }
+ if (IS_ERR(opp)) {
+ r = PTR_ERR(opp);
+ goto unlock;
+ }
+
+ /* Is update really needed? */
+ if (opp->u_volt == u_volt)
+ goto unlock;
+ /* copy the old data over */
+ *new_opp = *opp;
+
+ /* plug in new node */
+ new_opp->u_volt = u_volt;
+
+ list_replace_rcu(&opp->node, &new_opp->node);
+ mutex_unlock(&opp_table_lock);
+ call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+
+ /* Notify the change of the OPP */
+ srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADJUST_VOLTAGE,
+ new_opp);
+
+ return 0;
+
+unlock:
+ mutex_unlock(&opp_table_lock);
+ kfree(new_opp);
+ return r;
+}
+
+/**
* dev_pm_opp_enable() - Enable a specific OPP
* @dev: device for which we do this operation
* @freq: OPP frequency to enable
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 252188fd8bcdf..2c97af0f978cd 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -26,35 +26,27 @@
* parent - parent is adjustable through clk_set_parent
*/
-static u8 clk_mux_get_parent(struct clk_hw *hw)
+#define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw)
+
+unsigned int clk_mux_get_parent(struct clk_hw *hw, unsigned int val,
+ unsigned int *table, unsigned long flags)
{
struct clk_mux *mux = to_clk_mux(hw);
int num_parents = clk_hw_get_num_parents(hw);
- u32 val;
- /*
- * FIXME need a mux-specific flag to determine if val is bitwise or numeric
- * e.g. sys_clkin_ck's clksel field is 3 bits wide, but ranges from 0x1
- * to 0x7 (index starts at one)
- * OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so
- * val = 0x4 really means "bit 2, index starts at bit 0"
- */
- val = clk_readl(mux->reg) >> mux->shift;
- val &= mux->mask;
-
- if (mux->table) {
+ if (table) {
int i;
for (i = 0; i < num_parents; i++)
- if (mux->table[i] == val)
+ if (table[i] == val)
return i;
return -EINVAL;
}
- if (val && (mux->flags & CLK_MUX_INDEX_BIT))
+ if (val && (flags & CLK_MUX_INDEX_BIT))
val = ffs(val) - 1;
- if (val && (mux->flags & CLK_MUX_INDEX_ONE))
+ if (val && (flags & CLK_MUX_INDEX_ONE))
val--;
if (val >= num_parents)
@@ -62,23 +54,53 @@ static u8 clk_mux_get_parent(struct clk_hw *hw)
return val;
}
+EXPORT_SYMBOL_GPL(clk_mux_get_parent);
-static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
+static u8 _clk_mux_get_parent(struct clk_hw *hw)
{
struct clk_mux *mux = to_clk_mux(hw);
u32 val;
- unsigned long flags = 0;
- if (mux->table) {
- index = mux->table[index];
+ /*
+ * FIXME need a mux-specific flag to determine if val is bitwise or numeric
+ * e.g. sys_clkin_ck's clksel field is 3 bits wide, but ranges from 0x1
+ * to 0x7 (index starts at one)
+ * OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so
+ * val = 0x4 really means "bit 2, index starts at bit 0"
+ */
+ val = clk_readl(mux->reg) >> mux->shift;
+ val &= mux->mask;
+
+ return clk_mux_get_parent(hw, val, mux->table, mux->flags);
+}
+
+unsigned int clk_mux_reindex(u8 index, unsigned int *table,
+ unsigned long flags)
+{
+ unsigned int val = index;
+
+ if (table) {
+ val = table[val];
} else {
- if (mux->flags & CLK_MUX_INDEX_BIT)
- index = 1 << index;
+ if (flags & CLK_MUX_INDEX_BIT)
+ val = 1 << index;
- if (mux->flags & CLK_MUX_INDEX_ONE)
- index++;
+ if (flags & CLK_MUX_INDEX_ONE)
+ val++;
}
+ return val;
+}
+EXPORT_SYMBOL_GPL(clk_mux_reindex);
+
+static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ u32 val;
+ unsigned long flags = 0;
+
+ index = clk_mux_reindex(index, mux->table, mux->flags);
+
if (mux->lock)
spin_lock_irqsave(mux->lock, flags);
else
@@ -102,14 +124,14 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
}
const struct clk_ops clk_mux_ops = {
- .get_parent = clk_mux_get_parent,
+ .get_parent = _clk_mux_get_parent,
.set_parent = clk_mux_set_parent,
.determine_rate = __clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_mux_ops);
const struct clk_ops clk_mux_ro_ops = {
- .get_parent = clk_mux_get_parent,
+ .get_parent = _clk_mux_get_parent,
};
EXPORT_SYMBOL_GPL(clk_mux_ro_ops);
@@ -117,7 +139,7 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
const char * const *parent_names, u8 num_parents,
unsigned long flags,
void __iomem *reg, u8 shift, u32 mask,
- u8 clk_mux_flags, u32 *table, spinlock_t *lock)
+ u8 clk_mux_flags, unsigned int *table, spinlock_t *lock)
{
struct clk_mux *mux;
struct clk *clk;
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index fb74dc1f75205..c5e98470ee218 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -51,9 +51,13 @@ struct clk_core {
struct clk_core **parents;
u8 num_parents;
u8 new_parent_index;
+ u8 safe_parent_index;
unsigned long rate;
unsigned long req_rate;
+ unsigned long old_rate;
unsigned long new_rate;
+ unsigned long safe_freq;
+ struct clk_core *safe_parent;
struct clk_core *new_parent;
struct clk_core *new_child;
unsigned long flags;
@@ -284,6 +288,12 @@ const char *clk_hw_get_name(const struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);
+struct clk *clk_hw_get_clk(const struct clk_hw *hw)
+{
+ return hw->clk;
+}
+EXPORT_SYMBOL_GPL(clk_hw_get_clk);
+
struct clk_hw *__clk_get_hw(struct clk *clk)
{
return !clk ? NULL : clk->core->hw;
@@ -359,7 +369,7 @@ static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
}
struct clk_hw *
-clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
+clk_hw_get_parent_by_index(const struct clk_hw *hw, u8 index)
{
struct clk_core *parent;
@@ -839,7 +849,7 @@ int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);
-unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
+long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
{
int ret;
struct clk_rate_request req;
@@ -1252,7 +1262,9 @@ out:
static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
struct clk_core *new_parent, u8 p_index)
{
- struct clk_core *child;
+ struct clk_core *child, *parent;
+ struct clk_hw *parent_hw;
+ unsigned long safe_freq = 0;
core->new_rate = new_rate;
core->new_parent = new_parent;
@@ -1262,6 +1274,23 @@ static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
if (new_parent && new_parent != core->parent)
new_parent->new_child = core;
+ if (core->ops->get_safe_parent) {
+ parent_hw = core->ops->get_safe_parent(core->hw, &safe_freq);
+ if (parent_hw) {
+ parent = parent_hw->core;
+ p_index = clk_fetch_parent_index(core, parent);
+ core->safe_parent_index = p_index;
+ core->safe_parent = parent;
+ if (safe_freq)
+ core->safe_freq = safe_freq;
+ else
+ core->safe_freq = 0;
+ }
+ } else {
+ core->safe_parent = NULL;
+ core->safe_freq = 0;
+ }
+
hlist_for_each_entry(child, &core->children, child_node) {
child->new_rate = clk_recalc(child, new_rate);
clk_calc_subtree(child, child->new_rate, NULL, 0);
@@ -1374,14 +1403,51 @@ static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
unsigned long event)
{
struct clk_core *child, *tmp_clk, *fail_clk = NULL;
+ struct clk_core *old_parent;
int ret = NOTIFY_DONE;
- if (core->rate == core->new_rate)
+ if (core->rate == core->new_rate && event != POST_RATE_CHANGE)
return NULL;
+ switch (event) {
+ case PRE_RATE_CHANGE:
+ if (core->safe_parent) {
+ if (core->safe_freq)
+ core->ops->set_rate_and_parent(core->hw,
+ core->safe_freq,
+ core->safe_parent->rate,
+ core->safe_parent_index);
+ else
+ core->ops->set_parent(core->hw,
+ core->safe_parent_index);
+ }
+ core->old_rate = core->rate;
+ break;
+ case POST_RATE_CHANGE:
+ if (core->safe_parent) {
+ old_parent = __clk_set_parent_before(core,
+ core->new_parent);
+ if (core->ops->set_rate_and_parent) {
+ core->ops->set_rate_and_parent(core->hw,
+ core->new_rate,
+ core->new_parent ?
+ core->new_parent->rate : 0,
+ core->new_parent_index);
+ } else if (core->ops->set_parent) {
+ core->ops->set_parent(core->hw,
+ core->new_parent_index);
+ }
+ __clk_set_parent_after(core, core->new_parent,
+ old_parent);
+ }
+ break;
+ }
+
if (core->notifier_count) {
- ret = __clk_notify(core, event, core->rate, core->new_rate);
- if (ret & NOTIFY_STOP_MASK)
+ if (event != POST_RATE_CHANGE || core->old_rate != core->rate)
+ ret = __clk_notify(core, event, core->old_rate,
+ core->new_rate);
+ if (ret & NOTIFY_STOP_MASK && event != POST_RATE_CHANGE)
fail_clk = core;
}
@@ -1408,12 +1474,12 @@ static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
* walk down a subtree and set the new rates notifying the rate
* change on the way
*/
-static void clk_change_rate(struct clk_core *core)
+static void
+clk_change_rate(struct clk_core *core, unsigned long best_parent_rate)
{
struct clk_core *child;
struct hlist_node *tmp;
unsigned long old_rate;
- unsigned long best_parent_rate = 0;
bool skip_set_rate = false;
struct clk_core *old_parent;
@@ -1433,7 +1499,8 @@ static void clk_change_rate(struct clk_core *core)
clk_enable_unlock(flags);
}
- if (core->new_parent && core->new_parent != core->parent) {
+ if (core->new_parent && core->new_parent != core->parent &&
+ !core->safe_parent) {
old_parent = __clk_set_parent_before(core, core->new_parent);
trace_clk_set_parent(core, core->new_parent);
@@ -1458,6 +1525,7 @@ static void clk_change_rate(struct clk_core *core)
trace_clk_set_rate_complete(core, core->new_rate);
core->rate = clk_recalc(core, best_parent_rate);
+ core->rate = core->new_rate;
if (core->flags & CLK_SET_RATE_UNGATE) {
unsigned long flags;
@@ -1482,12 +1550,13 @@ static void clk_change_rate(struct clk_core *core)
/* Skip children who will be reparented to another clock */
if (child->new_parent && child->new_parent != core)
continue;
- clk_change_rate(child);
+ if (child->new_rate != child->rate)
+ clk_change_rate(child, core->new_rate);
}
- /* handle the new child who might not be in core->children yet */
- if (core->new_child)
- clk_change_rate(core->new_child);
+ /* handle the new child who might not be in clk->children yet */
+ if (core->new_child && core->new_child->new_rate != core->new_child->rate)
+ clk_change_rate(core->new_child, core->new_rate);
}
static int clk_core_set_rate_nolock(struct clk_core *core,
@@ -1496,6 +1565,7 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
struct clk_core *top, *fail_clk;
unsigned long rate = req_rate;
int ret = 0;
+ unsigned long parent_rate;
if (!core)
return 0;
@@ -1521,11 +1591,18 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
return -EBUSY;
}
+ if (top->parent)
+ parent_rate = top->parent->rate;
+ else
+ parent_rate = 0;
+
/* change the rates */
- clk_change_rate(top);
+ clk_change_rate(top, parent_rate);
core->req_rate = req_rate;
+ clk_propagate_rate_change(top, POST_RATE_CHANGE);
+
return ret;
}
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 95e3b3e0fa1c6..015a4a239da83 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -2,6 +2,9 @@ config QCOM_GDSC
bool
select PM_GENERIC_DOMAINS if PM
+config QCOM_RPMCC
+ bool
+
config COMMON_CLK_QCOM
tristate "Support for Qualcomm's clock controllers"
depends on OF
@@ -9,6 +12,32 @@ config COMMON_CLK_QCOM
select REGMAP_MMIO
select RESET_CONTROLLER
+config QCOM_CLK_RPM
+ tristate "RPM based Clock Controller"
+ depends on COMMON_CLK_QCOM && MFD_QCOM_RPM
+ select QCOM_RPMCC
+ help
+ The RPM (Resource Power Manager) is a dedicated hardware engine for
+ managing the shared SoC resources in order to keep the lowest power
+ profile. It communicates with other hardware subsystems via shared
+ memory and accepts clock requests, aggregates the requests and turns
+ the clocks on/off or scales them on demand.
+ Say Y if you want to support the clocks exposed by the RPM on
+ platforms such as apq8064, msm8660, msm8960 etc.
+
+config QCOM_CLK_SMD_RPM
+ tristate "RPM over SMD based Clock Controller"
+ depends on COMMON_CLK_QCOM && QCOM_SMD_RPM
+ select QCOM_RPMCC
+ help
+ The RPM (Resource Power Manager) is a dedicated hardware engine for
+ managing the shared SoC resources in order to keep the lowest power
+ profile. It communicates with other hardware subsystems via shared
+ memory and accepts clock requests, aggregates the requests and turns
+ the clocks on/off or scales them on demand.
+ Say Y if you want to support the clocks exposed by the RPM on
+ platforms such as apq8016, apq8084, msm8974 etc.
+
config APQ_GCC_8084
tristate "APQ8084 Global Clock Controller"
select QCOM_GDSC
@@ -131,3 +160,39 @@ config MSM_MMCC_8996
Support for the multimedia clock controller on msm8996 devices.
Say Y if you want to support multimedia devices such as display,
graphics, video encode/decode, camera, etc.
+
+config QCOM_HFPLL
+ tristate "High-Frequency PLL (HFPLL) Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the high-frequency PLLs present on Qualcomm devices.
+ Say Y if you want to support CPU frequency scaling on devices
+ such as MSM8974, APQ8084, etc.
+
+config KPSS_XCC
+ tristate "KPSS Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the Krait ACC and GCC clock controllers. Say Y
+ if you want to support CPU frequency scaling on devices such
+ as MSM8960, APQ8064, etc.
+
+config KRAITCC
+ tristate "Krait Clock Controller"
+ depends on COMMON_CLK_QCOM && ARM
+ select KRAIT_CLOCKS
+ help
+ Support for the Krait CPU clocks on Qualcomm devices.
+ Say Y if you want to support CPU frequency scaling.
+
+config KRAIT_CLOCKS
+ bool
+ select KRAIT_L2_ACCESSORS
+
+config QCOM_A53
+ tristate "A53 Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the A53 clock controller on Qualcomm devices.
+ Say Y if you want to support CPU frequency scaling on devices
+ such as MSM8916.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 2a25f4e75f49f..bf8a8d5e831c6 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -9,7 +9,11 @@ clk-qcom-y += clk-rcg2.o
clk-qcom-y += clk-branch.o
clk-qcom-y += clk-regmap-divider.o
clk-qcom-y += clk-regmap-mux.o
+clk-qcom-y += clk-regmap-mux-div.o
+clk-qcom-$(CONFIG_KRAIT_CLOCKS) += clk-krait.o
+clk-qcom-y += clk-hfpll.o
clk-qcom-y += reset.o
+clk-qcom-y += clk-cpu-8996.o
clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
@@ -26,3 +30,9 @@ obj-$(CONFIG_MSM_GCC_8996) += gcc-msm8996.o
obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
+obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o
+obj-$(CONFIG_QCOM_HFPLL) += hfpll.o
+obj-$(CONFIG_KRAITCC) += krait-cc.o
+obj-$(CONFIG_QCOM_A53) += clk-a53.o
+obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
+obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
diff --git a/drivers/clk/qcom/clk-a53.c b/drivers/clk/qcom/clk-a53.c
new file mode 100644
index 0000000000000..d895af53a90e8
--- /dev/null
+++ b/drivers/clk/qcom/clk-a53.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "clk-pll.h"
+#include "clk-regmap.h"
+#include "clk-regmap-mux-div.h"
+
+#define F_APCS_PLL(f, l, m, n) { (f), (l), (m), (n), 0 }
+
+static struct pll_freq_tbl apcs_pll_freq[] = {
+ F_APCS_PLL(998400000, 52, 0x0, 0x1),
+ F_APCS_PLL(1094400000, 57, 0x0, 0x1),
+ F_APCS_PLL(1152000000, 62, 0x0, 0x1),
+ F_APCS_PLL(1209600000, 65, 0x0, 0x1),
+ F_APCS_PLL(1401600000, 73, 0x0, 0x1),
+};
+
+static struct clk_pll a53sspll = {
+ .l_reg = 0x04,
+ .m_reg = 0x08,
+ .n_reg = 0x0c,
+ .config_reg = 0x14,
+ .mode_reg = 0x00,
+ .status_reg = 0x1c,
+ .status_bit = 16,
+ .freq_tbl = apcs_pll_freq,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "a53sspll",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_pll_sr2_ops,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static const struct regmap_config a53sspll_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x40,
+ .fast_io = true,
+};
+
+static struct clk *a53ss_add_pll(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *base;
+ struct regmap *regmap;
+ struct clk_pll *pll;
+
+ pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return ERR_CAST(base);
+
+ pll = &a53sspll;
+
+ regmap = devm_regmap_init_mmio(dev, base, &a53sspll_regmap_config);
+ if (IS_ERR(regmap))
+ return ERR_CAST(regmap);
+
+ return devm_clk_register_regmap(dev, &pll->clkr);
+}
+
+enum {
+ P_GPLL0,
+ P_A53SSPLL,
+};
+
+static const struct parent_map gpll0_a53sspll_map[] = {
+ { P_GPLL0, 4 },
+ { P_A53SSPLL, 5 },
+};
+
+static const char * const gpll0_a53sspll[] = {
+ "gpll0_vote",
+ "a53sspll",
+};
+
+static struct clk_regmap_mux_div a53ssmux = {
+ .reg_offset = 0x50,
+ .hid_width = 5,
+ .hid_shift = 0,
+ .src_width = 3,
+ .src_shift = 8,
+ .safe_src = 4,
+ .safe_freq = 400000000,
+ .parent_map = gpll0_a53sspll_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "a53ssmux",
+ .parent_names = gpll0_a53sspll,
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_div_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk *a53ss_add_mux(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct regmap *regmap;
+ struct clk_regmap_mux_div *mux;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ mux = &a53ssmux;
+
+ regmap = syscon_regmap_lookup_by_phandle(np, "qcom,apcs");
+ if (IS_ERR(regmap))
+ return ERR_CAST(regmap);
+
+ mux->clkr.regmap = regmap;
+ return devm_clk_register(dev, &mux->clkr.hw);
+}
+
+static const struct of_device_id qcom_a53_match_table[] = {
+ { .compatible = "qcom,clock-a53-msm8916" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_a53_match_table);
+
+static int qcom_a53_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk *clk_pll, *clk_mux;
+ struct clk_onecell_data *data;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->clks = devm_kcalloc(dev, 2, sizeof(struct clk *), GFP_KERNEL);
+ if (!data->clks)
+ return -ENOMEM;
+
+ clk_pll = a53ss_add_pll(pdev);
+ if (IS_ERR(clk_pll))
+ return PTR_ERR(clk_pll);
+
+ clk_mux = a53ss_add_mux(pdev);
+ if (IS_ERR(clk_mux))
+ return PTR_ERR(clk_mux);
+
+ data->clks[0] = clk_pll;
+ data->clks[1] = clk_mux;
+ data->clk_num = 2;
+
+ clk_prepare_enable(clk_pll);
+
+ return of_clk_add_provider(dev->of_node, of_clk_src_onecell_get, data);
+}
+
+static struct platform_driver qcom_a53_driver = {
+ .probe = qcom_a53_probe,
+ .driver = {
+ .name = "qcom-a53",
+ .of_match_table = qcom_a53_match_table,
+ },
+};
+
+static int __init qcom_a53_init(void)
+{
+ return platform_driver_register(&qcom_a53_driver);
+}
+arch_initcall(qcom_a53_init);
+
+static void __exit qcom_a53_exit(void)
+{
+ platform_driver_unregister(&qcom_a53_driver);
+}
+module_exit(qcom_a53_exit);
+
+MODULE_DESCRIPTION("Qualcomm A53 Clock Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-a53");
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index e6a03eaf7a934..1e950a9f50e57 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -62,9 +62,10 @@
#define to_clk_alpha_pll_postdiv(_hw) container_of(to_clk_regmap(_hw), \
struct clk_alpha_pll_postdiv, clkr)
-static int wait_for_pll(struct clk_alpha_pll *pll)
+static int wait_for_pll(struct clk_alpha_pll *pll, u32 mask, bool inverse,
+ const char *action)
{
- u32 val, mask, off;
+ u32 val, off;
int count;
int ret;
const char *name = clk_hw_get_name(&pll->clkr.hw);
@@ -74,26 +75,125 @@ static int wait_for_pll(struct clk_alpha_pll *pll)
if (ret)
return ret;
- if (val & PLL_VOTE_FSM_ENA)
- mask = PLL_ACTIVE_FLAG;
- else
- mask = PLL_LOCK_DET;
-
- /* Wait for pll to enable. */
for (count = 100; count > 0; count--) {
ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
if (ret)
return ret;
- if ((val & mask) == mask)
+ if (inverse && !(val & mask))
+ return 0;
+ else if ((val & mask) == mask)
return 0;
udelay(1);
}
- WARN(1, "%s didn't enable after voting for it!\n", name);
+ WARN(1, "%s failed to %s!\n", name, action);
return -ETIMEDOUT;
}
+static int wait_for_pll_enable(struct clk_alpha_pll *pll, u32 mask)
+{
+ return wait_for_pll(pll, mask, 0, "enable");
+}
+
+static int wait_for_pll_disable(struct clk_alpha_pll *pll, u32 mask)
+{
+ return wait_for_pll(pll, mask, 1, "disable");
+}
+
+static int wait_for_pll_offline(struct clk_alpha_pll *pll, u32 mask)
+{
+ return wait_for_pll(pll, mask, 0, "offline");
+}
+
+
+/* alpha pll with hwfsm support */
+
+#define PLL_OFFLINE_REQ BIT(7)
+#define PLL_FSM_ENA BIT(20)
+#define PLL_OFFLINE_ACK BIT(28)
+#define PLL_ACTIVE_FLAG BIT(30)
+
+void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct pll_config *config)
+{
+ u32 val, mask;
+
+ regmap_write(regmap, pll->offset + PLL_CONFIG_CTL,
+ config->config_ctl_val);
+
+ val = config->main_output_mask;
+ val |= config->aux_output_mask;
+ val |= config->aux2_output_mask;
+ val |= config->early_output_mask;
+ val |= config->post_div_val;
+
+ mask = config->main_output_mask;
+ mask |= config->aux_output_mask;
+ mask |= config->aux2_output_mask;
+ mask |= config->early_output_mask;
+ mask |= config->post_div_mask;
+
+ regmap_update_bits(regmap, pll->offset + PLL_USER_CTL, mask, val);
+
+ return;
+}
+
+static int clk_alpha_pll_hwfsm_enable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, off;
+
+ off = pll->offset;
+ ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
+ if (ret)
+ return ret;
+ /* Enable HW FSM mode, clear OFFLINE request */
+ val |= PLL_FSM_ENA;
+ val &= ~PLL_OFFLINE_REQ;
+ ret = regmap_write(pll->clkr.regmap, off + PLL_MODE, val);
+ if (ret)
+ return ret;
+
+ /* Make sure enable request goes through before waiting for update */
+ mb();
+
+ ret = wait_for_pll_enable(pll, PLL_ACTIVE_FLAG);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void clk_alpha_pll_hwfsm_disable(struct clk_hw *hw)
+{
+ int ret;
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 val, off;
+
+ off = pll->offset;
+ ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
+ if (ret)
+ return;
+ /* Request PLL_OFFLINE and wait for ack */
+ ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
+ PLL_OFFLINE_REQ, PLL_OFFLINE_REQ);
+ if (ret)
+ return;
+ ret = wait_for_pll_offline(pll, PLL_OFFLINE_ACK);
+ if (ret)
+ return;
+
+ /* Disable hwfsm */
+ ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
+ PLL_FSM_ENA, 0);
+ if (ret)
+ return;
+ wait_for_pll_disable(pll, PLL_ACTIVE_FLAG);
+ return;
+}
+
static int clk_alpha_pll_enable(struct clk_hw *hw)
{
int ret;
@@ -112,7 +212,7 @@ static int clk_alpha_pll_enable(struct clk_hw *hw)
ret = clk_enable_regmap(hw);
if (ret)
return ret;
- return wait_for_pll(pll);
+ return wait_for_pll_enable(pll, PLL_ACTIVE_FLAG);
}
/* Skip if already enabled */
@@ -136,7 +236,7 @@ static int clk_alpha_pll_enable(struct clk_hw *hw)
if (ret)
return ret;
- ret = wait_for_pll(pll);
+ ret = wait_for_pll_enable(pll, PLL_LOCK_DET);
if (ret)
return ret;
@@ -300,6 +400,15 @@ const struct clk_ops clk_alpha_pll_ops = {
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_ops);
+const struct clk_ops clk_alpha_pll_hwfsm_ops = {
+ .enable = clk_alpha_pll_hwfsm_enable,
+ .disable = clk_alpha_pll_hwfsm_disable,
+ .recalc_rate = clk_alpha_pll_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = clk_alpha_pll_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_hwfsm_ops);
+
static unsigned long
clk_alpha_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 90ce2016e1a0e..b157ddf22f1a6 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -16,6 +16,7 @@
#include <linux/clk-provider.h>
#include "clk-regmap.h"
+#include "clk-pll.h"
struct pll_vco {
unsigned long min_freq;
@@ -36,6 +37,12 @@ struct clk_alpha_pll {
size_t num_vco;
struct clk_regmap clkr;
+ u32 config_ctl_val;
+#define PLLOUT_MAIN BIT(0)
+#define PLLOUT_AUX BIT(1)
+#define PLLOUT_AUX2 BIT(2)
+#define PLLOUT_EARLY BIT(3)
+ u32 pllout_flags;
};
/**
@@ -52,6 +59,10 @@ struct clk_alpha_pll_postdiv {
};
extern const struct clk_ops clk_alpha_pll_ops;
+extern const struct clk_ops clk_alpha_pll_hwfsm_ops;
extern const struct clk_ops clk_alpha_pll_postdiv_ops;
+void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct pll_config *config);
+
#endif
diff --git a/drivers/clk/qcom/clk-cpu-8996.c b/drivers/clk/qcom/clk-cpu-8996.c
new file mode 100644
index 0000000000000..cc25b5e2516ee
--- /dev/null
+++ b/drivers/clk/qcom/clk-cpu-8996.c
@@ -0,0 +1,566 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#include <asm/cputype.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-regmap-mux.h"
+#include "clk-regmap.h"
+
+#define VCO(a, b, c) { \
+ .val = a,\
+ .min_freq = b,\
+ .max_freq = c,\
+}
+
+static const struct pll_config hfpll_config = {
+ .l = 60,
+ .config_ctl_val = 0x200D4828,
+ .config_ctl_hi_val = 0x006,
+ .pre_div_mask = BIT(12),
+ .post_div_mask = 0x3 << 8,
+ .main_output_mask = BIT(0),
+ .early_output_mask = BIT(3),
+};
+
+static struct clk_pll perfcl_pll = {
+ .l_reg = 0x80004,
+ .alpha_reg = 0x80008,
+ .config_reg = 0x80010,
+ .config_ctl_reg = 0x80018,
+ .mode_reg = 0x80000,
+ .status_reg = 0x80000,
+ .status_bit = 31,
+ .min_rate = 600000000,
+ .max_rate = 3000000000,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "perfcl_pll",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_pll_hwfsm_ops,
+ },
+};
+
+static struct clk_pll pwrcl_pll = {
+ .l_reg = 0x0004,
+ .alpha_reg = 0x0008,
+ .config_reg = 0x0010,
+ .config_ctl_reg = 0x0018,
+ .mode_reg = 0x0000,
+ .status_reg = 0x0000,
+ .status_bit = 31,
+ .min_rate = 600000000,
+ .max_rate = 3000000000,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pwrcl_pll",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_pll_hwfsm_ops,
+ },
+};
+
+static const struct pll_config cbfpll_config = {
+ .l = 32,
+ .config_ctl_val = 0x200D4828,
+ .config_ctl_hi_val = 0x006,
+ .pre_div_mask = BIT(12),
+ .post_div_mask = 0x3 << 8,
+ .main_output_mask = BIT(0),
+ .early_output_mask = BIT(3),
+};
+
+static struct clk_pll cbf_pll = {
+ .l_reg = 0x0008,
+ .alpha_reg = 0x0010,
+ .config_reg = 0x0018,
+ .config_ctl_reg = 0x0020,
+ .mode_reg = 0x0000,
+ .status_reg = 0x0000,
+ .status_bit = 31,
+ .min_rate = 600000000,
+ .max_rate = 3000000000,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cbf_pll",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_pll_hwfsm_ops,
+ },
+};
+
+static const struct pll_vco alt_pll_vco_modes[] = {
+ VCO(3, 250000000, 500000000),
+ VCO(2, 500000000, 750000000),
+ VCO(1, 750000000, 1000000000),
+ VCO(0, 1000000000, 2150400000),
+};
+
+static const struct pll_config altpll_config = {
+ .config_ctl_val = 0x4001051B,
+ .post_div_mask = 0x3 << 8,
+ .post_div_val = 0x1,
+ .main_output_mask = BIT(0),
+ .early_output_mask = BIT(3),
+};
+
+static struct clk_alpha_pll perfcl_alt_pll = {
+ .offset = 0x80100,
+ .vco_table = alt_pll_vco_modes,
+ .num_vco = ARRAY_SIZE(alt_pll_vco_modes),
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "perfcl_alt_pll",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_hwfsm_ops,
+ },
+};
+
+static struct clk_alpha_pll pwrcl_alt_pll = {
+ .offset = 0x100,
+ .vco_table = alt_pll_vco_modes,
+ .num_vco = ARRAY_SIZE(alt_pll_vco_modes),
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pwrcl_alt_pll",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_hwfsm_ops,
+ },
+};
+
+static struct clk_regmap_mux pwrcl_pmux = {
+ .reg = 0x40,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pwrcl_pmux",
+ .parent_names = (const char *[]){
+ "pwrcl_smux",
+ "pwrcl_pll",
+ "xo",
+ "pwrcl_alt_pll",
+ },
+ .num_parents = 4,
+ .ops = &clk_regmap_mux_closest_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap_mux pwrcl_smux = {
+ .reg = 0x40,
+ .shift = 2,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pwrcl_smux",
+ .parent_names = (const char *[]){
+ "xo",
+ "pwrcl_pll_main",
+ "xo",
+ "sys_apcsaux_clk",
+ },
+ .num_parents = 4,
+ .ops = &clk_regmap_mux_closest_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap_mux perfcl_pmux = {
+ .reg = 0x80040,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "perfcl_pmux",
+ .parent_names = (const char *[]){
+ "perfcl_smux",
+ "perfcl_pll",
+ "xo",
+ "perfcl_alt_pll",
+ },
+ .num_parents = 4,
+ .ops = &clk_regmap_mux_closest_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap_mux perfcl_smux = {
+ .reg = 0x80040,
+ .shift = 2,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "perfcl_smux",
+ .parent_names = (const char *[]){
+ "xo",
+ "perfcl_pll_main",
+ "xo",
+ "sys_apcsaux_clk",
+ },
+ .num_parents = 4,
+ .ops = &clk_regmap_mux_closest_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap_mux cbf_pmux = {
+ .reg = 0x18,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "cbf_pmux",
+ .parent_names = (const char *[]){
+ "xo",
+ "cbf_pll",
+ "cbf_pll_main",
+ "sys_apcsaux_clk",
+ },
+ .num_parents = 4,
+ .ops = &clk_regmap_mux_closest_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+struct clk_cpu_8996 {
+ struct clk_hw *alt_pll;
+ unsigned long *alt_pll_freqs;
+ int n_alt_pll_freqs;
+ unsigned long alt_pll_thresh;
+ struct clk_hw *pll;
+ struct clk_hw *pll_post_div;
+ unsigned long post_div_thresh;
+ struct clk_regmap clkr;
+};
+
+static unsigned long alt_pll_perfcl_freqs[] = {
+ 307200000,
+ 556800000,
+};
+
+static inline struct clk_cpu_8996 *to_clk_cpu_8996(struct clk_hw *hw)
+{
+ return container_of(to_clk_regmap(hw), struct clk_cpu_8996, clkr);
+}
+
+static int clk_cpu_8996_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct clk_cpu_8996 *cpuclk = to_clk_cpu_8996(hw);
+ unsigned long n_alt_pll_freqs = cpuclk->n_alt_pll_freqs;
+ unsigned long alt_pll_rate, alt_pll_prev_rate;
+ struct clk *alt_pll, *pll, *parent, *orig_pll = NULL;
+
+ int ret;
+
+ if (rate < cpuclk->post_div_thresh)
+ pll = clk_hw_get_clk(cpuclk->pll_post_div);
+ else
+ pll = clk_hw_get_clk(cpuclk->pll);
+
+ parent = clk_hw_get_clk(clk_hw_get_parent(hw));
+ alt_pll = clk_hw_get_clk(cpuclk->alt_pll);
+
+ /* Check if the alt pll freq should be changed */
+ if (cpuclk->alt_pll_thresh && (n_alt_pll_freqs == 2)) {
+ alt_pll_prev_rate = clk_get_rate(alt_pll);
+ alt_pll_rate = cpuclk->alt_pll_freqs[0];
+ if (rate > cpuclk->alt_pll_thresh)
+ alt_pll_rate = cpuclk->alt_pll_freqs[1];
+ ret = clk_set_rate(alt_pll, alt_pll_rate);
+ if (ret)
+ return ret;
+ }
+
+ /* Switch parent to alt pll */
+ if (cpuclk->alt_pll) {
+ orig_pll = clk_get_parent(parent);
+ ret = clk_set_parent(parent, alt_pll);
+ if (ret)
+ return ret;
+ }
+
+ /* Set the PLL to new rate */
+ ret = clk_set_rate(pll, rate);
+ if (ret)
+ goto error;
+
+ /* Switch back to primary pll */
+ if (cpuclk->alt_pll) {
+ ret = clk_set_parent(parent, pll);
+ if (ret)
+ goto error;
+ }
+ return 0;
+
+error:
+ if (cpuclk->alt_pll)
+ clk_set_parent(parent, orig_pll);
+
+ return ret;
+}
+
+static unsigned long clk_cpu_8996_recalc_rate(struct clk_hw *hw,
+ unsigned long prate)
+{
+ return clk_hw_get_rate(clk_hw_get_parent(hw));
+}
+
+static long clk_cpu_8996_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ return clk_hw_round_rate(clk_hw_get_parent(hw), rate);
+}
+
+static struct clk_ops clk_cpu_8996_ops = {
+ .set_rate = clk_cpu_8996_set_rate,
+ .recalc_rate = clk_cpu_8996_recalc_rate,
+ .round_rate = clk_cpu_8996_round_rate,
+};
+
+static struct clk_cpu_8996 pwrcl_clk = {
+ .alt_pll = &pwrcl_alt_pll.clkr.hw,
+ .pll = &pwrcl_pll.clkr.hw,
+ .pll_post_div = &pwrcl_smux.clkr.hw,
+ .post_div_thresh = 600000000,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "pwrcl_clk",
+ .parent_names = (const char *[]){ "pwrcl_pmux" },
+ .num_parents = 1,
+ .ops = &clk_cpu_8996_ops,
+ },
+};
+
+static struct clk_cpu_8996 perfcl_clk = {
+ .alt_pll = &perfcl_alt_pll.clkr.hw,
+ .alt_pll_freqs = alt_pll_perfcl_freqs,
+ .alt_pll_thresh = 1190400000,
+ .n_alt_pll_freqs = ARRAY_SIZE(alt_pll_perfcl_freqs),
+ .pll = &perfcl_pll.clkr.hw,
+ .pll_post_div = &perfcl_smux.clkr.hw,
+ .post_div_thresh = 600000000,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "perfcl_clk",
+ .parent_names = (const char *[]){ "perfcl_pmux" },
+ .num_parents = 1,
+ .ops = &clk_cpu_8996_ops,
+ },
+};
+
+static struct clk_cpu_8996 cbfcl_clk = {
+ .pll = &cbf_pll.clkr.hw,
+ .post_div_thresh = 600000000,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "cbf_clk",
+ .parent_names = (const char *[]){ "cbf_pmux" },
+ .num_parents = 1,
+ .ops = &clk_cpu_8996_ops,
+ },
+};
+
+static const struct regmap_config cpu_msm8996_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x80210,
+ .fast_io = true,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
+};
+
+static const struct of_device_id match_table[] = {
+ { .compatible = "qcom,cpu-clk-msm8996" },
+ {}
+};
+
+#define cluster_clk_register(dev, clk, clkr) { \
+ clk = devm_clk_register_regmap(dev, clkr); \
+ if (IS_ERR(clk)) \
+ return PTR_ERR(clk); }
+
+#define cbf_clk_register(dev, clk, hw) { \
+ clk = devm_clk_register(dev, hw); \
+ if (IS_ERR(clk)) \
+ return PTR_ERR(clk); }
+
+#define cpu_clk_register_fixed(dev, clk, name, pname, flags, m, n) { \
+ clk = clk_register_fixed_factor(dev, name, pname, flags, m, n); \
+ if (IS_ERR(clk)) \
+ return PTR_ERR(clk); }
+
+#define cpu_set_rate(dev, clk, rate) { \
+ if (clk_set_rate(clk, rate)) \
+ dev_err(dev, "Failed to set " #clk " to " #rate "\n"); }
+
+#define cpu_prepare_enable(dev, clk) { \
+ if (clk_prepare_enable(clk)) \
+ dev_err(dev, "Failed to enable " #clk "\n"); }
+
+#define cpu_set_parent(dev, clk, parent) { \
+ if (clk_set_parent(clk, parent)) \
+ dev_err(dev, "Failed to set parent for " #clk "\n"); }
+
+struct clk *sys_apcsaux, *pwr_clk, *perf_clk, *cbf_clk;
+
+static int register_cpu_clocks(struct device *dev, struct regmap *regmap)
+{
+ /* clocks */
+ struct clk *perf_alt_pll, *pwr_alt_pll, *perf_pll, *pwr_pll;
+ struct clk *perf_pmux, *perf_smux, *pwr_pmux, *pwr_smux;
+ struct clk *perf_pll_main, *pwr_pll_main;
+
+ /* Initialise the PLLs */
+ clk_pll_configure_variable_rate(&perfcl_pll, regmap, &hfpll_config);
+ clk_pll_configure_variable_rate(&pwrcl_pll, regmap, &hfpll_config);
+ clk_alpha_pll_configure(&perfcl_alt_pll, regmap, &altpll_config);
+ clk_alpha_pll_configure(&pwrcl_alt_pll, regmap, &altpll_config);
+
+ /* PLLs */
+ cluster_clk_register(dev, perf_pll, &perfcl_pll.clkr);
+ cluster_clk_register(dev, pwr_pll, &pwrcl_pll.clkr);
+ cluster_clk_register(dev, perf_alt_pll, &perfcl_alt_pll.clkr);
+ cluster_clk_register(dev, pwr_alt_pll, &pwrcl_alt_pll.clkr);
+
+ /* MUXs */
+ cluster_clk_register(dev, perf_pmux, &perfcl_pmux.clkr);
+ cluster_clk_register(dev, perf_smux, &perfcl_smux.clkr);
+ cluster_clk_register(dev, pwr_pmux, &pwrcl_pmux.clkr);
+ cluster_clk_register(dev, pwr_smux, &pwrcl_smux.clkr);
+
+ /* Fixed factor CLKs */
+ cpu_clk_register_fixed(dev, perf_pll_main, "perfcl_pll_main",
+ "perfcl_pll", CLK_SET_RATE_PARENT, 1, 2);
+ cpu_clk_register_fixed(dev, pwr_pll_main, "pwrcl_pll_main",
+ "pwrcl_pll", CLK_SET_RATE_PARENT, 1, 2);
+
+ /* Init alt pll to boot frequency */
+ cpu_set_rate(dev, perf_alt_pll, 307200000);
+ cpu_set_rate(dev, pwr_alt_pll, 307200000);
+
+ /* Enable all PLLs and alt PLLs */
+ cpu_prepare_enable(dev, perf_pll);
+ cpu_prepare_enable(dev, pwr_pll);
+ cpu_prepare_enable(dev, perf_alt_pll);
+ cpu_prepare_enable(dev, pwr_alt_pll);
+
+ /* Init MUXes with default parents */
+ cpu_set_parent(dev, perf_pmux, perf_pll);
+ cpu_set_parent(dev, pwr_pmux, pwr_pll);
+ cpu_set_parent(dev, perf_smux, perf_pll_main);
+ cpu_set_parent(dev, pwr_smux, pwr_pll_main);
+
+ /* Register CPU clocks */
+ cluster_clk_register(dev, perf_clk, &perfcl_clk.clkr);
+ cluster_clk_register(dev, pwr_clk, &pwrcl_clk.clkr);
+
+ return 0;
+}
+
+static int register_cbf_clocks(struct device *dev, struct regmap *regmap)
+{
+ struct clk *cbf_pll_clk, *cbf_pmux_clk, *cbf_pll_main_clk;
+
+ cbf_pll.clkr.regmap = regmap;
+ cbf_pmux.clkr.regmap = regmap;
+ cbfcl_clk.clkr.regmap = regmap;
+
+ clk_pll_configure_variable_rate(&cbf_pll, regmap, &cbfpll_config);
+
+ cbf_clk_register(dev, cbf_pll_clk, &cbf_pll.clkr.hw);
+ cbf_clk_register(dev, cbf_pmux_clk, &cbf_pmux.clkr.hw);
+
+ cpu_clk_register_fixed(dev, cbf_pll_main_clk, "cbf_pll_main", "cbf_pll",
+ CLK_SET_RATE_PARENT, 1, 2);
+
+ cpu_prepare_enable(dev, cbf_pll_clk);
+ cpu_set_parent(dev, cbf_pmux_clk, cbf_pll_clk);
+
+ cbfcl_clk.alt_pll = __clk_get_hw(sys_apcsaux);
+ cbfcl_clk.pll_post_div = __clk_get_hw(cbf_pll_main_clk);
+
+ cbf_clk_register(dev, cbf_clk, &cbfcl_clk.clkr.hw);
+
+ return 0;
+}
+
+static int qcom_cpu_clk_msm8996_driver_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *base;
+ struct regmap *regmap_cpu, *regmap_cbf;
+ struct clk_onecell_data *data;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->clks = devm_kcalloc(dev, 3, sizeof(struct clk *), GFP_KERNEL);
+ if (!data->clks)
+ return -ENOMEM;
+
+ cpu_clk_register_fixed(dev, sys_apcsaux, "sys_apcsaux_clk",
+ "gpll0_early", 0, 1, 1);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap_cpu = devm_regmap_init_mmio(dev, base,
+ &cpu_msm8996_regmap_config);
+ if (IS_ERR(regmap_cpu))
+ return PTR_ERR(regmap_cpu);
+
+ ret = register_cpu_clocks(dev, regmap_cpu);
+ if (ret)
+ return ret;
+
+ regmap_cbf = syscon_regmap_lookup_by_phandle(dev->of_node, "qcom,cbf");
+ if (IS_ERR(regmap_cbf))
+ return PTR_ERR(regmap_cbf);
+
+ ret = register_cbf_clocks(dev, regmap_cbf);
+ if (ret)
+ return ret;
+
+ data->clks[0] = pwr_clk;
+ data->clks[1] = perf_clk;
+ data->clks[2] = cbf_clk;
+
+ return of_clk_add_provider(dev->of_node, of_clk_src_onecell_get, data);
+}
+
+static struct platform_driver qcom_cpu_clk_msm8996_driver = {
+ .probe = qcom_cpu_clk_msm8996_driver_probe,
+ .driver = {
+ .name = "qcom-cpu-clk-msm8996",
+ .of_match_table = match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+builtin_platform_driver(qcom_cpu_clk_msm8996_driver);
+
+MODULE_DESCRIPTION("CPU clock driver for msm8996");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/clk-hfpll.c b/drivers/clk/qcom/clk-hfpll.c
new file mode 100644
index 0000000000000..eacf853c132e9
--- /dev/null
+++ b/drivers/clk/qcom/clk-hfpll.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+
+#include "clk-regmap.h"
+#include "clk-hfpll.h"
+
+#define PLL_OUTCTRL BIT(0)
+#define PLL_BYPASSNL BIT(1)
+#define PLL_RESET_N BIT(2)
+
+/* Initialize a HFPLL at a given rate and enable it. */
+static void __clk_hfpll_init_once(struct clk_hw *hw)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+
+ if (likely(h->init_done))
+ return;
+
+ /* Configure PLL parameters for integer mode. */
+ if (hd->config_val)
+ regmap_write(regmap, hd->config_reg, hd->config_val);
+ regmap_write(regmap, hd->m_reg, 0);
+ regmap_write(regmap, hd->n_reg, 1);
+
+ if (hd->user_reg) {
+ u32 regval = hd->user_val;
+ unsigned long rate;
+
+ rate = clk_hw_get_rate(hw);
+
+ /* Pick the right VCO. */
+ if (hd->user_vco_mask && rate > hd->low_vco_max_rate)
+ regval |= hd->user_vco_mask;
+ regmap_write(regmap, hd->user_reg, regval);
+ }
+
+ if (hd->droop_reg)
+ regmap_write(regmap, hd->droop_reg, hd->droop_val);
+
+ h->init_done = true;
+}
+
+static void __clk_hfpll_enable(struct clk_hw *hw)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ u32 val;
+
+ __clk_hfpll_init_once(hw);
+
+ /* Disable PLL bypass mode. */
+ regmap_update_bits(regmap, hd->mode_reg, PLL_BYPASSNL, PLL_BYPASSNL);
+
+ /*
+ * H/W requires a 5us delay between disabling the bypass and
+ * de-asserting the reset. Delay 10us just to be safe.
+ */
+ udelay(10);
+
+ /* De-assert active-low PLL reset. */
+ regmap_update_bits(regmap, hd->mode_reg, PLL_RESET_N, PLL_RESET_N);
+
+ /* Wait for PLL to lock. */
+ if (hd->status_reg) {
+ do {
+ regmap_read(regmap, hd->status_reg, &val);
+ } while (!(val & BIT(hd->lock_bit)));
+ } else {
+ udelay(60);
+ }
+
+ /* Enable PLL output. */
+ regmap_update_bits(regmap, hd->mode_reg, PLL_OUTCTRL, PLL_OUTCTRL);
+}
+
+/* Enable an already-configured HFPLL. */
+static int clk_hfpll_enable(struct clk_hw *hw)
+{
+ unsigned long flags;
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ u32 mode;
+
+ spin_lock_irqsave(&h->lock, flags);
+ regmap_read(regmap, hd->mode_reg, &mode);
+ if (!(mode & (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL)))
+ __clk_hfpll_enable(hw);
+ spin_unlock_irqrestore(&h->lock, flags);
+
+ return 0;
+}
+
+static void __clk_hfpll_disable(struct clk_hfpll *h)
+{
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+
+ /*
+ * Disable the PLL output, disable test mode, enable the bypass mode,
+ * and assert the reset.
+ */
+ regmap_update_bits(regmap, hd->mode_reg,
+ PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL, 0);
+}
+
+static void clk_hfpll_disable(struct clk_hw *hw)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&h->lock, flags);
+ __clk_hfpll_disable(h);
+ spin_unlock_irqrestore(&h->lock, flags);
+}
+
+static long clk_hfpll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ unsigned long rrate;
+
+ rate = clamp(rate, hd->min_rate, hd->max_rate);
+
+ rrate = DIV_ROUND_UP(rate, *parent_rate) * *parent_rate;
+ if (rrate > hd->max_rate)
+ rrate -= *parent_rate;
+
+ return rrate;
+}
+
+/*
+ * For optimization reasons, assumes no downstream clocks are actively using
+ * it.
+ */
+static int clk_hfpll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ unsigned long flags;
+ u32 l_val, val;
+ bool enabled;
+
+ l_val = rate / parent_rate;
+
+ spin_lock_irqsave(&h->lock, flags);
+
+ enabled = __clk_is_enabled(hw->clk);
+ if (enabled)
+ __clk_hfpll_disable(h);
+
+ /* Pick the right VCO. */
+ if (hd->user_reg && hd->user_vco_mask) {
+ regmap_read(regmap, hd->user_reg, &val);
+ if (rate <= hd->low_vco_max_rate)
+ val &= ~hd->user_vco_mask;
+ else
+ val |= hd->user_vco_mask;
+ regmap_write(regmap, hd->user_reg, val);
+ }
+
+ regmap_write(regmap, hd->l_reg, l_val);
+
+ if (enabled)
+ __clk_hfpll_enable(hw);
+
+ spin_unlock_irqrestore(&h->lock, flags);
+
+ return 0;
+}
+
+static unsigned long clk_hfpll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ u32 l_val;
+
+ regmap_read(regmap, hd->l_reg, &l_val);
+
+ return l_val * parent_rate;
+}
+
+static void clk_hfpll_init(struct clk_hw *hw)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ u32 mode, status;
+
+ regmap_read(regmap, hd->mode_reg, &mode);
+ if (mode != (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL)) {
+ __clk_hfpll_init_once(hw);
+ return;
+ }
+
+ if (hd->status_reg) {
+ regmap_read(regmap, hd->status_reg, &status);
+ if (!(status & BIT(hd->lock_bit))) {
+ WARN(1, "HFPLL %s is ON, but not locked!\n",
+ __clk_get_name(hw->clk));
+ clk_hfpll_disable(hw);
+ __clk_hfpll_init_once(hw);
+ }
+ }
+}
+
+static int hfpll_is_enabled(struct clk_hw *hw)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ u32 mode;
+
+ regmap_read(regmap, hd->mode_reg, &mode);
+ mode &= 0x7;
+ return mode == (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL);
+}
+
+const struct clk_ops clk_ops_hfpll = {
+ .enable = clk_hfpll_enable,
+ .disable = clk_hfpll_disable,
+ .is_enabled = hfpll_is_enabled,
+ .round_rate = clk_hfpll_round_rate,
+ .set_rate = clk_hfpll_set_rate,
+ .recalc_rate = clk_hfpll_recalc_rate,
+ .init = clk_hfpll_init,
+};
+EXPORT_SYMBOL_GPL(clk_ops_hfpll);
diff --git a/drivers/clk/qcom/clk-hfpll.h b/drivers/clk/qcom/clk-hfpll.h
new file mode 100644
index 0000000000000..48c18d664f4ea
--- /dev/null
+++ b/drivers/clk/qcom/clk-hfpll.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __QCOM_CLK_HFPLL_H__
+#define __QCOM_CLK_HFPLL_H__
+
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+#include "clk-regmap.h"
+
+/* Static register layout and operating limits for one HFPLL instance. */
+struct hfpll_data {
+ u32 mode_reg; /* mode/enable control register */
+ u32 l_reg; /* integer multiplier (L) register */
+ u32 m_reg;
+ u32 n_reg;
+ u32 user_reg; /* user control register (VCO band selection) */
+ u32 droop_reg;
+ u32 config_reg;
+ u32 status_reg; /* lock status register; 0 if the PLL has none */
+ u8 lock_bit; /* bit index of the lock flag in status_reg */
+
+ /* Values presumably programmed by __clk_hfpll_init_once() — not shown here */
+ u32 droop_val;
+ u32 config_val;
+ u32 user_val;
+ u32 user_vco_mask; /* user_reg bit(s) selecting the high VCO band */
+ unsigned long low_vco_max_rate; /* highest rate served by the low VCO */
+
+ unsigned long min_rate; /* minimum supported output rate */
+ unsigned long max_rate; /* maximum supported output rate */
+};
+
+/* Runtime state for one HFPLL clock. */
+struct clk_hfpll {
+ struct hfpll_data const *d; /* static register description */
+ int init_done; /* non-zero once one-time init has run */
+
+ struct clk_regmap clkr; /* regmap-backed clk_hw wrapper */
+ spinlock_t lock; /* serializes rate changes with enable/disable */
+};
+
+#define to_clk_hfpll(_hw) \
+ container_of(to_clk_regmap(_hw), struct clk_hfpll, clkr)
+
+extern const struct clk_ops clk_ops_hfpll;
+
+#endif
diff --git a/drivers/clk/qcom/clk-krait.c b/drivers/clk/qcom/clk-krait.c
new file mode 100644
index 0000000000000..84277741a9c82
--- /dev/null
+++ b/drivers/clk/qcom/clk-krait.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+
+#include <asm/krait-l2-accessors.h>
+
+#include "clk-krait.h"
+
+/* Secondary and primary muxes share the same cp15 register */
+static DEFINE_SPINLOCK(krait_clock_reg_lock);
+
+#define LPL_SHIFT 8
+/*
+ * Program the mux select field in the L2 indirect (cp15) register.  The
+ * primary and secondary muxes share one register, so all updates take
+ * krait_clock_reg_lock.  When @mux->lpl is set, the same selection is
+ * mirrored into the low-power field LPL_SHIFT bits higher up.
+ */
+static void __krait_mux_set_sel(struct krait_mux_clk *mux, int sel)
+{
+ unsigned long flags;
+ u32 regval;
+
+ spin_lock_irqsave(&krait_clock_reg_lock, flags);
+ regval = krait_get_l2_indirect_reg(mux->offset);
+ regval &= ~(mux->mask << mux->shift);
+ regval |= (sel & mux->mask) << mux->shift;
+ if (mux->lpl) {
+ regval &= ~(mux->mask << (mux->shift + LPL_SHIFT));
+ regval |= (sel & mux->mask) << (mux->shift + LPL_SHIFT);
+ }
+ krait_set_l2_indirect_reg(mux->offset, regval);
+ spin_unlock_irqrestore(&krait_clock_reg_lock, flags);
+
+ /* Wait for switch to complete. */
+ mb();
+ udelay(1);
+}
+
+/*
+ * clk_ops.set_parent: remember the desired selection in en_mask and only
+ * program the hardware when the clock is enabled; re-enabling replays the
+ * cached selection via krait_mux_enable().
+ */
+static int krait_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct krait_mux_clk *mux = to_krait_mux_clk(hw);
+ u32 sel;
+
+ sel = clk_mux_reindex(index, mux->parent_map, 0);
+ mux->en_mask = sel;
+ /* Don't touch mux if CPU is off as it won't work */
+ if (__clk_is_enabled(hw->clk))
+ __krait_mux_set_sel(mux, sel);
+ return 0;
+}
+
+/*
+ * clk_ops.get_parent: read the select field back from hardware, cache it in
+ * en_mask, and translate it to a parent index via the parent map.
+ */
+static u8 krait_mux_get_parent(struct clk_hw *hw)
+{
+ struct krait_mux_clk *mux = to_krait_mux_clk(hw);
+ u32 sel;
+
+ sel = krait_get_l2_indirect_reg(mux->offset);
+ sel >>= mux->shift;
+ sel &= mux->mask;
+ mux->en_mask = sel;
+
+ return clk_mux_get_parent(hw, sel, mux->parent_map, 0);
+}
+
+/*
+ * Map the mux's safe source selector (safe_sel) back to a parent index and
+ * return that parent's clk_hw.  @safe_freq is not used by this mux type.
+ */
+static struct clk_hw *krait_mux_get_safe_parent(struct clk_hw *hw,
+ unsigned long *safe_freq)
+{
+ int i;
+ struct krait_mux_clk *mux = to_krait_mux_clk(hw);
+ int num_parents = clk_hw_get_num_parents(hw);
+
+ for (i = 0; i < num_parents; i++)
+ if (mux->safe_sel == mux->parent_map[i])
+ break;
+
+ return clk_hw_get_parent_by_index(hw, i);
+}
+
+static int krait_mux_enable(struct clk_hw *hw)
+{
+ struct krait_mux_clk *mux = to_krait_mux_clk(hw);
+
+ __krait_mux_set_sel(mux, mux->en_mask);
+
+ return 0;
+}
+
+static void krait_mux_disable(struct clk_hw *hw)
+{
+ struct krait_mux_clk *mux = to_krait_mux_clk(hw);
+
+ __krait_mux_set_sel(mux, mux->safe_sel);
+}
+
+const struct clk_ops krait_mux_clk_ops = {
+ .enable = krait_mux_enable,
+ .disable = krait_mux_disable,
+ .set_parent = krait_mux_set_parent,
+ .get_parent = krait_mux_get_parent,
+ .determine_rate = __clk_mux_determine_rate_closest,
+ .get_safe_parent = krait_mux_get_safe_parent,
+};
+EXPORT_SYMBOL_GPL(krait_mux_clk_ops);
+
+/* The divider can divide by 2, 4, 6 and 8. But we only really need div-2. */
+static long krait_div2_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ *parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), rate * 2);
+ return DIV_ROUND_UP(*parent_rate, 2);
+}
+
+/*
+ * Program the divider for div-2 operation by clearing its field: field
+ * value 0 encodes divide-by-2 (see krait_div2_recalc_rate, which computes
+ * (field + 1) * 2).  When @d->lpl is set, the low-power copy of the field,
+ * LPL_SHIFT bits up, is cleared as well.
+ */
+static int krait_div2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct krait_div2_clk *d = to_krait_div2_clk(hw);
+ unsigned long flags;
+ u32 val;
+ u32 mask = BIT(d->width) - 1;
+
+ if (d->lpl)
+ mask = mask << (d->shift + LPL_SHIFT) | mask << d->shift;
+ else
+ mask <<= d->shift;
+
+ spin_lock_irqsave(&krait_clock_reg_lock, flags);
+ val = krait_get_l2_indirect_reg(d->offset);
+ val &= ~mask;
+ krait_set_l2_indirect_reg(d->offset, val);
+ spin_unlock_irqrestore(&krait_clock_reg_lock, flags);
+
+ return 0;
+}
+
+/* The divider field encodes (field + 1) * 2, e.g. field 0 => divide-by-2. */
+static unsigned long
+krait_div2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct krait_div2_clk *d = to_krait_div2_clk(hw);
+ u32 mask = BIT(d->width) - 1;
+ u32 div;
+
+ div = krait_get_l2_indirect_reg(d->offset);
+ div >>= d->shift;
+ div &= mask;
+ div = (div + 1) * 2;
+
+ return DIV_ROUND_UP(parent_rate, div);
+}
+
+const struct clk_ops krait_div2_clk_ops = {
+ .round_rate = krait_div2_round_rate,
+ .set_rate = krait_div2_set_rate,
+ .recalc_rate = krait_div2_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(krait_div2_clk_ops);
diff --git a/drivers/clk/qcom/clk-krait.h b/drivers/clk/qcom/clk-krait.h
new file mode 100644
index 0000000000000..5d0063538e5d2
--- /dev/null
+++ b/drivers/clk/qcom/clk-krait.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_KRAIT_H
+#define __QCOM_CLK_KRAIT_H
+
+#include <linux/clk-provider.h>
+
+struct krait_mux_clk {
+ unsigned int *parent_map;
+ bool has_safe_parent;
+ u8 safe_sel;
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 en_mask;
+ bool lpl;
+
+ struct clk_hw hw;
+};
+
+#define to_krait_mux_clk(_hw) container_of(_hw, struct krait_mux_clk, hw)
+
+extern const struct clk_ops krait_mux_clk_ops;
+
+struct krait_div2_clk {
+ u32 offset;
+ u8 width;
+ u32 shift;
+ bool lpl;
+
+ struct clk_hw hw;
+};
+
+#define to_krait_div2_clk(_hw) container_of(_hw, struct krait_div2_clk, hw)
+
+extern const struct clk_ops krait_div2_clk_ops;
+
+#endif
diff --git a/drivers/clk/qcom/clk-pll.c b/drivers/clk/qcom/clk-pll.c
index 5b940d629045f..4026b3c09e35a 100644
--- a/drivers/clk/qcom/clk-pll.c
+++ b/drivers/clk/qcom/clk-pll.c
@@ -255,8 +255,13 @@ static void clk_pll_configure(struct clk_pll *pll, struct regmap *regmap,
u32 mask;
regmap_write(regmap, pll->l_reg, config->l);
- regmap_write(regmap, pll->m_reg, config->m);
- regmap_write(regmap, pll->n_reg, config->n);
+
+ if (pll->alpha_reg) {
+ regmap_write(regmap, pll->alpha_reg, config->alpha);
+ } else {
+ regmap_write(regmap, pll->m_reg, config->m);
+ regmap_write(regmap, pll->n_reg, config->n);
+ }
val = config->vco_val;
val |= config->pre_div_val;
@@ -264,6 +269,7 @@ static void clk_pll_configure(struct clk_pll *pll, struct regmap *regmap,
val |= config->mn_ena_mask;
val |= config->main_output_mask;
val |= config->aux_output_mask;
+ val |= config->early_output_mask;
mask = config->vco_mask;
mask |= config->pre_div_mask;
@@ -271,9 +277,24 @@ static void clk_pll_configure(struct clk_pll *pll, struct regmap *regmap,
mask |= config->mn_ena_mask;
mask |= config->main_output_mask;
mask |= config->aux_output_mask;
+ mask |= config->early_output_mask;
regmap_update_bits(regmap, pll->config_reg, mask, val);
+
+}
+
+void clk_pll_configure_variable_rate(struct clk_pll *pll, struct regmap *regmap,
+ const struct pll_config *config)
+{
+ clk_pll_configure(pll, regmap, config);
+ regmap_write(regmap, pll->config_ctl_reg, config->config_ctl_val);
+ regmap_write(regmap, pll->config_ctl_reg + 4,
+ config->config_ctl_hi_val);
+
+ /* Enable FSM mode with bias count as 0x6 */
+ regmap_write(regmap, pll->mode_reg, 0x00118000);
}
+EXPORT_SYMBOL_GPL(clk_pll_configure_variable_rate);
void clk_pll_configure_sr(struct clk_pll *pll, struct regmap *regmap,
const struct pll_config *config, bool fsm_mode)
@@ -367,3 +388,74 @@ const struct clk_ops clk_pll_sr2_ops = {
.determine_rate = clk_pll_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_pll_sr2_ops);
+
+/*
+ * In HW FSM mode the hardware sequences the PLL on by itself; software only
+ * waits out the transient period and then polls for lock.
+ */
+static int clk_pll_hwfsm_enable(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+
+ /* Wait for 50us explicitly to avoid transient locks */
+ udelay(50);
+ return wait_for_pll(pll);
+}
+
+/*
+ * In HW FSM mode the hardware turns the PLL off; software only honours the
+ * mandated reference-clock delay.
+ */
+static void clk_pll_hwfsm_disable(struct clk_hw *hw)
+{
+ /* 8 reference clock cycle delay mandated by the HPG */
+ udelay(1);
+}
+
+/*
+ * rate = L * parent_rate.  On a regmap read failure return 0 ("rate
+ * unknown"): recalc_rate returns an unsigned long, so propagating a
+ * negative errno would be misread as an enormous rate.
+ */
+static unsigned long
+clk_pll_hwfsm_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ u32 l_val;
+ int ret;
+
+ struct clk_pll *pll = to_clk_pll(hw);
+
+ ret = regmap_read(pll->clkr.regmap, pll->l_reg, &l_val);
+ if (ret)
+ return 0;
+
+ return l_val * parent_rate;
+}
+
+static int
+clk_pll_hwfsm_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ const struct pll_freq_tbl *f;
+
+ f = find_freq(pll->freq_tbl, req->rate);
+ if (!f)
+ req->rate = DIV_ROUND_UP(req->rate, req->best_parent_rate)
+ * req->best_parent_rate;
+ else
+ req->rate = f->freq;
+
+ return 0;
+}
+
+/*
+ * Set the PLL rate by programming only the integer L multiplier:
+ * L = rate / prate.  Rates outside [min_rate, max_rate] are rejected, as is
+ * a zero parent rate (would divide by zero).
+ */
+static int
+clk_pll_hwfsm_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ u32 l_val;
+ struct clk_pll *pll = to_clk_pll(hw);
+
+ if ((rate < pll->min_rate) || (rate > pll->max_rate) || !prate)
+ return -EINVAL;
+
+ l_val = rate / prate;
+ regmap_write(pll->clkr.regmap, pll->l_reg, l_val);
+
+ return 0;
+}
+
+const struct clk_ops clk_pll_hwfsm_ops = {
+ .enable = clk_pll_hwfsm_enable,
+ .disable = clk_pll_hwfsm_disable,
+ .set_rate = clk_pll_hwfsm_set_rate,
+ .recalc_rate = clk_pll_hwfsm_recalc_rate,
+ .determine_rate = clk_pll_hwfsm_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_pll_hwfsm_ops);
diff --git a/drivers/clk/qcom/clk-pll.h b/drivers/clk/qcom/clk-pll.h
index ffd0c63bddbc4..333d576bfe817 100644
--- a/drivers/clk/qcom/clk-pll.h
+++ b/drivers/clk/qcom/clk-pll.h
@@ -48,12 +48,16 @@ struct clk_pll {
u32 l_reg;
u32 m_reg;
u32 n_reg;
+ u32 alpha_reg;
u32 config_reg;
u32 mode_reg;
u32 status_reg;
+ u32 config_ctl_reg;
u8 status_bit;
u8 post_div_width;
u8 post_div_shift;
+ unsigned long min_rate;
+ unsigned long max_rate;
const struct pll_freq_tbl *freq_tbl;
@@ -63,6 +67,7 @@ struct clk_pll {
extern const struct clk_ops clk_pll_ops;
extern const struct clk_ops clk_pll_vote_ops;
extern const struct clk_ops clk_pll_sr2_ops;
+extern const struct clk_ops clk_pll_hwfsm_ops;
#define to_clk_pll(_hw) container_of(to_clk_regmap(_hw), struct clk_pll, clkr)
@@ -70,6 +75,7 @@ struct pll_config {
u16 l;
u32 m;
u32 n;
+ u32 alpha;
u32 vco_val;
u32 vco_mask;
u32 pre_div_val;
@@ -79,11 +85,16 @@ struct pll_config {
u32 mn_ena_mask;
u32 main_output_mask;
u32 aux_output_mask;
+ u32 aux2_output_mask;
+ u32 early_output_mask;
+ u32 config_ctl_val;
+ u32 config_ctl_hi_val;
};
void clk_pll_configure_sr(struct clk_pll *pll, struct regmap *regmap,
const struct pll_config *config, bool fsm_mode);
void clk_pll_configure_sr_hpm_lp(struct clk_pll *pll, struct regmap *regmap,
const struct pll_config *config, bool fsm_mode);
-
+void clk_pll_configure_variable_rate(struct clk_pll *pll, struct regmap *regmap,
+ const struct pll_config *config);
#endif
diff --git a/drivers/clk/qcom/clk-regmap-mux-div.c b/drivers/clk/qcom/clk-regmap-mux-div.c
new file mode 100644
index 0000000000000..dcf12e5526510
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap-mux-div.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+
+#include "clk-regmap-mux-div.h"
+
+#define CMD_RCGR 0x0
+#define CMD_RCGR_UPDATE BIT(0)
+#define CMD_RCGR_DIRTY_CFG BIT(4)
+#define CMD_RCGR_ROOT_OFF BIT(31)
+#define CFG_RCGR 0x4
+
+/*
+ * Latch a new mux/divider configuration by setting the UPDATE bit and
+ * polling until the hardware clears it.  Returns 0 on success, a regmap
+ * error code, or -EBUSY if the RCG does not acknowledge within ~500us.
+ */
+static int __mux_div_update_config(struct clk_regmap_mux_div *md)
+{
+ int ret;
+ u32 val, count;
+ const char *name = clk_hw_get_name(&md->clkr.hw);
+
+ ret = regmap_update_bits(md->clkr.regmap, CMD_RCGR + md->reg_offset,
+ CMD_RCGR_UPDATE, CMD_RCGR_UPDATE);
+ if (ret)
+ return ret;
+
+ /* Wait for update to take effect */
+ for (count = 500; count > 0; count--) {
+ ret = regmap_read(md->clkr.regmap, CMD_RCGR + md->reg_offset,
+ &val);
+ if (ret)
+ return ret;
+ if (!(val & CMD_RCGR_UPDATE))
+ return 0;
+ udelay(1);
+ }
+
+ pr_err("%s: RCG did not update its configuration\n", name);
+ return -EBUSY;
+}
+
+static int __mux_div_set_src_div(struct clk_regmap_mux_div *md, u32 src_sel,
+ u32 src_div)
+{
+ int ret;
+ u32 val, mask;
+
+ val = (src_div << md->hid_shift) | (src_sel << md->src_shift);
+ mask = ((BIT(md->hid_width) - 1) << md->hid_shift) |
+ ((BIT(md->src_width) - 1) << md->src_shift);
+
+ ret = regmap_update_bits(md->clkr.regmap, CFG_RCGR + md->reg_offset,
+ mask, val);
+ if (ret)
+ return ret;
+
+ return __mux_div_update_config(md);
+}
+
+/*
+ * Read the currently programmed source select and divider fields from the
+ * CFG register.  If the CMD register still reports a dirty (pending)
+ * configuration, the outputs are left untouched and an error is logged.
+ */
+static void __mux_div_get_src_div(struct clk_regmap_mux_div *md, u32 *src_sel,
+ u32 *src_div)
+{
+ u32 val, div, src;
+ const char *name = clk_hw_get_name(&md->clkr.hw);
+
+ regmap_read(md->clkr.regmap, CMD_RCGR + md->reg_offset, &val);
+
+ if (val & CMD_RCGR_DIRTY_CFG) {
+ pr_err("%s: RCG configuration is pending\n", name);
+ return;
+ }
+
+ regmap_read(md->clkr.regmap, CFG_RCGR + md->reg_offset, &val);
+ src = (val >> md->src_shift);
+ src &= BIT(md->src_width) - 1;
+ *src_sel = src;
+
+ div = (val >> md->hid_shift);
+ div &= BIT(md->hid_width) - 1;
+ *src_div = div;
+}
+
+static int mux_div_enable(struct clk_hw *hw)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+
+ return __mux_div_set_src_div(md, md->src_sel, md->div);
+}
+
+static inline bool is_better_rate(unsigned long req, unsigned long best,
+ unsigned long new)
+{
+ return (req <= new && new < best) || (best < req && best < new);
+}
+
+/*
+ * Search every parent and every half-integer divider setting for the
+ * combination closest to the requested rate.  The hardware divider value
+ * "div" encodes an effective divisor of div/2 (hence the mult_frac by 2),
+ * so output = parent_rate * 2 / div.  On success req->rate and the best
+ * parent/parent-rate are filled in; -EINVAL if no combination was found.
+ */
+static int mux_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+ unsigned int i, div, max_div;
+ unsigned long actual_rate, best_rate = 0;
+ unsigned long req_rate = req->rate;
+
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i);
+ unsigned long parent_rate = clk_hw_get_rate(parent);
+
+ max_div = BIT(md->hid_width) - 1;
+ for (div = 1; div < max_div; div++) {
+ /* parent rate needed so that rate * div / 2 == req_rate */
+ parent_rate = mult_frac(req_rate, div, 2);
+ parent_rate = clk_hw_round_rate(parent, parent_rate);
+ actual_rate = mult_frac(parent_rate, 2, div);
+
+ if (is_better_rate(req_rate, best_rate, actual_rate)) {
+ best_rate = actual_rate;
+ req->rate = best_rate;
+ req->best_parent_rate = parent_rate;
+ req->best_parent_hw = parent;
+ }
+
+ /* Larger dividers only lower the rate further; stop early. */
+ if (actual_rate < req_rate || best_rate <= req_rate)
+ break;
+ }
+ }
+
+ if (!best_rate)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * Re-run the parent/divider search (mirroring mux_div_determine_rate) and
+ * program the winning combination.  best_div stores the raw register value
+ * (div - 1), matching mux_div_recalc_rate's use of div + 1.
+ *
+ * NOTE(review): the @src_sel argument is never used — the source is always
+ * recomputed from the parent scan.  Confirm this is intentional.
+ */
+static int __mux_div_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate, u32 src_sel)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+ int ret, i;
+ u32 div, max_div, best_src = 0, best_div = 0;
+ unsigned long actual_rate, best_rate = 0;
+
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i);
+ unsigned long parent_rate = clk_hw_get_rate(parent);
+
+ max_div = BIT(md->hid_width) - 1;
+ for (div = 1; div < max_div; div++) {
+ parent_rate = mult_frac(rate, div, 2);
+ parent_rate = clk_hw_round_rate(parent, parent_rate);
+ actual_rate = mult_frac(parent_rate, 2, div);
+
+ if (is_better_rate(rate, best_rate, actual_rate)) {
+ best_rate = actual_rate;
+ best_src = md->parent_map[i].cfg;
+ best_div = div - 1;
+ }
+
+ if (actual_rate < rate || best_rate <= rate)
+ break;
+ }
+ }
+
+ ret = __mux_div_set_src_div(md, best_src, best_div);
+ if (!ret) {
+ /* Cache the programmed values for enable/set_parent replay. */
+ md->div = best_div;
+ md->src_sel = best_src;
+ }
+
+ return ret;
+}
+
+static u8 mux_div_get_parent(struct clk_hw *hw)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+ const char *name = clk_hw_get_name(hw);
+ u32 i, div, src;
+
+ __mux_div_get_src_div(md, &src, &div);
+
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++)
+ if (src == md->parent_map[i].cfg)
+ return i;
+
+ pr_err("%s: Can't find parent %d\n", name, src);
+ return 0;
+}
+
+static int mux_div_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+
+ return __mux_div_set_src_div(md, md->parent_map[index].cfg, md->div);
+}
+
+static int mux_div_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long prate)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+
+ return __mux_div_set_rate_and_parent(hw, rate, prate, md->src_sel);
+}
+
+static int mux_div_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate, u8 index)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+
+ return __mux_div_set_rate_and_parent(hw, rate, prate,
+ md->parent_map[index].cfg);
+}
+
+static unsigned long mux_div_recalc_rate(struct clk_hw *hw, unsigned long prate)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+ u32 div, src;
+ int i, num_parents = clk_hw_get_num_parents(hw);
+ const char *name = clk_hw_get_name(hw);
+
+ __mux_div_get_src_div(md, &src, &div);
+ for (i = 0; i < num_parents; i++)
+ if (src == md->parent_map[i].cfg) {
+ struct clk_hw *p = clk_hw_get_parent_by_index(hw, i);
+ unsigned long parent_rate = clk_hw_get_rate(p);
+
+ return mult_frac(parent_rate, 2, div + 1);
+ }
+
+ pr_err("%s: Can't find parent %d\n", name, src);
+ return 0;
+}
+
+static struct clk_hw *mux_div_get_safe_parent(struct clk_hw *hw,
+ unsigned long *safe_freq)
+{
+ int i;
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+
+ if (md->safe_freq)
+ *safe_freq = md->safe_freq;
+
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++)
+ if (md->safe_src == md->parent_map[i].cfg)
+ break;
+
+ return clk_hw_get_parent_by_index(hw, i);
+}
+
+/*
+ * On disable, park the clock on its safe source at the safe frequency (if
+ * one is configured), so parents may change rate while this clock is off.
+ * The raw divider value is 2 * div + 1, i.e. the half-integer encoding of
+ * the divisor returned by divider_get_val().
+ */
+static void mux_div_disable(struct clk_hw *hw)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+ struct clk_hw *parent;
+ u32 div;
+
+ if (!md->safe_freq || !md->safe_src)
+ return;
+
+ parent = mux_div_get_safe_parent(hw, &md->safe_freq);
+ div = divider_get_val(md->safe_freq, clk_get_rate(parent->clk), NULL,
+ md->hid_width, CLK_DIVIDER_ROUND_CLOSEST);
+ div = 2 * div + 1;
+
+ __mux_div_set_src_div(md, md->safe_src, div);
+}
+
+const struct clk_ops clk_regmap_mux_div_ops = {
+ .enable = mux_div_enable,
+ .disable = mux_div_disable,
+ .get_parent = mux_div_get_parent,
+ .set_parent = mux_div_set_parent,
+ .set_rate = mux_div_set_rate,
+ .set_rate_and_parent = mux_div_set_rate_and_parent,
+ .determine_rate = mux_div_determine_rate,
+ .recalc_rate = mux_div_recalc_rate,
+ .get_safe_parent = mux_div_get_safe_parent,
+};
+EXPORT_SYMBOL_GPL(clk_regmap_mux_div_ops);
diff --git a/drivers/clk/qcom/clk-regmap-mux-div.h b/drivers/clk/qcom/clk-regmap-mux-div.h
new file mode 100644
index 0000000000000..7610a0e4abd76
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap-mux-div.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_REGMAP_MUX_DIV_H__
+#define __QCOM_CLK_REGMAP_MUX_DIV_H__
+
+#include <linux/clk-provider.h>
+#include "clk-regmap.h"
+#include "clk-rcg.h"
+
+/**
+ * struct mux_div_clk - combined mux/divider clock
+ * @reg_offset: offset of the mux/divider register
+ * @hid_width: number of bits in half integer divider
+ * @hid_shift: lowest bit of hid value field
+ * @src_width: number of bits in source select
+ * @src_shift: lowest bit of source select field
+ * @div: the divider raw configuration value
+ * @src_sel: the mux index which will be used if the clock is enabled
+ * @safe_src: the safe source mux index for this clock
+ * @safe_freq: When switching rates from A to B, the mux div clock will
+ * instead switch from A -> safe_freq -> B. This allows the
+ * mux_div clock to change rates while enabled, even if this
+ * behavior is not supported by the parent clocks.
+ * If changing the rate of parent A also causes the rate of
+ * parent B to change, then safe_freq must be defined.
+ * safe_freq is expected to have a source clock which is always
+ * on and runs at only one rate.
+ * @parent_map: pointer to parent_map struct
+ * @clkr: handle between common and hardware-specific interfaces
+ */
+
+struct clk_regmap_mux_div {
+ u32 reg_offset;
+ u32 hid_width;
+ u32 hid_shift;
+ u32 src_width;
+ u32 src_shift;
+ u32 div;
+ u32 src_sel;
+ u32 safe_src;
+ unsigned long safe_freq;
+ const struct parent_map *parent_map;
+ struct clk_regmap clkr;
+};
+
+#define to_clk_regmap_mux_div(_hw) \
+ container_of(to_clk_regmap(_hw), struct clk_regmap_mux_div, clkr)
+
+extern const struct clk_ops clk_regmap_mux_div_ops;
+
+#endif
diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c
new file mode 100644
index 0000000000000..05cf83c2ca833
--- /dev/null
+++ b/drivers/clk/qcom/clk-rpm.c
@@ -0,0 +1,503 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mfd/qcom_rpm.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/mfd/qcom-rpm.h>
+#include <dt-bindings/clock/qcom,rpmcc.h>
+
+#define QCOM_RPM_MISC_CLK_TYPE 0x306b6c63
+#define QCOM_RPM_SCALING_ENABLE_ID 0x2
+
+#define DEFINE_CLK_RPM(_platform, _name, _active, r_id) \
+ static struct clk_rpm _platform##_##_active; \
+ static struct clk_rpm _platform##_##_name = { \
+ .rpm_clk_id = (r_id), \
+ .peer = &_platform##_##_active, \
+ .rate = INT_MAX, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "pxo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_rpm _platform##_##_active = { \
+ .rpm_clk_id = (r_id), \
+ .peer = &_platform##_##_name, \
+ .active_only = true, \
+ .rate = INT_MAX, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_ops, \
+ .name = #_active, \
+ .parent_names = (const char *[]){ "pxo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
+/*
+ * Fixed-rate branch-style RPM clock pair parented on pxo_board.  As in
+ * DEFINE_CLK_RPM and DEFINE_CLK_RPM_CXO_BRANCH, active_only belongs on the
+ * "_active" clock, which votes only in the RPM active set.
+ */
+#define DEFINE_CLK_RPM_PXO_BRANCH(_platform, _name, _active, r_id, r) \
+ static struct clk_rpm _platform##_##_active; \
+ static struct clk_rpm _platform##_##_name = { \
+ .rpm_clk_id = (r_id), \
+ .peer = &_platform##_##_active, \
+ .rate = (r), \
+ .branch = true, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_branch_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "pxo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_rpm _platform##_##_active = { \
+ .rpm_clk_id = (r_id), \
+ .active_only = true, \
+ .peer = &_platform##_##_name, \
+ .rate = (r), \
+ .branch = true, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_branch_ops, \
+ .name = #_active, \
+ .parent_names = (const char *[]){ "pxo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
+#define DEFINE_CLK_RPM_CXO_BRANCH(_platform, _name, _active, r_id, r) \
+ static struct clk_rpm _platform##_##_active; \
+ static struct clk_rpm _platform##_##_name = { \
+ .rpm_clk_id = (r_id), \
+ .peer = &_platform##_##_active, \
+ .rate = (r), \
+ .branch = true, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_branch_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "cxo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_rpm _platform##_##_active = { \
+ .rpm_clk_id = (r_id), \
+ .active_only = true, \
+ .peer = &_platform##_##_name, \
+ .rate = (r), \
+ .branch = true, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_rpm_branch_ops, \
+ .name = #_active, \
+ .parent_names = (const char *[]){ "cxo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
+#define to_clk_rpm(_hw) container_of(_hw, struct clk_rpm, hw)
+
+struct clk_rpm {
+ const int rpm_clk_id;
+ const bool active_only;
+ unsigned long rate;
+ bool enabled;
+ bool branch;
+ struct clk_rpm *peer;
+ struct clk_hw hw;
+ struct qcom_rpm *rpm;
+};
+
+struct rpm_cc {
+ struct qcom_rpm *rpm;
+ struct clk_onecell_data data;
+ struct clk *clks[];
+};
+
+struct rpm_clk_desc {
+ struct clk_rpm **clks;
+ size_t num_clks;
+};
+
+static DEFINE_MUTEX(rpm_clk_lock);
+
+/*
+ * Vote INT_MAX for the clock in both the RPM active and sleep sets,
+ * presumably to keep it at full rate until real consumers take over —
+ * confirm against the probe path that registers these clocks.
+ */
+static int clk_rpm_handoff(struct clk_rpm *r)
+{
+ int ret;
+ u32 value = INT_MAX;
+
+ ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
+ r->rpm_clk_id, &value, 1);
+ if (ret)
+ return ret;
+ ret = qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE,
+ r->rpm_clk_id, &value, 1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int clk_rpm_set_rate_active(struct clk_rpm *r, unsigned long rate)
+{
+ u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */
+
+ return qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
+ r->rpm_clk_id, &value, 1);
+}
+
+static int clk_rpm_set_rate_sleep(struct clk_rpm *r, unsigned long rate)
+{
+ u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */
+
+ return qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE,
+ r->rpm_clk_id, &value, 1);
+}
+
+static void to_active_sleep(struct clk_rpm *r, unsigned long rate,
+ unsigned long *active, unsigned long *sleep)
+{
+ *active = rate;
+
+ /*
+ * Active-only clocks don't care what the rate is during sleep. So,
+ * they vote for zero.
+ */
+ if (r->active_only)
+ *sleep = 0;
+ else
+ *sleep = *active;
+}
+
+/*
+ * Send this clock's rate votes to the RPM.  The active-set vote is the max
+ * of this clock's rate and its enabled peer's rate (the peer shares the
+ * same RPM resource); branch clocks vote only 0/1.  On success the clock
+ * is marked enabled so set_rate/unprepare account for it.
+ */
+static int clk_rpm_prepare(struct clk_hw *hw)
+{
+ struct clk_rpm *r = to_clk_rpm(hw);
+ struct clk_rpm *peer = r->peer;
+ unsigned long this_rate = 0, this_sleep_rate = 0;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ unsigned long active_rate, sleep_rate;
+ int ret = 0;
+
+ mutex_lock(&rpm_clk_lock);
+
+ /* Don't send requests to the RPM if the rate has not been set. */
+ if (!r->rate)
+ goto out;
+
+ to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate);
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate,
+ &peer_rate, &peer_sleep_rate);
+
+ active_rate = max(this_rate, peer_rate);
+
+ /* Branch clocks vote on/off (1/0) rather than a rate. */
+ if (r->branch)
+ active_rate = !!active_rate;
+
+ ret = clk_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = max(this_sleep_rate, peer_sleep_rate);
+ if (r->branch)
+ sleep_rate = !!sleep_rate;
+
+ ret = clk_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ /* Undo the active set vote and restore it */
+ ret = clk_rpm_set_rate_active(r, peer_rate);
+
+out:
+ if (!ret)
+ r->enabled = true;
+
+ mutex_unlock(&rpm_clk_lock);
+
+ return ret;
+}
+
+/*
+ * Drop this clock's RPM votes, re-voting only what the enabled peer still
+ * needs (0 if the peer is disabled).  The enabled flag is cleared only if
+ * both the active and sleep votes were updated successfully.
+ */
+static void clk_rpm_unprepare(struct clk_hw *hw)
+{
+ struct clk_rpm *r = to_clk_rpm(hw);
+ struct clk_rpm *peer = r->peer;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ unsigned long active_rate, sleep_rate;
+ int ret;
+
+ mutex_lock(&rpm_clk_lock);
+
+ /* No votes were ever sent if the rate was never set. */
+ if (!r->rate)
+ goto out;
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate, &peer_rate,
+ &peer_sleep_rate);
+
+ active_rate = r->branch ? !!peer_rate : peer_rate;
+ ret = clk_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate;
+ ret = clk_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ goto out;
+
+ r->enabled = false;
+
+out:
+ mutex_unlock(&rpm_clk_lock);
+}
+
+static int clk_rpm_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct clk_rpm *r = to_clk_rpm(hw);
+ struct clk_rpm *peer = r->peer;
+ unsigned long active_rate, sleep_rate;
+ unsigned long this_rate = 0, this_sleep_rate = 0;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ int ret = 0;
+
+ mutex_lock(&rpm_clk_lock);
+
+ if (!r->enabled)
+ goto out;
+
+ to_active_sleep(r, rate, &this_rate, &this_sleep_rate);
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate,
+ &peer_rate, &peer_sleep_rate);
+
+ active_rate = max(this_rate, peer_rate);
+ ret = clk_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = max(this_sleep_rate, peer_sleep_rate);
+ ret = clk_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ goto out;
+
+ r->rate = rate;
+
+out:
+ mutex_unlock(&rpm_clk_lock);
+
+ return ret;
+}
+
+static long clk_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ /*
+ * RPM handles rate rounding and we don't have a way to
+ * know what the rate will be, so just return whatever
+ * rate is requested.
+ */
+ return rate;
+}
+
+static unsigned long clk_rpm_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_rpm *r = to_clk_rpm(hw);
+
+ /*
+ * RPM handles rate rounding and we don't have a way to
+ * know what the rate will be, so just return whatever
+ * rate was set.
+ */
+ return r->rate;
+}
+
+static const struct clk_ops clk_rpm_ops = {
+ .prepare = clk_rpm_prepare,
+ .unprepare = clk_rpm_unprepare,
+ .set_rate = clk_rpm_set_rate,
+ .round_rate = clk_rpm_round_rate,
+ .recalc_rate = clk_rpm_recalc_rate,
+};
+
+static const struct clk_ops clk_rpm_branch_ops = {
+ .prepare = clk_rpm_prepare,
+ .unprepare = clk_rpm_unprepare,
+ .round_rate = clk_rpm_round_rate,
+ .recalc_rate = clk_rpm_recalc_rate,
+};
+
+/* apq8064 */
+DEFINE_CLK_RPM_PXO_BRANCH(apq8064, pxo, pxo_a_clk, QCOM_RPM_PXO_CLK, 27000000);
+DEFINE_CLK_RPM_CXO_BRANCH(apq8064, cxo, cxo_a_clk, QCOM_RPM_CXO_CLK, 19200000);
+DEFINE_CLK_RPM(apq8064, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK);
+DEFINE_CLK_RPM(apq8064, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK);
+DEFINE_CLK_RPM(apq8064, daytona_clk, daytona_a_clk, QCOM_RPM_DAYTONA_FABRIC_CLK);
+DEFINE_CLK_RPM(apq8064, ebi1_clk, ebi1_a_clk, QCOM_RPM_EBI1_CLK);
+DEFINE_CLK_RPM(apq8064, mmfab_clk, mmfab_a_clk, QCOM_RPM_MM_FABRIC_CLK);
+DEFINE_CLK_RPM(apq8064, mmfpb_clk, mmfpb_a_clk, QCOM_RPM_MMFPB_CLK);
+DEFINE_CLK_RPM(apq8064, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK);
+DEFINE_CLK_RPM(apq8064, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK);
+DEFINE_CLK_RPM(apq8064, qdss_clk, qdss_a_clk, QCOM_RPM_QDSS_CLK);
+
+static struct clk_rpm *apq8064_clks[] = {
+ [RPM_PXO_CLK] = &apq8064_pxo,
+ [RPM_PXO_A_CLK] = &apq8064_pxo_a_clk,
+ [RPM_CXO_CLK] = &apq8064_cxo,
+ [RPM_CXO_A_CLK] = &apq8064_cxo_a_clk,
+ [RPM_APPS_FABRIC_CLK] = &apq8064_afab_clk,
+ [RPM_APPS_FABRIC_A_CLK] = &apq8064_afab_a_clk,
+ [RPM_CFPB_CLK] = &apq8064_cfpb_clk,
+ [RPM_CFPB_A_CLK] = &apq8064_cfpb_a_clk,
+ [RPM_DAYTONA_FABRIC_CLK] = &apq8064_daytona_clk,
+ [RPM_DAYTONA_FABRIC_A_CLK] = &apq8064_daytona_a_clk,
+ [RPM_EBI1_CLK] = &apq8064_ebi1_clk,
+ [RPM_EBI1_A_CLK] = &apq8064_ebi1_a_clk,
+ [RPM_MM_FABRIC_CLK] = &apq8064_mmfab_clk,
+ [RPM_MM_FABRIC_A_CLK] = &apq8064_mmfab_a_clk,
+ [RPM_MMFPB_CLK] = &apq8064_mmfpb_clk,
+ [RPM_MMFPB_A_CLK] = &apq8064_mmfpb_a_clk,
+ [RPM_SYS_FABRIC_CLK] = &apq8064_sfab_clk,
+ [RPM_SYS_FABRIC_A_CLK] = &apq8064_sfab_a_clk,
+ [RPM_SFPB_CLK] = &apq8064_sfpb_clk,
+ [RPM_SFPB_A_CLK] = &apq8064_sfpb_a_clk,
+ [RPM_QDSS_CLK] = &apq8064_qdss_clk,
+ [RPM_QDSS_A_CLK] = &apq8064_qdss_a_clk,
+};
+
+static const struct rpm_clk_desc rpm_clk_apq8064 = {
+ .clks = apq8064_clks,
+ .num_clks = ARRAY_SIZE(apq8064_clks),
+};
+
+static const struct of_device_id rpm_clk_match_table[] = {
+ { .compatible = "qcom,rpmcc-apq8064", .data = &rpm_clk_apq8064},
+ { }
+};
+MODULE_DEVICE_TABLE(of, rpm_clk_match_table);
+
+static int rpm_clk_probe(struct platform_device *pdev) /* register the matched SoC's RPM clocks as a onecell provider */
+{
+ struct clk **clks;
+ struct clk *clk;
+ struct rpm_cc *rcc;
+ struct clk_onecell_data *data;
+ int ret;
+ size_t num_clks, i;
+ struct qcom_rpm *rpm;
+ struct clk_rpm **rpm_clks;
+ const struct rpm_clk_desc *desc;
+
+ rpm = dev_get_drvdata(pdev->dev.parent); /* RPM handle is provided by the parent device */
+ if (!rpm) {
+ dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
+ return -ENODEV;
+ }
+
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ rpm_clks = desc->clks;
+ num_clks = desc->num_clks;
+
+ rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc) + sizeof(*clks) * num_clks, /* one allocation: header + clks[] flexible array */
+ GFP_KERNEL);
+ if (!rcc)
+ return -ENOMEM;
+
+ clks = rcc->clks;
+ data = &rcc->data;
+ data->clks = clks;
+ data->clk_num = num_clks;
+
+ for (i = 0; i < num_clks; i++) {
+ if (!rpm_clks[i]) { /* sparse table: hole in the clock IDs */
+ clks[i] = ERR_PTR(-ENOENT);
+ continue;
+ }
+
+ rpm_clks[i]->rpm = rpm;
+
+ ret = clk_rpm_handoff(rpm_clks[i]); /* initial vote before consumers can attach */
+ if (ret)
+ goto err;
+ }
+
+ for (i = 0; i < num_clks; i++) {
+ if (!rpm_clks[i]) {
+ clks[i] = ERR_PTR(-ENOENT);
+ continue;
+ }
+
+ clk = devm_clk_register(&pdev->dev, &rpm_clks[i]->hw); /* devm: no manual unregister needed */
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto err;
+ }
+
+ clks[i] = clk;
+ }
+
+ ret = of_clk_add_provider(pdev->dev.of_node, of_clk_src_onecell_get,
+ data);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_err(&pdev->dev, "Error registering RPM Clock driver (%d)\n", ret);
+ return ret;
+}
+
+static int rpm_clk_remove(struct platform_device *pdev) /* undo of_clk_add_provider(); clocks themselves are devm-managed */
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ return 0;
+}
+
+static struct platform_driver rpm_clk_driver = {
+ .driver = {
+ .name = "qcom-clk-rpm",
+ .of_match_table = rpm_clk_match_table,
+ },
+ .probe = rpm_clk_probe,
+ .remove = rpm_clk_remove,
+};
+
+static int __init rpm_clk_init(void)
+{
+ return platform_driver_register(&rpm_clk_driver);
+}
+core_initcall(rpm_clk_init);
+
+static void __exit rpm_clk_exit(void)
+{
+ platform_driver_unregister(&rpm_clk_driver);
+}
+module_exit(rpm_clk_exit);
+
+MODULE_DESCRIPTION("Qualcomm RPM Clock Controller Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-clk-rpm");
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
new file mode 100644
index 0000000000000..06eb060092682
--- /dev/null
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -0,0 +1,653 @@
+/*
+ * Copyright (c) 2016, Linaro Limited
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+#include <dt-bindings/clock/qcom,rpmcc.h>
+#include <dt-bindings/mfd/qcom-rpm.h>
+
+#define QCOM_RPM_KEY_SOFTWARE_ENABLE 0x6e657773
+#define QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY 0x62636370
+#define QCOM_RPM_SMD_KEY_RATE 0x007a484b
+#define QCOM_RPM_SMD_KEY_ENABLE 0x62616e45
+#define QCOM_RPM_SMD_KEY_STATE 0x54415453
+#define QCOM_RPM_SCALING_ENABLE_ID 0x2
+
+#define __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, stat_id, \
+ key) \
+ static struct clk_smd_rpm _platform##_##_active; \
+ static struct clk_smd_rpm _platform##_##_name = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .rpm_key = (key), \
+ .peer = &_platform##_##_active, \
+ .rate = INT_MAX, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_smd_rpm_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_smd_rpm _platform##_##_active = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .active_only = true, \
+ .rpm_key = (key), \
+ .peer = &_platform##_##_name, \
+ .rate = INT_MAX, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_smd_rpm_ops, \
+ .name = #_active, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
+#define __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, r_id, \
+ stat_id, r, key) \
+ static struct clk_smd_rpm _platform##_##_active; \
+ static struct clk_smd_rpm _platform##_##_name = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .rpm_key = (key), \
+ .branch = true, \
+ .peer = &_platform##_##_active, \
+ .rate = (r), \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_smd_rpm_branch_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_smd_rpm _platform##_##_active = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .active_only = true, \
+ .rpm_key = (key), \
+ .branch = true, \
+ .peer = &_platform##_##_name, \
+ .rate = (r), \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_smd_rpm_branch_ops, \
+ .name = #_active, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }
+
+#define DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id) \
+ __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, \
+ 0, QCOM_RPM_SMD_KEY_RATE)
+
+#define DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, r_id, r) \
+ __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, type, \
+ r_id, 0, r, QCOM_RPM_SMD_KEY_ENABLE)
+
+#define DEFINE_CLK_SMD_RPM_QDSS(_platform, _name, _active, type, r_id) \
+ __DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, \
+ 0, QCOM_RPM_SMD_KEY_STATE)
+
+#define DEFINE_CLK_SMD_RPM_XO_BUFFER(_platform, _name, _active, r_id) \
+ __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, \
+ QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, 1000, \
+ QCOM_RPM_KEY_SOFTWARE_ENABLE)
+
+#define DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(_platform, _name, _active, r_id) \
+ __DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, \
+ QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, 1000, \
+ QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY)
+
+#define to_clk_smd_rpm(_hw) container_of(_hw, struct clk_smd_rpm, hw)
+
+struct clk_smd_rpm {
+ const int rpm_res_type; /* RPM resource type, e.g. QCOM_SMD_RPM_BUS_CLK */
+ const int rpm_key; /* request key: rate, enable, or state */
+ const int rpm_clk_id; /* resource id within the type */
+ const int rpm_status_id;
+ const bool active_only; /* true: vote 0 on the sleep set (see to_active_sleep()) */
+ bool enabled; /* protected by rpm_smd_clk_lock */
+ bool branch; /* true: votes are collapsed to a 0/1 enable, not a rate */
+ struct clk_smd_rpm *peer; /* active-only twin sharing the same RPM resource */
+ struct clk_hw hw;
+ unsigned long rate; /* last requested rate; RPM state cannot be read back */
+ struct qcom_smd_rpm *rpm;
+};
+
+struct clk_smd_rpm_req {
+ __le32 key;
+ __le32 nbytes;
+ __le32 value;
+};
+
+struct rpm_cc {
+ struct qcom_rpm *rpm;
+ struct clk_onecell_data data;
+ struct clk *clks[];
+};
+
+struct rpm_smd_clk_desc {
+ struct clk_smd_rpm **clks;
+ size_t num_clks;
+};
+
+static DEFINE_MUTEX(rpm_smd_clk_lock);
+
+static int clk_smd_rpm_handoff(struct clk_smd_rpm *r) /* boot-time vote on both RPM sets so already-running clocks stay on */
+{
+ int ret;
+ struct clk_smd_rpm_req req = {
+ .key = cpu_to_le32(r->rpm_key),
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(INT_MAX), /* maximum vote, matching the initial .rate = INT_MAX */
+ };
+
+ ret = qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ r->rpm_res_type, r->rpm_clk_id, &req,
+ sizeof(req));
+ if (ret)
+ return ret;
+ ret = qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE,
+ r->rpm_res_type, r->rpm_clk_id, &req,
+ sizeof(req));
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int clk_smd_rpm_set_rate_active(struct clk_smd_rpm *r, /* send one rate vote on the RPM active set */
+ unsigned long rate)
+{
+ struct clk_smd_rpm_req req = {
+ .key = cpu_to_le32(r->rpm_key),
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */
+ };
+
+ return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ r->rpm_res_type, r->rpm_clk_id, &req,
+ sizeof(req));
+}
+
+static int clk_smd_rpm_set_rate_sleep(struct clk_smd_rpm *r, /* send one rate vote on the RPM sleep set */
+ unsigned long rate)
+{
+ struct clk_smd_rpm_req req = {
+ .key = cpu_to_le32(r->rpm_key),
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */
+ };
+
+ return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE,
+ r->rpm_res_type, r->rpm_clk_id, &req,
+ sizeof(req));
+}
+
+static void to_active_sleep(struct clk_smd_rpm *r, unsigned long rate, /* split one requested rate into active-set and sleep-set votes */
+ unsigned long *active, unsigned long *sleep)
+{
+ *active = rate;
+
+ /*
+ * Active-only clocks don't care what the rate is during sleep. So,
+ * they vote for zero.
+ */
+ if (r->active_only)
+ *sleep = 0;
+ else
+ *sleep = *active;
+}
+
+static int clk_smd_rpm_prepare(struct clk_hw *hw) /* send this clock's votes, merged with the peer's, to both RPM sets */
+{
+ struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+ struct clk_smd_rpm *peer = r->peer;
+ unsigned long this_rate = 0, this_sleep_rate = 0;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ unsigned long active_rate, sleep_rate;
+ int ret = 0;
+
+ mutex_lock(&rpm_smd_clk_lock);
+
+ /* Don't send requests to the RPM if the rate has not been set. */
+ if (!r->rate)
+ goto out;
+
+ to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate);
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate,
+ &peer_rate, &peer_sleep_rate);
+
+ active_rate = max(this_rate, peer_rate); /* both halves vote on the same RPM resource */
+
+ if (r->branch)
+ active_rate = !!active_rate; /* branch clocks send a 0/1 enable, not a rate */
+
+ ret = clk_smd_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = max(this_sleep_rate, peer_sleep_rate);
+ if (r->branch)
+ sleep_rate = !!sleep_rate;
+
+ ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ /* Undo the active set vote and restore it */
+ ret = clk_smd_rpm_set_rate_active(r, peer_rate);
+
+out:
+ if (!ret) /* mark enabled only once every vote was accepted */
+ r->enabled = true;
+
+ mutex_unlock(&rpm_smd_clk_lock);
+
+ return ret;
+}
+
+static void clk_smd_rpm_unprepare(struct clk_hw *hw) /* drop this clock's votes; .unprepare is void so errors cannot propagate */
+{
+ struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+ struct clk_smd_rpm *peer = r->peer;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ unsigned long active_rate, sleep_rate;
+ int ret;
+
+ mutex_lock(&rpm_smd_clk_lock);
+
+ if (!r->rate) /* prepare() sent nothing when no rate was set */
+ goto out;
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate, &peer_rate,
+ &peer_sleep_rate);
+
+ active_rate = r->branch ? !!peer_rate : peer_rate; /* vote down to what the peer still needs */
+ ret = clk_smd_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out; /* on failure 'enabled' stays true */
+
+ sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate;
+ ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ goto out;
+
+ r->enabled = false;
+
+out:
+ mutex_unlock(&rpm_smd_clk_lock);
+}
+
+static int clk_smd_rpm_set_rate(struct clk_hw *hw, unsigned long rate, /* re-vote both RPM sets for a new rate, merged with the peer's needs */
+ unsigned long parent_rate)
+{
+ struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+ struct clk_smd_rpm *peer = r->peer;
+ unsigned long active_rate, sleep_rate;
+ unsigned long this_rate = 0, this_sleep_rate = 0;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ int ret = 0;
+
+ mutex_lock(&rpm_smd_clk_lock);
+
+ if (!r->enabled) /* not prepared: send nothing and don't cache the rate */
+ goto out;
+
+ to_active_sleep(r, rate, &this_rate, &this_sleep_rate);
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate,
+ &peer_rate, &peer_sleep_rate);
+
+ active_rate = max(this_rate, peer_rate); /* both halves vote on the same RPM resource */
+ ret = clk_smd_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = max(this_sleep_rate, peer_sleep_rate);
+ ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ goto out;
+
+ r->rate = rate; /* cached for recalc_rate(); the RPM cannot be queried */
+
+out:
+ mutex_unlock(&rpm_smd_clk_lock);
+
+ return ret;
+}
+
+static long clk_smd_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ /*
+ * RPM handles rate rounding and we don't have a way to
+ * know what the rate will be, so just return whatever
+ * rate is requested.
+ */
+ return rate;
+}
+
+static unsigned long clk_smd_rpm_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+
+ /*
+ * RPM handles rate rounding and we don't have a way to
+ * know what the rate will be, so just return whatever
+ * rate was set.
+ */
+ return r->rate;
+}
+
+static int clk_smd_rpm_enable_scaling(struct qcom_smd_rpm *rpm) /* write the MISC_CLK scaling-enable flag on both RPM sets */
+{
+ int ret;
+ struct clk_smd_rpm_req req = {
+ .key = cpu_to_le32(QCOM_RPM_SMD_KEY_ENABLE),
+ .nbytes = cpu_to_le32(sizeof(u32)),
+ .value = cpu_to_le32(1),
+ };
+
+ ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_SLEEP_STATE,
+ QCOM_SMD_RPM_MISC_CLK,
+ QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req));
+ if (ret) {
+ pr_err("RPM clock scaling (sleep set) not enabled!\n");
+ return ret;
+ }
+
+ ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ QCOM_SMD_RPM_MISC_CLK,
+ QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req));
+ if (ret) {
+ pr_err("RPM clock scaling (active set) not enabled!\n");
+ return ret;
+ }
+
+ pr_debug("%s: RPM clock scaling is enabled\n", __func__);
+ return 0;
+}
+
+static const struct clk_ops clk_smd_rpm_ops = {
+ .prepare = clk_smd_rpm_prepare,
+ .unprepare = clk_smd_rpm_unprepare,
+ .set_rate = clk_smd_rpm_set_rate,
+ .round_rate = clk_smd_rpm_round_rate,
+ .recalc_rate = clk_smd_rpm_recalc_rate,
+};
+
+static const struct clk_ops clk_smd_rpm_branch_ops = {
+ .prepare = clk_smd_rpm_prepare,
+ .unprepare = clk_smd_rpm_unprepare,
+ .round_rate = clk_smd_rpm_round_rate,
+ .recalc_rate = clk_smd_rpm_recalc_rate,
+};
+
+/* msm8916 */
+DEFINE_CLK_SMD_RPM(msm8916, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8916, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
+DEFINE_CLK_SMD_RPM(msm8916, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
+DEFINE_CLK_SMD_RPM_QDSS(msm8916, qdss_clk, qdss_a_clk, QCOM_SMD_RPM_MISC_CLK, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk1, bb_clk1_a, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk2, bb_clk2_a, 2);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, rf_clk1, rf_clk1_a, 4);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, rf_clk2, rf_clk2_a, 5);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk1_pin, bb_clk1_a_pin, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk2_pin, bb_clk2_a_pin, 2);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk1_pin, rf_clk1_a_pin, 4);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk2_pin, rf_clk2_a_pin, 5);
+
+static struct clk_smd_rpm *msm8916_clks[] = {
+ [RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk,
+ [RPM_SMD_PCNOC_A_CLK] = &msm8916_pcnoc_a_clk,
+ [RPM_SMD_SNOC_CLK] = &msm8916_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &msm8916_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &msm8916_qdss_a_clk,
+ [RPM_SMD_BB_CLK1] = &msm8916_bb_clk1,
+ [RPM_SMD_BB_CLK1_A] = &msm8916_bb_clk1_a,
+ [RPM_SMD_BB_CLK2] = &msm8916_bb_clk2,
+ [RPM_SMD_BB_CLK2_A] = &msm8916_bb_clk2_a,
+ [RPM_SMD_RF_CLK1] = &msm8916_rf_clk1,
+ [RPM_SMD_RF_CLK1_A] = &msm8916_rf_clk1_a,
+ [RPM_SMD_RF_CLK2] = &msm8916_rf_clk2,
+ [RPM_SMD_RF_CLK2_A] = &msm8916_rf_clk2_a,
+ [RPM_SMD_BB_CLK1_PIN] = &msm8916_bb_clk1_pin,
+ [RPM_SMD_BB_CLK1_A_PIN] = &msm8916_bb_clk1_a_pin,
+ [RPM_SMD_BB_CLK2_PIN] = &msm8916_bb_clk2_pin,
+ [RPM_SMD_BB_CLK2_A_PIN] = &msm8916_bb_clk2_a_pin,
+ [RPM_SMD_RF_CLK1_PIN] = &msm8916_rf_clk1_pin,
+ [RPM_SMD_RF_CLK1_A_PIN] = &msm8916_rf_clk1_a_pin,
+ [RPM_SMD_RF_CLK2_PIN] = &msm8916_rf_clk2_pin,
+ [RPM_SMD_RF_CLK2_A_PIN] = &msm8916_rf_clk2_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_msm8916 = {
+ .clks = msm8916_clks,
+ .num_clks = ARRAY_SIZE(msm8916_clks),
+};
+
+/* msm8996 */
+DEFINE_CLK_SMD_RPM(msm8996, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8996, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
+DEFINE_CLK_SMD_RPM(msm8996, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
+DEFINE_CLK_SMD_RPM(msm8996, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8996, mmssnoc_axi_rpm_clk, mmssnoc_axi_rpm_a_clk,
+ QCOM_SMD_RPM_MMAXI_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8996, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0);
+DEFINE_CLK_SMD_RPM(msm8996, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0);
+DEFINE_CLK_SMD_RPM_BRANCH(msm8996, cxo, cxo_a, QCOM_SMD_RPM_MISC_CLK, 0, 19200000);
+DEFINE_CLK_SMD_RPM_BRANCH(msm8996, aggre1_noc_clk, aggre1_noc_a_clk,
+ QCOM_SMD_RPM_AGGR_CLK, 0, 1000);
+DEFINE_CLK_SMD_RPM_BRANCH(msm8996, aggre2_noc_clk, aggre2_noc_a_clk,
+ QCOM_SMD_RPM_AGGR_CLK, 1, 1000);
+DEFINE_CLK_SMD_RPM_QDSS(msm8996, qdss_clk, qdss_a_clk, QCOM_SMD_RPM_MISC_CLK, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, bb_clk1, bb_clk1_a, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, bb_clk2, bb_clk2_a, 2);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, rf_clk1, rf_clk1_a, 4);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, rf_clk2, rf_clk2_a, 5);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, ln_bb_clk, ln_bb_a_clk, 8);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, div_clk1, div_clk1_ao, 0xb);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, div_clk2, div_clk2_ao, 0xc);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8996, div_clk3, div_clk3_ao, 0xd); /* was 0xc: duplicated div_clk2's RPM resource id */
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, bb_clk1_pin, bb_clk1_a_pin, 1);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, bb_clk2_pin, bb_clk2_a_pin, 2);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, rf_clk1_pin, rf_clk1_a_pin, 4);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8996, rf_clk2_pin, rf_clk2_a_pin, 5);
+
+static struct clk_smd_rpm *msm8996_clks[] = {
+ [RPM_SMD_XO_CLK_SRC] = &msm8996_cxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &msm8996_cxo_a,
+ [RPM_AGGR1_NOC_CLK] = &msm8996_aggre1_noc_clk,
+ [RPM_AGGR1_NOC_A_CLK] = &msm8996_aggre1_noc_a_clk,
+ [RPM_AGGR2_NOC_CLK] = &msm8996_aggre2_noc_clk,
+ [RPM_AGGR2_NOC_A_CLK] = &msm8996_aggre2_noc_a_clk,
+ [RPM_SMD_PCNOC_CLK] = &msm8996_pcnoc_clk,
+ [RPM_SMD_PCNOC_A_CLK] = &msm8996_pcnoc_a_clk,
+ [RPM_SMD_SNOC_CLK] = &msm8996_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &msm8996_snoc_a_clk,
+ [RPM_CNOC_CLK] = &msm8996_cnoc_clk,
+ [RPM_CNOC_A_CLK] = &msm8996_cnoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &msm8996_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &msm8996_bimc_a_clk,
+ [RPM_MMAXI_CLK] = &msm8996_mmssnoc_axi_rpm_clk,
+ [RPM_MMAXI_A_CLK] = &msm8996_mmssnoc_axi_rpm_a_clk,
+ [RPM_IPA_CLK] = &msm8996_ipa_clk,
+ [RPM_IPA_A_CLK] = &msm8996_ipa_a_clk,
+ [RPM_CE1_CLK] = &msm8996_ce1_clk,
+ [RPM_CE1_A_CLK] = &msm8996_ce1_a_clk,
+ [RPM_SMD_QDSS_CLK] = &msm8996_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &msm8996_qdss_a_clk,
+ [RPM_LN_BB_CLK] = &msm8996_ln_bb_clk,
+ [RPM_LN_BB_A_CLK] = &msm8996_ln_bb_a_clk,
+ [RPM_DIV_CLK1] = &msm8996_div_clk1,
+ [RPM_DIV_CLK1_AO] = &msm8996_div_clk1_ao,
+ [RPM_DIV_CLK2] = &msm8996_div_clk2,
+ [RPM_DIV_CLK2_AO] = &msm8996_div_clk2_ao,
+ [RPM_DIV_CLK3] = &msm8996_div_clk3,
+ [RPM_DIV_CLK3_AO] = &msm8996_div_clk3_ao,
+ [RPM_BB_CLK1_PIN] = &msm8996_bb_clk1_pin,
+ [RPM_BB_CLK1_A_PIN] = &msm8996_bb_clk1_a_pin,
+ [RPM_BB_CLK2_PIN] = &msm8996_bb_clk2_pin,
+ [RPM_BB_CLK2_A_PIN] = &msm8996_bb_clk2_a_pin,
+ [RPM_RF_CLK1_PIN] = &msm8996_rf_clk1_pin,
+ [RPM_RF_CLK1_A_PIN] = &msm8996_rf_clk1_a_pin,
+ [RPM_RF_CLK2_PIN] = &msm8996_rf_clk2_pin,
+ [RPM_RF_CLK2_A_PIN] = &msm8996_rf_clk2_a_pin,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_msm8996 = {
+ .clks = msm8996_clks,
+ .num_clks = ARRAY_SIZE(msm8996_clks),
+};
+
+static const struct of_device_id rpm_smd_clk_match_table[] = {
+ { .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916},
+ { .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996},
+ { }
+};
+MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table);
+
+static int rpm_smd_clk_probe(struct platform_device *pdev) /* register the matched SoC's SMD-RPM clocks as a onecell provider */
+{
+ struct clk **clks;
+ struct clk *clk;
+ struct rpm_cc *rcc;
+ struct clk_onecell_data *data;
+ int ret;
+ size_t num_clks, i;
+ struct qcom_smd_rpm *rpm;
+ struct clk_smd_rpm **rpm_smd_clks;
+ const struct rpm_smd_clk_desc *desc;
+
+ rpm = dev_get_drvdata(pdev->dev.parent); /* SMD RPM handle is provided by the parent device */
+ if (!rpm) {
+ dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
+ return -ENODEV;
+ }
+
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ rpm_smd_clks = desc->clks;
+ num_clks = desc->num_clks;
+
+ rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc) + sizeof(*clks) * num_clks, /* one allocation: header + clks[] flexible array */
+ GFP_KERNEL);
+ if (!rcc)
+ return -ENOMEM;
+
+ clks = rcc->clks;
+ data = &rcc->data;
+ data->clks = clks;
+ data->clk_num = num_clks;
+
+ for (i = 0; i < num_clks; i++) {
+ if (!rpm_smd_clks[i]) { /* sparse table: hole in the clock IDs */
+ clks[i] = ERR_PTR(-ENOENT);
+ continue;
+ }
+
+ rpm_smd_clks[i]->rpm = rpm;
+
+ ret = clk_smd_rpm_handoff(rpm_smd_clks[i]); /* initial INT_MAX vote (see clk_smd_rpm_handoff()) */
+ if (ret)
+ goto err;
+ }
+
+ ret = clk_smd_rpm_enable_scaling(rpm);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < num_clks; i++) {
+ if (!rpm_smd_clks[i]) {
+ clks[i] = ERR_PTR(-ENOENT);
+ continue;
+ }
+
+ clk = devm_clk_register(&pdev->dev, &rpm_smd_clks[i]->hw); /* devm: no manual unregister needed */
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto err;
+ }
+
+ clks[i] = clk;
+ }
+
+ ret = of_clk_add_provider(pdev->dev.of_node, of_clk_src_onecell_get,
+ data);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_err(&pdev->dev, "Error registering SMD clock driver (%d)\n", ret);
+ return ret;
+}
+
+static int rpm_smd_clk_remove(struct platform_device *pdev) /* undo of_clk_add_provider(); clocks themselves are devm-managed */
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ return 0;
+}
+
+static struct platform_driver rpm_smd_clk_driver = {
+ .driver = {
+ .name = "qcom-clk-smd-rpm",
+ .of_match_table = rpm_smd_clk_match_table,
+ },
+ .probe = rpm_smd_clk_probe,
+ .remove = rpm_smd_clk_remove,
+};
+
+static int __init rpm_smd_clk_init(void)
+{
+ return platform_driver_register(&rpm_smd_clk_driver);
+}
+core_initcall(rpm_smd_clk_init);
+
+static void __exit rpm_smd_clk_exit(void)
+{
+ platform_driver_unregister(&rpm_smd_clk_driver);
+}
+module_exit(rpm_smd_clk_exit);
+
+MODULE_DESCRIPTION("Qualcomm RPM over SMD Clock Controller Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-clk-smd-rpm");
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index 52a7d3959875b..c3848c19788ef 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -30,6 +30,7 @@
#include "clk-pll.h"
#include "clk-rcg.h"
#include "clk-branch.h"
+#include "clk-hfpll.h"
#include "reset.h"
static struct clk_pll pll0 = {
@@ -113,6 +114,85 @@ static struct clk_regmap pll8_vote = {
},
};
+static struct hfpll_data hfpll0_data = {
+ .mode_reg = 0x3200,
+ .l_reg = 0x3208,
+ .m_reg = 0x320c,
+ .n_reg = 0x3210,
+ .config_reg = 0x3204,
+ .status_reg = 0x321c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3214,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll0 = {
+ .d = &hfpll0_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll0",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll0.lock),
+};
+
+static struct hfpll_data hfpll1_data = { /* register bank at 0x3240 */
+ .mode_reg = 0x3240,
+ .l_reg = 0x3248,
+ .m_reg = 0x324c,
+ .n_reg = 0x3250,
+ .config_reg = 0x3244,
+ .status_reg = 0x325c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3314, /* NOTE(review): 0x3314 is in the hfpll_l2 bank (0x3300); 0x3254 would match this bank's layout — confirm against the register map */
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll1 = {
+ .d = &hfpll1_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll1",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll1.lock),
+};
+
+static struct hfpll_data hfpll_l2_data = {
+ .mode_reg = 0x3300,
+ .l_reg = 0x3308,
+ .m_reg = 0x330c,
+ .n_reg = 0x3310,
+ .config_reg = 0x3304,
+ .status_reg = 0x331c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3314,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll_l2 = {
+ .d = &hfpll_l2_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll_l2",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll_l2.lock),
+};
+
+
static struct clk_pll pll14 = {
.l_reg = 0x31c4,
.m_reg = 0x31c8,
@@ -2800,6 +2880,9 @@ static struct clk_regmap *gcc_ipq806x_clks[] = {
[UBI32_CORE2_CLK_SRC] = &ubi32_core2_src_clk.clkr,
[NSSTCM_CLK_SRC] = &nss_tcm_src.clkr,
[NSSTCM_CLK] = &nss_tcm_clk.clkr,
+ [PLL9] = &hfpll0.clkr,
+ [PLL10] = &hfpll1.clkr,
+ [PLL12] = &hfpll_l2.clkr,
};
static const struct qcom_reset_map gcc_ipq806x_resets[] = {
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index 9c29080a84d83..db9f6bdaec4c7 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -2598,7 +2598,7 @@ static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_mss_q6_bimc_axi_clk",
.parent_names = (const char *[]){
- "bimc_ddr_clk_src",
+ "pcnoc_bfdcd_clk_src",
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -3081,6 +3081,7 @@ static struct gdsc oxili_gdsc = {
.name = "oxili",
},
.pwrsts = PWRSTS_OFF_ON,
+ .root_clock = GFX3D_CLK_SRC,
};
static struct clk_regmap *gcc_msm8916_clocks[] = {
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index eb551c75fba6a..c34484e445693 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -30,6 +30,7 @@
#include "clk-pll.h"
#include "clk-rcg.h"
#include "clk-branch.h"
+#include "clk-hfpll.h"
#include "reset.h"
static struct clk_pll pll3 = {
@@ -86,6 +87,164 @@ static struct clk_regmap pll8_vote = {
},
};
+static struct hfpll_data hfpll0_data = {
+ .mode_reg = 0x3200,
+ .l_reg = 0x3208,
+ .m_reg = 0x320c,
+ .n_reg = 0x3210,
+ .config_reg = 0x3204,
+ .status_reg = 0x321c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3214,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll0 = {
+ .d = &hfpll0_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll0",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll0.lock),
+};
+
+static struct hfpll_data hfpll1_8064_data = {
+ .mode_reg = 0x3240,
+ .l_reg = 0x3248,
+ .m_reg = 0x324c,
+ .n_reg = 0x3250,
+ .config_reg = 0x3244,
+ .status_reg = 0x325c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3254,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct hfpll_data hfpll1_data = {
+ .mode_reg = 0x3300,
+ .l_reg = 0x3308,
+ .m_reg = 0x330c,
+ .n_reg = 0x3310,
+ .config_reg = 0x3304,
+ .status_reg = 0x331c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3314,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll1 = {
+ .d = &hfpll1_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll1",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll1.lock),
+};
+
+static struct hfpll_data hfpll2_data = {
+ .mode_reg = 0x3280,
+ .l_reg = 0x3288,
+ .m_reg = 0x328c,
+ .n_reg = 0x3290,
+ .config_reg = 0x3284,
+ .status_reg = 0x329c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3294,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll2 = {
+ .d = &hfpll2_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll2",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll2.lock),
+};
+
+static struct hfpll_data hfpll3_data = {
+ .mode_reg = 0x32c0,
+ .l_reg = 0x32c8,
+ .m_reg = 0x32cc,
+ .n_reg = 0x32d0,
+ .config_reg = 0x32c4,
+ .status_reg = 0x32dc,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x32d4,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll3 = {
+ .d = &hfpll3_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll3",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll3.lock),
+};
+
+static struct hfpll_data hfpll_l2_8064_data = {
+ .mode_reg = 0x3300,
+ .l_reg = 0x3308,
+ .m_reg = 0x330c,
+ .n_reg = 0x3310,
+ .config_reg = 0x3304,
+ .status_reg = 0x331c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3314,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct hfpll_data hfpll_l2_data = {
+ .mode_reg = 0x3400,
+ .l_reg = 0x3408,
+ .m_reg = 0x340c,
+ .n_reg = 0x3410,
+ .config_reg = 0x3404,
+ .status_reg = 0x341c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3414,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll_l2 = {
+ .d = &hfpll_l2_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll_l2",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll_l2.lock),
+};
+
static struct clk_pll pll14 = {
.l_reg = 0x31c4,
.m_reg = 0x31c8,
@@ -3112,6 +3271,9 @@ static struct clk_regmap *gcc_msm8960_clks[] = {
[PMIC_ARB1_H_CLK] = &pmic_arb1_h_clk.clkr,
[PMIC_SSBI2_CLK] = &pmic_ssbi2_clk.clkr,
[RPM_MSG_RAM_H_CLK] = &rpm_msg_ram_h_clk.clkr,
+ [PLL9] = &hfpll0.clkr,
+ [PLL10] = &hfpll1.clkr,
+ [PLL12] = &hfpll_l2.clkr,
};
static const struct qcom_reset_map gcc_msm8960_resets[] = {
@@ -3323,6 +3485,11 @@ static struct clk_regmap *gcc_apq8064_clks[] = {
[PMIC_ARB1_H_CLK] = &pmic_arb1_h_clk.clkr,
[PMIC_SSBI2_CLK] = &pmic_ssbi2_clk.clkr,
[RPM_MSG_RAM_H_CLK] = &rpm_msg_ram_h_clk.clkr,
+ [PLL9] = &hfpll0.clkr,
+ [PLL10] = &hfpll1.clkr,
+ [PLL12] = &hfpll_l2.clkr,
+ [PLL16] = &hfpll2.clkr,
+ [PLL17] = &hfpll3.clkr,
};
static const struct qcom_reset_map gcc_apq8064_resets[] = {
@@ -3470,6 +3637,11 @@ static int gcc_msm8960_probe(struct platform_device *pdev)
if (!match)
return -EINVAL;
+ if (match->data == &gcc_apq8064_desc) {
+ hfpll1.d = &hfpll1_8064_data;
+ hfpll_l2.d = &hfpll_l2_8064_data;
+ }
+
ret = qcom_cc_register_board_clk(dev, "cxo_board", "cxo", 19200000);
if (ret)
return ret;
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index f12d7b2bddd70..2521d21138c24 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -12,11 +12,14 @@
*/
#include <linux/bitops.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
@@ -148,6 +151,9 @@ static int gdsc_enable(struct generic_pm_domain *domain)
if (sc->pwrsts == PWRSTS_ON)
return gdsc_deassert_reset(sc);
+ if (sc->root_clk)
+ clk_prepare_enable(sc->root_clk);
+
ret = gdsc_toggle_logic(sc, true);
if (ret)
return ret;
@@ -169,6 +175,7 @@ static int gdsc_enable(struct generic_pm_domain *domain)
static int gdsc_disable(struct generic_pm_domain *domain)
{
+ int ret;
struct gdsc *sc = domain_to_gdsc(domain);
if (sc->pwrsts == PWRSTS_ON)
@@ -177,9 +184,68 @@ static int gdsc_disable(struct generic_pm_domain *domain)
if (sc->pwrsts & PWRSTS_OFF)
gdsc_clear_mem_on(sc);
- return gdsc_toggle_logic(sc, false);
+ ret = gdsc_toggle_logic(sc, false);
+
+ if (sc->root_clk)
+ clk_disable_unprepare(sc->root_clk);
+
+ return ret;
}
+static inline bool match(unsigned int id, unsigned int *ids, unsigned int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ if (id == ids[i])
+ return true;
+ return false;
+}
+
+static int gdsc_attach(struct generic_pm_domain *domain, struct device *dev)
+{
+ int ret, i = 0, j = 0;
+ struct gdsc *sc = domain_to_gdsc(domain);
+ struct of_phandle_args clkspec;
+ struct device_node *np = dev->of_node;
+
+ if (!sc->clock_count)
+ return 0;
+
+ ret = pm_clk_create(dev);
+ if (ret) {
+ dev_dbg(dev, "pm_clk_create failed %d\n", ret);
+ return ret;
+ }
+
+ sc->clks = devm_kcalloc(dev, sc->clock_count, sizeof(sc->clks),
+ GFP_KERNEL);
+ if (!sc->clks)
+ return -ENOMEM;
+
+ while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
+ &clkspec)) {
+ if (match(clkspec.args[0], sc->clocks, sc->clock_count)) {
+ sc->clks[j] = of_clk_get_from_provider(&clkspec);
+ pm_clk_add_clk(dev, sc->clks[j]);
+ j++;
+ } else if (clkspec.args[0] == sc->root_clock)
+ sc->root_clk = of_clk_get_from_provider(&clkspec);
+ i++;
+ }
+ return 0;
+};
+
+static void gdsc_detach(struct generic_pm_domain *domain, struct device *dev)
+{
+ struct gdsc *sc = domain_to_gdsc(domain);
+
+ if (!sc->clock_count)
+ return;
+
+ pm_clk_destroy(dev);
+};
+
static int gdsc_init(struct gdsc *sc)
{
u32 mask, val;
@@ -224,6 +290,9 @@ static int gdsc_init(struct gdsc *sc)
sc->pd.power_off = gdsc_disable;
sc->pd.power_on = gdsc_enable;
+ sc->pd.attach_dev = gdsc_attach;
+ sc->pd.detach_dev = gdsc_detach;
+ sc->pd.flags = GENPD_FLAG_PM_CLK;
pm_genpd_init(&sc->pd, NULL, !on);
return 0;
@@ -286,3 +355,76 @@ void gdsc_unregister(struct gdsc_desc *desc)
}
of_genpd_del_provider(dev->of_node);
}
+
+#ifndef CONFIG_PM
+static void enable_clock(struct of_phandle_args *clkspec)
+{
+ struct clk *clk;
+
+ clk = of_clk_get_from_provider(clkspec);
+ if (!IS_ERR(clk)) {
+ clk_prepare_enable(clk);
+ clk_put(clk);
+ }
+}
+
+static void disable_clock(struct of_phandle_args *clkspec)
+{
+ struct clk *clk;
+
+ clk = of_clk_get_from_provider(clkspec);
+ if (!IS_ERR(clk)) {
+ clk_disable_unprepare(clk);
+ clk_put(clk);
+ }
+}
+
+static int clk_notify(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ int sz, i = 0;
+ struct device *dev = data;
+ struct gdsc_notifier_block *gdsc_nb;
+ struct of_phandle_args clkspec;
+ struct device_node *np = dev->of_node;
+
+ if (!of_find_property(dev->of_node, "power-domains", &sz))
+ return 0;
+
+ gdsc_nb = container_of(nb, struct gdsc_notifier_block, nb);
+
+ if (!gdsc_nb->clock_count)
+ return 0;
+
+ switch (action) {
+ case BUS_NOTIFY_BIND_DRIVER:
+ while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells",
+ i, &clkspec)) {
+ if (match(clkspec.args[0], gdsc_nb->clocks,
+ gdsc_nb->clock_count))
+ enable_clock(&clkspec);
+ i++;
+ }
+ break;
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+ while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells",
+ i, &clkspec)) {
+ if (match(clkspec.args[0], gdsc_nb->clocks,
+ gdsc_nb->clock_count))
+ disable_clock(&clkspec);
+ i++;
+ }
+ break;
+ }
+ return 0;
+}
+
+void qcom_pm_add_notifier(struct gdsc_notifier_block *gdsc_nb)
+{
+ if (!gdsc_nb)
+ return;
+
+ gdsc_nb->nb.notifier_call = clk_notify,
+ bus_register_notifier(&platform_bus_type, &gdsc_nb->nb);
+}
+#endif
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
index 3bf497c36bdf8..cc52809bb5eea 100644
--- a/drivers/clk/qcom/gdsc.h
+++ b/drivers/clk/qcom/gdsc.h
@@ -17,6 +17,7 @@
#include <linux/err.h>
#include <linux/pm_domain.h>
+struct clk;
struct regmap;
struct reset_controller_dev;
@@ -32,6 +33,11 @@ struct reset_controller_dev;
* @resets: ids of resets associated with this gdsc
* @reset_count: number of @resets
* @rcdev: reset controller
+ * @clocks: ids of clocks associated with the gdsc
+ * @clock_count: number of @clocks
+ * @clks: clock pointers to gdsc clocks
+ * @root_clock: id of the root clock to be enabled
+ * @root_clk: root clk pointer
*/
struct gdsc {
struct generic_pm_domain pd;
@@ -53,6 +59,11 @@ struct gdsc {
struct reset_controller_dev *rcdev;
unsigned int *resets;
unsigned int reset_count;
+ unsigned int *clocks;
+ unsigned int clock_count;
+ struct clk **clks;
+ unsigned int root_clock;
+ struct clk *root_clk;
};
struct gdsc_desc {
@@ -75,4 +86,12 @@ static inline int gdsc_register(struct gdsc_desc *desc,
static inline void gdsc_unregister(struct gdsc_desc *desc) {};
#endif /* CONFIG_QCOM_GDSC */
+#ifndef CONFIG_PM
+struct gdsc_notifier_block {
+ struct notifier_block nb;
+ unsigned int *clocks;
+ unsigned int clock_count;
+};
+void qcom_pm_add_notifier(struct gdsc_notifier_block *);
+#endif /* !CONFIG_PM */
#endif /* __QCOM_GDSC_H__ */
diff --git a/drivers/clk/qcom/hfpll.c b/drivers/clk/qcom/hfpll.c
new file mode 100644
index 0000000000000..1492f4c79c35b
--- /dev/null
+++ b/drivers/clk/qcom/hfpll.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include "clk-regmap.h"
+#include "clk-hfpll.h"
+
+static const struct hfpll_data hdata = {
+ .mode_reg = 0x00,
+ .l_reg = 0x04,
+ .m_reg = 0x08,
+ .n_reg = 0x0c,
+ .user_reg = 0x10,
+ .config_reg = 0x14,
+ .config_val = 0x430405d,
+ .status_reg = 0x1c,
+ .lock_bit = 16,
+
+ .user_val = 0x8,
+ .user_vco_mask = 0x100000,
+ .low_vco_max_rate = 1248000000,
+ .min_rate = 537600000UL,
+ .max_rate = 2900000000UL,
+};
+
+static const struct of_device_id qcom_hfpll_match_table[] = {
+ { .compatible = "qcom,hfpll" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_hfpll_match_table);
+
+static const struct regmap_config hfpll_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x30,
+ .fast_io = true,
+};
+
+static int qcom_hfpll_probe(struct platform_device *pdev)
+{
+ struct clk *clk;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+ struct regmap *regmap;
+ struct clk_hfpll *h;
+ struct clk_init_data init = {
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_ops_hfpll,
+ };
+
+ h = devm_kzalloc(dev, sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(&pdev->dev, base, &hfpll_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ if (of_property_read_string_index(dev->of_node, "clock-output-names",
+ 0, &init.name))
+ return -ENODEV;
+
+ h->d = &hdata;
+ h->clkr.hw.init = &init;
+ spin_lock_init(&h->lock);
+
+ clk = devm_clk_register_regmap(&pdev->dev, &h->clkr);
+
+ return PTR_ERR_OR_ZERO(clk);
+}
+
+static struct platform_driver qcom_hfpll_driver = {
+ .probe = qcom_hfpll_probe,
+ .driver = {
+ .name = "qcom-hfpll",
+ .of_match_table = qcom_hfpll_match_table,
+ },
+};
+module_platform_driver(qcom_hfpll_driver);
+
+MODULE_DESCRIPTION("QCOM HFPLL Clock Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-hfpll");
diff --git a/drivers/clk/qcom/kpss-xcc.c b/drivers/clk/qcom/kpss-xcc.c
new file mode 100644
index 0000000000000..abf6bfd053c11
--- /dev/null
+++ b/drivers/clk/qcom/kpss-xcc.c
@@ -0,0 +1,95 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
+static const char *aux_parents[] = {
+ "pll8_vote",
+ "pxo",
+};
+
+static unsigned int aux_parent_map[] = {
+ 3,
+ 0,
+};
+
+static const struct of_device_id kpss_xcc_match_table[] = {
+ { .compatible = "qcom,kpss-acc-v1", .data = (void *)1UL },
+ { .compatible = "qcom,kpss-gcc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, kpss_xcc_match_table);
+
+static int kpss_xcc_driver_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *id;
+ struct clk *clk;
+ struct resource *res;
+ void __iomem *base;
+ const char *name;
+
+ id = of_match_device(kpss_xcc_match_table, &pdev->dev);
+ if (!id)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ if (id->data) {
+ if (of_property_read_string_index(pdev->dev.of_node,
+ "clock-output-names", 0, &name))
+ return -ENODEV;
+ base += 0x14;
+ } else {
+ name = "acpu_l2_aux";
+ base += 0x28;
+ }
+
+ clk = clk_register_mux_table(&pdev->dev, name, aux_parents,
+ ARRAY_SIZE(aux_parents), 0, base, 0, 0x3,
+ 0, aux_parent_map, NULL);
+
+ platform_set_drvdata(pdev, clk);
+
+ return PTR_ERR_OR_ZERO(clk);
+}
+
+static int kpss_xcc_driver_remove(struct platform_device *pdev)
+{
+ clk_unregister_mux(platform_get_drvdata(pdev));
+ return 0;
+}
+
+static struct platform_driver kpss_xcc_driver = {
+ .probe = kpss_xcc_driver_probe,
+ .remove = kpss_xcc_driver_remove,
+ .driver = {
+ .name = "kpss-xcc",
+ .of_match_table = kpss_xcc_match_table,
+ },
+};
+module_platform_driver(kpss_xcc_driver);
+
+MODULE_DESCRIPTION("Krait Processor Sub System (KPSS) Clock Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:kpss-xcc");
diff --git a/drivers/clk/qcom/krait-cc.c b/drivers/clk/qcom/krait-cc.c
new file mode 100644
index 0000000000000..5dc4d811b86e8
--- /dev/null
+++ b/drivers/clk/qcom/krait-cc.c
@@ -0,0 +1,363 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+
+#include "clk-krait.h"
+
+static unsigned int sec_mux_map[] = {
+ 2,
+ 0,
+};
+
+static unsigned int pri_mux_map[] = {
+ 1,
+ 2,
+ 0,
+};
+
+static int
+krait_add_div(struct device *dev, int id, const char *s, unsigned offset)
+{
+ struct krait_div2_clk *div;
+ struct clk_init_data init = {
+ .num_parents = 1,
+ .ops = &krait_div2_clk_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ };
+ const char *p_names[1];
+ struct clk *clk;
+
+ div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return -ENOMEM;
+
+ div->width = 2;
+ div->shift = 6;
+ div->lpl = id >= 0;
+ div->offset = offset;
+ div->hw.init = &init;
+
+ init.name = kasprintf(GFP_KERNEL, "hfpll%s_div", s);
+ if (!init.name)
+ return -ENOMEM;
+
+ init.parent_names = p_names;
+ p_names[0] = kasprintf(GFP_KERNEL, "hfpll%s", s);
+ if (!p_names[0]) {
+ kfree(init.name);
+ return -ENOMEM;
+ }
+
+ clk = devm_clk_register(dev, &div->hw);
+ kfree(p_names[0]);
+ kfree(init.name);
+
+ return PTR_ERR_OR_ZERO(clk);
+}
+
+static int
+krait_add_sec_mux(struct device *dev, int id, const char *s, unsigned offset,
+ bool unique_aux)
+{
+ struct krait_mux_clk *mux;
+ static const char *sec_mux_list[] = {
+ "acpu_aux",
+ "qsb",
+ };
+ struct clk_init_data init = {
+ .parent_names = sec_mux_list,
+ .num_parents = ARRAY_SIZE(sec_mux_list),
+ .ops = &krait_mux_clk_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ };
+ struct clk *clk;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return -ENOMEM;
+
+ mux->offset = offset;
+ mux->lpl = id >= 0;
+ mux->has_safe_parent = true;
+ mux->safe_sel = 2;
+ mux->mask = 0x3;
+ mux->shift = 2;
+ mux->parent_map = sec_mux_map;
+ mux->hw.init = &init;
+
+ init.name = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s);
+ if (!init.name)
+ return -ENOMEM;
+
+ if (unique_aux) {
+ sec_mux_list[0] = kasprintf(GFP_KERNEL, "acpu%s_aux", s);
+ if (!sec_mux_list[0]) {
+ clk = ERR_PTR(-ENOMEM);
+ goto err_aux;
+ }
+ }
+
+ clk = devm_clk_register(dev, &mux->hw);
+
+ if (unique_aux)
+ kfree(sec_mux_list[0]);
+err_aux:
+ kfree(init.name);
+ return PTR_ERR_OR_ZERO(clk);
+}
+
+static struct clk *
+krait_add_pri_mux(struct device *dev, int id, const char *s, unsigned offset)
+{
+ struct krait_mux_clk *mux;
+ const char *p_names[3];
+ struct clk_init_data init = {
+ .parent_names = p_names,
+ .num_parents = ARRAY_SIZE(p_names),
+ .ops = &krait_mux_clk_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ };
+ struct clk *clk;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ mux->has_safe_parent = true;
+ mux->safe_sel = 0;
+ mux->mask = 0x3;
+ mux->shift = 0;
+ mux->offset = offset;
+ mux->lpl = id >= 0;
+ mux->parent_map = pri_mux_map;
+ mux->hw.init = &init;
+
+ init.name = kasprintf(GFP_KERNEL, "krait%s_pri_mux", s);
+ if (!init.name)
+ return ERR_PTR(-ENOMEM);
+
+ p_names[0] = kasprintf(GFP_KERNEL, "hfpll%s", s);
+ if (!p_names[0]) {
+ clk = ERR_PTR(-ENOMEM);
+ goto err_p0;
+ }
+
+ p_names[1] = kasprintf(GFP_KERNEL, "hfpll%s_div", s);
+ if (!p_names[1]) {
+ clk = ERR_PTR(-ENOMEM);
+ goto err_p1;
+ }
+
+ p_names[2] = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s);
+ if (!p_names[2]) {
+ clk = ERR_PTR(-ENOMEM);
+ goto err_p2;
+ }
+
+ clk = devm_clk_register(dev, &mux->hw);
+
+ kfree(p_names[2]);
+err_p2:
+ kfree(p_names[1]);
+err_p1:
+ kfree(p_names[0]);
+err_p0:
+ kfree(init.name);
+ return clk;
+}
+
+/* id < 0 for L2, otherwise id == physical CPU number */
+static struct clk *krait_add_clks(struct device *dev, int id, bool unique_aux)
+{
+ int ret;
+ unsigned offset;
+ void *p = NULL;
+ const char *s;
+ struct clk *clk;
+
+ if (id >= 0) {
+ offset = 0x4501 + (0x1000 * id);
+ s = p = kasprintf(GFP_KERNEL, "%d", id);
+ if (!s)
+ return ERR_PTR(-ENOMEM);
+ } else {
+ offset = 0x500;
+ s = "_l2";
+ }
+
+ ret = krait_add_div(dev, id, s, offset);
+ if (ret) {
+ clk = ERR_PTR(ret);
+ goto err;
+ }
+
+ ret = krait_add_sec_mux(dev, id, s, offset, unique_aux);
+ if (ret) {
+ clk = ERR_PTR(ret);
+ goto err;
+ }
+
+ clk = krait_add_pri_mux(dev, id, s, offset);
+err:
+ kfree(p);
+ return clk;
+}
+
+static struct clk *krait_of_get(struct of_phandle_args *clkspec, void *data)
+{
+ unsigned int idx = clkspec->args[0];
+ struct clk **clks = data;
+
+ if (idx >= 5) {
+ pr_err("%s: invalid clock index %d\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return clks[idx] ? : ERR_PTR(-ENODEV);
+}
+
+static const struct of_device_id krait_cc_match_table[] = {
+ { .compatible = "qcom,krait-cc-v1", (void *)1UL },
+ { .compatible = "qcom,krait-cc-v2" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, krait_cc_match_table);
+
+static int krait_cc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *id;
+ unsigned long cur_rate, aux_rate;
+ int cpu;
+ struct clk *clk;
+ struct clk **clks;
+ struct clk *l2_pri_mux_clk;
+
+ id = of_match_device(krait_cc_match_table, dev);
+ if (!id)
+ return -ENODEV;
+
+ /* Rate is 1 because 0 causes problems for __clk_mux_determine_rate */
+ clk = clk_register_fixed_rate(dev, "qsb", NULL, CLK_IS_ROOT, 1);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ if (!id->data) {
+ clk = clk_register_fixed_factor(dev, "acpu_aux",
+ "gpll0_vote", 0, 1, 2);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ }
+
+ /* Krait configurations have at most 4 CPUs and one L2 */
+ clks = devm_kcalloc(dev, 5, sizeof(*clks), GFP_KERNEL);
+ if (!clks)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu) {
+ clk = krait_add_clks(dev, cpu, id->data);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ clks[cpu] = clk;
+ }
+
+ l2_pri_mux_clk = krait_add_clks(dev, -1, id->data);
+ if (IS_ERR(l2_pri_mux_clk))
+ return PTR_ERR(l2_pri_mux_clk);
+ clks[4] = l2_pri_mux_clk;
+
+ /*
+ * We don't want the CPU or L2 clocks to be turned off at late init
+ * if CPUFREQ or HOTPLUG configs are disabled. So, bump up the
+ * refcount of these clocks. Any cpufreq/hotplug manager can assume
+ * that the clocks have already been prepared and enabled by the time
+ * they take over.
+ */
+ for_each_online_cpu(cpu) {
+ clk_prepare_enable(l2_pri_mux_clk);
+ WARN(clk_prepare_enable(clks[cpu]),
+ "Unable to turn on CPU%d clock", cpu);
+ }
+
+ /*
+ * Force reinit of HFPLLs and muxes to overwrite any potential
+ * incorrect configuration of HFPLLs and muxes by the bootloader.
+ * While at it, also make sure the cores are running at known rates
+ * and print the current rate.
+ *
+ * The clocks are set to aux clock rate first to make sure the
+ * secondary mux is not sourcing off of QSB. The rate is then set to
+ * two different rates to force a HFPLL reinit under all
+ * circumstances.
+ */
+ cur_rate = clk_get_rate(l2_pri_mux_clk);
+ aux_rate = 384000000;
+ if (cur_rate == 1) {
+ pr_info("L2 @ QSB rate. Forcing new rate.\n");
+ cur_rate = aux_rate;
+ }
+ clk_set_rate(l2_pri_mux_clk, aux_rate);
+ clk_set_rate(l2_pri_mux_clk, 2);
+ clk_set_rate(l2_pri_mux_clk, cur_rate);
+ pr_info("L2 @ %lu KHz\n", clk_get_rate(l2_pri_mux_clk) / 1000);
+ for_each_possible_cpu(cpu) {
+ clk = clks[cpu];
+ cur_rate = clk_get_rate(clk);
+ if (cur_rate == 1) {
+ pr_info("CPU%d @ QSB rate. Forcing new rate.\n", cpu);
+ cur_rate = aux_rate;
+ }
+ clk_set_rate(clk, aux_rate);
+ clk_set_rate(clk, 2);
+ clk_set_rate(clk, cur_rate);
+ pr_info("CPU%d @ %lu KHz\n", cpu, clk_get_rate(clk) / 1000);
+ }
+
+ of_clk_add_provider(dev->of_node, krait_of_get, clks);
+
+ return 0;
+}
+
+static struct platform_driver krait_cc_driver = {
+ .probe = krait_cc_probe,
+ .driver = {
+ .name = "krait-cc",
+ .of_match_table = krait_cc_match_table,
+ },
+};
+
+static int __init krait_cc_init(void)
+{
+ return platform_driver_register(&krait_cc_driver);
+}
+subsys_initcall(krait_cc_init);
+
+static void __exit krait_cc_exit(void)
+{
+ platform_driver_unregister(&krait_cc_driver);
+}
+module_exit(krait_cc_exit);
+
+MODULE_DESCRIPTION("Krait CPU Clock Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:krait-cc");
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index 715e7cd94125e..14b6346e857f6 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -462,18 +462,6 @@ static struct clk_rcg2 mdp_clk_src = {
},
};
-static struct clk_rcg2 gfx3d_clk_src = {
- .cmd_rcgr = 0x4000,
- .hid_width = 5,
- .parent_map = mmcc_xo_mmpll0_1_2_gpll0_map,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gfx3d_clk_src",
- .parent_names = mmcc_xo_mmpll0_1_2_gpll0,
- .num_parents = 5,
- .ops = &clk_rcg2_ops,
- },
-};
-
static struct freq_tbl ftbl_camss_jpeg_jpeg0_2_clk[] = {
F(75000000, P_GPLL0, 8, 0, 0),
F(133330000, P_GPLL0, 4.5, 0, 0),
@@ -2421,7 +2409,6 @@ static struct clk_regmap *mmcc_msm8974_clocks[] = {
[VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
[VFE1_CLK_SRC] = &vfe1_clk_src.clkr,
[MDP_CLK_SRC] = &mdp_clk_src.clkr,
- [GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
[JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
[JPEG1_CLK_SRC] = &jpeg1_clk_src.clkr,
[JPEG2_CLK_SRC] = &jpeg2_clk_src.clkr,
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 6df7ff36b4161..18220458ee6ce 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -1279,21 +1279,6 @@ static struct clk_branch mmss_misc_cxo_clk = {
},
};
-static struct clk_branch mmss_mmagic_axi_clk = {
- .halt_reg = 0x506c,
- .clkr = {
- .enable_reg = 0x506c,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "mmss_mmagic_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch mmss_mmagic_maxi_clk = {
.halt_reg = 0x5074,
.clkr = {
@@ -1579,21 +1564,6 @@ static struct clk_branch smmu_video_axi_clk = {
},
};
-static struct clk_branch mmagic_bimc_axi_clk = {
- .halt_reg = 0x5294,
- .clkr = {
- .enable_reg = 0x5294,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "mmagic_bimc_axi_clk",
- .parent_names = (const char *[]){ "axi_clk_src" },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch mmagic_bimc_noc_cfg_ahb_clk = {
.halt_reg = 0x5298,
.clkr = {
@@ -2918,6 +2888,14 @@ static struct clk_hw *mmcc_msm8996_hws[] = {
&gpll0_div.hw,
};
+static struct gdsc mmagic_bimc_gdsc = {
+ .gdscr = 0x529c,
+ .pd = {
+ .name = "mmagic_bimc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
static struct gdsc mmagic_video_gdsc = {
.gdscr = 0x119c,
.gds_hw_ctrl = 0x120c,
@@ -3121,7 +3099,6 @@ static struct clk_regmap *mmcc_msm8996_clocks[] = {
[MMSS_MMAGIC_CFG_AHB_CLK] = &mmss_mmagic_cfg_ahb_clk.clkr,
[MMSS_MISC_AHB_CLK] = &mmss_misc_ahb_clk.clkr,
[MMSS_MISC_CXO_CLK] = &mmss_misc_cxo_clk.clkr,
- [MMSS_MMAGIC_AXI_CLK] = &mmss_mmagic_axi_clk.clkr,
[MMSS_MMAGIC_MAXI_CLK] = &mmss_mmagic_maxi_clk.clkr,
[MMAGIC_CAMSS_AXI_CLK] = &mmagic_camss_axi_clk.clkr,
[MMAGIC_CAMSS_NOC_CFG_AHB_CLK] = &mmagic_camss_noc_cfg_ahb_clk.clkr,
@@ -3141,7 +3118,6 @@ static struct clk_regmap *mmcc_msm8996_clocks[] = {
[MMAGIC_VIDEO_NOC_CFG_AHB_CLK] = &mmagic_video_noc_cfg_ahb_clk.clkr,
[SMMU_VIDEO_AHB_CLK] = &smmu_video_ahb_clk.clkr,
[SMMU_VIDEO_AXI_CLK] = &smmu_video_axi_clk.clkr,
- [MMAGIC_BIMC_AXI_CLK] = &mmagic_bimc_axi_clk.clkr,
[MMAGIC_BIMC_NOC_CFG_AHB_CLK] = &mmagic_bimc_noc_cfg_ahb_clk.clkr,
[GPU_GX_GFX3D_CLK] = &gpu_gx_gfx3d_clk.clkr,
[GPU_GX_RBBMTIMER_CLK] = &gpu_gx_rbbmtimer_clk.clkr,
@@ -3233,6 +3209,7 @@ static struct clk_regmap *mmcc_msm8996_clocks[] = {
};
static struct gdsc *mmcc_msm8996_gdscs[] = {
+ [MMAGIC_BIMC_GDSC] = &mmagic_bimc_gdsc,
[MMAGIC_VIDEO_GDSC] = &mmagic_video_gdsc,
[MMAGIC_MDSS_GDSC] = &mmagic_mdss_gdsc,
[MMAGIC_CAMSS_GDSC] = &mmagic_camss_gdsc,
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 14b1f9393b057..8a99dea92021b 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -97,6 +97,15 @@ config ARM_OMAP2PLUS_CPUFREQ
depends on ARCH_OMAP2PLUS
default ARCH_OMAP2PLUS
+config ARM_QCOM_CPUFREQ
+ tristate "Qualcomm based"
+ depends on ARCH_QCOM
+ select PM_OPP
+ help
+ This adds the CPUFreq driver for Qualcomm SoC based boards.
+
+ If in doubt, say N.
+
config ARM_S3C_CPUFREQ
bool
help
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 9e63fb1b09f81..58d1ca53ba915 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -61,6 +61,7 @@ obj-$(CONFIG_ARM_MT8173_CPUFREQ) += mt8173-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
+obj-$(CONFIG_ARM_QCOM_CPUFREQ) += qcom-cpufreq.o
obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o
obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 5f8dbe640a202..1a6df11f65f34 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -30,6 +30,9 @@ struct private_data {
struct device *cpu_dev;
struct thermal_cooling_device *cdev;
const char *reg_name;
+ struct notifier_block opp_nb;
+ struct mutex lock;
+ unsigned long opp_freq;
};
static struct freq_attr *cpufreq_dt_attr[] = {
@@ -41,9 +44,46 @@ static struct freq_attr *cpufreq_dt_attr[] = {
static int set_target(struct cpufreq_policy *policy, unsigned int index)
{
struct private_data *priv = policy->driver_data;
+ int ret;
+ unsigned long target_freq = policy->freq_table[index].frequency * 1000;
+ struct clk *l2_clk = policy->l2_clk;
+ unsigned int l2_freq;
+ unsigned long new_l2_freq = 0;
+
+ mutex_lock(&priv->lock);
+ ret = dev_pm_opp_set_rate(priv->cpu_dev, target_freq);
+
+ if (!ret) {
+ if (!IS_ERR(l2_clk) && policy->l2_rate[0] && policy->l2_rate[1] &&
+ policy->l2_rate[2]) {
+ static unsigned long krait_l2[CONFIG_NR_CPUS] = { };
+ int cpu, ret = 0;
+
+ if (target_freq >= policy->l2_rate[2])
+ new_l2_freq = policy->l2_rate[2];
+ else if (target_freq >= policy->l2_rate[1])
+ new_l2_freq = policy->l2_rate[1];
+ else
+ new_l2_freq = policy->l2_rate[0];
+
+ krait_l2[policy->cpu] = new_l2_freq;
+ for_each_present_cpu(cpu)
+ new_l2_freq = max(new_l2_freq, krait_l2[cpu]);
+
+ l2_freq = clk_get_rate(l2_clk);
+
+ if (l2_freq != new_l2_freq) {
+ /* scale l2 with the core */
+ ret = clk_set_rate(l2_clk, new_l2_freq);
+ }
+ }
+
+ priv->opp_freq = target_freq;
+ }
- return dev_pm_opp_set_rate(priv->cpu_dev,
- policy->freq_table[index].frequency * 1000);
+ mutex_unlock(&priv->lock);
+
+ return ret;
}
/*
@@ -84,6 +124,42 @@ node_put:
return name;
}
+static int opp_notifier(struct notifier_block *nb, unsigned long event,
+ void *data)
+{
+ struct dev_pm_opp *opp = data;
+ struct private_data *priv = container_of(nb, struct private_data,
+ opp_nb);
+ struct device *cpu_dev = priv->cpu_dev;
+ struct regulator *cpu_reg;
+ unsigned long volt, freq;
+ int ret = 0;
+
+ if (event == OPP_EVENT_ADJUST_VOLTAGE) {
+ cpu_reg = dev_pm_opp_get_regulator(cpu_dev);
+ if (IS_ERR(cpu_reg)) {
+ ret = PTR_ERR(cpu_reg);
+ goto out;
+ }
+ rcu_read_lock();
+ volt = dev_pm_opp_get_voltage(opp);
+ freq = dev_pm_opp_get_freq(opp);
+ rcu_read_unlock();
+
+ mutex_lock(&priv->lock);
+ if (freq == priv->opp_freq) {
+ ret = regulator_set_voltage_triplet(cpu_reg, volt, volt, volt);
+
+ }
+ mutex_unlock(&priv->lock);
+ if (ret)
+ dev_err(cpu_dev, "failed to scale voltage: %d\n", ret);
+ }
+
+out:
+ return notifier_from_errno(ret);
+}
+
static int resources_available(void)
{
struct device *cpu_dev;
@@ -150,6 +226,9 @@ static int cpufreq_init(struct cpufreq_policy *policy)
bool opp_v1 = false;
const char *name;
int ret;
+ struct srcu_notifier_head *opp_srcu_head;
+ struct device_node *l2_np;
+ struct clk *l2_clk = NULL;
cpu_dev = get_cpu_device(policy->cpu);
if (!cpu_dev) {
@@ -236,12 +315,28 @@ static int cpufreq_init(struct cpufreq_policy *policy)
goto out_free_opp;
}
+ mutex_init(&priv->lock);
+
+ rcu_read_lock();
+ opp_srcu_head = dev_pm_opp_get_notifier(cpu_dev);
+ if (IS_ERR(opp_srcu_head)) {
+ ret = PTR_ERR(opp_srcu_head);
+ rcu_read_unlock();
+ goto out_free_priv;
+ }
+
+ priv->opp_nb.notifier_call = opp_notifier;
+ ret = srcu_notifier_chain_register(opp_srcu_head, &priv->opp_nb);
+ rcu_read_unlock();
+ if (ret)
+ goto out_free_priv;
+
priv->reg_name = name;
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
- dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
- goto out_free_priv;
+ pr_err("failed to init cpufreq table: %d\n", ret);
+ goto out_unregister_nb;
}
priv->cpu_dev = cpu_dev;
@@ -254,6 +349,13 @@ static int cpufreq_init(struct cpufreq_policy *policy)
policy->suspend_freq = dev_pm_opp_get_freq(suspend_opp) / 1000;
rcu_read_unlock();
+ l2_clk = clk_get(cpu_dev, "l2");
+ if (!IS_ERR(l2_clk))
+ policy->l2_clk = l2_clk;
+ l2_np = of_find_node_by_name(NULL, "qcom,l2");
+ if (l2_np)
+ of_property_read_u32_array(l2_np, "qcom,l2-rates", policy->l2_rate, 3);
+
ret = cpufreq_table_validate_and_show(policy, freq_table);
if (ret) {
dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
@@ -280,6 +382,8 @@ static int cpufreq_init(struct cpufreq_policy *policy)
out_free_cpufreq_table:
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_unregister_nb:
+ srcu_notifier_chain_unregister(opp_srcu_head, &priv->opp_nb);
out_free_priv:
kfree(priv);
out_free_opp:
diff --git a/drivers/cpufreq/qcom-cpufreq.c b/drivers/cpufreq/qcom-cpufreq.c
new file mode 100644
index 0000000000000..c9f86a062cdd0
--- /dev/null
+++ b/drivers/cpufreq/qcom-cpufreq.c
@@ -0,0 +1,204 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/cpufreq-dt.h>
+
+static void __init get_krait_bin_format_a(int *speed, int *pvs, int *pvs_ver)
+{
+ void __iomem *base;
+ u32 pte_efuse;
+
+ *speed = *pvs = *pvs_ver = 0;
+
+ base = ioremap(0x007000c0, 4);
+ if (!base) {
+ pr_warn("Unable to read efuse data. Defaulting to 0!\n");
+ return;
+ }
+
+ pte_efuse = readl_relaxed(base);
+ iounmap(base);
+
+ *speed = pte_efuse & 0xf;
+ if (*speed == 0xf)
+ *speed = (pte_efuse >> 4) & 0xf;
+
+ if (*speed == 0xf) {
+ *speed = 0;
+ pr_warn("Speed bin: Defaulting to %d\n", *speed);
+ } else {
+ pr_info("Speed bin: %d\n", *speed);
+ }
+
+ *pvs = (pte_efuse >> 10) & 0x7;
+ if (*pvs == 0x7)
+ *pvs = (pte_efuse >> 13) & 0x7;
+
+ if (*pvs == 0x7) {
+ *pvs = 0;
+ pr_warn("PVS bin: Defaulting to %d\n", *pvs);
+ } else {
+ pr_info("PVS bin: %d\n", *pvs);
+ }
+}
+
+static void __init get_krait_bin_format_b(int *speed, int *pvs, int *pvs_ver)
+{
+ u32 pte_efuse, redundant_sel;
+ void __iomem *base;
+
+ *speed = 0;
+ *pvs = 0;
+ *pvs_ver = 0;
+
+ base = ioremap(0xfc4b80b0, 8);
+ if (!base) {
+ pr_warn("Unable to read efuse data. Defaulting to 0!\n");
+ return;
+ }
+
+ pte_efuse = readl_relaxed(base);
+ redundant_sel = (pte_efuse >> 24) & 0x7;
+ *speed = pte_efuse & 0x7;
+ /* 4 bits of PVS are in efuse register bits 31, 8-6. */
+ *pvs = ((pte_efuse >> 28) & 0x8) | ((pte_efuse >> 6) & 0x7);
+ *pvs_ver = (pte_efuse >> 4) & 0x3;
+
+ switch (redundant_sel) {
+ case 1:
+ *speed = (pte_efuse >> 27) & 0xf;
+ break;
+ case 2:
+ *pvs = (pte_efuse >> 27) & 0xf;
+ break;
+ }
+
+ /* Check SPEED_BIN_BLOW_STATUS */
+ if (pte_efuse & BIT(3)) {
+ pr_info("Speed bin: %d\n", *speed);
+ } else {
+ pr_warn("Speed bin not set. Defaulting to 0!\n");
+ *speed = 0;
+ }
+
+ /* Check PVS_BLOW_STATUS */
+ pte_efuse = readl_relaxed(base + 0x4) & BIT(21);
+ if (pte_efuse) {
+ pr_info("PVS bin: %d\n", *pvs);
+ } else {
+ pr_warn("PVS bin not set. Defaulting to 0!\n");
+ *pvs = 0;
+ }
+
+ pr_info("PVS version: %d\n", *pvs_ver);
+ iounmap(base);
+}
+
+/*
+ * Read the fused speed/PVS bin, look up the matching OPP table in DT
+ * and register one OPP per row for every possible CPU.
+ */
+static int __init qcom_cpufreq_populate_opps(void)
+{
+	int len, rows, cols, i, k, speed, pvs, pvs_ver;
+	char table_name[] = "qcom,speedXX-pvsXX-bin-vXX";
+	struct device_node *np;
+	struct device *dev;
+	int cpu, ret = -ENODEV;
+
+	np = of_find_node_by_name(NULL, "qcom,pvs");
+	if (!np)
+		return -ENODEV;
+
+	if (of_property_read_bool(np, "qcom,pvs-format-a")) {
+		get_krait_bin_format_a(&speed, &pvs, &pvs_ver);
+		cols = 2;
+	} else if (of_property_read_bool(np, "qcom,pvs-format-b")) {
+		get_krait_bin_format_b(&speed, &pvs, &pvs_ver);
+		cols = 3;
+	} else {
+		goto out;	/* unknown PVS table format */
+	}
+
+	snprintf(table_name, sizeof(table_name),
+		 "qcom,speed%d-pvs%d-bin-v%d", speed, pvs, pvs_ver);
+
+	ret = -EINVAL;
+	if (!of_find_property(np, table_name, &len))
+		goto out;
+	len /= sizeof(u32);
+	if (len % cols || len == 0)
+		goto out;
+	rows = len / cols;
+
+	ret = 0;
+	for (i = 0, k = 0; i < rows; i++) {
+		u32 freq, volt;
+
+		of_property_read_u32_index(np, table_name, k++, &freq);
+		of_property_read_u32_index(np, table_name, k++, &volt);
+		while (k % cols)
+			k++;	/* Skip uA entries if present */
+		for_each_possible_cpu(cpu) {
+			dev = get_cpu_device(cpu);
+			if (!dev) {
+				ret = -ENODEV;
+				goto out;
+			}
+			if (dev_pm_opp_add(dev, freq, volt))
+				pr_warn("failed to add OPP %u\n", freq);
+		}
+	}
+out:
+	of_node_put(np);	/* drop the ref taken by of_find_node_by_name() */
+	return ret;
+}
+
+static int __init qcom_cpufreq_driver_init(void)
+{
+ struct cpufreq_dt_platform_data pdata = { .independent_clocks = true };
+ struct platform_device_info devinfo = {
+ .name = "cpufreq-dt",
+ .data = &pdata,
+ .size_data = sizeof(pdata),
+ };
+ struct device *cpu_dev;
+ struct device_node *np;
+ int ret;
+
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev)
+ return -ENODEV;
+
+ np = of_node_get(cpu_dev->of_node);
+ if (!np)
+ return -ENOENT;
+
+ if (!of_device_is_compatible(np, "qcom,krait")) {
+ of_node_put(np);
+ return -ENODEV;
+ }
+ of_node_put(np);
+
+ ret = qcom_cpufreq_populate_opps();
+ if (ret)
+ return ret;
+
+ return PTR_ERR_OR_ZERO(platform_device_register_full(&devinfo));
+}
+module_init(qcom_cpufreq_driver_init);
+
+MODULE_DESCRIPTION("Qualcomm CPUfreq driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
index 0883292f640f4..643d930c4e323 100644
--- a/drivers/firmware/qcom_scm-32.c
+++ b/drivers/firmware/qcom_scm-32.c
@@ -28,6 +28,13 @@
#include "qcom_scm.h"
+#define QCOM_SCM_ENOMEM -5
+#define QCOM_SCM_EOPNOTSUPP -4
+#define QCOM_SCM_EINVAL_ADDR -3
+#define QCOM_SCM_EINVAL_ARG -2
+#define QCOM_SCM_ERROR -1
+#define QCOM_SCM_INTERRUPTED 1
+
#define QCOM_SCM_FLAG_COLDBOOT_CPU0 0x00
#define QCOM_SCM_FLAG_COLDBOOT_CPU1 0x01
#define QCOM_SCM_FLAG_COLDBOOT_CPU2 0x08
@@ -38,6 +45,15 @@
#define QCOM_SCM_FLAG_WARMBOOT_CPU2 0x10
#define QCOM_SCM_FLAG_WARMBOOT_CPU3 0x40
+#define IOMMU_SECURE_PTBL_SIZE 3
+#define IOMMU_SECURE_PTBL_INIT 4
+#define IOMMU_SET_CP_POOL_SIZE 5
+#define IOMMU_SECURE_MAP 6
+#define IOMMU_SECURE_UNMAP 7
+#define IOMMU_SECURE_MAP2 0xb
+#define IOMMU_SECURE_MAP2_FLAT 0x12
+#define IOMMU_SECURE_UNMAP2 0xc
+
struct qcom_scm_entry {
int flag;
void *entry;
@@ -168,23 +184,6 @@ static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response
return (void *)rsp + le32_to_cpu(rsp->buf_offset);
}
-static int qcom_scm_remap_error(int err)
-{
- pr_err("qcom_scm_call failed with error code %d\n", err);
- switch (err) {
- case QCOM_SCM_ERROR:
- return -EIO;
- case QCOM_SCM_EINVAL_ADDR:
- case QCOM_SCM_EINVAL_ARG:
- return -EINVAL;
- case QCOM_SCM_EOPNOTSUPP:
- return -EOPNOTSUPP;
- case QCOM_SCM_ENOMEM:
- return -ENOMEM;
- }
- return -EINVAL;
-}
-
static u32 smc(u32 cmd_addr)
{
int context_id;
@@ -499,3 +498,314 @@ int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
return qcom_scm_call(QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP,
req, req_cnt * sizeof(*req), resp, sizeof(*resp));
}
+
+int __qcom_scm_restart_proc(u32 proc_id, int restart, u32 *resp)
+{
+	/* resp already points at the caller's response buffer */
+	return qcom_scm_call(QCOM_SCM_SVC_PIL, proc_id,
+			&restart, sizeof(restart),
+			resp, sizeof(*resp));
+}
+
+bool __qcom_scm_pas_supported(u32 peripheral)
+{
+ __le32 out;
+ __le32 in;
+ int ret;
+
+ in = cpu_to_le32(peripheral);
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_IS_SUPPORTED_CMD,
+ &in, sizeof(in),
+ &out, sizeof(out));
+
+ return ret ? false : !!out;
+}
+
+int __qcom_scm_pas_init_image(u32 peripheral, dma_addr_t metadata_phys)
+{
+ __le32 scm_ret;
+ int ret;
+ struct {
+ __le32 proc;
+ __le32 image_addr;
+ } request;
+
+ request.proc = cpu_to_le32(peripheral);
+ request.image_addr = cpu_to_le32(metadata_phys);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_INIT_IMAGE_CMD,
+ &request, sizeof(request),
+ &scm_ret, sizeof(scm_ret));
+
+ return ret ? : le32_to_cpu(scm_ret);
+}
+
+int __qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
+{
+ __le32 scm_ret;
+ int ret;
+ struct {
+ __le32 proc;
+ __le32 addr;
+ __le32 len;
+ } request;
+
+ request.proc = cpu_to_le32(peripheral);
+ request.addr = cpu_to_le32(addr);
+ request.len = cpu_to_le32(size);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MEM_SETUP_CMD,
+ &request, sizeof(request),
+ &scm_ret, sizeof(scm_ret));
+
+ return ret ? : le32_to_cpu(scm_ret);
+}
+
+int __qcom_scm_pas_auth_and_reset(u32 peripheral)
+{
+ __le32 out;
+ __le32 in;
+ int ret;
+
+ in = cpu_to_le32(peripheral);
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
+ &in, sizeof(in),
+ &out, sizeof(out));
+
+ return ret ? : le32_to_cpu(out);
+}
+
+int __qcom_scm_pas_shutdown(u32 peripheral)
+{
+ __le32 out;
+ __le32 in;
+ int ret;
+
+ in = cpu_to_le32(peripheral);
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_SHUTDOWN_CMD,
+ &in, sizeof(in),
+ &out, sizeof(out));
+
+ return ret ? : le32_to_cpu(out);
+}
+
+#define SCM_SVC_UTIL 0x3
+#define SCM_SVC_MP 0xc
+#define IOMMU_DUMP_SMMU_FAULT_REGS 0x0c
+
+int __qcom_scm_iommu_dump_fault_regs(u32 id, u32 context, u64 addr, u32 len)
+{
+	/* Fill the request from the arguments; it was previously sent uninitialized */
+	struct {
+		u32 id;
+		u32 cb_num;
+		u32 buff;
+		u32 len;
+	} req = { .id = id, .cb_num = context, .buff = addr, .len = len };
+	int resp = 0;
+
+	return qcom_scm_call(SCM_SVC_UTIL, IOMMU_DUMP_SMMU_FAULT_REGS,
+			&req, sizeof(req), &resp, 1);
+}
+
+int __qcom_scm_iommu_set_cp_pool_size(u32 size, u32 spare)
+{
+ struct {
+ u32 size;
+ u32 spare;
+ } req;
+ int retval;
+
+ req.size = size;
+ req.spare = spare;
+
+ return qcom_scm_call(SCM_SVC_MP, IOMMU_SET_CP_POOL_SIZE,
+ &req, sizeof(req), &retval, sizeof(retval));
+}
+
+int __qcom_scm_iommu_secure_ptbl_size(u32 spare, int psize[2])
+{
+	struct {
+		u32 spare;
+	} req;
+
+	req.spare = spare;
+	/* psize decays to a pointer here; sizeof(psize) would be sizeof(int *) */
+	return qcom_scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_SIZE, &req,
+			sizeof(req), psize, 2 * sizeof(*psize));
+}
+
+int __qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
+{
+ struct {
+ u32 addr;
+ u32 size;
+ u32 spare;
+ } req = {0};
+ int ret, ptbl_ret = 0;
+
+ req.addr = addr;
+ req.size = size;
+ req.spare = spare;
+
+ ret = qcom_scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_INIT, &req,
+ sizeof(req), &ptbl_ret, sizeof(ptbl_ret));
+
+ if (ret)
+ return ret;
+
+ if (ptbl_ret)
+ return ptbl_ret;
+
+ return 0;
+}
+
+int __qcom_scm_iommu_secure_map(u64 list, u32 list_size, u32 size,
+ u32 id, u32 ctx_id, u64 va, u32 info_size,
+ u32 flags)
+{
+ struct {
+ struct {
+ unsigned int list;
+ unsigned int list_size;
+ unsigned int size;
+ } plist;
+ struct {
+ unsigned int id;
+ unsigned int ctx_id;
+ unsigned int va;
+ unsigned int size;
+ } info;
+ unsigned int flags;
+ } req;
+ u32 resp;
+ int ret;
+
+ req.plist.list = list;
+ req.plist.list_size = list_size;
+ req.plist.size = size;
+ req.info.id = id;
+ req.info.ctx_id = ctx_id;
+ req.info.va = va;
+ req.info.size = info_size;
+ req.flags = flags;
+
+ ret = qcom_scm_call(SCM_SVC_MP, IOMMU_SECURE_MAP2, &req, sizeof(req),
+ &resp, sizeof(resp));
+
+ if (ret || resp)
+ return -EINVAL;
+
+ return 0;
+}
+
+int __qcom_scm_iommu_secure_unmap(u32 id, u32 ctx_id, u64 va,
+				u32 size, u32 flags)
+{
+	struct {
+		struct {
+			unsigned int id;
+			unsigned int ctx_id;
+			unsigned int va;
+			unsigned int size;
+		} info;
+		unsigned int flags;
+	} req;
+	int scm_ret;
+
+	req.info.id = id;
+	req.info.ctx_id = ctx_id;
+	req.info.va = va;
+	req.info.size = size;
+	req.flags = flags;
+
+	return qcom_scm_call(SCM_SVC_MP, IOMMU_SECURE_UNMAP2, &req,
+			sizeof(req), &scm_ret, sizeof(scm_ret));
+}
+
+int __qcom_scm_get_feat_version(u32 feat)
+{
+	u32 version;
+
+	if (!__qcom_scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD))
+		return 0;
+
+	if (!qcom_scm_call(SCM_SVC_INFO, GET_FEAT_VERSION_CMD, &feat,
+			sizeof(feat), &version, sizeof(version)))
+		return version;
+
+	/* Unsupported or failed query reads as version 0 */
+	return 0;
+}
+
+#define RESTORE_SEC_CFG		2
+int __qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
+{
+	struct {
+		u32 device_id;
+		u32 spare;
+	} req;
+	int ret, scm_ret = 0;
+
+	req.device_id = device_id;
+	req.spare = spare;
+
+	ret = qcom_scm_call(SCM_SVC_MP, RESTORE_SEC_CFG, &req, sizeof(req),
+			&scm_ret, sizeof(scm_ret));
+	if (ret || scm_ret)
+		return ret ? ret : -EINVAL;
+
+	return 0;
+}
+
+#define TZBSP_VIDEO_SET_STATE 0xa
+int __qcom_scm_set_video_state(u32 state, u32 spare)
+{
+ struct {
+ u32 state;
+ u32 spare;
+ } req;
+ int scm_ret = 0;
+ int ret;
+
+ req.state = state;
+ req.spare = spare;
+
+ ret = qcom_scm_call(SCM_SVC_BOOT, TZBSP_VIDEO_SET_STATE, &req,
+ sizeof(req), &scm_ret, sizeof(scm_ret));
+ if (ret || scm_ret)
+ return ret ? ret : -EINVAL;
+
+ return 0;
+}
+
+#define TZBSP_MEM_PROTECT_VIDEO_VAR 0x8
+
+int __qcom_scm_mem_protect_video_var(u32 start, u32 size, u32 nonpixel_start,
+ u32 nonpixel_size)
+{
+ struct {
+ u32 cp_start;
+ u32 cp_size;
+ u32 cp_nonpixel_start;
+ u32 cp_nonpixel_size;
+ } req;
+ int ret, scm_ret;
+
+ req.cp_start = start;
+ req.cp_size = size;
+ req.cp_nonpixel_start = nonpixel_start;
+ req.cp_nonpixel_size = nonpixel_size;
+
+ ret = qcom_scm_call(SCM_SVC_MP, TZBSP_MEM_PROTECT_VIDEO_VAR, &req,
+ sizeof(req), &scm_ret, sizeof(scm_ret));
+
+ if (ret || scm_ret)
+ return ret ? ret : -EINVAL;
+
+ return 0;
+}
+
+int __qcom_scm_init(void)
+{
+ return 0;
+}
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
index bb6555f6d63b8..b213d1c082716 100644
--- a/drivers/firmware/qcom_scm-64.c
+++ b/drivers/firmware/qcom_scm-64.c
@@ -8,56 +8,775 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
*/
-#include <linux/io.h>
-#include <linux/errno.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/types.h>
#include <linux/qcom_scm.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/cacheflush.h>
+#include <asm/compiler.h>
+#include <asm/smp_plat.h>
+
+#include "qcom_scm.h"
+
+#define QCOM_SCM_SIP_FNID(s, c) (((((s) & 0xFF) << 8) | ((c) & 0xFF)) | 0x02000000)
+
+#define MAX_QCOM_SCM_ARGS 10
+#define MAX_QCOM_SCM_RETS 3
+
+enum qcom_scm_arg_types {
+ QCOM_SCM_VAL,
+ QCOM_SCM_RO,
+ QCOM_SCM_RW,
+ QCOM_SCM_BUFVAL,
+};
+
+#define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\
+ (((a) & 0xff) << 4) | \
+ (((b) & 0xff) << 6) | \
+ (((c) & 0xff) << 8) | \
+ (((d) & 0xff) << 10) | \
+ (((e) & 0xff) << 12) | \
+ (((f) & 0xff) << 14) | \
+ (((g) & 0xff) << 16) | \
+ (((h) & 0xff) << 18) | \
+ (((i) & 0xff) << 20) | \
+ (((j) & 0xff) << 22) | \
+ (num & 0xffff))
+
+#define QCOM_SCM_ARGS(...) QCOM_SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
/**
- * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
- * @entry: Entry point function for the cpus
- * @cpus: The cpumask of cpus that will use the entry point
- *
- * Set the cold boot address of the cpus. Any cpu outside the supported
- * range would be removed from the cpu present mask.
+ * struct qcom_scm_desc
+ * @arginfo: Metadata describing the arguments in args[]
+ * @args: The array of arguments for the secure syscall
+ * @ret: The values returned by the secure syscall
+ * @extra_arg_buf: The buffer containing extra arguments
+ *		   (that don't fit in available registers)
+ * @x5: The 4th argument to the secure syscall or physical address of
+ *	extra_arg_buf
*/
-int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
+struct qcom_scm_desc {
+ u32 arginfo;
+ u64 args[MAX_QCOM_SCM_ARGS];
+ u64 ret[MAX_QCOM_SCM_RETS];
+
+ /* private */
+ void *extra_arg_buf;
+ u64 x5;
+};
+
+static DEFINE_MUTEX(qcom_scm_lock);
+
+#define QCOM_SCM_EBUSY_WAIT_MS 30
+#define QCOM_SCM_EBUSY_MAX_RETRY 20
+
+#define N_EXT_QCOM_SCM_ARGS 7
+#define FIRST_EXT_ARG_IDX 3
+#define SMC_ATOMIC_SYSCALL 31
+#define N_REGISTER_ARGS (MAX_QCOM_SCM_ARGS - N_EXT_QCOM_SCM_ARGS + 1)
+#define SMC64_MASK 0x40000000
+#define SMC_ATOMIC_MASK 0x80000000
+
+#define R0_STR "x0"
+#define R1_STR "x1"
+#define R2_STR "x2"
+#define R3_STR "x3"
+#define R4_STR "x4"
+#define R5_STR "x5"
+#define R6_STR "x6"
+
+int __qcom_scm_call_armv8_64(u64 x0, u64 x1, u64 x2, u64 x3, u64 x4, u64 x5,
+ u64 *ret1, u64 *ret2, u64 *ret3)
{
- return -ENOTSUPP;
+ register u64 r0 asm("r0") = x0;
+ register u64 r1 asm("r1") = x1;
+ register u64 r2 asm("r2") = x2;
+ register u64 r3 asm("r3") = x3;
+ register u64 r4 asm("r4") = x4;
+ register u64 r5 asm("r5") = x5;
+ register u64 r6 asm("r6") = 0;
+
+ do {
+ asm volatile(
+ __asmeq("%0", R0_STR)
+ __asmeq("%1", R1_STR)
+ __asmeq("%2", R2_STR)
+ __asmeq("%3", R3_STR)
+ __asmeq("%4", R0_STR)
+ __asmeq("%5", R1_STR)
+ __asmeq("%6", R2_STR)
+ __asmeq("%7", R3_STR)
+ __asmeq("%8", R4_STR)
+ __asmeq("%9", R5_STR)
+ __asmeq("%10", R6_STR)
+#ifdef REQUIRES_SEC
+ ".arch_extension sec\n"
+#endif
+ "smc #0\n"
+ : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
+ : "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4),
+ "r" (r5), "r" (r6)
+ : "x7", "x8", "x9", "x10", "x11", "x12", "x13",
+ "x14", "x15", "x16", "x17");
+ } while (r0 == QCOM_SCM_INTERRUPTED);
+
+ if (ret1)
+ *ret1 = r1;
+ if (ret2)
+ *ret2 = r2;
+ if (ret3)
+ *ret3 = r3;
+
+ return r0;
}
-/**
- * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
- * @entry: Entry point function for the cpus
- * @cpus: The cpumask of cpus that will use the entry point
- *
- * Set the Linux entry point for the SCM to transfer control to when coming
- * out of a power down. CPU power down may be executed on cpuidle or hotplug.
+int __qcom_scm_call_armv8_32(u32 w0, u32 w1, u32 w2, u32 w3, u32 w4, u32 w5,
+ u64 *ret1, u64 *ret2, u64 *ret3)
+{
+ register u32 r0 asm("r0") = w0;
+ register u32 r1 asm("r1") = w1;
+ register u32 r2 asm("r2") = w2;
+ register u32 r3 asm("r3") = w3;
+ register u32 r4 asm("r4") = w4;
+ register u32 r5 asm("r5") = w5;
+ register u32 r6 asm("r6") = 0;
+
+ do {
+ asm volatile(
+ __asmeq("%0", R0_STR)
+ __asmeq("%1", R1_STR)
+ __asmeq("%2", R2_STR)
+ __asmeq("%3", R3_STR)
+ __asmeq("%4", R0_STR)
+ __asmeq("%5", R1_STR)
+ __asmeq("%6", R2_STR)
+ __asmeq("%7", R3_STR)
+ __asmeq("%8", R4_STR)
+ __asmeq("%9", R5_STR)
+ __asmeq("%10", R6_STR)
+#ifdef REQUIRES_SEC
+ ".arch_extension sec\n"
+#endif
+ "smc #0\n"
+ : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
+ : "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4),
+ "r" (r5), "r" (r6)
+ : "x7", "x8", "x9", "x10", "x11", "x12", "x13",
+ "x14", "x15", "x16", "x17");
+
+ } while (r0 == QCOM_SCM_INTERRUPTED);
+
+ if (ret1)
+ *ret1 = r1;
+ if (ret2)
+ *ret2 = r2;
+ if (ret3)
+ *ret3 = r3;
+
+ return r0;
+}
+
+struct qcom_scm_extra_arg {
+ union {
+ u32 args32[N_EXT_QCOM_SCM_ARGS];
+ u64 args64[N_EXT_QCOM_SCM_ARGS];
+ };
+};
+
+static enum qcom_scm_interface_version {
+ QCOM_SCM_UNKNOWN,
+ QCOM_SCM_LEGACY,
+ QCOM_SCM_ARMV8_32,
+ QCOM_SCM_ARMV8_64,
+} qcom_scm_version = QCOM_SCM_UNKNOWN;
+
+/* This will be set to specify SMC32 or SMC64 */
+static u32 qcom_scm_version_mask;
+
+/*
+ * If there are more than N_REGISTER_ARGS, allocate a buffer and place
+ * the additional arguments in it. The extra argument buffer will be
+ * pointed to by X5.
*/
-int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
+static int allocate_extra_arg_buffer(struct qcom_scm_desc *desc, gfp_t flags)
{
- return -ENOTSUPP;
+ int i, j;
+ struct qcom_scm_extra_arg *argbuf;
+ int arglen = desc->arginfo & 0xf;
+ size_t argbuflen = PAGE_ALIGN(sizeof(struct qcom_scm_extra_arg));
+
+ desc->x5 = desc->args[FIRST_EXT_ARG_IDX];
+
+ if (likely(arglen <= N_REGISTER_ARGS)) {
+ desc->extra_arg_buf = NULL;
+ return 0;
+ }
+
+ argbuf = kzalloc(argbuflen, flags);
+ if (!argbuf) {
+ pr_err("qcom_scm_call: failed to alloc mem for extended argument buffer\n");
+ return -ENOMEM;
+ }
+
+ desc->extra_arg_buf = argbuf;
+
+ j = FIRST_EXT_ARG_IDX;
+ if (qcom_scm_version == QCOM_SCM_ARMV8_64)
+ for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++)
+ argbuf->args64[i] = desc->args[j++];
+ else
+ for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++)
+ argbuf->args32[i] = desc->args[j++];
+ desc->x5 = virt_to_phys(argbuf);
+ __flush_dcache_area(argbuf, argbuflen);
+
+ return 0;
}
/**
- * qcom_scm_cpu_power_down() - Power down the cpu
- * @flags - Flags to flush cache
+ * qcom_scm_call() - Invoke a syscall in the secure world
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @fn_id: The function ID for this syscall
+ * @desc: Descriptor structure containing arguments and return values
*
- * This is an end point to power down cpu. If there was a pending interrupt,
- * the control would return from this function, otherwise, the cpu jumps to the
- * warm boot entry point set for this cpu upon reset.
- */
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This should *only* be called in pre-emptible context.
+ *
+ * A note on cache maintenance:
+ * Note that any buffers that are expected to be accessed by the secure world
+ * must be flushed before invoking qcom_scm_call and invalidated in the cache
+ * immediately after qcom_scm_call returns. An important point that must be noted
+ * is that on ARMV8 architectures, invalidation actually also causes a dirty
+ * cache line to be cleaned (flushed + unset-dirty-bit). Therefore it is of
+ * paramount importance that the buffer be flushed before invoking qcom_scm_call,
+ * even if you don't care about the contents of that buffer.
+ *
+ * Note that cache maintenance on the argument buffer (desc->args) is taken care
+ * of by qcom_scm_call; however, callers are responsible for any other cached
+ * buffers passed over to the secure world.
+*/
+static int qcom_scm_call(u32 svc_id, u32 cmd_id, struct qcom_scm_desc *desc)
+{
+ int arglen = desc->arginfo & 0xf;
+ int ret, retry_count = 0;
+ u32 fn_id = QCOM_SCM_SIP_FNID(svc_id, cmd_id);
+ u64 x0;
+
+ ret = allocate_extra_arg_buffer(desc, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ x0 = fn_id | qcom_scm_version_mask;
+
+ do {
+ mutex_lock(&qcom_scm_lock);
+
+ desc->ret[0] = desc->ret[1] = desc->ret[2] = 0;
+
+ pr_debug("qcom_scm_call: func id %#llx, args: %#x, %#llx, %#llx, %#llx, %#llx\n",
+ x0, desc->arginfo, desc->args[0], desc->args[1],
+ desc->args[2], desc->x5);
+
+ if (qcom_scm_version == QCOM_SCM_ARMV8_64)
+ ret = __qcom_scm_call_armv8_64(x0, desc->arginfo,
+ desc->args[0], desc->args[1],
+ desc->args[2], desc->x5,
+ &desc->ret[0], &desc->ret[1],
+ &desc->ret[2]);
+ else
+ ret = __qcom_scm_call_armv8_32(x0, desc->arginfo,
+ desc->args[0], desc->args[1],
+ desc->args[2], desc->x5,
+ &desc->ret[0], &desc->ret[1],
+ &desc->ret[2]);
+ mutex_unlock(&qcom_scm_lock);
+
+ if (ret == QCOM_SCM_V2_EBUSY)
+ msleep(QCOM_SCM_EBUSY_WAIT_MS);
+ } while (ret == QCOM_SCM_V2_EBUSY && (retry_count++ < QCOM_SCM_EBUSY_MAX_RETRY));
+
+ if (ret < 0)
+ pr_err("qcom_scm_call failed: func id %#llx, arginfo: %#x, args: %#llx, %#llx, %#llx, %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
+ x0, desc->arginfo, desc->args[0], desc->args[1],
+ desc->args[2], desc->x5, ret, desc->ret[0],
+ desc->ret[1], desc->ret[2]);
+
+ if (arglen > N_REGISTER_ARGS)
+ kfree(desc->extra_arg_buf);
+ if (ret < 0)
+ return qcom_scm_remap_error(ret);
+ return 0;
+}
+
+static int qcom_scm_call_atomic(u32 s, u32 c, struct qcom_scm_desc *desc)
+{
+ int arglen = desc->arginfo & 0xf;
+ int ret;
+ u32 fn_id = QCOM_SCM_SIP_FNID(s, c);
+ u64 x0;
+
+ ret = allocate_extra_arg_buffer(desc, GFP_ATOMIC);
+ if (ret)
+ return ret;
+
+ x0 = fn_id | BIT(SMC_ATOMIC_SYSCALL) | qcom_scm_version_mask;
+
+ pr_debug("qcom_scm_call: func id %#llx, args: %#x, %#llx, %#llx, %#llx, %#llx\n",
+ x0, desc->arginfo, desc->args[0], desc->args[1],
+ desc->args[2], desc->x5);
+
+ if (qcom_scm_version == QCOM_SCM_ARMV8_64)
+ ret = __qcom_scm_call_armv8_64(x0, desc->arginfo, desc->args[0],
+ desc->args[1], desc->args[2],
+ desc->x5, &desc->ret[0],
+ &desc->ret[1], &desc->ret[2]);
+ else
+ ret = __qcom_scm_call_armv8_32(x0, desc->arginfo, desc->args[0],
+ desc->args[1], desc->args[2],
+ desc->x5, &desc->ret[0],
+ &desc->ret[1], &desc->ret[2]);
+ if (ret < 0)
+ pr_err("qcom_scm_call failed: func id %#llx, arginfo: %#x, args: %#llx, %#llx, %#llx, %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
+ x0, desc->arginfo, desc->args[0], desc->args[1],
+ desc->args[2], desc->x5, ret, desc->ret[0],
+ desc->ret[1], desc->ret[2]);
+
+ if (arglen > N_REGISTER_ARGS)
+ kfree(desc->extra_arg_buf);
+ if (ret < 0)
+ return qcom_scm_remap_error(ret);
+ return ret;
+}
+
+static int qcom_scm_set_boot_addr(void *entry, const cpumask_t *cpus, int flags)
+{
+ struct qcom_scm_desc desc = {0};
+ unsigned int cpu = cpumask_first(cpus);
+ u64 mpidr_el1 = cpu_logical_map(cpu);
+
+ /* For now we assume only a single cpu is set in the mask */
+ WARN_ON(cpumask_weight(cpus) != 1);
+
+ if (mpidr_el1 & ~MPIDR_HWID_BITMASK) {
+ pr_err("CPU%d:Failed to set boot address\n", cpu);
+ return -ENOSYS;
+ }
+
+ desc.args[0] = virt_to_phys(entry);
+ desc.args[1] = BIT(MPIDR_AFFINITY_LEVEL(mpidr_el1, 0));
+ desc.args[2] = BIT(MPIDR_AFFINITY_LEVEL(mpidr_el1, 1));
+ desc.args[3] = BIT(MPIDR_AFFINITY_LEVEL(mpidr_el1, 2));
+ desc.args[4] = ~0ULL;
+ desc.args[5] = QCOM_SCM_FLAG_HLOS | flags;
+ desc.arginfo = QCOM_SCM_ARGS(6);
+
+ return qcom_scm_call(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR_MC, &desc);
+}
+
+int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
+{
+ int flags = QCOM_SCM_FLAG_COLDBOOT_MC;
+
+ return qcom_scm_set_boot_addr(entry, cpus, flags);
+}
+
+int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
+{
+ int flags = QCOM_SCM_FLAG_WARMBOOT_MC;
+
+ return qcom_scm_set_boot_addr(entry, cpus, flags);
+}
+
void __qcom_scm_cpu_power_down(u32 flags)
{
+ struct qcom_scm_desc desc = {0};
+ desc.args[0] = QCOM_SCM_CMD_CORE_HOTPLUGGED |
+ (flags & QCOM_SCM_FLUSH_FLAG_MASK);
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ qcom_scm_call_atomic(QCOM_SCM_SVC_BOOT, QCOM_SCM_CMD_TERMINATE_PC, &desc);
}
int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id)
{
- return -ENOTSUPP;
+ int ret;
+ struct qcom_scm_desc desc = {0};
+
+ desc.arginfo = QCOM_SCM_ARGS(1);
+ desc.args[0] = QCOM_SCM_SIP_FNID(svc_id, cmd_id);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD, &desc);
+
+ if (ret)
+ return ret;
+
+ return desc.ret[0];
}
int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
- return -ENOTSUPP;
+ int ret;
+ struct qcom_scm_desc desc = {0};
+
+ if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
+ return -ERANGE;
+
+ desc.args[0] = req[0].addr;
+ desc.args[1] = req[0].val;
+ desc.args[2] = req[1].addr;
+ desc.args[3] = req[1].val;
+ desc.args[4] = req[2].addr;
+ desc.args[5] = req[2].val;
+ desc.args[6] = req[3].addr;
+ desc.args[7] = req[3].val;
+ desc.args[8] = req[4].addr;
+ desc.args[9] = req[4].val;
+ desc.arginfo = QCOM_SCM_ARGS(10);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP, &desc);
+ *resp = desc.ret[0];
+
+ return ret;
+}
+
+int __qcom_scm_restart_proc(u32 proc_id, int restart, u32 *resp)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+
+ desc.args[0] = restart;
+ desc.args[1] = 0;
+ desc.arginfo = QCOM_SCM_ARGS(2);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, proc_id,
+ &desc);
+ *resp = desc.ret[0];
+
+ return ret;
+}
+
+bool __qcom_scm_pas_supported(u32 peripheral)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+
+ desc.args[0] = peripheral;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_IS_SUPPORTED_CMD,
+ &desc);
+
+ return ret ? false : !!desc.ret[0];
+}
+
+int __qcom_scm_pas_init_image(u32 peripheral, dma_addr_t metadata_phys)
+{
+ struct qcom_scm_desc desc = {0};
+ u32 scm_ret;
+ int ret;
+
+ desc.args[0] = peripheral;
+ desc.args[1] = metadata_phys;
+ desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_INIT_IMAGE_CMD,
+ &desc);
+ scm_ret = desc.ret[0];
+
+ return ret ? : scm_ret;
+}
+
+int __qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ u32 scm_ret;
+
+ desc.args[0] = peripheral;
+ desc.args[1] = addr;
+ desc.args[2] = size;
+ desc.arginfo = QCOM_SCM_ARGS(3);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MEM_SETUP_CMD,
+ &desc);
+ scm_ret = desc.ret[0];
+
+ return ret ? : scm_ret;
+}
+
+int __qcom_scm_pas_auth_and_reset(u32 peripheral)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ u32 scm_ret;
+
+ desc.args[0] = peripheral;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
+ &desc);
+ scm_ret = desc.ret[0];
+
+ return ret ? : scm_ret;
+}
+
+int __qcom_scm_pas_shutdown(u32 peripheral)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ u32 scm_ret;
+
+ desc.args[0] = peripheral;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_SHUTDOWN_CMD,
+ &desc);
+ scm_ret = desc.ret[0];
+
+ return ret ? : scm_ret;
+}
+
+#define SCM_SVC_UTIL 0x3
+#define SCM_SVC_MP 0xc
+#define IOMMU_DUMP_SMMU_FAULT_REGS 0xc
+
+#define IOMMU_SECURE_PTBL_SIZE 3
+#define IOMMU_SECURE_PTBL_INIT 4
+#define IOMMU_SET_CP_POOL_SIZE 5
+#define IOMMU_SECURE_MAP 6
+#define IOMMU_SECURE_UNMAP 7
+#define IOMMU_SECURE_MAP2 0xb
+#define IOMMU_SECURE_MAP2_FLAT 0x12
+#define IOMMU_SECURE_UNMAP2 0xc
+#define IOMMU_SECURE_UNMAP2_FLAT 0x13
+
+int __qcom_scm_iommu_dump_fault_regs(u32 id, u32 context, u64 addr, u32 len)
+{
+ struct qcom_scm_desc desc = {0};
+
+ desc.args[0] = id;
+ desc.args[1] = context;
+ desc.args[2] = addr;
+ desc.args[3] = len;
+ desc.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL, QCOM_SCM_RW, QCOM_SCM_VAL);
+
+ return qcom_scm_call(SCM_SVC_UTIL, IOMMU_DUMP_SMMU_FAULT_REGS, &desc);
+}
+
+int __qcom_scm_iommu_set_cp_pool_size(u32 size, u32 spare)
+{
+ struct qcom_scm_desc desc = {0};
+
+ desc.args[0] = size;
+ desc.args[1] = spare;
+ desc.arginfo = QCOM_SCM_ARGS(2);
+
+ return qcom_scm_call(SCM_SVC_MP, IOMMU_SET_CP_POOL_SIZE, &desc);
+}
+
+int __qcom_scm_iommu_secure_ptbl_size(u32 spare, int psize[2])
+{
+ struct qcom_scm_desc desc = {0};
+ int ret;
+ desc.args[0] = spare;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_SIZE, &desc);
+
+ psize[0] = desc.ret[0];
+ psize[1] = desc.ret[1];
+
+ return ret;
+}
+
+int __qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
+{
+ struct qcom_scm_desc desc = {0};
+ int ret;
+ u64 ptbl_ret;
+
+ desc.args[0] = addr;
+ desc.args[1] = size;
+ desc.args[2] = spare;
+ desc.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL, QCOM_SCM_VAL);
+
+ ret = qcom_scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_INIT, &desc);
+
+ ptbl_ret = desc.ret[0];
+
+ if (ret)
+ return ret;
+
+ if (ptbl_ret)
+ return ptbl_ret;
+
+ return 0;
+}
+
+int __qcom_scm_iommu_secure_map(u64 list, u32 list_size, u32 size,
+ u32 id, u32 ctx_id, u64 va, u32 info_size,
+ u32 flags)
+{
+ struct qcom_scm_desc desc = {0};
+ u32 resp;
+ int ret;
+
+ desc.args[0] = list;
+ desc.args[1] = list_size;
+ desc.args[2] = size;
+ desc.args[3] = id;
+ desc.args[4] = ctx_id;
+ desc.args[5] = va;
+ desc.args[6] = info_size;
+ desc.args[7] = flags;
+ desc.arginfo = QCOM_SCM_ARGS(8, QCOM_SCM_RW, QCOM_SCM_VAL, QCOM_SCM_VAL, QCOM_SCM_VAL,
+ QCOM_SCM_VAL, QCOM_SCM_VAL, QCOM_SCM_VAL, QCOM_SCM_VAL);
+
+ ret = qcom_scm_call(SCM_SVC_MP, IOMMU_SECURE_MAP2_FLAT, &desc);
+
+ resp = desc.ret[0];
+
+ if (ret || resp)
+ return -EINVAL;
+
+ return 0;
+}
+
+int __qcom_scm_iommu_secure_unmap(u32 id, u32 ctx_id, u64 va,
+ u32 size, u32 flags)
+{
+ struct qcom_scm_desc desc = {0};
+
+ desc.args[0] = id;
+ desc.args[1] = ctx_id;
+ desc.args[2] = va;
+ desc.args[3] = size;
+ desc.args[4] = flags;
+ desc.arginfo = QCOM_SCM_ARGS(5);
+
+ return qcom_scm_call(SCM_SVC_MP, IOMMU_SECURE_UNMAP2_FLAT, &desc);
+}
+
+int __qcom_scm_get_feat_version(u32 feat)
+{
+ struct qcom_scm_desc desc = {0};
+ int ret;
+
+ ret = __qcom_scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD);
+ if (ret <= 0)
+ return 0;
+
+ desc.args[0] = feat;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(SCM_SVC_INFO, GET_FEAT_VERSION_CMD, &desc);
+ if (!ret)
+ return desc.ret[0];
+
+ return 0;
+}
+
+#define RESTORE_SEC_CFG 2
+int __qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
+{
+ struct qcom_scm_desc desc = {0};
+ int ret, scm_ret = 0;
+
+ desc.args[0] = device_id;
+ desc.args[1] = spare;
+ desc.arginfo = QCOM_SCM_ARGS(2);
+
+ ret = qcom_scm_call(SCM_SVC_MP, RESTORE_SEC_CFG, &desc);
+
+ scm_ret = desc.ret[0];
+
+ if (ret || scm_ret)
+ return ret ? ret : -EINVAL;
+
+ return 0;
+}
+
+#define TZBSP_VIDEO_SET_STATE 0xa
+int __qcom_scm_set_video_state(u32 state, u32 spare)
+{
+ struct qcom_scm_desc desc = {0};
+ int ret, scm_ret;
+
+ desc.args[0] = state;
+ desc.args[1] = spare;
+ desc.arginfo = QCOM_SCM_ARGS(2);
+
+ ret = qcom_scm_call(SCM_SVC_BOOT, TZBSP_VIDEO_SET_STATE, &desc);
+
+ scm_ret = desc.ret[0];
+
+ if (ret || scm_ret)
+ return ret ? ret : -EINVAL;
+
+ return 0;
+}
+
+#define TZBSP_MEM_PROTECT_VIDEO_VAR 0x8
+
+int __qcom_scm_mem_protect_video_var(u32 start, u32 size, u32 nonpixel_start,
+ u32 nonpixel_size)
+{
+ struct qcom_scm_desc desc = {0};
+ int ret, scm_ret;
+
+ desc.args[0] = start;
+ desc.args[1] = size;
+ desc.args[2] = nonpixel_start;
+ desc.args[3] = nonpixel_size;
+ desc.arginfo = QCOM_SCM_ARGS(4);
+
+ ret = qcom_scm_call(SCM_SVC_MP, TZBSP_MEM_PROTECT_VIDEO_VAR, &desc);
+
+ scm_ret = desc.ret[0];
+
+ if (ret || scm_ret)
+ return ret ? ret : -EINVAL;
+
+ return 0;
+}
+
+#define QCOM_SCM_SVC_INFO 0x6
+int __qcom_scm_init(void)
+{
+ int ret;
+ u64 ret1 = 0, x0;
+
+ /* First try a SMC64 call */
+ qcom_scm_version = QCOM_SCM_ARMV8_64;
+ x0 = QCOM_SCM_SIP_FNID(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD) | SMC_ATOMIC_MASK;
+ ret = __qcom_scm_call_armv8_64(x0 | SMC64_MASK, QCOM_SCM_ARGS(1), x0, 0, 0, 0,
+ &ret1, NULL, NULL);
+ if (ret || !ret1) {
+ /* Try SMC32 call */
+ ret1 = 0;
+ ret = __qcom_scm_call_armv8_32(x0, QCOM_SCM_ARGS(1), x0, 0, 0,
+ 0, &ret1, NULL, NULL);
+ if (ret || !ret1)
+ qcom_scm_version = QCOM_SCM_LEGACY;
+ else
+ qcom_scm_version = QCOM_SCM_ARMV8_32;
+ } else
+ qcom_scm_version_mask = SMC64_MASK;
+
+ pr_debug("qcom_scm_call: qcom_scm version is %x, mask is %x\n",
+ qcom_scm_version, qcom_scm_version_mask);
+
+ return 0;
}
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 45c008d688914..2e29629a65034 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -10,19 +10,72 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
*/
-
+#include <linux/platform_device.h>
+#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/export.h>
+#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/qcom_scm.h>
+#include <linux/of.h>
+#include <linux/clk.h>
#include "qcom_scm.h"
+struct qcom_scm {
+ struct device *dev;
+ struct clk *core_clk;
+ struct clk *iface_clk;
+ struct clk *bus_clk;
+};
+
+static struct qcom_scm *__scm;
+
+static int qcom_scm_clk_enable(void)
+{
+ int ret;
+
+ if(__scm->core_clk) {
+ ret = clk_prepare_enable(__scm->core_clk);
+ if (ret)
+ goto bail;
+ }
+
+ if(__scm->iface_clk) {
+ ret = clk_prepare_enable(__scm->iface_clk);
+ if (ret)
+ goto disable_core;
+ }
+
+ if(__scm->bus_clk) {
+ ret = clk_prepare_enable(__scm->bus_clk);
+ if (ret)
+ goto disable_iface;
+ }
+
+ return 0;
+
+disable_iface:
+ if(__scm->iface_clk)
+ clk_disable_unprepare(__scm->iface_clk);
+disable_core:
+ if(__scm->core_clk)
+ clk_disable_unprepare(__scm->core_clk);
+bail:
+ return ret;
+}
+
+static void qcom_scm_clk_disable(void)
+{
+ if(__scm->core_clk)
+ clk_disable_unprepare(__scm->core_clk);
+ if(__scm->iface_clk)
+ clk_disable_unprepare(__scm->iface_clk);
+ if(__scm->bus_clk)
+ clk_disable_unprepare(__scm->bus_clk);
+}
+
/**
* qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
* @entry: Entry point function for the cpus
@@ -72,11 +125,17 @@ EXPORT_SYMBOL(qcom_scm_cpu_power_down);
*/
bool qcom_scm_hdcp_available(void)
{
- int ret;
+ int ret = qcom_scm_clk_enable();
+
+ if (ret)
+ goto clk_err;
ret = __qcom_scm_is_call_available(QCOM_SCM_SVC_HDCP,
- QCOM_SCM_CMD_HDCP);
+ QCOM_SCM_CMD_HDCP);
+ qcom_scm_clk_disable();
+
+clk_err:
return (ret > 0) ? true : false;
}
EXPORT_SYMBOL(qcom_scm_hdcp_available);
@@ -91,6 +150,304 @@ EXPORT_SYMBOL(qcom_scm_hdcp_available);
*/
int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
- return __qcom_scm_hdcp_req(req, req_cnt, resp);
+ int ret = qcom_scm_clk_enable();
+
+ if (ret)
+ return ret;
+
+ ret = __qcom_scm_hdcp_req(req, req_cnt, resp);
+ qcom_scm_clk_disable();
+ return ret;
}
EXPORT_SYMBOL(qcom_scm_hdcp_req);
+
+int qcom_scm_restart_proc(u32 pid, int restart, u32 *resp)
+{
+ return __qcom_scm_restart_proc(pid, restart, resp);
+}
+EXPORT_SYMBOL(qcom_scm_restart_proc);
+/**
+ * qcom_scm_pas_supported() - Check if the peripheral authentication service is
+ * available for the given peripheral
+ * @peripheral: peripheral id
+ *
+ * Returns true if PAS is supported for this peripheral, otherwise false.
+ */
+bool qcom_scm_pas_supported(u32 peripheral)
+{
+ int ret;
+
+ ret = __qcom_scm_is_call_available(QCOM_SCM_SVC_PIL,
+ QCOM_SCM_PAS_IS_SUPPORTED_CMD);
+ if (ret <= 0)
+ return false;
+
+ return __qcom_scm_pas_supported(peripheral);
+}
+EXPORT_SYMBOL(qcom_scm_pas_supported);
+
+/**
+ * qcom_scm_pas_init_image() - Initialize peripheral authentication service
+ * state machine for a given peripheral, using the
+ * metadata
+ * @peripheral: peripheral id
+ * @metadata: pointer to memory containing ELF header, program header table
+ * and optional blob of data used for authenticating the metadata
+ * and the rest of the firmware
+ * @size: size of the metadata
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
+{
+ dma_addr_t mdata_phys;
+ void *mdata_buf;
+ int ret;
+
+ /*
+ * During the scm call memory protection will be enabled for the meta
+ * data blob, so make sure it's physically contiguous, 4K aligned and
+ * non-cacheable to avoid XPU violations.
+ */
+ mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys, GFP_KERNEL);
+ if (!mdata_buf) {
+ dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
+ return -ENOMEM;
+ }
+ memcpy(mdata_buf, metadata, size);
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ goto free_metadata;
+
+ ret = __qcom_scm_pas_init_image(peripheral, mdata_phys);
+
+ qcom_scm_clk_disable();
+
+free_metadata:
+ dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_init_image);
+
+/**
+ * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
+ * for firmware loading
+ * @peripheral: peripheral id
+ * @addr: start address of memory area to prepare
+ * @size: size of the memory area to prepare
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
+{
+ int ret;
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ return ret;
+
+ ret = __qcom_scm_pas_mem_setup(peripheral, addr, size);
+ qcom_scm_clk_disable();
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
+
+/**
+ * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
+ * and reset the remote processor
+ * @peripheral: peripheral id
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_auth_and_reset(u32 peripheral)
+{
+ int ret;
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ return ret;
+
+ ret = __qcom_scm_pas_auth_and_reset(peripheral);
+ qcom_scm_clk_disable();
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
+
+/**
+ * qcom_scm_pas_shutdown() - Shut down the remote processor
+ * @peripheral: peripheral id
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_shutdown(u32 peripheral)
+{
+ int ret;
+
+ ret = qcom_scm_clk_enable();
+ if (ret)
+ return ret;
+
+ ret = __qcom_scm_pas_shutdown(peripheral);
+ qcom_scm_clk_disable();
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_shutdown);
+
+int qcom_scm_iommu_dump_fault_regs(u32 id, u32 context, u64 addr, u32 len)
+{
+ return __qcom_scm_iommu_dump_fault_regs(id, context, addr, len);
+}
+EXPORT_SYMBOL(qcom_scm_iommu_dump_fault_regs);
+
+int qcom_scm_iommu_set_cp_pool_size(u32 size, u32 spare)
+{
+ return __qcom_scm_iommu_set_cp_pool_size(size, spare);
+}
+EXPORT_SYMBOL(qcom_scm_iommu_set_cp_pool_size);
+
+int qcom_scm_iommu_secure_ptbl_size(u32 spare, int psize[2])
+{
+ return __qcom_scm_iommu_secure_ptbl_size(spare, psize);
+}
+EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);
+
+int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
+{
+ return __qcom_scm_iommu_secure_ptbl_init(addr, size, spare);
+}
+EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
+
+int qcom_scm_iommu_secure_map(u64 list, u32 list_size, u32 size,
+ u32 id, u32 ctx_id, u64 va, u32 info_size,
+ u32 flags)
+{
+ return __qcom_scm_iommu_secure_map(list, list_size, size, id,
+ ctx_id, va, info_size, flags);
+}
+EXPORT_SYMBOL(qcom_scm_iommu_secure_map);
+
+int qcom_scm_iommu_secure_unmap(u32 id, u32 ctx_id, u64 va, u32 size, u32 flags)
+{
+ return __qcom_scm_iommu_secure_unmap(id, ctx_id, va, size, flags);
+}
+EXPORT_SYMBOL(qcom_scm_iommu_secure_unmap);
+
+int qcom_scm_is_call_available(u32 svc_id, u32 cmd_id)
+{
+ return __qcom_scm_is_call_available(svc_id, cmd_id);
+}
+EXPORT_SYMBOL(qcom_scm_is_call_available);
+
+int qcom_scm_get_feat_version(u32 feat)
+{
+ return __qcom_scm_get_feat_version(feat);
+}
+EXPORT_SYMBOL(qcom_scm_get_feat_version);
+
+int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
+{
+ return __qcom_scm_restore_sec_cfg(device_id, spare);
+}
+EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);
+
+int qcom_scm_set_video_state(u32 state, u32 spare)
+{
+ return __qcom_scm_set_video_state(state, spare);
+}
+EXPORT_SYMBOL(qcom_scm_set_video_state);
+
+int qcom_scm_mem_protect_video_var(u32 start, u32 size,
+ u32 nonpixel_start,
+ u32 nonpixel_size)
+{
+ return __qcom_scm_mem_protect_video_var(start, size, nonpixel_start,
+ nonpixel_size);
+}
+EXPORT_SYMBOL(qcom_scm_mem_protect_video_var);
+
+/**
+ * qcom_scm_is_available() - Checks if SCM is available
+ */
+bool qcom_scm_is_available(void)
+{
+ return !!__scm;
+}
+EXPORT_SYMBOL(qcom_scm_is_available);
+
+static int __init qcom_scm_init(void)
+{
+ return __qcom_scm_init();
+}
+
+static int qcom_scm_probe(struct platform_device *pdev)
+{
+ struct qcom_scm *scm;
+ long rate;
+ int ret;
+
+ ret = qcom_scm_init();
+ if (IS_ERR_VALUE(ret))
+ return ret;
+
+ scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
+ if (!scm)
+ return -ENOMEM;
+
+ scm->dev = &pdev->dev;
+
+ scm->core_clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(scm->core_clk)) {
+ if (PTR_ERR(scm->core_clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to acquire core clk\n");
+ scm->core_clk = NULL;
+ }
+
+ scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(scm->iface_clk)) {
+ if (PTR_ERR(scm->iface_clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to acquire iface clk\n");
+ scm->iface_clk = NULL;
+ }
+
+ scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
+ if (IS_ERR(scm->bus_clk)) {
+ if (PTR_ERR(scm->bus_clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to acquire bus clk\n");
+
+ scm->bus_clk = NULL;
+ }
+
+ if (scm->core_clk) {
+ /* vote for max clk rate for highest performance */
+ rate = clk_round_rate(scm->core_clk, INT_MAX);
+ ret = clk_set_rate(scm->core_clk, rate);
+ if (ret)
+ return ret;
+ }
+
+ __scm = scm;
+
+ return 0;
+}
+
+static const struct of_device_id qcom_scm_dt_match[] = {
+ { .compatible = "qcom,scm",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
+
+static struct platform_driver qcom_scm_driver = {
+ .driver = {
+ .name = "qcom_scm",
+ .of_match_table = qcom_scm_dt_match,
+ },
+ .probe = qcom_scm_probe,
+};
+
+builtin_platform_driver(qcom_scm_driver);
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 2cce75c08b998..4b384a97455b7 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -36,6 +36,18 @@ extern int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id);
extern int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
u32 *resp);
+#define QCOM_SCM_SVC_PIL 0x2
+#define QCOM_SCM_PAS_INIT_IMAGE_CMD 0x1
+#define QCOM_SCM_PAS_MEM_SETUP_CMD 0x2
+#define QCOM_SCM_PAS_AUTH_AND_RESET_CMD 0x5
+#define QCOM_SCM_PAS_SHUTDOWN_CMD 0x6
+#define QCOM_SCM_PAS_IS_SUPPORTED_CMD 0x7
+extern bool __qcom_scm_pas_supported(u32 peripheral);
+extern int __qcom_scm_pas_init_image(u32 peripheral, dma_addr_t metadata_phys);
+extern int __qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size);
+extern int __qcom_scm_pas_auth_and_reset(u32 peripheral);
+extern int __qcom_scm_pas_shutdown(u32 peripheral);
+
/* common error codes */
#define QCOM_SCM_ENOMEM -5
#define QCOM_SCM_EOPNOTSUPP -4
@@ -43,5 +55,59 @@ extern int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
#define QCOM_SCM_EINVAL_ARG -2
#define QCOM_SCM_ERROR -1
#define QCOM_SCM_INTERRUPTED 1
+#define QCOM_SCM_EBUSY -55
+#define QCOM_SCM_V2_EBUSY -12
+
+
+static inline int qcom_scm_remap_error(int err)
+{
+ switch (err) {
+ case QCOM_SCM_ERROR:
+ return -EIO;
+ case QCOM_SCM_EINVAL_ADDR:
+ case QCOM_SCM_EINVAL_ARG:
+ return -EINVAL;
+ case QCOM_SCM_EOPNOTSUPP:
+ return -EOPNOTSUPP;
+ case QCOM_SCM_ENOMEM:
+ return -ENOMEM;
+ case QCOM_SCM_EBUSY:
+ return QCOM_SCM_EBUSY;
+ case QCOM_SCM_V2_EBUSY:
+ return QCOM_SCM_V2_EBUSY;
+ }
+ return -EINVAL;
+}
+
+#define SCM_SVC_BOOT 0x1
+#define SCM_SVC_INFO 0x6
+
+#define GET_FEAT_VERSION_CMD 3
+
+extern int __qcom_scm_pil_init_image_cmd(u32 proc, u64 image_addr);
+extern int __qcom_scm_pil_mem_setup_cmd(u32 proc, u64 start_addr, u32 len);
+extern int __qcom_scm_pil_auth_and_reset_cmd(u32 proc);
+extern int __qcom_scm_pil_shutdown_cmd(u32 proc);
+
+extern int __qcom_scm_iommu_dump_fault_regs(u32 id, u32 context, u64 addr,
+ u32 len);
+extern int __qcom_scm_iommu_set_cp_pool_size(u32 size, u32 spare);
+extern int __qcom_scm_iommu_secure_ptbl_size(u32 spare, int psize[2]);
+extern int __qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
+extern int __qcom_scm_iommu_secure_map(u64 list, u32 list_size, u32 size,
+ u32 id, u32 ctx_id, u64 va,
+ u32 info_size, u32 flags);
+extern int __qcom_scm_iommu_secure_unmap(u32 id, u32 ctx_id, u64 va,
+ u32 size, u32 flags);
+
+extern int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id);
+extern int __qcom_scm_get_feat_version(u32 feat);
+extern int __qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
+extern int __qcom_scm_restart_proc(u32 proc_id, int restart, u32 *resp);
+extern int __qcom_scm_set_video_state(u32 state, u32 spare);
+extern int __qcom_scm_mem_protect_video_var(u32 start, u32 size,
+ u32 nonpixel_start,
+ u32 nonpixel_size);
+extern int __qcom_scm_init(void);
#endif
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index da0c5320789fb..28efc858e6da9 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -79,8 +79,8 @@
*/
#if BITS_PER_LONG == 64
-#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
-#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
diff --git a/drivers/gpu/drm/i2c/Kconfig b/drivers/gpu/drm/i2c/Kconfig
index 22c7ed63a001d..dc8ab8ccf1133 100644
--- a/drivers/gpu/drm/i2c/Kconfig
+++ b/drivers/gpu/drm/i2c/Kconfig
@@ -4,6 +4,8 @@ menu "I2C encoder or helper chips"
config DRM_I2C_ADV7511
tristate "AV7511 encoder"
select REGMAP_I2C
+ select DRM_MIPI_DSI
+ depends on SND_SOC
help
Support for the Analog Device ADV7511(W) and ADV7513 HDMI encoders.
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
index 2c72eb584ab7c..80aa985e6ca9a 100644
--- a/drivers/gpu/drm/i2c/Makefile
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -1,6 +1,7 @@
ccflags-y := -Iinclude/drm
-obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
+adv75xx-y := adv7511.o adv7511_audio.o
+obj-$(CONFIG_DRM_I2C_ADV7511) += adv75xx.o
ch7006-y := ch7006_drv.o ch7006_mode.o
obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index a02112ba1c3df..220ad834e44e0 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -10,49 +10,21 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
-#include <drm/drm_encoder_slave.h>
+#include <drm/drm_mipi_dsi.h>
#include "adv7511.h"
-struct adv7511 {
- struct i2c_client *i2c_main;
- struct i2c_client *i2c_edid;
-
- struct regmap *regmap;
- struct regmap *packet_memory_regmap;
- enum drm_connector_status status;
- bool powered;
-
- unsigned int f_tmds;
-
- unsigned int current_edid_segment;
- uint8_t edid_buf[256];
- bool edid_read;
-
- wait_queue_head_t wq;
- struct drm_encoder *encoder;
-
- bool embedded_sync;
- enum adv7511_sync_polarity vsync_polarity;
- enum adv7511_sync_polarity hsync_polarity;
- bool rgb;
-
- struct edid *edid;
-
- struct gpio_desc *gpio_pd;
-};
-
-static struct adv7511 *encoder_to_adv7511(struct drm_encoder *encoder)
-{
- return to_encoder_slave(encoder)->slave_priv;
-}
-
/* ADI recommended values for proper operation. */
static const struct reg_sequence adv7511_fixed_registers[] = {
{ 0x98, 0x03 },
@@ -66,6 +38,23 @@ static const struct reg_sequence adv7511_fixed_registers[] = {
{ 0x55, 0x02 },
};
+static const struct reg_sequence adv7533_fixed_registers[] = {
+ { 0x16, 0x20 },
+ { 0x9a, 0xe0 },
+ { 0xba, 0x70 },
+ { 0xde, 0x82 },
+ { 0xe4, 0x40 },
+ { 0xe5, 0x80 },
+};
+
+static const struct reg_sequence adv7533_cec_fixed_registers[] = {
+ { 0x15, 0xd0 },
+ { 0x17, 0xd0 },
+ { 0x24, 0x20 },
+ { 0x57, 0x11 },
+ { 0x05, 0xc8 },
+};
+
/* -----------------------------------------------------------------------------
* Register access
*/
@@ -159,6 +148,15 @@ static const struct regmap_config adv7511_regmap_config = {
.volatile_reg = adv7511_register_volatile,
};
+static const struct regmap_config adv7533_cec_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+
/* -----------------------------------------------------------------------------
* Hardware configuration
*/
@@ -194,7 +192,7 @@ static void adv7511_set_colormap(struct adv7511 *adv7511, bool enable,
ADV7511_CSC_UPDATE_MODE, 0);
}
-static int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet)
+int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet)
{
if (packet & 0xff)
regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
@@ -209,7 +207,7 @@ static int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet)
return 0;
}
-static int adv7511_packet_disable(struct adv7511 *adv7511, unsigned int packet)
+int adv7511_packet_disable(struct adv7511 *adv7511, unsigned int packet)
{
if (packet & 0xff)
regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
@@ -359,6 +357,74 @@ static void adv7511_set_link_config(struct adv7511 *adv7511,
adv7511->rgb = config->input_colorspace == HDMI_COLORSPACE_RGB;
}
+static void adv7511_dsi_config_timing_gen(struct adv7511 *adv)
+{
+ struct mipi_dsi_device *dsi = adv->dsi;
+ struct drm_display_mode *mode = &adv->curr_mode;
+ unsigned int hsw, hfp, hbp, vsw, vfp, vbp;
+ u8 clock_div_by_lanes[] = { 6, 4, 3 }; /* 2, 3, 4 lanes */
+
+ hsw = mode->hsync_end - mode->hsync_start;
+ hfp = mode->hsync_start - mode->hdisplay;
+ hbp = mode->htotal - mode->hsync_end;
+ vsw = mode->vsync_end - mode->vsync_start;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vbp = mode->vtotal - mode->vsync_end;
+
+ /* set pixel clock divider mode */
+ regmap_write(adv->regmap_cec, 0x16,
+ clock_div_by_lanes[dsi->lanes - 2] << 3);
+
+ /* horizontal porch params */
+ regmap_write(adv->regmap_cec, 0x28, mode->htotal >> 4);
+ regmap_write(adv->regmap_cec, 0x29, (mode->htotal << 4) & 0xff);
+ regmap_write(adv->regmap_cec, 0x2a, hsw >> 4);
+ regmap_write(adv->regmap_cec, 0x2b, (hsw << 4) & 0xff);
+ regmap_write(adv->regmap_cec, 0x2c, hfp >> 4);
+ regmap_write(adv->regmap_cec, 0x2d, (hfp << 4) & 0xff);
+ regmap_write(adv->regmap_cec, 0x2e, hbp >> 4);
+ regmap_write(adv->regmap_cec, 0x2f, (hbp << 4) & 0xff);
+
+ /* vertical porch params */
+ regmap_write(adv->regmap_cec, 0x30, mode->vtotal >> 4);
+ regmap_write(adv->regmap_cec, 0x31, (mode->vtotal << 4) & 0xff);
+ regmap_write(adv->regmap_cec, 0x32, vsw >> 4);
+ regmap_write(adv->regmap_cec, 0x33, (vsw << 4) & 0xff);
+ regmap_write(adv->regmap_cec, 0x34, vfp >> 4);
+ regmap_write(adv->regmap_cec, 0x35, (vfp << 4) & 0xff);
+ regmap_write(adv->regmap_cec, 0x36, vbp >> 4);
+ regmap_write(adv->regmap_cec, 0x37, (vbp << 4) & 0xff);
+}
+
+static void adv7533_dsi_power_on(struct adv7511 *adv)
+{
+ struct mipi_dsi_device *dsi = adv->dsi;
+
+ if (adv->use_timing_gen)
+ adv7511_dsi_config_timing_gen(adv);
+
+ /* set number of dsi lanes */
+ regmap_write(adv->regmap_cec, 0x1c, dsi->lanes << 4);
+
+ if (adv->use_timing_gen) {
+ /* reset internal timing generator */
+ regmap_write(adv->regmap_cec, 0x27, 0xcb);
+ regmap_write(adv->regmap_cec, 0x27, 0x8b);
+ regmap_write(adv->regmap_cec, 0x27, 0xcb);
+ } else {
+ /* disable internal timing generator */
+ regmap_write(adv->regmap_cec, 0x27, 0x0b);
+ }
+
+ /* enable hdmi */
+ regmap_write(adv->regmap_cec, 0x03, 0x89);
+ /* disable test mode */
+ regmap_write(adv->regmap_cec, 0x55, 0x00);
+
+ regmap_register_patch(adv->regmap_cec, adv7533_cec_fixed_registers,
+ ARRAY_SIZE(adv7533_cec_fixed_registers));
+}
+
static void adv7511_power_on(struct adv7511 *adv7511)
{
adv7511->current_edid_segment = -1;
@@ -394,9 +460,20 @@ static void adv7511_power_on(struct adv7511 *adv7511)
*/
regcache_sync(adv7511->regmap);
+ if (adv7511->type == ADV7533)
+ adv7533_dsi_power_on(adv7511);
+
adv7511->powered = true;
}
+static void adv7533_dsi_power_off(struct adv7511 *adv)
+{
+ /* disable hdmi */
+ regmap_write(adv->regmap_cec, 0x03, 0x0b);
+ /* disable internal timing generator */
+ regmap_write(adv->regmap_cec, 0x27, 0x0b);
+}
+
static void adv7511_power_off(struct adv7511 *adv7511)
{
/* TODO: setup additional power down modes */
@@ -405,6 +482,9 @@ static void adv7511_power_off(struct adv7511 *adv7511)
ADV7511_POWER_POWER_DOWN);
regcache_mark_dirty(adv7511->regmap);
+ if (adv7511->type == ADV7533)
+ adv7533_dsi_power_off(adv7511);
+
adv7511->powered = false;
}
@@ -430,7 +510,7 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
return false;
}
-static int adv7511_irq_process(struct adv7511 *adv7511)
+static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
{
unsigned int irq0, irq1;
int ret;
@@ -446,8 +526,8 @@ static int adv7511_irq_process(struct adv7511 *adv7511)
regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
- if (irq0 & ADV7511_INT0_HPD && adv7511->encoder)
- drm_helper_hpd_irq_event(adv7511->encoder->dev);
+ if (process_hpd && irq0 & ADV7511_INT0_HPD && adv7511->bridge.encoder)
+ drm_helper_hpd_irq_event(adv7511->connector.dev);
if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
adv7511->edid_read = true;
@@ -464,7 +544,7 @@ static irqreturn_t adv7511_irq_handler(int irq, void *devid)
struct adv7511 *adv7511 = devid;
int ret;
- ret = adv7511_irq_process(adv7511);
+ ret = adv7511_irq_process(adv7511, true);
return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
}
@@ -481,7 +561,7 @@ static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
adv7511->edid_read, msecs_to_jiffies(timeout));
} else {
for (; timeout > 0; timeout -= 25) {
- ret = adv7511_irq_process(adv7511);
+ ret = adv7511_irq_process(adv7511, false);
if (ret < 0)
break;
@@ -563,18 +643,20 @@ static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
}
/* -----------------------------------------------------------------------------
- * Encoder operations
+ * ADV75xx helpers
*/
-static int adv7511_get_modes(struct drm_encoder *encoder,
+static int adv7511_get_modes(struct adv7511 *adv7511,
struct drm_connector *connector)
{
- struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
struct edid *edid;
unsigned int count;
/* Reading the EDID only works if the device is powered */
if (!adv7511->powered) {
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+ ADV7511_REG_POWER2_HPD_SRC_MASK,
+ ADV7511_REG_POWER2_HPD_SRC_NONE);
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN, 0);
if (adv7511->i2c_main->irq) {
@@ -606,21 +688,9 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
return count;
}
-static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
- struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
-
- if (mode == DRM_MODE_DPMS_ON)
- adv7511_power_on(adv7511);
- else
- adv7511_power_off(adv7511);
-}
-
static enum drm_connector_status
-adv7511_encoder_detect(struct drm_encoder *encoder,
- struct drm_connector *connector)
+adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
{
- struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
enum drm_connector_status status;
unsigned int val;
bool hpd;
@@ -644,7 +714,7 @@ adv7511_encoder_detect(struct drm_encoder *encoder,
if (status == connector_status_connected && hpd && adv7511->powered) {
regcache_mark_dirty(adv7511->regmap);
adv7511_power_on(adv7511);
- adv7511_get_modes(encoder, connector);
+ adv7511_get_modes(adv7511, connector);
if (adv7511->status == connector_status_connected)
status = connector_status_disconnected;
} else {
@@ -658,8 +728,8 @@ adv7511_encoder_detect(struct drm_encoder *encoder,
return status;
}
-static int adv7511_encoder_mode_valid(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+static int adv7511_mode_valid(struct adv7511 *adv7511,
+ struct drm_display_mode *mode)
{
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
@@ -667,11 +737,10 @@ static int adv7511_encoder_mode_valid(struct drm_encoder *encoder,
return MODE_OK;
}
-static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adj_mode)
+static void adv7511_mode_set(struct adv7511 *adv7511,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
{
- struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
unsigned int low_refresh_rate;
unsigned int hsync_polarity = 0;
unsigned int vsync_polarity = 0;
@@ -754,6 +823,28 @@ static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
regmap_update_bits(adv7511->regmap, 0x17,
0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
+ if (adv7511->type == ADV7533 && adv7511->num_dsi_lanes == 4) {
+ struct mipi_dsi_device *dsi = adv7511->dsi;
+ int lanes, ret;
+
+ if (adj_mode->clock > 80000)
+ lanes = 4;
+ else
+ lanes = 3;
+
+ if (lanes != dsi->lanes) {
+ mipi_dsi_detach(dsi);
+ dsi->lanes = lanes;
+ ret = mipi_dsi_attach(dsi);
+ if (ret) {
+ DRM_ERROR("Failed to change host lanes\n");
+ return;
+ }
+ }
+ }
+
+ drm_mode_copy(&adv7511->curr_mode, adj_mode);
+
/*
* TODO Test first order 4:2:2 to 4:4:4 up conversion method, which is
* supposed to give better results.
@@ -762,26 +853,251 @@ static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
adv7511->f_tmds = mode->clock;
}
-static const struct drm_encoder_slave_funcs adv7511_encoder_funcs = {
- .dpms = adv7511_encoder_dpms,
- .mode_valid = adv7511_encoder_mode_valid,
- .mode_set = adv7511_encoder_mode_set,
- .detect = adv7511_encoder_detect,
- .get_modes = adv7511_get_modes,
+/* Connector funcs */
+static struct adv7511 *connector_to_adv7511(struct drm_connector *connector)
+{
+ return container_of(connector, struct adv7511, connector);
+}
+
+static int adv7511_connector_get_modes(struct drm_connector *connector)
+{
+ struct adv7511 *adv = connector_to_adv7511(connector);
+
+ return adv7511_get_modes(adv, connector);
+}
+
+static struct drm_encoder *
+adv7511_connector_best_encoder(struct drm_connector *connector)
+{
+ struct adv7511 *adv = connector_to_adv7511(connector);
+
+ return adv->bridge.encoder;
+}
+
+static enum drm_mode_status
+adv7511_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct adv7511 *adv = connector_to_adv7511(connector);
+
+ return adv7511_mode_valid(adv, mode);
+}
+
+static struct drm_connector_helper_funcs adv7511_connector_helper_funcs = {
+ .get_modes = adv7511_connector_get_modes,
+ .best_encoder = adv7511_connector_best_encoder,
+ .mode_valid = adv7511_connector_mode_valid,
+};
+
+static enum drm_connector_status
+adv7511_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct adv7511 *adv = connector_to_adv7511(connector);
+
+ return adv7511_detect(adv, connector);
+}
+
+static struct drm_connector_funcs adv7511_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = adv7511_connector_detect,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+/* Bridge funcs */
+static struct adv7511 *bridge_to_adv7511(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct adv7511, bridge);
+}
+
+static void adv7511_bridge_enable(struct drm_bridge *bridge)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+
+ adv7511_power_on(adv);
+}
+
+static void adv7511_bridge_disable(struct drm_bridge *bridge)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+
+ adv7511_power_off(adv);
+}
+
+static void adv7511_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+
+ adv7511_mode_set(adv, mode, adj_mode);
+}
+
+static int adv7533_attach_dsi(struct adv7511 *adv)
+{
+ struct device *dev = &adv->i2c_main->dev;
+ struct mipi_dsi_host *host;
+ struct mipi_dsi_device *dsi;
+ int ret = 0;
+ const struct mipi_dsi_device_info info = { .type = "adv7533",
+ .channel = 0,
+ .node = NULL,
+ };
+
+ host = of_find_mipi_dsi_host_by_node(adv->host_node);
+ if (!host) {
+ dev_err(dev, "failed to find dsi host\n");
+ return -EPROBE_DEFER;
+ }
+
+ dsi = mipi_dsi_device_register_full(host, &info);
+ if (IS_ERR(dsi)) {
+ dev_err(dev, "failed to create dsi device\n");
+ ret = PTR_ERR(dsi);
+ goto err_dsi_device;
+ }
+
+ adv->dsi = dsi;
+
+ dsi->lanes = adv->num_dsi_lanes;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_EOT_PACKET | MIPI_DSI_MODE_VIDEO_HSE;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "failed to attach dsi to host\n");
+ goto err_dsi_attach;
+ }
+
+ return 0;
+
+err_dsi_attach:
+ mipi_dsi_device_unregister(dsi);
+err_dsi_device:
+ return ret;
+}
+
+static int adv7511_bridge_attach(struct drm_bridge *bridge)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+ int ret;
+
+ if (!bridge->encoder) {
+ DRM_ERROR("Parent encoder object not found");
+ return -ENODEV;
+ }
+
+ adv->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
+ ret = drm_connector_init(bridge->dev, &adv->connector,
+ &adv7511_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
+ return ret;
+ }
+ drm_connector_helper_add(&adv->connector,
+ &adv7511_connector_helper_funcs);
+ drm_mode_connector_attach_encoder(&adv->connector, bridge->encoder);
+
+ if (adv->type == ADV7533)
+ ret = adv7533_attach_dsi(adv);
+
+ return ret;
+}
+
+static struct drm_bridge_funcs adv7511_bridge_funcs = {
+ .enable = adv7511_bridge_enable,
+ .disable = adv7511_bridge_disable,
+ .mode_set = adv7511_bridge_mode_set,
+ .attach = adv7511_bridge_attach,
};
/* -----------------------------------------------------------------------------
* Probe & remove
*/
+static int adv7533_init_regulators(struct adv7511 *adv)
+{
+ int ret;
+ struct device *dev = &adv->i2c_main->dev;
+
+ adv->avdd = devm_regulator_get(dev, "avdd");
+ if (IS_ERR(adv->avdd)) {
+ ret = PTR_ERR(adv->avdd);
+ dev_err(dev, "failed to get avdd regulator %d\n", ret);
+ return ret;
+ }
+
+ adv->v3p3 = devm_regulator_get(dev, "v3p3");
+ if (IS_ERR(adv->v3p3)) {
+ ret = PTR_ERR(adv->v3p3);
+ dev_err(dev, "failed to get v3p3 regulator %d\n", ret);
+ return ret;
+ }
+
+ if (regulator_can_change_voltage(adv->avdd)) {
+ ret = regulator_set_voltage(adv->avdd, 1800000, 1800000);
+ if (ret) {
+ dev_err(dev, "failed to set avdd voltage %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (regulator_can_change_voltage(adv->v3p3)) {
+ ret = regulator_set_voltage(adv->v3p3, 3300000, 3300000);
+ if (ret) {
+ dev_err(dev, "failed to set v3p3 voltage %d\n", ret);
+ return ret;
+ }
+ }
+
+ /* keep the regulators always on */
+ ret = regulator_enable(adv->avdd);
+ if (ret) {
+ dev_err(dev, "failed to enable avdd %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_enable(adv->v3p3);
+ if (ret) {
+ dev_err(dev, "failed to enable v3p3 %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int adv7533_uninit_regulators(struct adv7511 *adv)
+{
+ int ret;
+ struct device *dev = &adv->i2c_main->dev;
+
+ ret = regulator_disable(adv->avdd);
+ if (ret) {
+ dev_err(dev, "failed to disable avdd %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_disable(adv->v3p3);
+ if (ret) {
+ dev_err(dev, "failed to disable v3p3 %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static int adv7511_parse_dt(struct device_node *np,
struct adv7511_link_config *config)
{
const char *str;
int ret;
- memset(config, 0, sizeof(*config));
-
of_property_read_u32(np, "adi,input-depth", &config->input_color_depth);
if (config->input_color_depth != 8 && config->input_color_depth != 10 &&
config->input_color_depth != 12)
@@ -859,6 +1175,44 @@ static int adv7511_parse_dt(struct device_node *np,
return 0;
}
+static int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
+{
+ u32 num_lanes;
+ struct device_node *endpoint;
+
+ of_property_read_u32(np, "adi,dsi-lanes", &num_lanes);
+
+ if (num_lanes < 1 || num_lanes > 4)
+ return -EINVAL;
+
+ adv->num_dsi_lanes = num_lanes;
+
+ endpoint = of_graph_get_next_endpoint(np, NULL);
+ if (!endpoint) {
+ DRM_ERROR("ADV7533 DSI input endpoint not found\n");
+ return -ENODEV;
+ }
+
+ adv->host_node = of_graph_get_remote_port_parent(endpoint);
+ if (!adv->host_node) {
+ DRM_ERROR("DSI host node not found\n");
+ of_node_put(endpoint);
+ return -ENODEV;
+ }
+
+ of_node_put(endpoint);
+ of_node_put(adv->host_node);
+
+ adv->use_timing_gen = !of_property_read_bool(np,
+ "adi,disable-timing-generator");
+
+ /* TODO: Check if these need to be parsed by DT or not */
+ adv->rgb = true;
+ adv->embedded_sync = false;
+
+ return 0;
+}
+
static const int edid_i2c_addr = 0x7e;
static const int packet_i2c_addr = 0x70;
static const int cec_i2c_addr = 0x78;
@@ -881,10 +1235,28 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
adv7511->powered = false;
adv7511->status = connector_status_disconnected;
- ret = adv7511_parse_dt(dev->of_node, &link_config);
+ if (dev->of_node)
+ adv7511->type = (enum adv7511_type) of_device_get_match_data(dev);
+ else
+ adv7511->type = id->driver_data;
+
+ memset(&link_config, 0, sizeof(link_config));
+
+ if (adv7511->type == ADV7511)
+ ret = adv7511_parse_dt(dev->of_node, &link_config);
+ else
+ ret = adv7533_parse_dt(dev->of_node, adv7511);
if (ret)
return ret;
+ adv7511->i2c_main = i2c;
+
+ if (adv7511->type == ADV7533) {
+ ret = adv7533_init_regulators(adv7511);
+ if (ret)
+ return ret;
+ }
+
/*
* The power down GPIO is optional. If present, toggle it from active to
* inactive to wake up the encoder.
@@ -907,8 +1279,15 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
return ret;
dev_dbg(dev, "Rev. %d\n", val);
- ret = regmap_register_patch(adv7511->regmap, adv7511_fixed_registers,
- ARRAY_SIZE(adv7511_fixed_registers));
+ if (adv7511->type == ADV7511) {
+ ret = regmap_register_patch(adv7511->regmap,
+ adv7511_fixed_registers,
+ ARRAY_SIZE(adv7511_fixed_registers));
+ } else {
+ ret = regmap_register_patch(adv7511->regmap,
+ adv7533_fixed_registers,
+ ARRAY_SIZE(adv7533_fixed_registers));
+ }
if (ret)
return ret;
@@ -918,11 +1297,31 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR, cec_i2c_addr);
adv7511_packet_disable(adv7511, 0xffff);
- adv7511->i2c_main = i2c;
adv7511->i2c_edid = i2c_new_dummy(i2c->adapter, edid_i2c_addr >> 1);
if (!adv7511->i2c_edid)
return -ENOMEM;
+ adv7511->i2c_cec = i2c_new_dummy(i2c->adapter, cec_i2c_addr >> 1);
+ if (!adv7511->i2c_cec) {
+ ret = -ENOMEM;
+ goto err_i2c_unregister_edid;
+ }
+
+ adv7511->regmap_cec = devm_regmap_init_i2c(adv7511->i2c_cec,
+ &adv7533_cec_regmap_config);
+ if (IS_ERR(adv7511->regmap_cec)) {
+ ret = PTR_ERR(adv7511->regmap_cec);
+ goto err_i2c_unregister_cec;
+ }
+
+ if (adv7511->type == ADV7533) {
+ ret = regmap_register_patch(adv7511->regmap_cec,
+ adv7533_cec_fixed_registers,
+ ARRAY_SIZE(adv7533_cec_fixed_registers));
+ if (ret)
+ goto err_i2c_unregister_cec;
+ }
+
if (i2c->irq) {
init_waitqueue_head(&adv7511->wq);
@@ -931,7 +1330,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
IRQF_ONESHOT, dev_name(dev),
adv7511);
if (ret)
- goto err_i2c_unregister_device;
+ goto err_i2c_unregister_cec;
}
/* CEC is unused for now */
@@ -942,11 +1341,25 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
i2c_set_clientdata(i2c, adv7511);
- adv7511_set_link_config(adv7511, &link_config);
+ if (adv7511->type == ADV7511)
+ adv7511_set_link_config(adv7511, &link_config);
+
+ adv7511->bridge.funcs = &adv7511_bridge_funcs;
+ adv7511->bridge.of_node = dev->of_node;
+
+ ret = drm_bridge_add(&adv7511->bridge);
+ if (ret) {
+ dev_err(dev, "failed to add adv7511 bridge\n");
+ goto err_i2c_unregister_cec;
+ }
+
+ adv7511_audio_init(dev);
return 0;
-err_i2c_unregister_device:
+err_i2c_unregister_cec:
+ i2c_unregister_device(adv7511->i2c_cec);
+err_i2c_unregister_edid:
i2c_unregister_device(adv7511->i2c_edid);
return ret;
@@ -956,66 +1369,69 @@ static int adv7511_remove(struct i2c_client *i2c)
{
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
- i2c_unregister_device(adv7511->i2c_edid);
-
- kfree(adv7511->edid);
+ if (adv7511->type == ADV7533) {
+ mipi_dsi_detach(adv7511->dsi);
+ mipi_dsi_device_unregister(adv7511->dsi);
+ adv7533_uninit_regulators(adv7511);
+ }
- return 0;
-}
+ drm_bridge_remove(&adv7511->bridge);
-static int adv7511_encoder_init(struct i2c_client *i2c, struct drm_device *dev,
- struct drm_encoder_slave *encoder)
-{
+ adv7511_audio_exit(&i2c->dev);
- struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
-
- encoder->slave_priv = adv7511;
- encoder->slave_funcs = &adv7511_encoder_funcs;
+ i2c_unregister_device(adv7511->i2c_cec);
+ i2c_unregister_device(adv7511->i2c_edid);
- adv7511->encoder = &encoder->base;
+ kfree(adv7511->edid);
return 0;
}
static const struct i2c_device_id adv7511_i2c_ids[] = {
- { "adv7511", 0 },
- { "adv7511w", 0 },
- { "adv7513", 0 },
+ { "adv7511", ADV7511 },
+ { "adv7511w", ADV7511 },
+ { "adv7513", ADV7511 },
+ { "adv7533", ADV7533 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adv7511_i2c_ids);
static const struct of_device_id adv7511_of_ids[] = {
- { .compatible = "adi,adv7511", },
- { .compatible = "adi,adv7511w", },
- { .compatible = "adi,adv7513", },
+ { .compatible = "adi,adv7511", .data = (void *)ADV7511 },
+ { .compatible = "adi,adv7511w", .data = (void *)ADV7511 },
+ { .compatible = "adi,adv7513", .data = (void *)ADV7511 },
+ { .compatible = "adi,adv7533", .data = (void *)ADV7533 },
{ }
};
MODULE_DEVICE_TABLE(of, adv7511_of_ids);
-static struct drm_i2c_encoder_driver adv7511_driver = {
- .i2c_driver = {
- .driver = {
- .name = "adv7511",
- .of_match_table = adv7511_of_ids,
- },
- .id_table = adv7511_i2c_ids,
- .probe = adv7511_probe,
- .remove = adv7511_remove,
- },
+static struct mipi_dsi_driver adv7533_dsi_driver = {
+ .driver.name = "adv7533",
+};
- .encoder_init = adv7511_encoder_init,
+static struct i2c_driver adv7511_driver = {
+ .driver = {
+ .name = "adv7511",
+ .of_match_table = adv7511_of_ids,
+ },
+ .id_table = adv7511_i2c_ids,
+ .probe = adv7511_probe,
+ .remove = adv7511_remove,
};
static int __init adv7511_init(void)
{
- return drm_i2c_encoder_register(THIS_MODULE, &adv7511_driver);
+ mipi_dsi_driver_register(&adv7533_dsi_driver);
+
+ return i2c_add_driver(&adv7511_driver);
}
module_init(adv7511_init);
static void __exit adv7511_exit(void)
{
- drm_i2c_encoder_unregister(&adv7511_driver);
+ i2c_del_driver(&adv7511_driver);
+
+ mipi_dsi_driver_unregister(&adv7533_dsi_driver);
}
module_exit(adv7511_exit);
diff --git a/drivers/gpu/drm/i2c/adv7511.h b/drivers/gpu/drm/i2c/adv7511.h
index 38515b30cedfc..64120fd3f4de3 100644
--- a/drivers/gpu/drm/i2c/adv7511.h
+++ b/drivers/gpu/drm/i2c/adv7511.h
@@ -11,6 +11,16 @@
#include <linux/hdmi.h>
+#include <drm/drm_crtc_helper.h>
+
+struct regmap;
+struct adv7511;
+
+int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet);
+int adv7511_packet_disable(struct adv7511 *adv7511, unsigned int packet);
+int adv7511_audio_init(struct device *dev);
+void adv7511_audio_exit(struct device *dev);
+
#define ADV7511_REG_CHIP_REVISION 0x00
#define ADV7511_REG_N0 0x01
#define ADV7511_REG_N1 0x02
@@ -229,6 +239,56 @@ enum adv7511_sync_polarity {
ADV7511_SYNC_POLARITY_HIGH,
};
+enum adv7511_type {
+ ADV7511,
+ ADV7533,
+};
+
+struct adv7511 {
+ struct i2c_client *i2c_main;
+ struct i2c_client *i2c_edid;
+ struct i2c_client *i2c_cec;
+
+ struct regmap *regmap;
+ struct regmap *regmap_cec;
+ enum drm_connector_status status;
+ bool powered;
+
+ struct drm_display_mode curr_mode;
+
+ unsigned int f_tmds;
+ unsigned int f_audio;
+ unsigned int audio_source;
+
+ unsigned int current_edid_segment;
+ uint8_t edid_buf[256];
+ bool edid_read;
+
+ wait_queue_head_t wq;
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+
+ bool embedded_sync;
+ enum adv7511_sync_polarity vsync_polarity;
+ enum adv7511_sync_polarity hsync_polarity;
+ bool rgb;
+
+ struct edid *edid;
+
+ struct gpio_desc *gpio_pd;
+
+ /* ADV7533 DSI RX related params */
+ struct device_node *host_node;
+ struct mipi_dsi_device *dsi;
+ u8 num_dsi_lanes;
+ bool use_timing_gen;
+
+ struct regulator *avdd;
+ struct regulator *v3p3;
+
+ enum adv7511_type type;
+};
+
/**
* struct adv7511_link_config - Describes adv7511 hardware configuration
* @input_color_depth: Number of bits per color component (8, 10 or 12)
diff --git a/drivers/gpu/drm/i2c/adv7511_audio.c b/drivers/gpu/drm/i2c/adv7511_audio.c
new file mode 100644
index 0000000000000..52019e95d0070
--- /dev/null
+++ b/drivers/gpu/drm/i2c/adv7511_audio.c
@@ -0,0 +1,312 @@
+/*
+ * Analog Devices ADV7511 HDMI transmitter driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "adv7511.h"
+
+static const struct snd_soc_dapm_widget adv7511_dapm_widgets[] = {
+ SND_SOC_DAPM_OUTPUT("TMDS"),
+ SND_SOC_DAPM_AIF_IN("AIFIN", "Playback", 0, SND_SOC_NOPM, 0, 0),
+};
+
+static const struct snd_soc_dapm_route adv7511_routes[] = {
+ { "TMDS", NULL, "AIFIN" },
+};
+
+static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs,
+ unsigned int *cts, unsigned int *n)
+{
+ switch (fs) {
+ case 32000:
+ *n = 4096;
+ break;
+ case 44100:
+ *n = 6272;
+ break;
+ case 48000:
+ *n = 6144;
+ break;
+ }
+
+ *cts = ((f_tmds * *n) / (128 * fs)) * 1000;
+}
+
+static int adv7511_update_cts_n(struct adv7511 *adv7511)
+{
+ unsigned int cts = 0;
+ unsigned int n = 0;
+
+ adv7511_calc_cts_n(adv7511->f_tmds, adv7511->f_audio, &cts, &n);
+
+ regmap_write(adv7511->regmap, ADV7511_REG_N0, (n >> 16) & 0xf);
+ regmap_write(adv7511->regmap, ADV7511_REG_N1, (n >> 8) & 0xff);
+ regmap_write(adv7511->regmap, ADV7511_REG_N2, n & 0xff);
+
+ regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL0,
+ (cts >> 16) & 0xf);
+ regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL1,
+ (cts >> 8) & 0xff);
+ regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL2,
+ cts & 0xff);
+
+ return 0;
+}
+
+static int adv7511_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct adv7511 *adv7511 = snd_soc_codec_get_drvdata(codec);
+ unsigned int rate;
+ unsigned int len;
+ switch (params_rate(params)) {
+ case 32000:
+ rate = ADV7511_SAMPLE_FREQ_32000;
+ break;
+ case 44100:
+ rate = ADV7511_SAMPLE_FREQ_44100;
+ break;
+ case 48000:
+ rate = ADV7511_SAMPLE_FREQ_48000;
+ break;
+ case 88200:
+ rate = ADV7511_SAMPLE_FREQ_88200;
+ break;
+ case 96000:
+ rate = ADV7511_SAMPLE_FREQ_96000;
+ break;
+ case 176400:
+ rate = ADV7511_SAMPLE_FREQ_176400;
+ break;
+ case 192000:
+ rate = ADV7511_SAMPLE_FREQ_192000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ len = ADV7511_I2S_SAMPLE_LEN_16;
+ break;
+ case SNDRV_PCM_FORMAT_S18_3LE:
+ len = ADV7511_I2S_SAMPLE_LEN_18;
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ len = ADV7511_I2S_SAMPLE_LEN_20;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ len = ADV7511_I2S_SAMPLE_LEN_24;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ adv7511->f_audio = params_rate(params);
+
+ adv7511_update_cts_n(adv7511);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CFG3,
+ ADV7511_AUDIO_CFG3_LEN_MASK, len);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG,
+ ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4);
+ regmap_write(adv7511->regmap, 0x73, 0x1);
+
+ return 0;
+}
+
+static int adv7511_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct adv7511 *adv7511 = snd_soc_codec_get_drvdata(codec);
+ unsigned int audio_source, i2s_format = 0;
+ unsigned int invert_clock;
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ audio_source = ADV7511_AUDIO_SOURCE_I2S;
+ i2s_format = ADV7511_I2S_FORMAT_I2S;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ audio_source = ADV7511_AUDIO_SOURCE_I2S;
+ i2s_format = ADV7511_I2S_FORMAT_RIGHT_J;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ audio_source = ADV7511_AUDIO_SOURCE_I2S;
+ i2s_format = ADV7511_I2S_FORMAT_LEFT_J;
+ break;
+// case SND_SOC_DAIFMT_SPDIF:
+// audio_source = ADV7511_AUDIO_SOURCE_SPDIF;
+// break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ invert_clock = 0;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ invert_clock = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_SOURCE, 0x70,
+ audio_source << 4);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG, BIT(6),
+ invert_clock << 6);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_I2S_CONFIG, 0x03,
+ i2s_format);
+
+ adv7511->audio_source = audio_source;
+
+ return 0;
+}
+
+static int adv7511_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ struct adv7511 *adv7511 = snd_soc_codec_get_drvdata(codec);
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ switch (adv7511->audio_source) {
+ case ADV7511_AUDIO_SOURCE_I2S:
+ break;
+ case ADV7511_AUDIO_SOURCE_SPDIF:
+ regmap_update_bits(adv7511->regmap,
+ ADV7511_REG_AUDIO_CONFIG, BIT(7),
+ BIT(7));
+ break;
+ }
+ break;
+ case SND_SOC_BIAS_PREPARE:
+ if (dapm->bias_level == SND_SOC_BIAS_STANDBY) {
+ adv7511_packet_enable(adv7511,
+ ADV7511_PACKET_ENABLE_AUDIO_SAMPLE);
+ adv7511_packet_enable(adv7511,
+ ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME);
+ adv7511_packet_enable(adv7511,
+ ADV7511_PACKET_ENABLE_N_CTS);
+ } else {
+ adv7511_packet_disable(adv7511,
+ ADV7511_PACKET_ENABLE_AUDIO_SAMPLE);
+ adv7511_packet_disable(adv7511,
+ ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME);
+ adv7511_packet_disable(adv7511,
+ ADV7511_PACKET_ENABLE_N_CTS);
+ }
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
+ BIT(7), 0);
+ break;
+ case SND_SOC_BIAS_OFF:
+ break;
+ }
+ dapm->bias_level = level;
+ return 0;
+}
+
+#define ADV7511_RATES (SNDRV_PCM_RATE_32000 |\
+ SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\
+ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 |\
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000)
+
+#define ADV7511_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S18_3LE |\
+ SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE)
+
+static const struct snd_soc_dai_ops adv7511_dai_ops = {
+ .hw_params = adv7511_hw_params,
+ /*.set_sysclk = adv7511_set_dai_sysclk,*/
+ .set_fmt = adv7511_set_dai_fmt,
+};
+
+static struct snd_soc_dai_driver adv7511_dai = {
+ .name = "adv7511",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = ADV7511_RATES,
+ .formats = ADV7511_FORMATS,
+ },
+ .ops = &adv7511_dai_ops,
+};
+
+static int adv7511_suspend(struct snd_soc_codec *codec)
+{
+ return adv7511_set_bias_level(codec, SND_SOC_BIAS_OFF);
+}
+
+static int adv7511_resume(struct snd_soc_codec *codec)
+{
+ return adv7511_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+}
+
+static int adv7511_probe(struct snd_soc_codec *codec)
+{
+ return adv7511_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+}
+
+static int adv7511_remove(struct snd_soc_codec *codec)
+{
+ adv7511_set_bias_level(codec, SND_SOC_BIAS_OFF);
+ return 0;
+}
+
+static struct snd_soc_codec_driver adv7511_codec_driver = {
+ .probe = adv7511_probe,
+ .remove = adv7511_remove,
+ .suspend = adv7511_suspend,
+ .resume = adv7511_resume,
+ .set_bias_level = adv7511_set_bias_level,
+
+ .dapm_widgets = adv7511_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(adv7511_dapm_widgets),
+ .dapm_routes = adv7511_routes,
+ .num_dapm_routes = ARRAY_SIZE(adv7511_routes),
+};
+
+int adv7511_audio_init(struct device *dev)
+{
+ return snd_soc_register_codec(dev, &adv7511_codec_driver,
+ &adv7511_dai, 1);
+}
+
+void adv7511_audio_exit(struct device *dev)
+{
+ snd_soc_unregister_codec(dev);
+}
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 215495c2780c0..7c7a0314a7563 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -10,6 +10,7 @@ config DRM_MSM
select SHMEM
select TMPFS
select QCOM_SCM
+ select SND_SOC_HDMI_CODEC if SND_SOC
default y
help
DRM/KMS driver for MSM/snapdragon.
@@ -23,6 +24,13 @@ config DRM_MSM_REGISTER_LOGGING
that can be parsed by envytools demsm tool. If enabled, register
logging can be switched on via msm.reglog=y module param.
+config DRM_MSM_HDMI_HDCP
+ bool "Enable HDMI HDCP support in MSM DRM driver"
+ depends on DRM_MSM && QCOM_SCM
+ default y
+ help
+ Choose this option to enable HDCP state machine
+
config DRM_MSM_DSI
bool "Enable DSI support in MSM DRM driver"
depends on DRM_MSM
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index ddb4c9d097e45..d7cf79635e73e 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -10,7 +10,6 @@ msm-y := \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
hdmi/hdmi_connector.o \
- hdmi/hdmi_hdcp.o \
hdmi/hdmi_i2c.o \
hdmi/hdmi_phy.o \
hdmi/hdmi_phy_8960.o \
@@ -56,6 +55,8 @@ msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o
+msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
+
msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
mdp/mdp4/mdp4_dsi_encoder.o \
dsi/dsi_cfg.o \
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 4282ec6bbaafd..51fbbf82d67a7 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -921,7 +921,9 @@ static void dsi_sw_reset(struct msm_dsi_host *msm_host)
dsi_write(msm_host, REG_DSI_RESET, 1);
wmb(); /* make sure reset happen */
+ mdelay(100);
dsi_write(msm_host, REG_DSI_RESET, 0);
+ wmb();
}
static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
@@ -1488,7 +1490,7 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
return ret;
DBG("id=%d", msm_host->id);
- if (msm_host->dev)
+ if (msm_host->dev && of_drm_find_panel(msm_host->device_node))
drm_helper_hpd_irq_event(msm_host->dev);
return 0;
@@ -1502,7 +1504,7 @@ static int dsi_host_detach(struct mipi_dsi_host *host,
msm_host->device_node = NULL;
DBG("id=%d", msm_host->id);
- if (msm_host->dev)
+ if (msm_host->dev && of_drm_find_panel(msm_host->device_node))
drm_helper_hpd_irq_event(msm_host->dev);
return 0;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 58ba7ec17f512..c8d1f19c9a6d9 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -198,9 +198,13 @@ static enum drm_connector_status dsi_mgr_connector_detect(
static void dsi_mgr_connector_destroy(struct drm_connector *connector)
{
+ struct dsi_connector *dsi_connector = to_dsi_connector(connector);
+
DBG("");
- drm_connector_unregister(connector);
+
drm_connector_cleanup(connector);
+
+ kfree(dsi_connector);
}
static void dsi_dual_connector_fix_modes(struct drm_connector *connector)
@@ -538,12 +542,9 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
struct dsi_connector *dsi_connector;
int ret, i;
- dsi_connector = devm_kzalloc(msm_dsi->dev->dev,
- sizeof(*dsi_connector), GFP_KERNEL);
- if (!dsi_connector) {
- ret = -ENOMEM;
- goto fail;
- }
+ dsi_connector = kzalloc(sizeof(*dsi_connector), GFP_KERNEL);
+ if (!dsi_connector)
+ return ERR_PTR(-ENOMEM);
dsi_connector->id = id;
@@ -552,7 +553,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
ret = drm_connector_init(msm_dsi->dev, connector,
&dsi_mgr_connector_funcs, DRM_MODE_CONNECTOR_DSI);
if (ret)
- goto fail;
+ return ERR_PTR(ret);
drm_connector_helper_add(connector, &dsi_mgr_conn_helper_funcs);
@@ -565,21 +566,11 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- ret = drm_connector_register(connector);
- if (ret)
- goto fail;
-
for (i = 0; i < MSM_DSI_ENCODER_NUM; i++)
drm_mode_connector_attach_encoder(connector,
msm_dsi->encoders[i]);
return connector;
-
-fail:
- if (connector)
- dsi_mgr_connector_destroy(connector);
-
- return ERR_PTR(ret);
}
/* initialize bridge */
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
index 5cd438f91afea..019e92d51f6de 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
@@ -83,6 +83,13 @@ void msm_dsi_pll_helper_clk_unprepare(struct clk_hw *hw)
dsi_pll_disable(pll);
}
+int msm_dsi_pll_helper_clk_is_enabled(struct clk_hw *hw)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+
+ return pll->pll_on;
+}
+
void msm_dsi_pll_helper_unregister_clks(struct platform_device *pdev,
struct clk **clks, u32 num_clks)
{
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
index 2cf1664723e83..ed632688ac667 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
@@ -76,6 +76,7 @@ long msm_dsi_pll_helper_clk_round_rate(struct clk_hw *hw,
unsigned long rate, unsigned long *parent_rate);
int msm_dsi_pll_helper_clk_prepare(struct clk_hw *hw);
void msm_dsi_pll_helper_clk_unprepare(struct clk_hw *hw);
+int msm_dsi_pll_helper_clk_is_enabled(struct clk_hw *hw);
/* misc */
void msm_dsi_pll_helper_unregister_clks(struct platform_device *pdev,
struct clk **clks, u32 num_clks);
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
index 598fdaff0a41a..20c571fc97c98 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm.c
@@ -248,15 +248,6 @@ static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
-{
- struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
- struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-
- return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
- POLL_TIMEOUT_US);
-}
-
static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -312,7 +303,7 @@ static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
.prepare = msm_dsi_pll_helper_clk_prepare,
.unprepare = msm_dsi_pll_helper_clk_unprepare,
- .is_enabled = dsi_pll_28nm_clk_is_enabled,
+ .is_enabled = msm_dsi_pll_helper_clk_is_enabled,
};
/*
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
index 38c90e1eb0028..ab7743cfc01af 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_28nm_8960.c
@@ -156,15 +156,6 @@ static int dsi_pll_28nm_clk_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static int dsi_pll_28nm_clk_is_enabled(struct clk_hw *hw)
-{
- struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
- struct dsi_pll_28nm *pll_28nm = to_pll_28nm(pll);
-
- return pll_28nm_poll_for_ready(pll_28nm, POLL_MAX_READS,
- POLL_TIMEOUT_US);
-}
-
static unsigned long dsi_pll_28nm_clk_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{
@@ -206,7 +197,7 @@ static const struct clk_ops clk_ops_dsi_pll_28nm_vco = {
.recalc_rate = dsi_pll_28nm_clk_recalc_rate,
.prepare = msm_dsi_pll_helper_clk_prepare,
.unprepare = msm_dsi_pll_helper_clk_unprepare,
- .is_enabled = dsi_pll_28nm_clk_is_enabled,
+ .is_enabled = msm_dsi_pll_helper_clk_is_enabled,
};
/*
diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c
index b4d1b469862a4..72360cd038c02 100644
--- a/drivers/gpu/drm/msm/edp/edp_connector.c
+++ b/drivers/gpu/drm/msm/edp/edp_connector.c
@@ -37,7 +37,7 @@ static void edp_connector_destroy(struct drm_connector *connector)
struct edp_connector *edp_connector = to_edp_connector(connector);
DBG("");
- drm_connector_unregister(connector);
+
drm_connector_cleanup(connector);
kfree(edp_connector);
@@ -124,10 +124,8 @@ struct drm_connector *msm_edp_connector_init(struct msm_edp *edp)
int ret;
edp_connector = kzalloc(sizeof(*edp_connector), GFP_KERNEL);
- if (!edp_connector) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!edp_connector)
+ return ERR_PTR(-ENOMEM);
edp_connector->edp = edp;
@@ -136,7 +134,7 @@ struct drm_connector *msm_edp_connector_init(struct msm_edp *edp)
ret = drm_connector_init(edp->dev, connector, &edp_connector_funcs,
DRM_MODE_CONNECTOR_eDP);
if (ret)
- goto fail;
+ return ERR_PTR(ret);
drm_connector_helper_add(connector, &edp_connector_helper_funcs);
@@ -147,17 +145,7 @@ struct drm_connector *msm_edp_connector_init(struct msm_edp *edp)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- ret = drm_connector_register(connector);
- if (ret)
- goto fail;
-
drm_mode_connector_attach_encoder(connector, edp->encoder);
return connector;
-
-fail:
- if (connector)
- edp_connector_destroy(connector);
-
- return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 51b9ea552f976..f159fa8e11d8a 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -19,6 +19,8 @@
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
+#include <sound/hdmi-codec.h>
+#include <sound/msm_hdmi_audio.h>
#include "hdmi.h"
void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
@@ -434,6 +436,7 @@ static int msm_hdmi_get_gpio(struct device_node *of_node, const char *name)
return gpio;
}
+static void msm_hdmi_register_audio_driver(struct hdmi *hdmi, struct device *dev);
static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
{
struct drm_device *drm = dev_get_drvdata(master);
@@ -467,6 +470,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(hdmi))
return PTR_ERR(hdmi);
priv->hdmi = hdmi;
+ msm_hdmi_register_audio_driver(hdmi, dev);
return 0;
}
@@ -477,6 +481,7 @@ static void msm_hdmi_unbind(struct device *dev, struct device *master,
struct drm_device *drm = dev_get_drvdata(master);
struct msm_drm_private *priv = drm->dev_private;
if (priv->hdmi) {
+ platform_device_unregister(priv->hdmi->audio_pdev);
msm_hdmi_destroy(priv->hdmi);
priv->hdmi = NULL;
}
@@ -486,6 +491,124 @@ static const struct component_ops msm_hdmi_ops = {
.bind = msm_hdmi_bind,
.unbind = msm_hdmi_unbind,
};
+/*
+ * HDMI audio codec callbacks
+ */
+
+static int msm_hdmi_audio_hw_params(struct device *dev,
+ struct hdmi_codec_daifmt *daifmt,
+ struct hdmi_codec_params *params)
+{
+ struct hdmi *hdmi = dev_get_drvdata(dev);
+ unsigned int chan;// = params->cea.channels;
+ unsigned int channel_allocation = 0;
+ unsigned int rate;//
+ unsigned int level_shift = 0; /* 0dB */
+ bool down_mix = false;
+ dev_dbg(dev, "%s: %u Hz, %d bit, %d channels\n", __func__,
+ params->sample_rate, params->sample_width, chan);
+
+ switch (params->cea.channels) {
+ case 2:
+ channel_allocation = 0;
+ chan = MSM_HDMI_AUDIO_CHANNEL_2;
+ break;
+ case 4:
+ channel_allocation = 0;
+ chan = MSM_HDMI_AUDIO_CHANNEL_4;
+ break;
+ case 6:
+ channel_allocation = 0x0B;
+ chan = MSM_HDMI_AUDIO_CHANNEL_6;
+ break;
+ case 8:
+ channel_allocation = 0x1F;
+ chan = MSM_HDMI_AUDIO_CHANNEL_8;
+ break;
+ default:
+ //dev_err(hdmi->dev, "channel[%d] not supported!\n", chan);
+ return -EINVAL;
+ }
+
+ switch (params->sample_rate) {
+ case 32000:
+ rate = HDMI_SAMPLE_RATE_32KHZ;
+ case 44100:
+ rate = HDMI_SAMPLE_RATE_48KHZ;
+ case 48000:
+ rate = HDMI_SAMPLE_RATE_48KHZ;
+ case 88200:
+ rate = HDMI_SAMPLE_RATE_88_2KHZ;
+ case 96000:
+ rate = HDMI_SAMPLE_RATE_96KHZ;
+ case 176400:
+ rate = HDMI_SAMPLE_RATE_176_4KHZ;
+ case 192000:
+ rate = HDMI_SAMPLE_RATE_192KHZ;
+ break;
+ default:
+ dev_err(dev, "rate[%d] not supported!\n",
+ params->sample_rate);
+ return -EINVAL;
+ }
+ rate = HDMI_SAMPLE_RATE_48KHZ;
+ channel_allocation = 0;
+
+ //FIXME..
+ msm_hdmi_audio_set_sample_rate(hdmi, rate);
+
+ msm_hdmi_audio_info_setup(hdmi, 1, chan,
+ channel_allocation, level_shift, down_mix);
+
+
+
+ return 0;
+}
+
+static int msm_hdmi_audio_startup(struct device *dev,
+ void (*abort_cb)(struct device *dev))
+{
+ struct hdmi *hdmi = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ //msm_hdmi_audio_enable(hdmi);
+
+ return 0;
+}
+
+static void msm_hdmi_audio_shutdown(struct device *dev)
+{
+ struct hdmi *hdmi = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ msm_hdmi_audio_info_setup(hdmi, 0, 0, 0, 0, 0);
+}
+
+static const struct hdmi_codec_ops msm_hdmi_audio_codec_ops = {
+ .hw_params = msm_hdmi_audio_hw_params,
+ .audio_startup = msm_hdmi_audio_startup,
+ .audio_shutdown = msm_hdmi_audio_shutdown,
+};
+
+static void msm_hdmi_register_audio_driver(struct hdmi *hdmi, struct device *dev)
+{
+ struct hdmi_codec_pdata codec_data = {
+ .ops = &msm_hdmi_audio_codec_ops,
+ .max_i2s_channels = 2,
+ .i2s = 1,
+ };
+ //struct platform_device *pdev;
+
+ hdmi->audio_pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO, &codec_data,
+ sizeof(codec_data));
+ if (IS_ERR(hdmi->audio_pdev))
+ return;
+
+ DRM_INFO("%s driver bound to HDMI\n", HDMI_CODEC_DRV_NAME);
+}
static int msm_hdmi_dev_probe(struct platform_device *pdev)
{
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 65428cf233ce3..66c5346883dc7 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -50,6 +50,7 @@ struct hdmi_hdcp_ctrl;
struct hdmi {
struct drm_device *dev;
struct platform_device *pdev;
+ struct platform_device *audio_pdev;
const struct hdmi_platform_config *config;
@@ -243,10 +244,21 @@ struct i2c_adapter *msm_hdmi_i2c_init(struct hdmi *hdmi);
/*
* hdcp
*/
+#ifdef CONFIG_DRM_MSM_HDMI_HDCP
struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi);
void msm_hdmi_hdcp_destroy(struct hdmi *hdmi);
void msm_hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl);
void msm_hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl);
void msm_hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+#else
+static inline struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi)
+{
+ return ERR_PTR(-ENXIO);
+}
+static inline void msm_hdmi_hdcp_destroy(struct hdmi *hdmi) {}
+static inline void msm_hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl) {}
+static inline void msm_hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl) {}
+static inline void msm_hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl) {}
+#endif
#endif /* __HDMI_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 26129bff2dd69..77b0f6e2b3293 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -112,6 +112,9 @@ static int gpio_config(struct hdmi *hdmi, bool on)
for (i = 0; i < HDMI_MAX_NUM_GPIO; i++) {
struct hdmi_gpio_data gpio = config->gpios[i];
+ if (gpio.num == -1)
+ continue;
+
if (gpio.output) {
int value = gpio.value ? 0 : 1;
@@ -126,8 +129,10 @@ static int gpio_config(struct hdmi *hdmi, bool on)
return 0;
err:
- while (i--)
- gpio_free(config->gpios[i].num);
+ while (i--) {
+ if (config->gpios[i].num != -1)
+ gpio_free(config->gpios[i].num);
+ }
return ret;
}
@@ -281,6 +286,10 @@ void msm_hdmi_connector_irq(struct drm_connector *connector)
static enum drm_connector_status detect_reg(struct hdmi *hdmi)
{
uint32_t hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+
+ /* force detect to show connected status */
+ return connector_status_connected;
+
return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ?
connector_status_connected : connector_status_disconnected;
}
@@ -341,7 +350,6 @@ static void hdmi_connector_destroy(struct drm_connector *connector)
hdp_disable(hdmi_connector);
- drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(hdmi_connector);
@@ -433,10 +441,8 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
int ret;
hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL);
- if (!hdmi_connector) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!hdmi_connector)
+ return ERR_PTR(-ENOMEM);
hdmi_connector->hdmi = hdmi;
INIT_WORK(&hdmi_connector->hpd_work, msm_hdmi_hotplug_work);
@@ -453,21 +459,13 @@ struct drm_connector *msm_hdmi_connector_init(struct hdmi *hdmi)
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- drm_connector_register(connector);
-
ret = hpd_enable(hdmi_connector);
if (ret) {
dev_err(&hdmi->pdev->dev, "failed to enable HPD: %d\n", ret);
- goto fail;
+ return ERR_PTR(ret);
}
drm_mode_connector_attach_encoder(connector, hdmi->encoder);
return connector;
-
-fail:
- if (connector)
- hdmi_connector_destroy(connector);
-
- return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
index e73e1742b2500..2648cd7631efc 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
@@ -48,7 +48,6 @@ static void mdp4_lvds_connector_destroy(struct drm_connector *connector)
struct mdp4_lvds_connector *mdp4_lvds_connector =
to_mdp4_lvds_connector(connector);
- drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(mdp4_lvds_connector);
@@ -121,13 +120,10 @@ struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
{
struct drm_connector *connector = NULL;
struct mdp4_lvds_connector *mdp4_lvds_connector;
- int ret;
mdp4_lvds_connector = kzalloc(sizeof(*mdp4_lvds_connector), GFP_KERNEL);
- if (!mdp4_lvds_connector) {
- ret = -ENOMEM;
- goto fail;
- }
+ if (!mdp4_lvds_connector)
+ return ERR_PTR(-ENOMEM);
mdp4_lvds_connector->encoder = encoder;
mdp4_lvds_connector->panel_node = panel_node;
@@ -143,15 +139,7 @@ struct drm_connector *mdp4_lvds_connector_init(struct drm_device *dev,
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- drm_connector_register(connector);
-
drm_mode_connector_attach_encoder(connector, encoder);
return connector;
-
-fail:
- if (connector)
- mdp4_lvds_connector_destroy(connector);
-
- return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index 69094cb281037..e1695c0921f67 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -32,39 +32,23 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
-#include <mach/board.h>
+#ifdef CONFIG_MSM_BUS_SCALING
#include <linux/msm-bus.h>
-#include <linux/msm-bus-board.h>
-#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
- { \
- .src = MSM_BUS_MASTER_MDP_PORT0, \
- .dst = MSM_BUS_SLAVE_EBI_CH0, \
- .ab = (ab_val), \
- .ib = (ib_val), \
- }
-
-static struct msm_bus_vectors mdp_bus_vectors[] = {
- MDP_BUS_VECTOR_ENTRY(0, 0),
- MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000),
-};
-static struct msm_bus_paths mdp_bus_usecases[] = { {
- .num_paths = 1,
- .vectors = &mdp_bus_vectors[0],
-}, {
- .num_paths = 1,
- .vectors = &mdp_bus_vectors[1],
-} };
-static struct msm_bus_scale_pdata mdp_bus_scale_table = {
- .usecase = mdp_bus_usecases,
- .num_usecases = ARRAY_SIZE(mdp_bus_usecases),
- .name = "mdss_mdp",
-};
static void bs_init(struct mdp5_cmd_encoder *mdp5_cmd_enc)
{
- mdp5_cmd_enc->bsc = msm_bus_scale_register_client(
- &mdp_bus_scale_table);
+ struct drm_encoder *encoder = &mdp5_cmd_enc->base;
+ struct platform_device *pdev = encoder->dev->platformdev;
+ struct msm_bus_scale_pdata *bus_scale_table;
+
+ bus_scale_table = msm_bus_cl_get_pdata(pdev);
+ if (!bus_scale_table) {
+ DBG("bus scaling is disabled\n");
+ } else {
+ mdp5_cmd_enc->bsc = msm_bus_scale_register_client(
+ bus_scale_table);
+ }
+
DBG("bus scale client: %08x", mdp5_cmd_enc->bsc);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index 1d95f9fd9dc7a..7937e7e5b6440 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -38,39 +38,23 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
return to_mdp5_kms(to_mdp_kms(priv->kms));
}
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
-#include <mach/board.h>
-#include <mach/msm_bus.h>
-#include <mach/msm_bus_board.h>
-#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val) \
- { \
- .src = MSM_BUS_MASTER_MDP_PORT0, \
- .dst = MSM_BUS_SLAVE_EBI_CH0, \
- .ab = (ab_val), \
- .ib = (ib_val), \
- }
-
-static struct msm_bus_vectors mdp_bus_vectors[] = {
- MDP_BUS_VECTOR_ENTRY(0, 0),
- MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000),
-};
-static struct msm_bus_paths mdp_bus_usecases[] = { {
- .num_paths = 1,
- .vectors = &mdp_bus_vectors[0],
-}, {
- .num_paths = 1,
- .vectors = &mdp_bus_vectors[1],
-} };
-static struct msm_bus_scale_pdata mdp_bus_scale_table = {
- .usecase = mdp_bus_usecases,
- .num_usecases = ARRAY_SIZE(mdp_bus_usecases),
- .name = "mdss_mdp",
-};
+#ifdef CONFIG_MSM_BUS_SCALING
+#include <linux/msm-bus.h>
static void bs_init(struct mdp5_encoder *mdp5_encoder)
{
- mdp5_encoder->bsc = msm_bus_scale_register_client(
- &mdp_bus_scale_table);
+ struct drm_encoder *encoder = &mdp5_encoder->base;
+ struct platform_device *pdev = encoder->dev->platformdev;
+ struct msm_bus_scale_pdata *bus_scale_table;
+
+ bus_scale_table = msm_bus_cl_get_pdata(pdev);
+ if (!bus_scale_table) {
+ DBG("bus scaling is disabled\n");
+ } else {
+ mdp5_encoder->bsc = msm_bus_scale_register_client(
+ bus_scale_table);
+ }
+
DBG("bus scale client: %08x", mdp5_encoder->bsc);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 484b4d15e71d3..62de10488b99c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -32,6 +32,7 @@ static int mdp5_hw_init(struct msm_kms *kms)
unsigned long flags;
pm_runtime_get_sync(dev->dev);
+ mdp5_enable(mdp5_kms);
/* Magic unknown register writes:
*
@@ -63,7 +64,8 @@ static int mdp5_hw_init(struct msm_kms *kms)
mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
- pm_runtime_put_sync(dev->dev);
+ mdp5_disable(mdp5_kms);
+ //pm_runtime_put_sync(dev->dev);
return 0;
}
@@ -159,6 +161,22 @@ static const struct mdp_kms_funcs kms_funcs = {
.set_irqmask = mdp5_set_irqmask,
};
+static void mdp5_disable_bus_clocks(struct mdp5_kms *mdp5_kms)
+{
+ if (mdp5_kms->mmagic_mdss_axi_clk)
+ clk_disable_unprepare(mdp5_kms->mmagic_mdss_axi_clk);
+ if (mdp5_kms->rpm_mmaxi_clk)
+ clk_disable_unprepare(mdp5_kms->rpm_mmaxi_clk);
+}
+
+static void mdp5_enable_bus_clocks(struct mdp5_kms *mdp5_kms)
+{
+ if (mdp5_kms->rpm_mmaxi_clk)
+ clk_prepare_enable(mdp5_kms->rpm_mmaxi_clk);
+ if (mdp5_kms->mmagic_mdss_axi_clk)
+ clk_prepare_enable(mdp5_kms->mmagic_mdss_axi_clk);
+}
+
int mdp5_disable(struct mdp5_kms *mdp5_kms)
{
DBG("");
@@ -166,9 +184,14 @@ int mdp5_disable(struct mdp5_kms *mdp5_kms)
clk_disable_unprepare(mdp5_kms->ahb_clk);
clk_disable_unprepare(mdp5_kms->axi_clk);
clk_disable_unprepare(mdp5_kms->core_clk);
+ if (mdp5_kms->iommu_clk)
+ clk_disable_unprepare(mdp5_kms->iommu_clk);
if (mdp5_kms->lut_clk)
clk_disable_unprepare(mdp5_kms->lut_clk);
+ if (mdp5_kms->mmagic_ahb_clk)
+ clk_disable_unprepare(mdp5_kms->mmagic_ahb_clk);
+ mdp5_disable_bus_clocks(mdp5_kms);
return 0;
}
@@ -176,11 +199,17 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
{
DBG("");
+ mdp5_enable_bus_clocks(mdp5_kms);
+
+ if (mdp5_kms->mmagic_ahb_clk)
+ clk_prepare_enable(mdp5_kms->mmagic_ahb_clk);
clk_prepare_enable(mdp5_kms->ahb_clk);
clk_prepare_enable(mdp5_kms->axi_clk);
clk_prepare_enable(mdp5_kms->core_clk);
if (mdp5_kms->lut_clk)
clk_prepare_enable(mdp5_kms->lut_clk);
+ if (mdp5_kms->iommu_clk)
+ clk_prepare_enable(mdp5_kms->iommu_clk);
return 0;
}
@@ -432,7 +461,7 @@ static void read_hw_revision(struct mdp5_kms *mdp5_kms,
mdp5_enable(mdp5_kms);
version = mdp5_read(mdp5_kms, REG_MDSS_HW_VERSION);
- mdp5_disable(mdp5_kms);
+ //mdp5_disable(mdp5_kms);
*major = FIELD(version, MDSS_HW_VERSION_MAJOR);
*minor = FIELD(version, MDSS_HW_VERSION_MINOR);
@@ -647,6 +676,14 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
/* optional clocks: */
get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false);
+ get_clk(pdev, &mdp5_kms->mmagic_ahb_clk, "mmagic_iface_clk", false);
+ get_clk(pdev, &mdp5_kms->iommu_clk, "iommu_clk", false);
+
+ /* HACK: get bus clock */
+ get_clk(pdev, &mdp5_kms->mmagic_mdss_axi_clk, "mmagic_mdss_bus_clk",
+ false);
+ get_clk(pdev, &mdp5_kms->rpm_mmaxi_clk, "rpm_mmaxi_clk",
+ false);
/* we need to set a default rate before enabling. Set a safe
* rate first, then figure out hw revision, and then set a
@@ -669,6 +706,10 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
/* TODO: compute core clock rate at runtime */
clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk);
+ /* HACK : set the axi clock to some valid rate */
+ if (mdp5_kms->mmagic_mdss_axi_clk)
+ clk_set_rate(mdp5_kms->mmagic_mdss_axi_clk, 75000000);
+
/*
* Some chipsets have a Shared Memory Pool (SMP), while others
* have dedicated latency buffering per source pipe instead;
@@ -703,7 +744,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
}
- mdp5_disable(mdp5_kms);
+ /* TODO: Remove this after runtime pm adaptation */
+ //mdp5_disable(mdp5_kms);
mdelay(16);
if (config->platform.iommu) {
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 9a25898239d3d..a11918888c6cf 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -53,6 +53,12 @@ struct mdp5_kms {
struct clk *core_clk;
struct clk *lut_clk;
struct clk *vsync_clk;
+ struct clk *mmagic_ahb_clk;
+ struct clk *iommu_clk;
+
+ /* bus clocks */
+ struct clk *rpm_mmaxi_clk;
+ struct clk *mmagic_mdss_axi_clk;
/*
* lock to protect access to global resources: ie., following register:
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c03b96709179b..e040a64c0c7f3 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -173,13 +173,11 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
return 0;
}
-/*
- * DRM operations:
- */
-
-static int msm_unload(struct drm_device *dev)
+static int msm_drm_uninit(struct device *dev)
{
- struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *ddev = platform_get_drvdata(pdev);
+ struct msm_drm_private *priv = ddev->dev_private;
struct msm_kms *kms = priv->kms;
struct msm_gpu *gpu = priv->gpu;
struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
@@ -195,31 +193,34 @@ static int msm_unload(struct drm_device *dev)
kfree(vbl_ev);
}
- drm_kms_helper_poll_fini(dev);
+ drm_kms_helper_poll_fini(ddev);
+
+ drm_connector_unplug_all(ddev);
+
+ drm_dev_unregister(ddev);
#ifdef CONFIG_DRM_FBDEV_EMULATION
if (fbdev && priv->fbdev)
- msm_fbdev_free(dev);
+ msm_fbdev_free(ddev);
#endif
- drm_mode_config_cleanup(dev);
- drm_vblank_cleanup(dev);
+ drm_mode_config_cleanup(ddev);
- pm_runtime_get_sync(dev->dev);
- drm_irq_uninstall(dev);
- pm_runtime_put_sync(dev->dev);
+ pm_runtime_get_sync(dev);
+ drm_irq_uninstall(ddev);
+ pm_runtime_put_sync(dev);
flush_workqueue(priv->wq);
destroy_workqueue(priv->wq);
if (kms) {
- pm_runtime_disable(dev->dev);
+ pm_runtime_disable(dev);
kms->funcs->destroy(kms);
}
if (gpu) {
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&ddev->struct_mutex);
gpu->funcs->pm_suspend(gpu);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&ddev->struct_mutex);
gpu->funcs->destroy(gpu);
}
@@ -227,13 +228,14 @@ static int msm_unload(struct drm_device *dev)
DEFINE_DMA_ATTRS(attrs);
dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
drm_mm_takedown(&priv->vram.mm);
- dma_free_attrs(dev->dev, priv->vram.size, NULL,
- priv->vram.paddr, &attrs);
+ dma_free_attrs(dev, priv->vram.size, NULL,
+ priv->vram.paddr, &attrs);
}
- component_unbind_all(dev->dev, dev);
+ component_unbind_all(dev, ddev);
- dev->dev_private = NULL;
+ ddev->dev_private = NULL;
+ drm_dev_unref(ddev);
kfree(priv);
@@ -321,20 +323,31 @@ static int msm_init_vram(struct drm_device *dev)
return ret;
}
-static int msm_load(struct drm_device *dev, unsigned long flags)
+static int msm_drm_init(struct device *dev, struct drm_driver *drv)
{
- struct platform_device *pdev = dev->platformdev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *ddev;
struct msm_drm_private *priv;
+ struct drm_connector *connector;
struct msm_kms *kms;
int ret;
+ ddev = drm_dev_alloc(drv, dev);
+ if (!ddev) {
+ dev_err(dev, "failed to allocate drm_device\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, ddev);
+ ddev->platformdev = pdev;
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
- dev_err(dev->dev, "failed to allocate private data\n");
+ drm_dev_unref(ddev);
return -ENOMEM;
}
- dev->dev_private = priv;
+ ddev->dev_private = priv;
priv->wq = alloc_ordered_workqueue("msm", 0);
init_waitqueue_head(&priv->fence_event);
@@ -346,25 +359,26 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
spin_lock_init(&priv->vblank_ctrl.lock);
- drm_mode_config_init(dev);
-
- platform_set_drvdata(pdev, dev);
+ drm_mode_config_init(ddev);
/* Bind all our sub-components: */
- ret = component_bind_all(dev->dev, dev);
- if (ret)
+ ret = component_bind_all(dev, ddev);
+ if (ret) {
+ kfree(priv);
+ drm_dev_unref(ddev);
return ret;
+ }
- ret = msm_init_vram(dev);
+ ret = msm_init_vram(ddev);
if (ret)
goto fail;
switch (get_mdp_ver(pdev)) {
case 4:
- kms = mdp4_kms_init(dev);
+ kms = mdp4_kms_init(ddev);
break;
case 5:
- kms = mdp5_kms_init(dev);
+ kms = mdp5_kms_init(ddev);
break;
default:
kms = ERR_PTR(-ENODEV);
@@ -378,7 +392,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
* and (for example) use dmabuf/prime to share buffers with
* imx drm driver on iMX5
*/
- dev_err(dev->dev, "failed to load kms\n");
+ dev_err(dev, "failed to load kms\n");
ret = PTR_ERR(kms);
goto fail;
}
@@ -386,50 +400,71 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
priv->kms = kms;
if (kms) {
- pm_runtime_enable(dev->dev);
+ pm_runtime_enable(dev);
ret = kms->funcs->hw_init(kms);
if (ret) {
- dev_err(dev->dev, "kms hw init failed: %d\n", ret);
+ dev_err(dev, "kms hw init failed: %d\n", ret);
goto fail;
}
}
- dev->mode_config.funcs = &mode_config_funcs;
+ ddev->mode_config.funcs = &mode_config_funcs;
- ret = drm_vblank_init(dev, priv->num_crtcs);
+ ret = drm_vblank_init(ddev, priv->num_crtcs);
if (ret < 0) {
- dev_err(dev->dev, "failed to initialize vblank\n");
+ dev_err(dev, "failed to initialize vblank\n");
goto fail;
}
- pm_runtime_get_sync(dev->dev);
- ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
- pm_runtime_put_sync(dev->dev);
+ pm_runtime_get_sync(dev);
+ ret = drm_irq_install(ddev, platform_get_irq(pdev, 0));
+ //pm_runtime_put_sync(dev);
if (ret < 0) {
- dev_err(dev->dev, "failed to install IRQ handler\n");
+ dev_err(dev, "failed to install IRQ handler\n");
goto fail;
}
- drm_mode_config_reset(dev);
+ ret = drm_dev_register(ddev, 0);
+ if (ret)
+ goto fail;
+
+ mutex_lock(&ddev->mode_config.mutex);
+
+ drm_for_each_connector(connector, ddev) {
+ ret = drm_connector_register(connector);
+ if (ret) {
+ dev_err(dev, "failed to register connectors\n");
+ mutex_unlock(&ddev->mode_config.mutex);
+ goto fail;
+ }
+ }
+
+ mutex_unlock(&ddev->mode_config.mutex);
+
+ drm_mode_config_reset(ddev);
#ifdef CONFIG_DRM_FBDEV_EMULATION
if (fbdev)
- priv->fbdev = msm_fbdev_init(dev);
+ priv->fbdev = msm_fbdev_init(ddev);
#endif
- ret = msm_debugfs_late_init(dev);
+ ret = msm_debugfs_late_init(ddev);
if (ret)
goto fail;
- drm_kms_helper_poll_init(dev);
+ drm_kms_helper_poll_init(ddev);
return 0;
fail:
- msm_unload(dev);
+ msm_drm_uninit(dev);
return ret;
}
+/*
+ * DRM operations:
+ */
+
static void load_gpu(struct drm_device *dev)
{
static DEFINE_MUTEX(init_lock);
@@ -465,7 +500,6 @@ static void msm_preclose(struct drm_device *dev, struct drm_file *file)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_file_private *ctx = file->driver_priv;
- struct msm_kms *kms = priv->kms;
mutex_lock(&dev->struct_mutex);
if (ctx == priv->lastctx)
@@ -952,8 +986,6 @@ static struct drm_driver msm_driver = {
DRIVER_RENDER |
DRIVER_ATOMIC |
DRIVER_MODESET,
- .load = msm_load,
- .unload = msm_unload,
.open = msm_open,
.preclose = msm_preclose,
.lastclose = msm_lastclose,
@@ -1053,12 +1085,12 @@ static int add_components(struct device *dev, struct component_match **matchptr,
static int msm_drm_bind(struct device *dev)
{
- return drm_platform_init(&msm_driver, to_platform_device(dev));
+ return msm_drm_init(dev, &msm_driver);
}
static void msm_drm_unbind(struct device *dev)
{
- drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
+ msm_drm_uninit(dev);
}
static const struct component_master_ops msm_drm_ops = {
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index a7a0b6d9b057c..b733469d7a03f 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -21,6 +21,7 @@
struct msm_iommu {
struct msm_mmu base;
struct iommu_domain *domain;
+ bool has_ctx;
};
#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
@@ -35,14 +36,46 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names,
int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- return iommu_attach_device(iommu->domain, mmu->dev);
+ int i, ret;
+
+ if (!iommu->has_ctx)
+ return iommu_attach_device(iommu->domain, mmu->dev);
+
+ for (i = 0; i < cnt; i++) {
+ struct device *ctx = msm_iommu_get_ctx(names[i]);
+
+ if (IS_ERR_OR_NULL(ctx)) {
+ dev_warn(mmu->dev, "couldn't get %s context", names[i]);
+ continue;
+ }
+
+ ret = iommu_attach_device(iommu->domain, ctx);
+ if (ret) {
+ dev_warn(mmu->dev, "could not attach iommu to %s", names[i]);
+ return ret;
+ }
+ }
+
+ return 0;
}
static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- iommu_detach_device(iommu->domain, mmu->dev);
+ int i;
+
+ if (!iommu->has_ctx)
+ iommu_detach_device(iommu->domain, mmu->dev);
+
+ for (i = 0; i < cnt; i++) {
+ struct device *ctx = msm_iommu_get_ctx(names[i]);
+
+ if (IS_ERR_OR_NULL(ctx))
+ continue;
+
+ iommu_detach_device(iommu->domain, ctx);
+ }
}
static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
@@ -59,10 +92,10 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
return -EINVAL;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- u32 pa = sg_phys(sg) - sg->offset;
+ dma_addr_t pa = sg_phys(sg) - sg->offset;
size_t bytes = sg->length + sg->offset;
- VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
+ VERB("map[%d]: %08x %08lx(%zx)", i, da, (unsigned long)pa, bytes);
ret = iommu_map(domain, da, pa, bytes, prot);
if (ret)
@@ -101,7 +134,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
if (unmapped < bytes)
return unmapped;
- VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
+ VERB("unmap[%d]: %08x(%zx)", i, da, bytes);
BUG_ON(!PAGE_ALIGNED(bytes));
@@ -138,5 +171,9 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
msm_mmu_init(&iommu->base, dev, &funcs);
iommu_set_fault_handler(domain, msm_fault_handler, dev);
+ if (of_find_compatible_node(NULL, NULL, "qcom,msm-smmu-v2") ||
+ of_find_compatible_node(NULL, NULL, "qcom,msm-mmu-500"))
+ iommu->has_ctx = true;
+
return &iommu->base;
}
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index b8ca9a0e9170b..73445c70cde78 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -45,4 +45,12 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
+#ifdef CONFIG_QCOM_IOMMU_V1
+struct device *msm_iommu_get_ctx(const char *ctx_name);
+#else
+static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
+{
+ return NULL;
+}
+#endif
#endif /* __MSM_MMU_H__ */
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 4f9c5c6deaed1..df4f13f15e9c3 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2097,6 +2097,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LILLIPUT, USB_PROD_ID_LILLIPUT) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 0238f0169e48f..2f2d18ad65eee 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -1067,4 +1067,8 @@
#define USB_DEVICE_ID_RAPHNET_2NES2SNES 0x0002
#define USB_DEVICE_ID_RAPHNET_4NES4SNES 0x0003
+/* Lilliput Capacitive TouchScreen */
+#define USB_VENDOR_ID_LILLIPUT 0x1391
+#define USB_PROD_ID_LILLIPUT 0x2112
+
#endif
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index c741f5e50a668..578e34bfa5a13 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1256,6 +1256,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_CJTOUCH,
USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040) },
+ /* Lilliput multitouch panels */
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_LILLIPUT,
+ USB_PROD_ID_LILLIPUT) },
+
/* CVTouch panels */
{ .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_CVTOUCH,
diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c
index e317b75357a01..7ba4fe1bfd3ee 100644
--- a/drivers/input/misc/pm8941-pwrkey.c
+++ b/drivers/input/misc/pm8941-pwrkey.c
@@ -28,6 +28,7 @@
#define PON_RT_STS 0x10
#define PON_KPDPWR_N_SET BIT(0)
+#define PON_RESIN_N_SET BIT(1)
#define PON_PS_HOLD_RST_CTL 0x5a
#define PON_PS_HOLD_RST_CTL2 0x5b
@@ -37,6 +38,7 @@
#define PON_PS_HOLD_TYPE_HARD_RESET 7
#define PON_PULL_CTL 0x70
+#define PON_RESIN_PULL_UP BIT(0)
#define PON_KPDPWR_PULL_UP BIT(1)
#define PON_DBC_CTL 0x71
@@ -52,6 +54,7 @@ struct pm8941_pwrkey {
unsigned int revision;
struct notifier_block reboot_notifier;
+ unsigned int resin_code;
};
static int pm8941_reboot_notify(struct notifier_block *nb,
@@ -130,6 +133,25 @@ static irqreturn_t pm8941_pwrkey_irq(int irq, void *_data)
return IRQ_HANDLED;
}
+static irqreturn_t pm8941_resin_irq(int irq, void *_data)
+{
+ struct pm8941_pwrkey *pwrkey = _data;
+ unsigned int sts;
+ int error;
+
+ error = regmap_read(pwrkey->regmap,
+ pwrkey->baseaddr + PON_RT_STS, &sts);
+ if (error)
+ return IRQ_HANDLED;
+
+ input_report_key(pwrkey->input, pwrkey->resin_code,
+ !!(sts & PON_RESIN_N_SET));
+
+ input_sync(pwrkey->input);
+
+ return IRQ_HANDLED;
+}
+
static int __maybe_unused pm8941_pwrkey_suspend(struct device *dev)
{
struct pm8941_pwrkey *pwrkey = dev_get_drvdata(dev);
@@ -153,6 +175,46 @@ static int __maybe_unused pm8941_pwrkey_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(pm8941_pwr_key_pm_ops,
pm8941_pwrkey_suspend, pm8941_pwrkey_resume);
+static void pm8941_resin_setup(struct platform_device *pdev,
+ struct pm8941_pwrkey *pwrkey)
+{
+ int irq, error;
+ bool pull_up;
+ u32 code;
+
+ irq = platform_get_irq(pdev, 1);
+ if (irq < 0)
+ return;
+
+ pull_up = of_property_read_bool(pdev->dev.of_node, "resin-pull-up");
+
+ error = regmap_update_bits(pwrkey->regmap,
+ pwrkey->baseaddr + PON_PULL_CTL,
+ PON_RESIN_PULL_UP,
+ pull_up ? PON_RESIN_PULL_UP : 0);
+ if (error) {
+ dev_err(&pdev->dev, "failed to set pull: %d\n", error);
+ return;
+ }
+
+ error = of_property_read_u32(pdev->dev.of_node, "linux,code", &code);
+ if (error) {
+ dev_err(&pdev->dev, "resin no linux,code %d\n", error);
+ return;
+ }
+
+ pwrkey->resin_code = code;
+
+ input_set_capability(pwrkey->input, EV_KEY, code);
+
+ error = devm_request_threaded_irq(&pdev->dev, irq,
+ NULL, pm8941_resin_irq,
+ IRQF_ONESHOT,
+ "pm8941_resin", pwrkey);
+ if (error)
+ dev_err(&pdev->dev, "failed requesting IRQ: %d\n", error);
+}
+
static int pm8941_pwrkey_probe(struct platform_device *pdev)
{
struct pm8941_pwrkey *pwrkey;
@@ -241,6 +303,8 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
return error;
}
+ pm8941_resin_setup(pdev, pwrkey);
+
error = input_register_device(pwrkey->input);
if (error) {
dev_err(&pdev->dev, "failed to register input device: %d\n",
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index dd1dc39f84ff2..232a54a732183 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -103,6 +103,8 @@ config IOMMU_PGTABLES_L2
def_bool y
depends on MSM_IOMMU && MMU && SMP && CPU_DCACHE_DISABLE=n
+source "drivers/iommu/qcom/Kconfig"
+
# AMD IOMMU support
config AMD_IOMMU
bool "AMD IOMMU support"
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index c6edb31bf8c65..f7134f703211a 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
+obj-$(CONFIG_QCOM_IOMMU_V1) += qcom/
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 7c39ac4b9c537..cabb30e7dedad 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -39,6 +39,8 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_iommu.h>
+#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -274,21 +276,34 @@ enum arm_smmu_arch_version {
ARM_SMMU_V2,
};
-struct arm_smmu_smr {
- u8 idx;
+#define STREAM_UNASSIGNED 0
+#define STREAM_MULTIPLE -1
+struct arm_smmu_streamid {
+ int s2cr_idx;
+ u16 mask;
+ u16 id;
+};
+
+struct arm_smmu_stream_map_entry {
u16 mask;
u16 id;
};
struct arm_smmu_master_cfg {
+ struct arm_smmu_device *smmu;
+ struct arm_smmu_domain *smmu_domain;
int num_streamids;
- u16 streamids[MAX_MASTER_STREAMIDS];
- struct arm_smmu_smr *smrs;
+ struct arm_smmu_streamid streamids[MAX_MASTER_STREAMIDS];
+};
+
+struct arm_smmu_group_cfg {
+ int num_smes;
+ struct arm_smmu_stream_map_entry smes[MAX_MASTER_STREAMIDS];
};
struct arm_smmu_master {
struct device_node *of_node;
- struct rb_node node;
+ struct list_head list;
struct arm_smmu_master_cfg cfg;
};
@@ -318,6 +333,7 @@ struct arm_smmu_device {
u32 num_mapping_groups;
DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);
+ struct iommu_group **group_lut;
unsigned long va_size;
unsigned long ipa_size;
@@ -329,6 +345,9 @@ struct arm_smmu_device {
struct list_head list;
struct rb_root masters;
+ int num_clocks;
+ struct clk **clocks;
+ struct regulator *regulator;
};
struct arm_smmu_cfg {
@@ -359,8 +378,8 @@ struct arm_smmu_domain {
static struct iommu_ops arm_smmu_ops;
-static DEFINE_SPINLOCK(arm_smmu_devices_lock);
-static LIST_HEAD(arm_smmu_devices);
+static DEFINE_RWLOCK(arm_smmu_masters_lock);
+static LIST_HEAD(arm_smmu_masters);
struct arm_smmu_option_prop {
u32 opt;
@@ -377,6 +396,32 @@ static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
return container_of(dom, struct arm_smmu_domain, domain);
}
+static int arm_smmu_enable_clocks(struct arm_smmu_device *smmu)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < smmu->num_clocks; ++i) {
+ ret = clk_prepare_enable(smmu->clocks[i]);
+ if (ret) {
+ dev_err(smmu->dev, "Couldn't enable clock #%d\n", i);
+ while (i--)
+ clk_disable_unprepare(smmu->clocks[i]);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void arm_smmu_disable_clocks(struct arm_smmu_device *smmu)
+{
+ int i;
+
+ for (i = 0; i < smmu->num_clocks; ++i)
+ clk_disable_unprepare(smmu->clocks[i]);
+}
+
+
static void parse_driver_options(struct arm_smmu_device *smmu)
{
int i = 0;
@@ -404,125 +449,23 @@ static struct device_node *dev_get_dev_node(struct device *dev)
return dev->of_node;
}
-static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
- struct device_node *dev_node)
-{
- struct rb_node *node = smmu->masters.rb_node;
-
- while (node) {
- struct arm_smmu_master *master;
-
- master = container_of(node, struct arm_smmu_master, node);
-
- if (dev_node < master->of_node)
- node = node->rb_left;
- else if (dev_node > master->of_node)
- node = node->rb_right;
- else
- return master;
- }
-
- return NULL;
-}
-
-static struct arm_smmu_master_cfg *
-find_smmu_master_cfg(struct device *dev)
+static struct arm_smmu_master *find_smmu_master(struct device_node *dev_node)
{
- struct arm_smmu_master_cfg *cfg = NULL;
- struct iommu_group *group = iommu_group_get(dev);
-
- if (group) {
- cfg = iommu_group_get_iommudata(group);
- iommu_group_put(group);
- }
-
- return cfg;
-}
-
-static int insert_smmu_master(struct arm_smmu_device *smmu,
- struct arm_smmu_master *master)
-{
- struct rb_node **new, *parent;
-
- new = &smmu->masters.rb_node;
- parent = NULL;
- while (*new) {
- struct arm_smmu_master *this
- = container_of(*new, struct arm_smmu_master, node);
-
- parent = *new;
- if (master->of_node < this->of_node)
- new = &((*new)->rb_left);
- else if (master->of_node > this->of_node)
- new = &((*new)->rb_right);
- else
- return -EEXIST;
- }
-
- rb_link_node(&master->node, parent, new);
- rb_insert_color(&master->node, &smmu->masters);
- return 0;
-}
-
-static int register_smmu_master(struct arm_smmu_device *smmu,
- struct device *dev,
- struct of_phandle_args *masterspec)
-{
- int i;
struct arm_smmu_master *master;
- master = find_smmu_master(smmu, masterspec->np);
- if (master) {
- dev_err(dev,
- "rejecting multiple registrations for master device %s\n",
- masterspec->np->name);
- return -EBUSY;
- }
-
- if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
- dev_err(dev,
- "reached maximum number (%d) of stream IDs for master device %s\n",
- MAX_MASTER_STREAMIDS, masterspec->np->name);
- return -ENOSPC;
- }
-
- master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
- if (!master)
- return -ENOMEM;
-
- master->of_node = masterspec->np;
- master->cfg.num_streamids = masterspec->args_count;
-
- for (i = 0; i < master->cfg.num_streamids; ++i) {
- u16 streamid = masterspec->args[i];
-
- if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
- (streamid >= smmu->num_mapping_groups)) {
- dev_err(dev,
- "stream ID for master device %s greater than maximum allowed (%d)\n",
- masterspec->np->name, smmu->num_mapping_groups);
- return -ERANGE;
- }
- master->cfg.streamids[i] = streamid;
- }
- return insert_smmu_master(smmu, master);
-}
+ if (!dev_node)
+ return NULL;
-static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
-{
- struct arm_smmu_device *smmu;
- struct arm_smmu_master *master = NULL;
- struct device_node *dev_node = dev_get_dev_node(dev);
+ read_lock(&arm_smmu_masters_lock);
+ list_for_each_entry(master, &arm_smmu_masters, list)
+ if (master->of_node == dev_node)
+ goto out_unlock;
- spin_lock(&arm_smmu_devices_lock);
- list_for_each_entry(smmu, &arm_smmu_devices, list) {
- master = find_smmu_master(smmu, dev_node);
- if (master)
- break;
- }
- spin_unlock(&arm_smmu_devices_lock);
+ master = NULL;
- return master ? smmu : NULL;
+out_unlock:
+ read_unlock(&arm_smmu_masters_lock);
+ return master;
}
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
@@ -543,6 +486,23 @@ static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
clear_bit(idx, map);
}
+static int arm_smmu_enable_regulators(struct arm_smmu_device *smmu)
+{
+ if (!smmu->regulator)
+ return 0;
+
+ return regulator_enable(smmu->regulator);
+}
+
+static int arm_smmu_disable_regulators(struct arm_smmu_device *smmu)
+{
+ if (!smmu->regulator)
+ return 0;
+
+ return regulator_disable(smmu->regulator);
+}
+
+
/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
@@ -1013,87 +973,97 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
kfree(smmu_domain);
}
-static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
- struct arm_smmu_master_cfg *cfg)
+static int arm_smmu_master_configure_smrs(struct arm_smmu_master_cfg *cfg)
{
- int i;
- struct arm_smmu_smr *smrs;
+ int i, err;
+ struct arm_smmu_device *smmu = cfg->smmu;
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+ bool stream_match = smmu->features & ARM_SMMU_FEAT_STREAM_MATCH;
- if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
- return 0;
+ /* Allocate the SMRs on the SMMU */
+ for (i = 0; i < cfg->num_streamids; i++) {
+ int idx;
- if (cfg->smrs)
- return -EEXIST;
+ if (cfg->streamids[i].s2cr_idx == STREAM_MULTIPLE)
+ continue;
- smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
- if (!smrs) {
- dev_err(smmu->dev, "failed to allocate %d SMRs\n",
- cfg->num_streamids);
- return -ENOMEM;
- }
+ if (cfg->streamids[i].s2cr_idx > STREAM_UNASSIGNED) {
+ err = -EEXIST;
+ goto err_free_smrs;
+ }
+
+ if (stream_match)
+ idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
+ smmu->num_mapping_groups);
+ else
+ idx = cfg->streamids[i].id;
- /* Allocate the SMRs on the SMMU */
- for (i = 0; i < cfg->num_streamids; ++i) {
- int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
- smmu->num_mapping_groups);
if (IS_ERR_VALUE(idx)) {
dev_err(smmu->dev, "failed to allocate free SMR\n");
+ err = -ENOSPC;
goto err_free_smrs;
}
- smrs[i] = (struct arm_smmu_smr) {
- .idx = idx,
- .mask = 0, /* We don't currently share SMRs */
- .id = cfg->streamids[i],
- };
+ cfg->streamids[i].s2cr_idx = idx + 1;
}
+ /* For stream indexing, we're only here for the bookkeeping... */
+ if (!stream_match)
+ return 0;
+
/* It worked! Now, poke the actual hardware */
for (i = 0; i < cfg->num_streamids; ++i) {
- u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
- smrs[i].mask << SMR_MASK_SHIFT;
- writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
+ u32 idx = cfg->streamids[i].s2cr_idx - 1;
+ u32 reg = SMR_VALID | cfg->streamids[i].id << SMR_ID_SHIFT |
+ cfg->streamids[i].mask << SMR_MASK_SHIFT;
+
+ if (idx != STREAM_MULTIPLE)
+ writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(idx));
}
- cfg->smrs = smrs;
return 0;
err_free_smrs:
- while (--i >= 0)
- __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
- kfree(smrs);
- return -ENOSPC;
+ while (--i >= 0) {
+ int idx = cfg->streamids[i].s2cr_idx - 1;
+
+ if (idx != STREAM_MULTIPLE) {
+ if (stream_match)
+ __arm_smmu_free_bitmap(smmu->smr_map, idx);
+ cfg->streamids[i].s2cr_idx = STREAM_UNASSIGNED;
+ }
+ }
+ return err;
}
-static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
- struct arm_smmu_master_cfg *cfg)
+static void arm_smmu_master_free_smrs(struct arm_smmu_master_cfg *cfg)
{
int i;
+ struct arm_smmu_device *smmu = cfg->smmu;
void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
- struct arm_smmu_smr *smrs = cfg->smrs;
-
- if (!smrs)
- return;
+ bool stream_match = smmu->features & ARM_SMMU_FEAT_STREAM_MATCH;
/* Invalidate the SMRs before freeing back to the allocator */
for (i = 0; i < cfg->num_streamids; ++i) {
- u8 idx = smrs[i].idx;
+ u8 idx = cfg->streamids[i].s2cr_idx - 1;
+
+ if (cfg->streamids[i].s2cr_idx == STREAM_MULTIPLE)
+ continue;
+
+ cfg->streamids[i].s2cr_idx = STREAM_UNASSIGNED;
+ if (!stream_match)
+ continue;
writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
__arm_smmu_free_bitmap(smmu->smr_map, idx);
}
-
- cfg->smrs = NULL;
- kfree(smrs);
}
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_master_cfg *cfg)
{
int i, ret;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
- void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+ void __iomem *gr0_base = ARM_SMMU_GR0(cfg->smmu);
/*
* FIXME: This won't be needed once we have IOMMU-backed DMA ops
@@ -1105,55 +1075,39 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
return 0;
/* Devices in an IOMMU group may already be configured */
- ret = arm_smmu_master_configure_smrs(smmu, cfg);
+ ret = arm_smmu_master_configure_smrs(cfg);
if (ret)
return ret == -EEXIST ? 0 : ret;
for (i = 0; i < cfg->num_streamids; ++i) {
- u32 idx, s2cr;
+ u32 idx = cfg->streamids[i].s2cr_idx - 1;
+ u32 s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
+ (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
- idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
- s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
- (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
}
return 0;
}
-static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
- struct arm_smmu_master_cfg *cfg)
+static void arm_smmu_domain_remove_master(struct arm_smmu_master_cfg *cfg)
{
int i;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
- void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
-
- /* An IOMMU group is torn down by the first device to be removed */
- if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
- return;
+ void __iomem *gr0_base = ARM_SMMU_GR0(cfg->smmu);
/*
* We *must* clear the S2CR first, because freeing the SMR means
* that it can be re-allocated immediately.
*/
for (i = 0; i < cfg->num_streamids; ++i) {
- u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
+ u32 idx = cfg->streamids[i].s2cr_idx - 1;
u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
}
- arm_smmu_master_free_smrs(smmu, cfg);
-}
-
-static void arm_smmu_detach_dev(struct device *dev,
- struct arm_smmu_master_cfg *cfg)
-{
- struct iommu_domain *domain = dev->archdata.iommu;
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-
- dev->archdata.iommu = NULL;
- arm_smmu_domain_remove_master(smmu_domain, cfg);
+ cfg->smmu_domain = NULL;
+ arm_smmu_master_free_smrs(cfg);
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
@@ -1161,14 +1115,14 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
int ret;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu;
- struct arm_smmu_master_cfg *cfg;
+ struct arm_smmu_master_cfg *cfg = dev->archdata.iommu;
- smmu = find_smmu_for_device(dev);
- if (!smmu) {
+ if (!cfg) {
dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
return -ENXIO;
}
+ smmu = cfg->smmu;
/* Ensure that the domain is finalised */
ret = arm_smmu_init_domain_context(domain, smmu);
if (IS_ERR_VALUE(ret))
@@ -1185,18 +1139,13 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return -EINVAL;
}
- /* Looks ok, so add the device to the domain */
- cfg = find_smmu_master_cfg(dev);
- if (!cfg)
- return -ENODEV;
-
/* Detach the dev from its current domain */
- if (dev->archdata.iommu)
- arm_smmu_detach_dev(dev, cfg);
+ if (cfg->smmu_domain)
+ arm_smmu_domain_remove_master(cfg);
ret = arm_smmu_domain_add_master(smmu_domain, cfg);
if (!ret)
- dev->archdata.iommu = domain;
+ cfg->smmu_domain = smmu_domain;
return ret;
}
@@ -1318,75 +1267,74 @@ static bool arm_smmu_capable(enum iommu_cap cap)
}
}
-static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
+static int arm_smmu_add_dev_streamid(struct arm_smmu_device *smmu,
+ struct device *dev, u16 sid)
{
- *((u16 *)data) = alias;
- return 0; /* Continue walking */
-}
-
-static void __arm_smmu_release_pci_iommudata(void *data)
-{
- kfree(data);
-}
-
-static int arm_smmu_init_pci_device(struct pci_dev *pdev,
- struct iommu_group *group)
-{
- struct arm_smmu_master_cfg *cfg;
- u16 sid;
+ struct arm_smmu_master_cfg *cfg = dev->archdata.iommu;
int i;
- cfg = iommu_group_get_iommudata(group);
if (!cfg) {
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
if (!cfg)
return -ENOMEM;
- iommu_group_set_iommudata(group, cfg,
- __arm_smmu_release_pci_iommudata);
+ cfg->smmu = smmu;
+ dev->archdata.iommu = cfg;
}
if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
return -ENOSPC;
- /*
- * Assume Stream ID == Requester ID for now.
- * We need a way to describe the ID mappings in FDT.
- */
- pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
+ /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
for (i = 0; i < cfg->num_streamids; ++i)
- if (cfg->streamids[i] == sid)
- break;
+ if (cfg->streamids[i].id == sid) {
+ dev_warn(dev, "Stream ID 0x%hx repeated; ignoring\n",
+ sid);
+ return 0;
+ }
- /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
- if (i == cfg->num_streamids)
- cfg->streamids[cfg->num_streamids++] = sid;
+ cfg->streamids[cfg->num_streamids++].id = sid;
return 0;
}
-static int arm_smmu_init_platform_device(struct device *dev,
- struct iommu_group *group)
+static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
- struct arm_smmu_device *smmu = find_smmu_for_device(dev);
- struct arm_smmu_master *master;
+ *((u16 *)data) = alias;
+ return 0; /* Continue walking */
+}
- if (!smmu)
- return -ENODEV;
+static int arm_smmu_init_legacy_master(struct device *dev)
+{
+ struct arm_smmu_master *master;
+ struct device_node *np = dev_get_dev_node(dev);
+ u16 sid;
- master = find_smmu_master(smmu, dev->of_node);
+ master = find_smmu_master(np);
if (!master)
return -ENODEV;
- iommu_group_set_iommudata(group, &master->cfg, NULL);
+ if (!dev_is_pci(dev)) {
+ dev->archdata.iommu = &master->cfg;
+ return 0;
+ }
- return 0;
+ /* Legacy bindings assume Stream ID == Requester ID */
+ pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid, &sid);
+ return arm_smmu_add_dev_streamid(master->cfg.smmu, dev, sid);
}
static int arm_smmu_add_device(struct device *dev)
{
struct iommu_group *group;
+ if (!dev->archdata.iommu) {
+ int ret = arm_smmu_init_legacy_master(dev);
+
+ if (ret)
+ return ret;
+ }
+
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
return PTR_ERR(group);
@@ -1398,29 +1346,113 @@ static int arm_smmu_add_device(struct device *dev)
static void arm_smmu_remove_device(struct device *dev)
{
iommu_group_remove_device(dev);
+ if (!find_smmu_master(dev->of_node))
+ kfree(dev->archdata.iommu);
+}
+
+static void __arm_smmu_release_iommudata(void *data)
+{
+ kfree(data);
+}
+
+static struct iommu_group *arm_smmu_group_lookup(struct device *dev)
+{
+ struct arm_smmu_master_cfg *cfg = dev->archdata.iommu;
+ struct arm_smmu_device *smmu = cfg->smmu;
+ struct iommu_group **lut = smmu->group_lut;
+ struct iommu_group *group = NULL;
+ int i;
+
+ if (!lut) {
+ /*
+ * Unfortunately this has to be sized for the worst-case until
+ * we get even cleverer with stream ID management.
+ */
+ lut = devm_kcalloc(smmu->dev, SMR_ID_MASK + 1,
+ sizeof(*lut), GFP_KERNEL);
+ if (lut)
+ smmu->group_lut = lut;
+ else
+ group = ERR_PTR(-ENOMEM);
+ } else {
+ /* Check for platform or cross-bus aliases */
+ for (i = 0; i < cfg->num_streamids; i++) {
+ struct iommu_group *tmp = lut[cfg->streamids[i].id];
+
+ if (!tmp)
+ continue;
+
+ if (group && tmp != group) {
+ dev_err(smmu->dev,
+ "Cannot handle master %s aliasing multiple groups\n",
+ dev_name(dev));
+ return ERR_PTR(-EBUSY);
+ }
+ group = tmp;
+ }
+ }
+ return group;
+}
+
+static inline bool __streamid_match_sme(struct arm_smmu_streamid *sid,
+ struct arm_smmu_stream_map_entry *sme)
+{
+ /* This will get rather more complicated with masking... */
+ return sid->id == sme->id;
}
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
+ struct arm_smmu_master_cfg *master_cfg = dev->archdata.iommu;
+ struct arm_smmu_group_cfg *group_cfg;
+ struct arm_smmu_streamid *sids;
+ struct arm_smmu_stream_map_entry *smes;
struct iommu_group *group;
- int ret;
+ int i, j;
- if (dev_is_pci(dev))
- group = pci_device_group(dev);
- else
- group = generic_device_group(dev);
+ group = arm_smmu_group_lookup(dev);
+ if (!group) {
+ if (dev_is_pci(dev))
+ group = pci_device_group(dev);
+ else
+ group = generic_device_group(dev);
+ }
if (IS_ERR(group))
return group;
- if (dev_is_pci(dev))
- ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
- else
- ret = arm_smmu_init_platform_device(dev, group);
+ group_cfg = iommu_group_get_iommudata(group);
+ if (!group_cfg) {
+ group_cfg = kzalloc(sizeof(*group_cfg), GFP_KERNEL);
+ if (!group_cfg)
+ return ERR_PTR(-ENOMEM);
- if (ret) {
- iommu_group_put(group);
- group = ERR_PTR(ret);
+ iommu_group_set_iommudata(group, group_cfg,
+ __arm_smmu_release_iommudata);
+ }
+
+ /* Propagate device's IDs to the group, avoiding duplicate entries */
+ sids = master_cfg->streamids;
+ smes = group_cfg->smes;
+ for (i = 0; i < master_cfg->num_streamids; i++) {
+ master_cfg->smmu->group_lut[sids[i].id] = group;
+
+ for (j = 0; j < group_cfg->num_smes; j++) {
+ if (__streamid_match_sme(&sids[i], &smes[j])) {
+ sids[i].s2cr_idx = STREAM_MULTIPLE;
+ break;
+ }
+ }
+
+ if (j < group_cfg->num_smes)
+ continue;
+
+ if (group_cfg->num_smes == MAX_MASTER_STREAMIDS) {
+ iommu_group_put(group);
+ return ERR_PTR(-ENOSPC);
+ }
+
+ smes[group_cfg->num_smes++].id = sids[i].id;
}
return group;
@@ -1470,6 +1502,20 @@ out_unlock:
return ret;
}
+static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
+{
+ struct arm_smmu_device *smmu;
+ struct platform_device *smmu_pdev;
+
+ smmu_pdev = of_find_device_by_node(args->np);
+ if (!smmu_pdev)
+ return -ENODEV;
+
+ smmu = platform_get_drvdata(smmu_pdev);
+
+ return arm_smmu_add_dev_streamid(smmu, dev, args->args[0]);
+}
+
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
@@ -1484,6 +1530,7 @@ static struct iommu_ops arm_smmu_ops = {
.device_group = arm_smmu_device_group,
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
+ .of_xlate = arm_smmu_of_xlate,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
};
@@ -1561,6 +1608,61 @@ static int arm_smmu_id_size_to_bits(int size)
}
}
+static int arm_smmu_init_regulators(struct arm_smmu_device *smmu)
+{
+ struct device *dev = smmu->dev;
+
+ if (!of_get_property(dev->of_node, "vdd-supply", NULL))
+ return 0;
+
+ smmu->regulator = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(smmu->regulator))
+ return PTR_ERR(smmu->regulator);
+
+ return 0;
+}
+
+static int arm_smmu_init_clocks(struct arm_smmu_device *smmu)
+{
+ const char *cname;
+ struct property *prop;
+ int i;
+ struct device *dev = smmu->dev;
+
+ smmu->num_clocks =
+ of_property_count_strings(dev->of_node, "clock-names");
+
+ if (smmu->num_clocks < 1)
+ return 0;
+
+ smmu->clocks = devm_kzalloc(
+ dev, sizeof(*smmu->clocks) * smmu->num_clocks,
+ GFP_KERNEL);
+
+ if (!smmu->clocks) {
+ dev_err(dev,
+ "Failed to allocate memory for clocks\n");
+ return -ENODEV;
+ }
+
+ i = 0;
+ of_property_for_each_string(dev->of_node, "clock-names",
+ prop, cname) {
+ struct clk *c = devm_clk_get(dev, cname);
+
+ if (IS_ERR(c)) {
+ dev_err(dev, "Couldn't get clock: %s",
+ cname);
+ return -EPROBE_DEFER;
+ }
+
+ smmu->clocks[i] = c;
+
+ ++i;
+ }
+ return 0;
+}
+
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
unsigned long size;
@@ -1738,14 +1840,116 @@ static const struct of_device_id arm_smmu_of_match[] = {
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
+static void arm_smmu_remove_mmu_masters(struct arm_smmu_device *smmu)
+{
+ struct arm_smmu_master *master, *tmp;
+
+ write_lock(&arm_smmu_masters_lock);
+ list_for_each_entry_safe(master, tmp, &arm_smmu_masters, list) {
+ if (master->cfg.smmu != smmu)
+ continue;
+
+ list_del(&master->list);
+ of_node_put(master->of_node);
+ devm_kfree(smmu->dev, master);
+ }
+ write_unlock(&arm_smmu_masters_lock);
+}
+
+static int insert_smmu_master(struct arm_smmu_master *master)
+{
+ struct arm_smmu_master *tmp;
+ int ret = -EEXIST;
+
+ write_lock(&arm_smmu_masters_lock);
+ list_for_each_entry(tmp, &arm_smmu_masters, list)
+ if (tmp->of_node == master->of_node)
+ goto out_unlock;
+
+ ret = 0;
+ list_add(&master->list, &arm_smmu_masters);
+out_unlock:
+ write_unlock(&arm_smmu_masters_lock);
+ return ret;
+}
+
+static int register_smmu_master(struct arm_smmu_device *smmu,
+ struct of_phandle_args *masterspec)
+{
+ int i;
+ struct arm_smmu_master *master;
+
+ master = find_smmu_master(masterspec->np);
+ if (master) {
+ dev_err(smmu->dev,
+ "rejecting multiple registrations for master device %s\n",
+ masterspec->np->name);
+ return -EBUSY;
+ }
+
+ if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
+ dev_err(smmu->dev,
+ "reached maximum number (%d) of stream IDs for master device %s\n",
+ MAX_MASTER_STREAMIDS, masterspec->np->name);
+ return -ENOSPC;
+ }
+
+ master = devm_kzalloc(smmu->dev, sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return -ENOMEM;
+
+ master->of_node = masterspec->np;
+ master->cfg.num_streamids = masterspec->args_count;
+
+ for (i = 0; i < master->cfg.num_streamids; ++i) {
+ u16 streamid = masterspec->args[i];
+
+ if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
+ (streamid >= smmu->num_mapping_groups)) {
+ dev_err(smmu->dev,
+ "stream ID for master device %s greater than maximum allowed (%d)\n",
+ masterspec->np->name, smmu->num_mapping_groups);
+ return -ERANGE;
+ }
+ master->cfg.streamids[i].id = streamid;
+ /* leave .mask 0; we don't currently share SMRs */
+ }
+ return insert_smmu_master(master);
+}
+
+static int arm_smmu_probe_mmu_masters(struct arm_smmu_device *smmu)
+{
+ struct of_phandle_args masterspec;
+ int err, i = 0;
+
+ dev_notice(smmu->dev,
+ "Deprecated \"mmu-masters\" property found; update DT to \"iommus\" property if possible\n");
+
+ while (!of_parse_phandle_with_args(smmu->dev->of_node, "mmu-masters",
+ "#stream-id-cells", i,
+ &masterspec)) {
+ err = register_smmu_master(smmu, &masterspec);
+ if (err)
+ break;
+ i++;
+ }
+
+ if (err) {
+ dev_err(smmu->dev, "failed to register mmu-masters\n");
+ arm_smmu_remove_mmu_masters(smmu);
+ } else if (i) {
+ dev_info(smmu->dev, "registered %d mmu-masters\n", i);
+ }
+
+ return err;
+}
+
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
struct resource *res;
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
- struct rb_node *node;
- struct of_phandle_args masterspec;
int num_irqs, i, err;
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
@@ -1800,25 +2004,25 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
smmu->irqs[i] = irq;
}
+ err = arm_smmu_init_regulators(smmu);
+ if (err)
+ goto out;
+
+ err = arm_smmu_init_clocks(smmu);
+ if (err)
+ goto out;
+
+ err = arm_smmu_enable_regulators(smmu);
+ if (err)
+ goto out;
+
+ err = arm_smmu_enable_clocks(smmu);
+ if (err)
+ goto out_disable_regulators;
+
err = arm_smmu_device_cfg_probe(smmu);
if (err)
- return err;
-
- i = 0;
- smmu->masters = RB_ROOT;
- while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
- "#stream-id-cells", i,
- &masterspec)) {
- err = register_smmu_master(smmu, dev, &masterspec);
- if (err) {
- dev_err(dev, "failed to add master %s\n",
- masterspec.np->name);
- goto out_put_masters;
- }
-
- i++;
- }
- dev_notice(dev, "registered %d master devices\n", i);
+ goto out_disable_clocks;
parse_driver_options(smmu);
@@ -1827,8 +2031,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
dev_err(dev,
"found only %d context interrupt(s) but %d required\n",
smmu->num_context_irqs, smmu->num_context_banks);
- err = -ENODEV;
- goto out_put_masters;
+ goto out_disable_clocks;
}
for (i = 0; i < smmu->num_global_irqs; ++i) {
@@ -1844,62 +2047,49 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
}
}
- INIT_LIST_HEAD(&smmu->list);
- spin_lock(&arm_smmu_devices_lock);
- list_add(&smmu->list, &arm_smmu_devices);
- spin_unlock(&arm_smmu_devices_lock);
-
+ platform_set_drvdata(pdev, smmu);
+ /* Check first to avoid of_parse_phandle_with_args complaining */
+ if (of_property_read_bool(dev->of_node, "mmu-masters"))
+ arm_smmu_probe_mmu_masters(smmu);
arm_smmu_device_reset(smmu);
+ of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
return 0;
out_free_irqs:
- while (i--)
- free_irq(smmu->irqs[i], smmu);
+ while (i--)
+ free_irq(smmu->irqs[i], smmu);
-out_put_masters:
- for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
- struct arm_smmu_master *master
- = container_of(node, struct arm_smmu_master, node);
- of_node_put(master->of_node);
- }
+out_disable_clocks:
+ arm_smmu_disable_clocks(smmu);
+
+out_disable_regulators:
+ arm_smmu_disable_regulators(smmu);
+out:
return err;
}
static int arm_smmu_device_remove(struct platform_device *pdev)
{
+ struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
int i;
- struct device *dev = &pdev->dev;
- struct arm_smmu_device *curr, *smmu = NULL;
- struct rb_node *node;
-
- spin_lock(&arm_smmu_devices_lock);
- list_for_each_entry(curr, &arm_smmu_devices, list) {
- if (curr->dev == dev) {
- smmu = curr;
- list_del(&smmu->list);
- break;
- }
- }
- spin_unlock(&arm_smmu_devices_lock);
if (!smmu)
return -ENODEV;
- for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
- struct arm_smmu_master *master
- = container_of(node, struct arm_smmu_master, node);
- of_node_put(master->of_node);
- }
+ arm_smmu_remove_mmu_masters(smmu);
if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
- dev_err(dev, "removing device with active domains!\n");
+ dev_err(&pdev->dev, "removing device with active domains!\n");
for (i = 0; i < smmu->num_global_irqs; ++i)
free_irq(smmu->irqs[i], smmu);
/* Turn the thing off */
writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
+ arm_smmu_disable_clocks(smmu);
+ arm_smmu_disable_regulators(smmu);
+
return 0;
}
@@ -1915,8 +2105,11 @@ static struct platform_driver arm_smmu_driver = {
static int __init arm_smmu_init(void)
{
struct device_node *np;
+ static bool done;
int ret;
+ if (done)
+ return 0;
/*
* Play nice with systems that don't have an ARM SMMU by checking that
* an ARM SMMU exists in the system before proceeding with the driver
@@ -1946,6 +2139,7 @@ static int __init arm_smmu_init(void)
bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
#endif
+ done = true;
return 0;
}
@@ -1957,6 +2151,16 @@ static void __exit arm_smmu_exit(void)
subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);
+static int __init arm_smmu_of_init(struct device_node *np)
+{
+ return arm_smmu_init();
+}
+IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
+IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
+IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
+IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
+IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
+
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 58f2fe687a24d..ded744ba93aa8 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -448,6 +448,10 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
size_t iova_len = 0;
int i;
+ if (!iovad) {
+ iommu_get_dma_cookie(domain);
+ iovad = domain->iova_cookie;
+ }
/*
* Work out how much IOVA space we need, and align the segments to
* IOVA granules for the IOMMU driver to handle. With some clever
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index b9df1411c8942..9f7293f235d91 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -901,6 +901,18 @@ static int remove_iommu_group(struct device *dev, void *data)
return 0;
}
+int iommu_bus_add_dev(struct device *dev)
+{
+ const struct iommu_ops *ops = dev->bus->iommu_ops;
+ int ret = -ENODEV;
+
+ if (ops->add_device)
+ ret = ops->add_device(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_bus_add_dev);
+
static int iommu_bus_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index e321fa517a452..123a71c31e39a 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -424,12 +424,12 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
int i = 0;
for (i = 0; i < 16; i++)
*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
- FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT |
+ FL_AP1 | FL_AP0 | FL_TYPE_SECT |
FL_SHARED | FL_NG | pgprot;
}
if (len == SZ_1M)
- *fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG |
+ *fl_pte = (pa & 0xFFF00000) | FL_AP1 | FL_AP0 | FL_NG |
FL_TYPE_SECT | FL_SHARED | pgprot;
/* Need a 2nd level table */
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 5fea665af99d1..f722f55ddb0d6 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -22,6 +22,7 @@
#include <linux/limits.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
+#include <linux/of_pci.h>
#include <linux/slab.h>
static const struct of_device_id __iommu_of_table_sentinel
@@ -134,20 +135,48 @@ struct iommu_ops *of_iommu_get_ops(struct device_node *np)
return ops;
}
+static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct of_phandle_args *iommu_spec = data;
+ struct device_node *np = pdev->bus->dev.of_node;
+
+ iommu_spec->args[0] = alias;
+ return np == iommu_spec->np;
+}
+
struct iommu_ops *of_iommu_configure(struct device *dev,
struct device_node *master_np)
{
struct of_phandle_args iommu_spec;
- struct device_node *np;
+ struct device_node *np = NULL;
struct iommu_ops *ops = NULL;
int idx = 0;
- /*
- * We can't do much for PCI devices without knowing how
- * device IDs are wired up from the PCI bus to the IOMMU.
- */
- if (dev_is_pci(dev))
- return NULL;
+ if (dev_is_pci(dev)) {
+ /*
+ * Start by tracing the RID alias down the PCI topology as
+ * far as the host bridge whose OF node we have...
+ */
+ iommu_spec.np = master_np;
+ pci_for_each_dma_alias(to_pci_dev(dev), __get_pci_rid,
+ &iommu_spec);
+ /*
+ * ...then find out what that becomes once it escapes the PCI
+ * bus into the system beyond, and which IOMMU it ends up at.
+ */
+ if (of_pci_map_rid(master_np, "iommu-map", iommu_spec.args[0],
+ &np, iommu_spec.args))
+ return NULL;
+
+ iommu_spec.np = np;
+ iommu_spec.args_count = 1;
+ ops = of_iommu_get_ops(np);
+ if (!ops || !ops->of_xlate || ops->of_xlate(dev, &iommu_spec))
+ goto err_put_node;
+
+ of_node_put(np);
+ return ops;
+ }
/*
* We don't currently walk up the tree looking for a parent IOMMU.
@@ -160,8 +189,18 @@ struct iommu_ops *of_iommu_configure(struct device *dev,
np = iommu_spec.np;
ops = of_iommu_get_ops(np);
- if (!ops || !ops->of_xlate || ops->of_xlate(dev, &iommu_spec))
+ if (!ops) {
+ const struct of_device_id *oid;
+
+ oid = of_match_node(&__iommu_of_table, np);
+ ops = oid ? ERR_PTR(-EPROBE_DEFER) : NULL;
goto err_put_node;
+ }
+
+ if (!ops->of_xlate || ops->of_xlate(dev, &iommu_spec)) {
+ ops = NULL;
+ goto err_put_node;
+ }
of_node_put(np);
idx++;
@@ -171,7 +210,7 @@ struct iommu_ops *of_iommu_configure(struct device *dev,
err_put_node:
of_node_put(np);
- return NULL;
+ return ops;
}
void __init of_iommu_init(void)
@@ -182,7 +221,7 @@ void __init of_iommu_init(void)
for_each_matching_node_and_match(np, matches, &match) {
const of_iommu_init_fn init_fn = match->data;
- if (init_fn(np))
+ if (init_fn && init_fn(np))
pr_err("Failed to initialise IOMMU %s\n",
of_node_full_name(np));
}
diff --git a/drivers/iommu/qcom/Kconfig b/drivers/iommu/qcom/Kconfig
new file mode 100644
index 0000000000000..b507e575cb958
--- /dev/null
+++ b/drivers/iommu/qcom/Kconfig
@@ -0,0 +1,44 @@
+# Qualcomm IOMMU support
+
+# QCOM IOMMUv1 support
+config QCOM_IOMMU_V1
+ bool "Qualcomm IOMMUv1 Support"
+ depends on ARCH_QCOM
+ select IOMMU_API
+ select QCOM_SCM
+ select ARM_DMA_USE_IOMMU if ARM
+ select ARM64_DMA_USE_IOMMU if ARM64
+ help
+ Support for the IOMMUs (v1) found on certain Qualcomm SOCs.
+ These IOMMUs allow virtualization of the address space used by most
+ cores within the multimedia subsystem.
+
+ If unsure, say N here.
+
+config MMU500_ACTIVE_PREFETCH_BUG_WITH_SECTION_MAPPING
+ bool "Don't align virtual address at 1MB boundary"
+ depends on QCOM_IOMMU_V1
+ help
+ Say Y here if the MMU500 revision has a bug in active prefetch
+ which can cause TLB corruptions due to 1MB alignment of a buffer.
+ Here is the sequence which will surface this BUG.
+ 1) Create a 2-level mapping in v7S format for 1MB buffer. Start of
+ the buffer should be at even MB boundary.
+ 2) Create a section mapping for 1MB buffer adjacent to previous
+ mapping in step 1.
+ 3) Access last page from 2 level mapping followed by an access into
+ section mapped area.
+ 4) Step 3 will result into TLB corruption and this corruption can
+ lead to any misbehavior (like Permission fault) for sub-sequent
+ transactions.
+
+ If unsure, say Y here if IOMMU mapping will not exhaust the VA space.
+
+config IOMMU_PGTABLES_L2
+ bool "Allow SMMU page tables in the L2 cache (Experimental)"
+ depends on QCOM_IOMMU_V1 && MMU && SMP && CPU_DCACHE_DISABLE=n
+ help
+ Improves TLB miss latency at the expense of potential L2 pollution.
+ However, with large multimedia buffers, the TLB should mostly contain
+ section mappings and TLB misses should be quite infrequent.
+ Most people can probably say Y here.
diff --git a/drivers/iommu/qcom/Makefile b/drivers/iommu/qcom/Makefile
new file mode 100644
index 0000000000000..e0b1159227be4
--- /dev/null
+++ b/drivers/iommu/qcom/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_QCOM_IOMMU_V1) += qcom-iommu.o
+
+qcom-iommu-y += msm_iommu.o
+qcom-iommu-y += msm_iommu-v1.o
+qcom-iommu-y += msm_iommu_dev-v1.o
+qcom-iommu-y += msm_iommu_sec.o
+qcom-iommu-y += msm_iommu_pagetable.o
diff --git a/drivers/iommu/qcom/msm_iommu-v1.c b/drivers/iommu/qcom/msm_iommu-v1.c
new file mode 100644
index 0000000000000..d7e7b4b706ac8
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu-v1.c
@@ -0,0 +1,1540 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#ifdef CONFIG_MSM_BUS_SCALING
+#include <linux/msm-bus.h>
+#endif
+#include <linux/clk.h>
+#include <linux/scatterlist.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/notifier.h>
+#include <linux/iopoll.h>
+#include <linux/qcom_iommu.h>
+#include <asm/sizes.h>
+
+#include "msm_iommu_hw-v1.h"
+#include "msm_iommu_priv.h"
+#include "msm_iommu_perfmon.h"
+#include "msm_iommu_pagetable.h"
+
+#ifdef CONFIG_IOMMU_LPAE
+/* bitmap of the page sizes currently supported */
+#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_2M | SZ_32M | SZ_1G)
+#else
+/* bitmap of the page sizes currently supported */
+#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+#endif
+
+#define IOMMU_MSEC_STEP 10
+#define IOMMU_MSEC_TIMEOUT 5000
+
+static DEFINE_MUTEX(msm_iommu_lock);
+struct dump_regs_tbl_entry dump_regs_tbl[MAX_DUMP_REGS];
+
+/*
+ * Take a power reference on the IOMMU.  No regulator framework calls
+ * are made here; this only bumps the powered_on count that the fault
+ * handlers consult before touching registers.
+ * NOTE(review): the increment is not atomic -- presumably all callers
+ * serialize via msm_iommu_lock (as the iommu_access_ops_v1 lock hooks
+ * suggest); confirm.
+ */
+static int __enable_regulators(struct msm_iommu_drvdata *drvdata)
+{
+	++drvdata->powered_on;
+
+	return 0;
+}
+
+/* Drop the power reference taken by __enable_regulators(). */
+static void __disable_regulators(struct msm_iommu_drvdata *drvdata)
+{
+	--drvdata->powered_on;
+}
+
+/*
+ * Cast a bus-bandwidth vote on behalf of the IOMMU.  A non-zero @vote
+ * requests bandwidth, zero releases the request.  Compiles to a
+ * successful no-op when CONFIG_MSM_BUS_SCALING is disabled or no bus
+ * client was registered.  Returns 0 on success or the msm_bus error.
+ */
+static int apply_bus_vote(struct msm_iommu_drvdata *drvdata, unsigned int vote)
+{
+	int ret = 0;
+#ifdef CONFIG_MSM_BUS_SCALING
+	if (drvdata->bus_client) {
+		ret = msm_bus_scale_client_update_request(drvdata->bus_client,
+							  vote);
+		if (ret)
+			pr_err("%s: Failed to vote for bus: %d\n", __func__,
+			       vote);
+	}
+#endif
+	return ret;
+}
+
+/*
+ * Enable the IOMMU interface clock, then the core clock.  On core-clock
+ * failure the interface clock is unwound, so the function is
+ * all-or-nothing.  Returns 0 or the clk framework error code.
+ */
+static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
+{
+	int ret;
+
+	ret = clk_prepare_enable(drvdata->iface);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(drvdata->core);
+	if (ret)
+		goto err;
+
+	return 0;
+
+err:
+	clk_disable_unprepare(drvdata->iface);
+	return ret;
+}
+
+/* Disable the clocks in the reverse order of __enable_clocks(). */
+static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
+{
+	clk_disable_unprepare(drvdata->core);
+	clk_disable_unprepare(drvdata->iface);
+}
+
+static void _iommu_lock_acquire(unsigned int need_extra_lock)
+{
+ mutex_lock(&msm_iommu_lock);
+}
+
+static void _iommu_lock_release(unsigned int need_extra_lock)
+{
+ mutex_unlock(&msm_iommu_lock);
+}
+
+struct iommu_access_ops iommu_access_ops_v1 = {
+ .iommu_power_on = __enable_regulators,
+ .iommu_power_off = __disable_regulators,
+ .iommu_bus_vote = apply_bus_vote,
+ .iommu_clk_on = __enable_clocks,
+ .iommu_clk_off = __disable_clocks,
+ .iommu_lock_acquire = _iommu_lock_acquire,
+ .iommu_lock_release = _iommu_lock_release,
+};
+
+static BLOCKING_NOTIFIER_HEAD(msm_iommu_notifier_list);
+
+void msm_iommu_register_notify(struct notifier_block *nb)
+{
+ blocking_notifier_chain_register(&msm_iommu_notifier_list, nb);
+}
+EXPORT_SYMBOL(msm_iommu_register_notify);
+
+#ifdef CONFIG_MSM_IOMMU_VBIF_CHECK
+
+#define VBIF_XIN_HALT_CTRL0 0x200
+#define VBIF_XIN_HALT_CTRL1 0x204
+#define VBIF_AXI_HALT_CTRL0 0x208
+#define VBIF_AXI_HALT_CTRL1 0x20C
+
+static void __halt_vbif_xin(void __iomem *vbif_base)
+{
+ pr_err("Halting VBIF_XIN\n");
+ writel_relaxed(0xFFFFFFFF, vbif_base + VBIF_XIN_HALT_CTRL0);
+}
+
+static void __dump_vbif_state(void __iomem *base, void __iomem *vbif_base)
+{
+ unsigned int reg_val;
+
+ reg_val = readl_relaxed(base + MICRO_MMU_CTRL);
+ pr_err("Value of SMMU_IMPLDEF_MICRO_MMU_CTRL = 0x%x\n", reg_val);
+
+ reg_val = readl_relaxed(vbif_base + VBIF_XIN_HALT_CTRL0);
+ pr_err("Value of VBIF_XIN_HALT_CTRL0 = 0x%x\n", reg_val);
+ reg_val = readl_relaxed(vbif_base + VBIF_XIN_HALT_CTRL1);
+ pr_err("Value of VBIF_XIN_HALT_CTRL1 = 0x%x\n", reg_val);
+ reg_val = readl_relaxed(vbif_base + VBIF_AXI_HALT_CTRL0);
+ pr_err("Value of VBIF_AXI_HALT_CTRL0 = 0x%x\n", reg_val);
+ reg_val = readl_relaxed(vbif_base + VBIF_AXI_HALT_CTRL1);
+ pr_err("Value of VBIF_AXI_HALT_CTRL1 = 0x%x\n", reg_val);
+}
+
+static int __check_vbif_state(struct msm_iommu_drvdata const *drvdata)
+{
+ phys_addr_t addr = (phys_addr_t) (drvdata->phys_base
+ - (phys_addr_t) 0x4000);
+ void __iomem *base = ioremap(addr, 0x1000);
+ int ret = 0;
+
+ if (base) {
+ __dump_vbif_state(drvdata->base, base);
+ __halt_vbif_xin(base);
+ __dump_vbif_state(drvdata->base, base);
+ iounmap(base);
+ } else {
+ pr_err("%s: Unable to ioremap\n", __func__);
+ ret = -ENOMEM;
+ }
+ return ret;
+}
+
+static void check_halt_state(struct msm_iommu_drvdata const *drvdata)
+{
+ int res;
+ unsigned int val;
+ void __iomem *base = drvdata->base;
+ char const *name = drvdata->name;
+
+ pr_err("Timed out waiting for IOMMU halt to complete for %s\n", name);
+ res = __check_vbif_state(drvdata);
+ if (res)
+ BUG();
+
+ pr_err("Checking if IOMMU halt completed for %s\n", name);
+
+ res = readl_poll_timeout(GLB_REG(MICRO_MMU_CTRL, base), val,
+ (val & MMU_CTRL_IDLE) == MMU_CTRL_IDLE, 0, 5000000);
+
+ if (res) {
+ pr_err("Timed out (again) waiting for IOMMU halt to complete for %s\n",
+ name);
+ } else {
+ pr_err("IOMMU halt completed. VBIF FIFO most likely not getting drained by master\n");
+ }
+ BUG();
+}
+
+static void check_tlb_sync_state(struct msm_iommu_drvdata const *drvdata,
+ int ctx, struct msm_iommu_priv *priv)
+{
+ int res;
+ unsigned int val;
+ void __iomem *base = drvdata->cb_base;
+ char const *name = drvdata->name;
+
+ pr_err("Timed out waiting for TLB SYNC to complete for %s (client: %s)\n",
+ name, priv->client_name);
+ blocking_notifier_call_chain(&msm_iommu_notifier_list, TLB_SYNC_TIMEOUT,
+ (void *) priv->client_name);
+ res = __check_vbif_state(drvdata);
+ if (res)
+ BUG();
+
+ pr_err("Checking if TLB sync completed for %s\n", name);
+
+ res = readl_poll_timeout(CTX_REG(CB_TLBSTATUS, base, ctx), val,
+ (val & CB_TLBSTATUS_SACTIVE) == 0, 0, 5000000);
+ if (res) {
+ pr_err("Timed out (again) waiting for TLB SYNC to complete for %s\n",
+ name);
+ } else {
+ pr_err("TLB Sync completed. VBIF FIFO most likely not getting drained by master\n");
+ }
+ BUG();
+}
+
+#else
+
+/*
+ * For targets without VBIF or for targets with the VBIF check disabled
+ * we directly just crash to capture the issue
+ */
+static void check_halt_state(struct msm_iommu_drvdata const *drvdata)
+{
+ BUG();
+}
+
+static void check_tlb_sync_state(struct msm_iommu_drvdata const *drvdata,
+ int ctx, struct msm_iommu_priv *priv)
+{
+ BUG();
+}
+
+#endif
+
+/*
+ * Halt the IOMMU so global state (SMRs, control registers) can be
+ * reprogrammed safely: request a halt via MICRO_MMU_CTRL and poll (up
+ * to 5 s) for the idle bit.  On timeout check_halt_state() dumps state
+ * and BUG()s.  No-op when halt support is not enabled for this
+ * instance.  Pair with iommu_resume().
+ */
+void iommu_halt(struct msm_iommu_drvdata const *iommu_drvdata)
+{
+	void __iomem *base = iommu_drvdata->base;
+	unsigned int val;
+	int res;
+
+	if (!iommu_drvdata->halt_enabled)
+		return;
+
+	SET_MICRO_MMU_CTRL_HALT_REQ(base, 1);
+	res = readl_poll_timeout(GLB_REG(MICRO_MMU_CTRL, base), val,
+				 (val & MMU_CTRL_IDLE) == MMU_CTRL_IDLE, 0, 5000000);
+	if (res)
+		check_halt_state(iommu_drvdata);
+
+	/* Ensure device is idle before continuing */
+	mb();
+}
+
+/*
+ * Release a halt previously requested by iommu_halt().  No-op when halt
+ * support is not enabled for this instance.
+ */
+void iommu_resume(const struct msm_iommu_drvdata *iommu_drvdata)
+{
+	if (!iommu_drvdata->halt_enabled)
+		return;
+
+	/* Ensure transactions have completed before releasing the halt */
+	mb();
+
+	SET_MICRO_MMU_CTRL_HALT_REQ(iommu_drvdata->base, 0);
+
+	/*
+	 * Ensure write is complete before continuing to ensure
+	 * we don't turn off clocks while transaction is still
+	 * pending.
+	 */
+	mb();
+}
+
+/*
+ * Issue a TLBSYNC for context @ctx and poll CB_TLBSTATUS.SACTIVE (up to
+ * 5 s) until the preceding TLB invalidation has drained.  On timeout
+ * check_tlb_sync_state() dumps state and BUG()s.
+ * NOTE(review): @res holds readl_poll_timeout()'s int return in an
+ * unsigned int; the truth test still works, but the type is off.
+ */
+static void __sync_tlb(struct msm_iommu_drvdata *iommu_drvdata, int ctx,
+		       struct msm_iommu_priv *priv)
+{
+	void __iomem *base = iommu_drvdata->cb_base;
+	unsigned int val;
+	unsigned int res;
+
+	SET_TLBSYNC(base, ctx, 0);
+	/* No barrier needed due to read dependency */
+
+	res = readl_poll_timeout(CTX_REG(CB_TLBSTATUS, base, ctx), val,
+				 (val & CB_TLBSTATUS_SACTIVE) == 0, 0, 5000000);
+	if (res)
+		check_tlb_sync_state(iommu_drvdata, ctx, priv);
+}
+
+#ifdef CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP
+static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
+{
+ struct msm_iommu_priv *priv = to_msm_priv(domain);
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ int ret = 0;
+
+ list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
+ BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
+
+ iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
+ BUG_ON(!iommu_drvdata);
+
+ ret = __enable_clocks(iommu_drvdata);
+ if (ret)
+ goto fail;
+
+ SET_TLBIVA(iommu_drvdata->cb_base, ctx_drvdata->num,
+ ctx_drvdata->asid | (va & CB_TLBIVA_VA));
+ mb();
+ __sync_tlb(iommu_drvdata, ctx_drvdata->num, priv);
+ __disable_clocks(iommu_drvdata);
+ }
+
+fail:
+ return ret;
+}
+#endif
+
+static int __flush_iotlb(struct iommu_domain *domain)
+{
+ struct msm_iommu_priv *priv = to_msm_priv(domain);
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ int ret = 0;
+
+ list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
+ BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
+
+ iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
+ BUG_ON(!iommu_drvdata);
+
+ ret = __enable_clocks(iommu_drvdata);
+ if (ret)
+ goto fail;
+
+ SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num,
+ ctx_drvdata->asid);
+ __sync_tlb(iommu_drvdata, ctx_drvdata->num, priv);
+ __disable_clocks(iommu_drvdata);
+ }
+
+fail:
+ return ret;
+}
+
+/*
+ * May only be called for non-secure iommus.  Brings the global state to
+ * a known-clean baseline: clears control/fault registers, invalidates
+ * the entire non-secure TLB and waits (up to 5 s) for the global TLB
+ * sync, then invalidates every stream-match (SMR) entry.
+ */
+static void __reset_iommu(struct msm_iommu_drvdata *iommu_drvdata)
+{
+	void __iomem *base = iommu_drvdata->base;
+	int i, smt_size, res;
+	unsigned long val;
+
+	/*
+	 * SMMU_ACR is an implementation defined register.
+	 * Resetting is not required for some implementation (MMU-500
+	 * keeps its reset value here).
+	 */
+	if (iommu_drvdata->model != MMU_500)
+		SET_ACR(base, 0);
+	SET_CR2(base, 0);
+	SET_GFAR(base, 0);
+	SET_GFSRRESTORE(base, 0);
+
+	/* Invalidate the entire non-secure TLB and wait for completion */
+	SET_TLBIALLNSNH(base, 0);
+	SET_TLBGSYNC(base, 0);
+	res = readl_poll_timeout(GLB_REG(TLBGSTATUS, base), val,
+				 (val & TLBGSTATUS_GSACTIVE) == 0, 0, 5000000);
+	if (res)
+		BUG();
+
+	smt_size = GET_IDR0_NUMSMRG(base);
+
+	/* Invalidate every stream-match register */
+	for (i = 0; i < smt_size; i++)
+		SET_SMR_VALID(base, i, 0);
+
+	mb();
+}
+
+static void __reset_iommu_secure(struct msm_iommu_drvdata *iommu_drvdata)
+{
+ void __iomem *base = iommu_drvdata->base;
+
+ if (iommu_drvdata->model != MMU_500)
+ SET_NSACR(base, 0);
+ SET_NSCR2(base, 0);
+ SET_NSGFAR(base, 0);
+ SET_NSGFSRRESTORE(base, 0);
+ mb();
+}
+
+static void __program_iommu_secure(struct msm_iommu_drvdata *iommu_drvdata)
+{
+ void __iomem *base = iommu_drvdata->base;
+
+ if (iommu_drvdata->model == MMU_500) {
+ SET_NSACR_SMTNMC_BPTLBEN(base, 1);
+ SET_NSACR_MMUDIS_BPTLBEN(base, 1);
+ SET_NSACR_S2CR_BPTLBEN(base, 1);
+ }
+ SET_NSCR0_SMCFCFG(base, 1);
+ SET_NSCR0_USFCFG(base, 1);
+ SET_NSCR0_STALLD(base, 1);
+ SET_NSCR0_GCFGFIE(base, 1);
+ SET_NSCR0_GCFGFRE(base, 1);
+ SET_NSCR0_GFIE(base, 1);
+ SET_NSCR0_GFRE(base, 1);
+ SET_NSCR0_CLIENTPD(base, 0);
+}
+
+/*
+ * May only be called for non-secure iommus.  One-time global
+ * programming: reset the instance, set the MMU-500 ACR BPTLBEN bits,
+ * enable global/stream-match fault reporting in CR0 and clear the
+ * client-port-disable bit.  When secure SCM calls are unavailable, the
+ * non-secure banked copies are reset/programmed from here as well.
+ */
+static void __program_iommu(struct msm_iommu_drvdata *drvdata)
+{
+	__reset_iommu(drvdata);
+
+	if (!msm_iommu_get_scm_call_avail())
+		__reset_iommu_secure(drvdata);
+
+	if (drvdata->model == MMU_500) {
+		SET_ACR_SMTNMC_BPTLBEN(drvdata->base, 1);
+		SET_ACR_MMUDIS_BPTLBEN(drvdata->base, 1);
+		SET_ACR_S2CR_BPTLBEN(drvdata->base, 1);
+	}
+	SET_CR0_SMCFCFG(drvdata->base, 1);
+	SET_CR0_USFCFG(drvdata->base, 1);
+	SET_CR0_STALLD(drvdata->base, 1);
+	SET_CR0_GCFGFIE(drvdata->base, 1);
+	SET_CR0_GCFGFRE(drvdata->base, 1);
+	SET_CR0_GFIE(drvdata->base, 1);
+	SET_CR0_GFRE(drvdata->base, 1);
+	SET_CR0_CLIENTPD(drvdata->base, 0);
+
+	if (!msm_iommu_get_scm_call_avail())
+		__program_iommu_secure(drvdata);
+
+	/* Presumably selects non-secure interrupt routing -- confirm */
+	if (drvdata->smmu_local_base)
+		writel_relaxed(0xFFFFFFFF,
+			       drvdata->smmu_local_base + SMMU_INTR_SEL_NS);
+
+	mb(); /* Make sure writes complete before returning */
+}
+
+void program_iommu_bfb_settings(void __iomem *base,
+ const struct msm_iommu_bfb_settings *bfb_settings)
+{
+ unsigned int i;
+
+ if (bfb_settings)
+ for (i = 0; i < bfb_settings->length; i++)
+ SET_GLOBAL_REG(base, bfb_settings->regs[i],
+ bfb_settings->data[i]);
+
+ /* Make sure writes complete before returning */
+ mb();
+}
+
+static void __reset_context(struct msm_iommu_drvdata *iommu_drvdata, int ctx)
+{
+ void __iomem *base = iommu_drvdata->cb_base;
+
+ /* Don't set ACTLR to zero because if context bank is in
+ * bypass mode (say after iommu_detach), still this ACTLR
+ * value matters for micro-TLB caching.
+ */
+ if (iommu_drvdata->model != MMU_500)
+ SET_ACTLR(base, ctx, 0);
+ SET_FAR(base, ctx, 0);
+ SET_FSRRESTORE(base, ctx, 0);
+ SET_NMRR(base, ctx, 0);
+ SET_PAR(base, ctx, 0);
+ SET_PRRR(base, ctx, 0);
+ SET_SCTLR(base, ctx, 0);
+ SET_TTBCR(base, ctx, 0);
+ SET_TTBR0(base, ctx, 0);
+ SET_TTBR1(base, ctx, 0);
+ mb();
+}
+
+static void __release_smg(void __iomem *base)
+{
+ int i, smt_size;
+ smt_size = GET_IDR0_NUMSMRG(base);
+
+ /* Invalidate all SMGs */
+ for (i = 0; i < smt_size; i++)
+ if (GET_SMR_VALID(base, i))
+ SET_SMR_VALID(base, i, 0);
+}
+
+#ifdef CONFIG_IOMMU_LPAE
+static void msm_iommu_set_ASID(void __iomem *base, unsigned int ctx_num,
+ unsigned int asid)
+{
+ SET_CB_TTBR0_ASID(base, ctx_num, asid);
+}
+#else
+static void msm_iommu_set_ASID(void __iomem *base, unsigned int ctx_num,
+ unsigned int asid)
+{
+ SET_CB_CONTEXTIDR_ASID(base, ctx_num, asid);
+}
+#endif
+
+static void msm_iommu_assign_ASID(const struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *curr_ctx,
+ struct msm_iommu_priv *priv)
+{
+ void __iomem *cb_base = iommu_drvdata->cb_base;
+
+ curr_ctx->asid = curr_ctx->num;
+ msm_iommu_set_ASID(cb_base, curr_ctx->num, curr_ctx->asid);
+}
+
+#ifdef CONFIG_IOMMU_LPAE
+static void msm_iommu_setup_ctx(void __iomem *base, unsigned int ctx)
+{
+ SET_CB_TTBCR_EAE(base, ctx, 1); /* Extended Address Enable (EAE) */
+}
+
+static void msm_iommu_setup_memory_remap(void __iomem *base, unsigned int ctx)
+{
+ SET_CB_MAIR0(base, ctx, msm_iommu_get_mair0());
+ SET_CB_MAIR1(base, ctx, msm_iommu_get_mair1());
+}
+
+static void msm_iommu_setup_pg_l2_redirect(void __iomem *base, unsigned int ctx)
+{
+ /*
+ * Configure page tables as inner-cacheable and shareable to reduce
+ * the TLB miss penalty.
+ */
+ SET_CB_TTBCR_SH0(base, ctx, 3); /* Inner shareable */
+ SET_CB_TTBCR_ORGN0(base, ctx, 1); /* outer cachable*/
+ SET_CB_TTBCR_IRGN0(base, ctx, 1); /* inner cachable*/
+ SET_CB_TTBCR_T0SZ(base, ctx, 0); /* 0GB-4GB */
+
+
+ SET_CB_TTBCR_SH1(base, ctx, 3); /* Inner shareable */
+ SET_CB_TTBCR_ORGN1(base, ctx, 1); /* outer cachable*/
+ SET_CB_TTBCR_IRGN1(base, ctx, 1); /* inner cachable*/
+ SET_CB_TTBCR_T1SZ(base, ctx, 0); /* TTBR1 not used */
+}
+
+#else
+
+static void msm_iommu_setup_ctx(void __iomem *base, unsigned int ctx)
+{
+ /* Turn on TEX Remap */
+ SET_CB_SCTLR_TRE(base, ctx, 1);
+}
+
+static void msm_iommu_setup_memory_remap(void __iomem *base, unsigned int ctx)
+{
+ SET_PRRR(base, ctx, msm_iommu_get_prrr());
+ SET_NMRR(base, ctx, msm_iommu_get_nmrr());
+}
+
+static void msm_iommu_setup_pg_l2_redirect(void __iomem *base, unsigned int ctx)
+{
+ /* Configure page tables as inner-cacheable and shareable to reduce
+ * the TLB miss penalty.
+ */
+ SET_CB_TTBR0_S(base, ctx, 1);
+ SET_CB_TTBR0_NOS(base, ctx, 1);
+ SET_CB_TTBR0_IRGN1(base, ctx, 0); /* WB, WA */
+ SET_CB_TTBR0_IRGN0(base, ctx, 1);
+ SET_CB_TTBR0_RGN(base, ctx, 1); /* WB, WA */
+}
+
+#endif
+
+static int program_m2v_table(struct device *dev, void __iomem *base)
+{
+ struct msm_iommu_ctx_drvdata *ctx_drvdata = dev_get_drvdata(dev);
+ u32 *sids = ctx_drvdata->sids;
+ u32 *sid_mask = ctx_drvdata->sid_mask;
+ unsigned int ctx = ctx_drvdata->num;
+ int num = 0, i, smt_size;
+ int len = ctx_drvdata->nsid;
+
+ smt_size = GET_IDR0_NUMSMRG(base);
+
+ /* Program the M2V tables for this context */
+ for (i = 0; i < len / sizeof(*sids); i++) {
+ for (; num < smt_size; num++)
+ if (GET_SMR_VALID(base, num) == 0)
+ break;
+ BUG_ON(num >= smt_size);
+
+ SET_SMR_VALID(base, num, 1);
+ SET_SMR_MASK(base, num, sid_mask[i]);
+ SET_SMR_ID(base, num, sids[i]);
+
+ SET_S2CR_N(base, num, 0);
+ SET_S2CR_CBNDX(base, num, ctx);
+ SET_S2CR_MEMATTR(base, num, 0x0A);
+ /* Set security bit override to be Non-secure */
+ SET_S2CR_NSCFG(base, num, 3);
+ }
+
+ return 0;
+}
+
+static void program_all_m2v_tables(struct msm_iommu_drvdata *iommu_drvdata)
+{
+ device_for_each_child(iommu_drvdata->dev, iommu_drvdata->base,
+ program_m2v_table);
+}
+
+static void __program_context(struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata,
+ struct msm_iommu_priv *priv, bool is_secure,
+ bool program_m2v)
+{
+ phys_addr_t pn;
+ void __iomem *base = iommu_drvdata->base;
+ void __iomem *cb_base = iommu_drvdata->cb_base;
+ unsigned int ctx = ctx_drvdata->num;
+ phys_addr_t pgtable = __pa(priv->pt.fl_table);
+
+ __reset_context(iommu_drvdata, ctx);
+ msm_iommu_setup_ctx(cb_base, ctx);
+
+ if (priv->pt.redirect)
+ msm_iommu_setup_pg_l2_redirect(cb_base, ctx);
+
+ msm_iommu_setup_memory_remap(cb_base, ctx);
+
+ pn = pgtable >> CB_TTBR0_ADDR_SHIFT;
+ SET_CB_TTBR0_ADDR(cb_base, ctx, pn);
+
+ /* Enable context fault interrupt */
+ SET_CB_SCTLR_CFIE(cb_base, ctx, 1);
+
+ if (iommu_drvdata->model != MMU_500) {
+ /* Redirect all cacheable requests to L2 slave port. */
+ SET_CB_ACTLR_BPRCISH(cb_base, ctx, 1);
+ SET_CB_ACTLR_BPRCOSH(cb_base, ctx, 1);
+ SET_CB_ACTLR_BPRCNSH(cb_base, ctx, 1);
+ }
+
+ /* Enable private ASID namespace */
+ SET_CB_SCTLR_ASIDPNE(cb_base, ctx, 1);
+
+ if (!is_secure) {
+ if (program_m2v)
+ program_all_m2v_tables(iommu_drvdata);
+
+ SET_CBAR_N(base, ctx, 0);
+
+ /* Stage 1 Context with Stage 2 bypass */
+ SET_CBAR_TYPE(base, ctx, 1);
+
+ /* Route page faults to the non-secure interrupt */
+ SET_CBAR_IRPTNDX(base, ctx, 1);
+
+ /* Set VMID to non-secure HLOS */
+ SET_CBAR_VMID(base, ctx, 3);
+
+ /* Bypass is treated as inner-shareable */
+ SET_CBAR_BPSHCFG(base, ctx, 2);
+
+ /* Do not downgrade memory attributes */
+ SET_CBAR_MEMATTR(base, ctx, 0x0A);
+
+ }
+
+ msm_iommu_assign_ASID(iommu_drvdata, ctx_drvdata, priv);
+
+ /* Ensure that ASID assignment has completed before we use
+ * ASID for TLB invalidation. Here, mb() is required because
+ * both these registers are separated by more than 1KB. */
+ mb();
+
+ SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num,
+ ctx_drvdata->asid);
+ __sync_tlb(iommu_drvdata, ctx_drvdata->num, priv);
+
+ /* Enable the MMU */
+ SET_CB_SCTLR_M(cb_base, ctx, 1);
+ mb();
+}
+
+#ifdef CONFIG_IOMMU_PGTABLES_L2
+#define INITIAL_REDIRECT_VAL 1
+#else
+#define INITIAL_REDIRECT_VAL 0
+#endif
+
+/*
+ * Allocate and initialize an MSM IOMMU domain.  Only
+ * IOMMU_DOMAIN_UNMANAGED is supported; other types yield NULL.
+ * Returns the embedded struct iommu_domain, or NULL on failure.
+ * NOTE(review): the failure path uses "return 0" -- NULL would be the
+ * idiomatic spelling for a pointer return.
+ */
+static struct iommu_domain * msm_iommu_domain_alloc(unsigned type)
+{
+	struct msm_iommu_priv *priv;
+	int ret = -ENOMEM;
+	struct iommu_domain *domain;
+
+	if (type != IOMMU_DOMAIN_UNMANAGED)
+		return NULL;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		goto fail_nomem;
+
+	/* Whether page tables may be cached in L2 (see INITIAL_REDIRECT_VAL) */
+	priv->pt.redirect = INITIAL_REDIRECT_VAL;
+
+	INIT_LIST_HEAD(&priv->list_attached);
+
+	ret = msm_iommu_pagetable_alloc(&priv->pt);
+	if (ret)
+		goto fail_nomem;
+
+	domain = &priv->domain;
+
+	return domain;
+
+fail_nomem:
+	/* kfree(NULL) is a no-op, so the !priv path is safe here too */
+	kfree(priv);
+	return 0;
+}
+
+static void msm_iommu_domain_free(struct iommu_domain *domain)
+{
+ struct msm_iommu_priv *priv;
+
+ mutex_lock(&msm_iommu_lock);
+ priv = to_msm_priv(domain);
+
+ if (priv)
+ msm_iommu_pagetable_free(&priv->pt);
+
+ kfree(priv);
+ mutex_unlock(&msm_iommu_lock);
+}
+
+/*
+ * Attach @dev (an IOMMU context device) to @domain.
+ *
+ * Attachment is refcounted per context (attach_count); only the first
+ * attach programs the hardware.  The first attach on a given IOMMU
+ * instance additionally performs the one-time global programming and
+ * applies the BFB settings.  Returns 0 or a negative errno.
+ *
+ * NOTE(review): error-path accounting looks inconsistent -- verify:
+ *  - attach_count is incremented before the -EBUSY validation below and
+ *    is not decremented on those failure paths;
+ *  - the regulator reference from __enable_regulators() is not dropped
+ *    when apply_bus_vote() fails;
+ *  - a successful bus vote is never reverted on later failures.
+ */
+static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+	struct msm_iommu_priv *priv;
+	struct msm_iommu_drvdata *iommu_drvdata;
+	struct msm_iommu_ctx_drvdata *ctx_drvdata;
+	struct msm_iommu_ctx_drvdata *tmp_drvdata;
+	int ret = 0;
+	int is_secure;
+	bool set_m2v = false;
+
+	mutex_lock(&msm_iommu_lock);
+
+	priv = to_msm_priv(domain);
+	if (!priv || !dev) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	iommu_drvdata = dev_get_drvdata(dev->parent);
+	ctx_drvdata = dev_get_drvdata(dev);
+	if (!iommu_drvdata || !ctx_drvdata) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	++ctx_drvdata->attach_count;
+
+	/* Nested attach: hardware is already programmed, just count it */
+	if (ctx_drvdata->attach_count > 1)
+		goto already_attached;
+
+	if (!list_empty(&ctx_drvdata->attached_elm)) {
+		ret = -EBUSY;
+		goto unlock;
+	}
+
+	list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
+		if (tmp_drvdata == ctx_drvdata) {
+			ret = -EBUSY;
+			goto unlock;
+		}
+
+	/* A secure-world id of -1 marks a non-secure instance */
+	is_secure = iommu_drvdata->sec_id != -1;
+
+	ret = __enable_regulators(iommu_drvdata);
+	if (ret)
+		goto unlock;
+
+	ret = apply_bus_vote(iommu_drvdata, 1);
+	if (ret)
+		goto unlock;
+
+	ret = __enable_clocks(iommu_drvdata);
+	if (ret) {
+		__disable_regulators(iommu_drvdata);
+		goto unlock;
+	}
+
+	/* We can only do this once */
+	if (!iommu_drvdata->ctx_attach_count) {
+		if (!is_secure) {
+			iommu_halt(iommu_drvdata);
+			__program_iommu(iommu_drvdata);
+			iommu_resume(iommu_drvdata);
+		} else {
+			ret = msm_iommu_sec_program_iommu(iommu_drvdata,
+							  ctx_drvdata);
+			if (ret) {
+				__disable_regulators(iommu_drvdata);
+				__disable_clocks(iommu_drvdata);
+				goto unlock;
+			}
+		}
+		program_iommu_bfb_settings(iommu_drvdata->base,
+					   iommu_drvdata->bfb_settings);
+		set_m2v = true;
+	}
+
+	iommu_halt(iommu_drvdata);
+
+	__program_context(iommu_drvdata, ctx_drvdata, priv, is_secure, set_m2v);
+
+	iommu_resume(iommu_drvdata);
+
+	/* Ensure TLB is clear */
+	if (iommu_drvdata->model != MMU_500) {
+		SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num,
+			     ctx_drvdata->asid);
+		__sync_tlb(iommu_drvdata, ctx_drvdata->num, priv);
+	}
+
+	__disable_clocks(iommu_drvdata);
+
+	list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
+	ctx_drvdata->attached_domain = domain;
+	++iommu_drvdata->ctx_attach_count;
+
+already_attached:
+	mutex_unlock(&msm_iommu_lock);
+
+	msm_iommu_attached(dev->parent);
+	return ret;
+unlock:
+	mutex_unlock(&msm_iommu_lock);
+	return ret;
+}
+
+static void msm_iommu_detach_dev(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct msm_iommu_priv *priv;
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ int ret;
+ int is_secure;
+
+ if (!dev)
+ return;
+
+ msm_iommu_detached(dev->parent);
+
+ mutex_lock(&msm_iommu_lock);
+ priv = to_msm_priv(domain);
+ if (!priv)
+ goto unlock;
+
+ iommu_drvdata = dev_get_drvdata(dev->parent);
+ ctx_drvdata = dev_get_drvdata(dev);
+ if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
+ goto unlock;
+
+ --ctx_drvdata->attach_count;
+ BUG_ON(ctx_drvdata->attach_count < 0);
+
+ if (ctx_drvdata->attach_count > 0)
+ goto unlock;
+
+ ret = __enable_clocks(iommu_drvdata);
+ if (ret)
+ goto unlock;
+
+ is_secure = iommu_drvdata->sec_id != -1;
+
+ if (iommu_drvdata->model == MMU_500) {
+ SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num,
+ ctx_drvdata->asid);
+ __sync_tlb(iommu_drvdata, ctx_drvdata->num, priv);
+ }
+
+ ctx_drvdata->asid = -1;
+
+ __reset_context(iommu_drvdata, ctx_drvdata->num);
+
+ /*
+ * Only reset the M2V tables on the very last detach */
+ if (!is_secure && iommu_drvdata->ctx_attach_count == 1) {
+ iommu_halt(iommu_drvdata);
+ __release_smg(iommu_drvdata->base);
+ iommu_resume(iommu_drvdata);
+ }
+
+ __disable_clocks(iommu_drvdata);
+
+ apply_bus_vote(iommu_drvdata, 0);
+
+ __disable_regulators(iommu_drvdata);
+
+ list_del_init(&ctx_drvdata->attached_elm);
+ ctx_drvdata->attached_domain = NULL;
+ BUG_ON(iommu_drvdata->ctx_attach_count == 0);
+ --iommu_drvdata->ctx_attach_count;
+unlock:
+ mutex_unlock(&msm_iommu_lock);
+}
+
+static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
+ phys_addr_t pa, size_t len, int prot)
+{
+ struct msm_iommu_priv *priv;
+ int ret = 0;
+
+ mutex_lock(&msm_iommu_lock);
+
+ priv = to_msm_priv(domain);
+ if (!priv) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ ret = msm_iommu_pagetable_map(&priv->pt, va, pa, len, prot);
+ if (ret)
+ goto fail;
+
+#ifdef CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP
+ ret = __flush_iotlb_va(domain, va);
+#endif
+
+fail:
+ mutex_unlock(&msm_iommu_lock);
+ return ret;
+}
+
+/*
+ * Unmap @len bytes at @va, flush the TLB on every attached context and
+ * release now-empty page-table pages.  Per the IOMMU API, returns the
+ * number of bytes unmapped, or 0 on failure.
+ * NOTE(review): @ret is reused for the __flush_iotlb() result, so a
+ * successful unmap followed by a failed flush reports 0 bytes -- verify
+ * that is intended.
+ */
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+			      size_t len)
+{
+	struct msm_iommu_priv *priv;
+	int ret = -ENODEV;
+
+	mutex_lock(&msm_iommu_lock);
+
+	priv = to_msm_priv(domain);
+	if (!priv)
+		goto fail;
+
+	ret = msm_iommu_pagetable_unmap(&priv->pt, va, len);
+	if (ret < 0)
+		goto fail;
+
+	ret = __flush_iotlb(domain);
+
+	msm_iommu_pagetable_free_tables(&priv->pt, va, len);
+fail:
+	mutex_unlock(&msm_iommu_lock);
+
+	/* the IOMMU API requires us to return how many bytes were unmapped */
+	len = ret ? 0 : len;
+	return len;
+}
+
+/*
+ * Map scatterlist @sg (@nents entries) contiguously at @iova with
+ * attributes @prot.
+ * NOTE(review): the IOMMU API expects map_sg to return the number of
+ * bytes mapped (0 on failure), but this returns @ret -- 0 on success
+ * and a negative errno (implicitly cast to size_t) on failure.  Verify
+ * callers expect these semantics.
+ */
+static size_t msm_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+			       struct scatterlist *sg, unsigned int nents,
+			       int prot)
+{
+	struct msm_iommu_priv *priv;
+	struct scatterlist *tmp;
+	unsigned int len = 0;
+	int ret, i;
+
+	mutex_lock(&msm_iommu_lock);
+
+	priv = to_msm_priv(domain);
+	if (!priv) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	/* Total mapping length is the sum of the segment lengths */
+	for_each_sg(sg, tmp, nents, i)
+		len += tmp->length;
+
+	ret = msm_iommu_pagetable_map_range(&priv->pt, iova, sg, len, prot);
+	if (ret)
+		goto fail;
+
+#ifdef CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP
+	__flush_iotlb(domain);
+#endif
+
+fail:
+	mutex_unlock(&msm_iommu_lock);
+	return ret;
+}
+
+static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
+ unsigned int len)
+{
+ struct msm_iommu_priv *priv;
+
+ mutex_lock(&msm_iommu_lock);
+
+ priv = to_msm_priv(domain);
+ msm_iommu_pagetable_unmap_range(&priv->pt, va, len);
+
+ __flush_iotlb(domain);
+
+ msm_iommu_pagetable_free_tables(&priv->pt, va, len);
+ mutex_unlock(&msm_iommu_lock);
+
+ return 0;
+}
+
+#ifdef CONFIG_IOMMU_LPAE
+static phys_addr_t msm_iommu_get_phy_from_PAR(unsigned long va, u64 par)
+{
+ phys_addr_t phy;
+
+ /* Upper 28 bits from PAR, lower 12 from VA */
+ phy = (par & 0xFFFFFFF000ULL) | (va & 0x00000FFF);
+
+ return phy;
+}
+#else
+static phys_addr_t msm_iommu_get_phy_from_PAR(unsigned long va, u64 par)
+{
+ phys_addr_t phy;
+
+ /* We are dealing with a supersection */
+ if (par & CB_PAR_SS)
+ phy = (par & 0xFF000000) | (va & 0x00FFFFFF);
+ else /* Upper 20 bits from PAR, lower 12 from VA */
+ phy = (par & 0xFFFFF000) | (va & 0x00000FFF);
+
+ return phy;
+}
+#endif
+
+static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
+ phys_addr_t va)
+{
+ struct msm_iommu_priv *priv;
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ u64 par;
+ void __iomem *base;
+ phys_addr_t ret = 0;
+ int ctx;
+ int i;
+
+ mutex_lock(&msm_iommu_lock);
+
+ priv = to_msm_priv(domain);
+ if (list_empty(&priv->list_attached))
+ goto fail;
+
+ ctx_drvdata = list_entry(priv->list_attached.next,
+ struct msm_iommu_ctx_drvdata, attached_elm);
+ iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
+
+ if (iommu_drvdata->model == MMU_500) {
+ ret = msm_iommu_iova_to_phys_soft(domain, va);
+ mutex_unlock(&msm_iommu_lock);
+ return ret;
+ }
+
+ base = iommu_drvdata->cb_base;
+ ctx = ctx_drvdata->num;
+
+ ret = __enable_clocks(iommu_drvdata);
+ if (ret) {
+ ret = 0; /* 0 indicates translation failed */
+ goto fail;
+ }
+
+ SET_ATS1PR(base, ctx, va & CB_ATS1PR_ADDR);
+ mb();
+ for (i = 0; i < IOMMU_MSEC_TIMEOUT; i += IOMMU_MSEC_STEP)
+ if (GET_CB_ATSR_ACTIVE(base, ctx) == 0)
+ break;
+ else
+ msleep(IOMMU_MSEC_STEP);
+
+ if (i >= IOMMU_MSEC_TIMEOUT) {
+ pr_err("%s: iova to phys timed out on %pa for %s (%s)\n",
+ __func__, &va, iommu_drvdata->name, ctx_drvdata->name);
+ ret = 0;
+ goto fail;
+ }
+
+ par = GET_PAR(base, ctx);
+ __disable_clocks(iommu_drvdata);
+
+ if (par & CB_PAR_F) {
+ unsigned int level = (par & CB_PAR_PLVL) >> CB_PAR_PLVL_SHIFT;
+ pr_err("IOMMU translation fault!\n");
+ pr_err("name = %s\n", iommu_drvdata->name);
+ pr_err("context = %s (%d)\n", ctx_drvdata->name,
+ ctx_drvdata->num);
+ pr_err("Interesting registers:\n");
+ pr_err("PAR = %16llx [%s%s%s%s%s%s%s%sPLVL%u %s]\n", par,
+ (par & CB_PAR_F) ? "F " : "",
+ (par & CB_PAR_TF) ? "TF " : "",
+ (par & CB_PAR_AFF) ? "AFF " : "",
+ (par & CB_PAR_PF) ? "PF " : "",
+ (par & CB_PAR_EF) ? "EF " : "",
+ (par & CB_PAR_TLBMCF) ? "TLBMCF " : "",
+ (par & CB_PAR_TLBLKF) ? "TLBLKF " : "",
+ (par & CB_PAR_ATOT) ? "ATOT " : "",
+ level,
+ (par & CB_PAR_STAGE) ? "S2 " : "S1 ");
+ ret = 0;
+ } else {
+ ret = msm_iommu_get_phy_from_PAR(va, par);
+ }
+
+fail:
+ mutex_unlock(&msm_iommu_lock);
+
+ return ret;
+}
+
+#ifdef CONFIG_IOMMU_LPAE
+static inline void print_ctx_mem_attr_regs(struct msm_iommu_context_reg regs[])
+{
+ pr_err("MAIR0 = %08x MAIR1 = %08x\n",
+ regs[DUMP_REG_MAIR0].val, regs[DUMP_REG_MAIR1].val);
+}
+#else
+static inline void print_ctx_mem_attr_regs(struct msm_iommu_context_reg regs[])
+{
+ pr_err("PRRR = %08x NMRR = %08x\n",
+ regs[DUMP_REG_PRRR].val, regs[DUMP_REG_NMRR].val);
+}
+#endif
+
+void print_ctx_regs(struct msm_iommu_context_reg regs[])
+{
+ uint32_t fsr = regs[DUMP_REG_FSR].val;
+ u64 ttbr;
+ enum dump_reg iter;
+
+ pr_err("FAR = %016llx\n",
+ COMBINE_DUMP_REG(
+ regs[DUMP_REG_FAR1].val,
+ regs[DUMP_REG_FAR0].val));
+ pr_err("PAR = %016llx\n",
+ COMBINE_DUMP_REG(
+ regs[DUMP_REG_PAR1].val,
+ regs[DUMP_REG_PAR0].val));
+ pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s]\n", fsr,
+ (fsr & 0x02) ? "TF " : "",
+ (fsr & 0x04) ? "AFF " : "",
+ (fsr & 0x08) ? "PF " : "",
+ (fsr & 0x10) ? "EF " : "",
+ (fsr & 0x20) ? "TLBMCF " : "",
+ (fsr & 0x40) ? "TLBLKF " : "",
+ (fsr & 0x80) ? "MHF " : "",
+ (fsr & 0x40000000) ? "SS " : "",
+ (fsr & 0x80000000) ? "MULTI " : "");
+
+ pr_err("FSYNR0 = %08x FSYNR1 = %08x\n",
+ regs[DUMP_REG_FSYNR0].val, regs[DUMP_REG_FSYNR1].val);
+
+ ttbr = COMBINE_DUMP_REG(regs[DUMP_REG_TTBR0_1].val,
+ regs[DUMP_REG_TTBR0_0].val);
+ if (regs[DUMP_REG_TTBR0_1].valid)
+ pr_err("TTBR0 = %016llx\n", ttbr);
+ else
+ pr_err("TTBR0 = %016llx (32b)\n", ttbr);
+
+ ttbr = COMBINE_DUMP_REG(regs[DUMP_REG_TTBR1_1].val,
+ regs[DUMP_REG_TTBR1_0].val);
+
+ if (regs[DUMP_REG_TTBR1_1].valid)
+ pr_err("TTBR1 = %016llx\n", ttbr);
+ else
+ pr_err("TTBR1 = %016llx (32b)\n", ttbr);
+
+ pr_err("SCTLR = %08x ACTLR = %08x\n",
+ regs[DUMP_REG_SCTLR].val, regs[DUMP_REG_ACTLR].val);
+ pr_err("CBAR = %08x CBFRSYNRA = %08x\n",
+ regs[DUMP_REG_CBAR_N].val, regs[DUMP_REG_CBFRSYNRA_N].val);
+ print_ctx_mem_attr_regs(regs);
+
+ for (iter = DUMP_REG_FIRST; iter < MAX_DUMP_REGS; ++iter)
+ if (!regs[iter].valid)
+ pr_err("NOTE: Value actually unknown for %s\n",
+ dump_regs_tbl[iter].name);
+}
+
+static void __print_ctx_regs(struct msm_iommu_drvdata *drvdata, int ctx,
+ unsigned int fsr)
+{
+ void __iomem *base = drvdata->base;
+ void __iomem *cb_base = drvdata->cb_base;
+ bool is_secure = drvdata->sec_id != -1;
+
+ struct msm_iommu_context_reg regs[MAX_DUMP_REGS];
+ unsigned int i;
+ memset(regs, 0, sizeof(regs));
+
+ for (i = DUMP_REG_FIRST; i < MAX_DUMP_REGS; ++i) {
+ struct msm_iommu_context_reg *r = &regs[i];
+ unsigned long regaddr = dump_regs_tbl[i].reg_offset;
+ if (is_secure &&
+ dump_regs_tbl[i].dump_reg_type != DRT_CTX_REG) {
+ r->valid = 0;
+ continue;
+ }
+ r->valid = 1;
+ switch (dump_regs_tbl[i].dump_reg_type) {
+ case DRT_CTX_REG:
+ r->val = GET_CTX_REG(regaddr, cb_base, ctx);
+ break;
+ case DRT_GLOBAL_REG:
+ r->val = GET_GLOBAL_REG(regaddr, base);
+ break;
+ case DRT_GLOBAL_REG_N:
+ r->val = GET_GLOBAL_REG_N(regaddr, ctx, base);
+ break;
+ default:
+ pr_info("Unknown dump_reg_type...\n");
+ r->valid = 0;
+ break;
+ }
+ }
+ print_ctx_regs(regs);
+}
+
+/*
+ * Decode and print the global fault state: GFAR plus the GFSR fault
+ * bits (each mask below corresponds to one fault-type flag in the
+ * string), followed by the three global fault syndrome registers.
+ */
+static void print_global_regs(void __iomem *base, unsigned int gfsr)
+{
+ pr_err("GFAR = %016llx\n", GET_GFAR(base));
+
+ pr_err("GFSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n", gfsr,
+ (gfsr & 0x01) ? "ICF " : "",
+ (gfsr & 0x02) ? "USF " : "",
+ (gfsr & 0x04) ? "SMCF " : "",
+ (gfsr & 0x08) ? "UCBF " : "",
+ (gfsr & 0x10) ? "UCIF " : "",
+ (gfsr & 0x20) ? "CAF " : "",
+ (gfsr & 0x40) ? "EF " : "",
+ (gfsr & 0x80) ? "PF " : "",
+ (gfsr & 0x40000000) ? "SS " : "",
+ (gfsr & 0x80000000) ? "MULTI " : "");
+
+ pr_err("GFSYNR0 = %08x\n", GET_GFSYNR0(base));
+ pr_err("GFSYNR1 = %08x\n", GET_GFSYNR1(base));
+ pr_err("GFSYNR2 = %08x\n", GET_GFSYNR2(base));
+}
+
+/*
+ * Threaded IRQ handler for global (non-context) SMMU faults.
+ * Takes msm_iommu_lock (a mutex), so it must only ever be registered
+ * as the threaded half of a threaded IRQ — which is how probe wires it
+ * up.  Returns IRQ_HANDLED when the fault was consumed (or could not
+ * be read), IRQ_NONE when no fault was pending or clocks failed.
+ */
+irqreturn_t msm_iommu_global_fault_handler(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ struct msm_iommu_drvdata *drvdata;
+ unsigned int gfsr;
+ int ret;
+
+ mutex_lock(&msm_iommu_lock);
+ BUG_ON(!pdev);
+
+ drvdata = dev_get_drvdata(&pdev->dev);
+ BUG_ON(!drvdata);
+
+ /* Registers are unreadable with the IOMMU powered down. */
+ if (!drvdata->powered_on) {
+ pr_err("Unexpected IOMMU global fault !!\n");
+ pr_err("name = %s\n", drvdata->name);
+ pr_err("Power is OFF. Can't read global fault information\n");
+ ret = IRQ_HANDLED;
+ goto fail;
+ }
+
+ /* Secure SMMUs should not raise non-secure global faults. */
+ if (drvdata->sec_id != -1) {
+ pr_err("NON-secure interrupt from secure %s\n", drvdata->name);
+ ret = IRQ_HANDLED;
+ goto fail;
+ }
+
+ ret = __enable_clocks(drvdata);
+ if (ret) {
+ ret = IRQ_NONE;
+ goto fail;
+ }
+
+ gfsr = GET_GFSR(drvdata->base);
+ if (gfsr) {
+ pr_err("Unexpected %s global fault !!\n", drvdata->name);
+ print_global_regs(drvdata->base, gfsr);
+ /* Write back GFSR to acknowledge/clear the fault. */
+ SET_GFSR(drvdata->base, gfsr);
+ ret = IRQ_HANDLED;
+ } else
+ ret = IRQ_NONE;
+
+ __disable_clocks(drvdata);
+fail:
+ mutex_unlock(&msm_iommu_lock);
+
+ return ret;
+}
+
+/*
+ * Threaded IRQ handler for per-context-bank translation faults.
+ * dev_id is the context platform_device; its parent carries the IOMMU
+ * drvdata.  If a domain fault handler is attached it is given first
+ * chance via report_iommu_fault(); -ENOSYS (no handler registered)
+ * triggers the verbose register dump below.  -EBUSY from the handler
+ * leaves FSR uncleared so the fault stays pending.
+ */
+irqreturn_t msm_iommu_fault_handler_v2(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ struct msm_iommu_drvdata *drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ unsigned int fsr;
+ int ret;
+
+ phys_addr_t pagetable_phys;
+ u64 faulty_iova = 0;
+
+ mutex_lock(&msm_iommu_lock);
+
+ BUG_ON(!pdev);
+
+ drvdata = dev_get_drvdata(pdev->dev.parent);
+ BUG_ON(!drvdata);
+
+ ctx_drvdata = dev_get_drvdata(&pdev->dev);
+ BUG_ON(!ctx_drvdata);
+
+ if (!drvdata->powered_on) {
+ pr_err("Unexpected IOMMU page fault!\n");
+ pr_err("name = %s\n", drvdata->name);
+ pr_err("Power is OFF. Unable to read page fault information\n");
+ /*
+ * We cannot determine which context bank caused the issue so
+ * we just return handled here to ensure IRQ handler code is
+ * happy
+ */
+ ret = IRQ_HANDLED;
+ goto fail;
+ }
+
+ ret = __enable_clocks(drvdata);
+ if (ret) {
+ ret = IRQ_NONE;
+ goto fail;
+ }
+
+ fsr = GET_FSR(drvdata->cb_base, ctx_drvdata->num);
+ if (fsr) {
+ if (!ctx_drvdata->attached_domain) {
+ pr_err("Bad domain in interrupt handler\n");
+ ret = -ENOSYS;
+ } else {
+ faulty_iova =
+ GET_FAR(drvdata->cb_base, ctx_drvdata->num);
+ ret = report_iommu_fault(ctx_drvdata->attached_domain,
+ &ctx_drvdata->pdev->dev,
+ faulty_iova, 0);
+
+ }
+ /* No handler consumed the fault: dump state for debugging. */
+ if (ret == -ENOSYS) {
+ pr_err("Unexpected IOMMU page fault!\n");
+ pr_err("name = %s\n", drvdata->name);
+ pr_err("context = %s (%d)\n", ctx_drvdata->name,
+ ctx_drvdata->num);
+ pr_err("Interesting registers:\n");
+ __print_ctx_regs(drvdata,
+ ctx_drvdata->num, fsr);
+
+ /* Cross-check: walk the page table in DDR by software. */
+ if (ctx_drvdata->attached_domain) {
+ pagetable_phys = msm_iommu_iova_to_phys_soft(
+ ctx_drvdata->attached_domain,
+ faulty_iova);
+ pr_err("Page table in DDR shows PA = %x\n",
+ (unsigned int) pagetable_phys);
+ }
+ }
+
+ /* -EBUSY means the handler wants the fault left pending. */
+ if (ret != -EBUSY)
+ SET_FSR(drvdata->cb_base, ctx_drvdata->num, fsr);
+ ret = IRQ_HANDLED;
+ } else
+ ret = IRQ_NONE;
+
+ __disable_clocks(drvdata);
+fail:
+ mutex_unlock(&msm_iommu_lock);
+
+ return ret;
+}
+
+/*
+ * Return the physical address of the domain's first-level page table.
+ * Currently unused by msm_iommu_ops (the .get_pt_base_addr hook below
+ * is commented out) but kept as a helper.
+ */
+static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
+{
+ struct msm_iommu_priv *priv = to_msm_priv(domain);
+
+ return __pa(priv->pt.fl_table);
+}
+
+/*
+ * Populate one dump_regs_tbl[] slot: register offset, stringified name,
+ * whether a valid value must always be present (mbp), and how the
+ * register is addressed (context, global, or numbered-global).
+ */
+#define DUMP_REG_INIT(dump_reg, cb_reg, mbp, drt) \
+ do { \
+ dump_regs_tbl[dump_reg].reg_offset = cb_reg; \
+ dump_regs_tbl[dump_reg].name = #cb_reg; \
+ dump_regs_tbl[dump_reg].must_be_present = mbp; \
+ dump_regs_tbl[dump_reg].dump_reg_type = drt; \
+ } while (0)
+
+/*
+ * One-time initialisation of the fault-dump register table.  64-bit
+ * registers (FAR, PAR, TTBRn) are split into low/high 32-bit halves;
+ * the "+ 4" entries read the upper word and are optional (mbp == 0
+ * for the TTBR high words on 32-bit-only configurations).
+ */
+static void msm_iommu_build_dump_regs_table(void)
+{
+ DUMP_REG_INIT(DUMP_REG_FAR0, CB_FAR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_FAR1, CB_FAR + 4, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_PAR0, CB_PAR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_PAR1, CB_PAR + 4, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_FSR, CB_FSR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_FSYNR0, CB_FSYNR0, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_FSYNR1, CB_FSYNR1, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_TTBR0_0, CB_TTBR0, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_TTBR0_1, CB_TTBR0 + 4, 0, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_TTBR1_0, CB_TTBR1, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_TTBR1_1, CB_TTBR1 + 4, 0, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_SCTLR, CB_SCTLR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_ACTLR, CB_ACTLR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_PRRR, CB_PRRR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_NMRR, CB_NMRR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_CBAR_N, CBAR, 1, DRT_GLOBAL_REG_N);
+ DUMP_REG_INIT(DUMP_REG_CBFRSYNRA_N, CBFRSYNRA, 1, DRT_GLOBAL_REG_N);
+}
+
+/*
+ * Helpers backing the QCOM coherent-HTW-disable domain attribute.
+ * 'redirect' (L2-cacheable page-table walks) is only meaningful when
+ * CONFIG_IOMMU_PGTABLES_L2 is set; otherwise the attribute is a no-op.
+ * The attribute carries "disable" polarity, hence the negations.
+ */
+#ifdef CONFIG_IOMMU_PGTABLES_L2
+static void __do_set_redirect(struct iommu_domain *domain, void *data)
+{
+ struct msm_iommu_priv *priv;
+ int *no_redirect = data;
+
+ mutex_lock(&msm_iommu_lock);
+ priv = to_msm_priv(domain);
+ priv->pt.redirect = !(*no_redirect);
+ mutex_unlock(&msm_iommu_lock);
+}
+
+static void __do_get_redirect(struct iommu_domain *domain, void *data)
+{
+ struct msm_iommu_priv *priv;
+ int *no_redirect = data;
+
+ mutex_lock(&msm_iommu_lock);
+ priv = to_msm_priv(domain);
+ *no_redirect = !priv->pt.redirect;
+ mutex_unlock(&msm_iommu_lock);
+}
+
+#else
+
+/* Stubs: without L2 page-table support there is nothing to toggle. */
+static void __do_set_redirect(struct iommu_domain *domain, void *data)
+{
+}
+
+static void __do_get_redirect(struct iommu_domain *domain, void *data)
+{
+}
+#endif
+
+/*
+ * iommu_ops .domain_set_attr hook.  Only the QCOM coherent-HTW-disable
+ * attribute is supported; anything else is -EINVAL.
+ */
+static int msm_iommu_domain_set_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ switch (attr) {
+ case DOMAIN_ATTR_QCOM_COHERENT_HTW_DISABLE:
+ __do_set_redirect(domain, data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * iommu_ops .domain_get_attr hook — mirror of set_attr above; reads
+ * the coherent-HTW-disable state into *data.
+ */
+static int msm_iommu_domain_get_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ switch (attr) {
+ case DOMAIN_ATTR_QCOM_COHERENT_HTW_DISABLE:
+ __do_get_redirect(domain, data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * IOMMU API dispatch table for this driver.  unmap_range and
+ * get_pt_base_addr are deliberately left commented out: they are not
+ * part of the upstream struct iommu_ops.
+ */
+static struct iommu_ops msm_iommu_ops = {
+ .domain_alloc = msm_iommu_domain_alloc,
+ .domain_free = msm_iommu_domain_free,
+ .attach_dev = msm_iommu_attach_dev,
+ .detach_dev = msm_iommu_detach_dev,
+ .map = msm_iommu_map,
+ .unmap = msm_iommu_unmap,
+ .map_sg = msm_iommu_map_sg,
+/* .unmap_range = msm_iommu_unmap_range,*/
+ .iova_to_phys = msm_iommu_iova_to_phys,
+/* .get_pt_base_addr = msm_iommu_get_pt_base_addr,*/
+ .pgsize_bitmap = MSM_IOMMU_PGSIZES,
+ .domain_set_attr = msm_iommu_domain_set_attr,
+ .domain_get_attr = msm_iommu_domain_get_attr,
+};
+
+/*
+ * Subsystem init: set up page-table bookkeeping, hook this driver's
+ * ops onto the platform bus, then build the fault-dump table.
+ */
+static int __init msm_iommu_init(void)
+{
+ int ret;
+
+ msm_iommu_pagetable_init();
+
+ ret = bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
+ if (ret)
+ return ret;
+
+ msm_iommu_build_dump_regs_table();
+
+ return 0;
+}
+
+subsys_initcall(msm_iommu_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM SMMU v2 Driver");
diff --git a/drivers/iommu/qcom/msm_iommu.c b/drivers/iommu/qcom/msm_iommu.c
new file mode 100644
index 0000000000000..08d8d10da2c45
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu.c
@@ -0,0 +1,206 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/export.h>
+#include <linux/iommu.h>
+#include <linux/qcom_iommu.h>
+
+/* Registry of all probed IOMMU instances, guarded by iommu_list_lock. */
+static DEFINE_MUTEX(iommu_list_lock);
+static LIST_HEAD(iommu_list);
+
+/* Read a CP15 coprocessor register into 'reg' (ARM32 only). */
+#define MRC(reg, processor, op1, crn, crm, op2) \
+__asm__ __volatile__ ( \
+" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
+: "=r" (reg))
+
+#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
+#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)
+
+/*
+ * NOTE(review): with LPAE enabled, MAIR0/MAIR1 occupy the same CP15
+ * encodings as PRRR/NMRR (c10, c2, 0/1), so these are intentionally
+ * identical to the two macros above.
+ */
+#define RCP15_MAIR0(reg) MRC(reg, p15, 0, c10, c2, 0)
+#define RCP15_MAIR1(reg) MRC(reg, p15, 0, c10, c2, 1)
+
+/* These values come from proc-v7-2level.S */
+#define PRRR_VALUE 0xff0a81a8
+#define NMRR_VALUE 0x40e040e0
+
+/* These values come from proc-v7-3level.S */
+#define MAIR0_VALUE 0xeeaa4400
+#define MAIR1_VALUE 0xff000004
+
+/* Clock/power callbacks shared with the secure and pmon code. */
+static struct iommu_access_ops *iommu_access_ops;
+
+struct bus_type msm_iommu_sec_bus_type = {
+ .name = "msm_iommu_sec_bus",
+};
+
+/* Install the access-ops callbacks used by the rest of the driver. */
+void msm_set_iommu_access_ops(struct iommu_access_ops *ops)
+{
+ iommu_access_ops = ops;
+}
+
+/*
+ * Return the installed access-ops; callers must have arranged for
+ * msm_set_iommu_access_ops() to run first (BUG otherwise).
+ * Fix: declare as (void) — an empty parameter list is an old-style
+ * non-prototype declaration in C and trips -Wstrict-prototypes.
+ */
+struct iommu_access_ops *msm_get_iommu_access_ops(void)
+{
+ BUG_ON(iommu_access_ops == NULL);
+ return iommu_access_ops;
+}
+EXPORT_SYMBOL(msm_get_iommu_access_ops);
+
+/* Register a probed IOMMU instance on the global list. */
+void msm_iommu_add_drv(struct msm_iommu_drvdata *drv)
+{
+ mutex_lock(&iommu_list_lock);
+ list_add(&drv->list, &iommu_list);
+ mutex_unlock(&iommu_list_lock);
+}
+
+/* Remove an IOMMU instance from the global list (on driver remove). */
+void msm_iommu_remove_drv(struct msm_iommu_drvdata *drv)
+{
+ mutex_lock(&iommu_list_lock);
+ list_del(&drv->list);
+ mutex_unlock(&iommu_list_lock);
+}
+
+/*
+ * device_find_child() match callback: true when the child context's
+ * label equals the requested name (passed via 'data').
+ */
+static int find_iommu_ctx(struct device *dev, void *data)
+{
+ struct msm_iommu_ctx_drvdata *c;
+
+ c = dev_get_drvdata(dev);
+ if (!c || !c->name)
+ return 0;
+
+ return !strcmp(data, c->name);
+}
+
+/*
+ * Look up a context device by label under one IOMMU.  On success the
+ * returned device carries a reference taken by device_find_child().
+ */
+static struct device *find_context(struct device *dev, const char *name)
+{
+ return device_find_child(dev, (void *)name, find_iommu_ctx);
+}
+
+/*
+ * Look up a context device by label across all registered IOMMUs.
+ * Returns the device (reference dropped, matching historical behavior)
+ * or ERR_PTR(-EPROBE_DEFER) if not found / not yet bound.
+ *
+ * Fix: the original called put_device(dev) and *then* dereferenced the
+ * device via dev_get_drvdata(dev) — a use-after-put.  Inspect the
+ * device while the reference from device_find_child() is still held,
+ * and only drop it afterwards.
+ */
+struct device *msm_iommu_get_ctx(const char *ctx_name)
+{
+ struct msm_iommu_drvdata *drv;
+ struct device *dev = NULL;
+ bool found;
+
+ mutex_lock(&iommu_list_lock);
+ list_for_each_entry(drv, &iommu_list, list) {
+ dev = find_context(drv->dev, ctx_name);
+ if (dev)
+ break;
+ }
+ mutex_unlock(&iommu_list_lock);
+
+ /* Check drvdata before releasing the reference. */
+ found = dev && dev_get_drvdata(dev);
+ put_device(dev);
+
+ if (!found) {
+ pr_debug("Could not find context <%s>\n", ctx_name);
+ dev = ERR_PTR(-EPROBE_DEFER);
+ }
+
+ return dev;
+}
+EXPORT_SYMBOL(msm_iommu_get_ctx);
+
+/*
+ * Memory-attribute register accessors.  Which pair exists (MAIR0/1 or
+ * PRRR/NMRR) and whether the live CP15 registers or the hard-coded
+ * proc-v7 values are used depends on CONFIG_ARM_LPAE x CONFIG_IOMMU_LPAE.
+ * Fix: dropped the duplicate RCP15_PRRR/RCP15_NMRR #defines that
+ * shadowed the identical definitions at the top of this file.
+ */
+#ifdef CONFIG_ARM
+#ifdef CONFIG_IOMMU_LPAE
+#ifdef CONFIG_ARM_LPAE
+/*
+ * If CONFIG_ARM_LPAE AND CONFIG_IOMMU_LPAE are enabled we can use the MAIR
+ * register directly
+ */
+u32 msm_iommu_get_mair0(void)
+{
+ unsigned int mair0;
+
+ RCP15_MAIR0(mair0);
+ return mair0;
+}
+
+u32 msm_iommu_get_mair1(void)
+{
+ unsigned int mair1;
+
+ RCP15_MAIR1(mair1);
+ return mair1;
+}
+#else
+/*
+ * However, If CONFIG_ARM_LPAE is not enabled but CONFIG_IOMMU_LPAE is enabled
+ * we'll just use the hard coded values directly..
+ */
+u32 msm_iommu_get_mair0(void)
+{
+ return MAIR0_VALUE;
+}
+
+u32 msm_iommu_get_mair1(void)
+{
+ return MAIR1_VALUE;
+}
+#endif
+
+#else
+#ifdef CONFIG_ARM_LPAE
+/*
+ * If CONFIG_ARM_LPAE is enabled AND CONFIG_IOMMU_LPAE is disabled
+ * we must use the hardcoded values.
+ */
+u32 msm_iommu_get_prrr(void)
+{
+ return PRRR_VALUE;
+}
+
+u32 msm_iommu_get_nmrr(void)
+{
+ return NMRR_VALUE;
+}
+#else
+/*
+ * If both CONFIG_ARM_LPAE AND CONFIG_IOMMU_LPAE are disabled
+ * we can use the registers directly (RCP15_PRRR/RCP15_NMRR are
+ * defined once near the top of this file).
+ */
+u32 msm_iommu_get_prrr(void)
+{
+ u32 prrr;
+
+ RCP15_PRRR(prrr);
+ return prrr;
+}
+
+u32 msm_iommu_get_nmrr(void)
+{
+ u32 nmrr;
+
+ RCP15_NMRR(nmrr);
+ return nmrr;
+}
+#endif
+#endif
+#endif
+#ifdef CONFIG_ARM64
+/* ARM64 has no CP15; report the fixed proc-v7-2level values. */
+u32 msm_iommu_get_prrr(void)
+{
+ return PRRR_VALUE;
+}
+
+u32 msm_iommu_get_nmrr(void)
+{
+ return NMRR_VALUE;
+}
+#endif
diff --git a/drivers/iommu/qcom/msm_iommu_dev-v1.c b/drivers/iommu/qcom/msm_iommu_dev-v1.c
new file mode 100644
index 0000000000000..60ebc6eeafb6e
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_dev-v1.c
@@ -0,0 +1,708 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/iommu.h>
+#include <linux/interrupt.h>
+#ifdef CONFIG_MSM_BUS_SCALING
+#include <linux/msm-bus.h>
+#endif
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+
+#include "msm_iommu_hw-v1.h"
+#include <linux/qcom_iommu.h>
+#include "msm_iommu_perfmon.h"
+#include <linux/qcom_scm.h>
+
+/* Forward declaration: needed by of_platform_populate() in probe. */
+static const struct of_device_id msm_iommu_ctx_match_table[];
+
+/* DT property names for the BFB tuning tables; the LPAE build uses a
+ * distinct pair so a single DT can carry both variants. */
+#ifdef CONFIG_IOMMU_LPAE
+static const char *BFB_REG_NODE_NAME = "qcom,iommu-lpae-bfb-regs";
+static const char *BFB_DATA_NODE_NAME = "qcom,iommu-lpae-bfb-data";
+#else
+static const char *BFB_REG_NODE_NAME = "qcom,iommu-bfb-regs";
+static const char *BFB_DATA_NODE_NAME = "qcom,iommu-bfb-data";
+#endif
+
+/*
+ * Parse the optional BFB (bus-fabric-buffer) tuning tables from DT.
+ * Both properties must be present together and of equal length; the
+ * lengths returned by of_get_property() are in bytes.  Returns 0 when
+ * the properties are simply absent.  Allocations are devm-managed, so
+ * nothing is freed on the error paths.
+ */
+static int msm_iommu_parse_bfb_settings(struct platform_device *pdev,
+ struct msm_iommu_drvdata *drvdata)
+{
+ struct msm_iommu_bfb_settings *bfb_settings;
+ u32 nreg, nval;
+ int ret;
+
+ /*
+ * It is not valid for a device to have the BFB_REG_NODE_NAME
+ * property but not the BFB_DATA_NODE_NAME property, and vice versa.
+ */
+ if (!of_get_property(pdev->dev.of_node, BFB_REG_NODE_NAME, &nreg)) {
+ if (of_get_property(pdev->dev.of_node, BFB_DATA_NODE_NAME,
+ &nval))
+ return -EINVAL;
+ return 0;
+ }
+
+ if (!of_get_property(pdev->dev.of_node, BFB_DATA_NODE_NAME, &nval))
+ return -EINVAL;
+
+ /* Byte-length bounds against the fixed-size arrays in the struct. */
+ if (nreg >= sizeof(bfb_settings->regs))
+ return -EINVAL;
+
+ if (nval >= sizeof(bfb_settings->data))
+ return -EINVAL;
+
+ if (nval != nreg)
+ return -EINVAL;
+
+ bfb_settings = devm_kzalloc(&pdev->dev, sizeof(*bfb_settings),
+ GFP_KERNEL);
+ if (!bfb_settings)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(pdev->dev.of_node,
+ BFB_REG_NODE_NAME,
+ bfb_settings->regs,
+ nreg / sizeof(*bfb_settings->regs));
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_array(pdev->dev.of_node,
+ BFB_DATA_NODE_NAME,
+ bfb_settings->data,
+ nval / sizeof(*bfb_settings->data));
+ if (ret)
+ return ret;
+
+ bfb_settings->length = nreg / sizeof(*bfb_settings->regs);
+
+ drvdata->bfb_settings = bfb_settings;
+
+ return 0;
+}
+
+/*
+ * Register a bus-scaling client if the DT node requests one.  Absence
+ * of the "qcom,msm-bus,name" property is not an error.
+ *
+ * Fix: the original tested IS_ERR(&drvdata->bus_client) — the address
+ * of a struct member is never an ERR_PTR, so the check was always
+ * false and registration failures were silently ignored.  The msm-bus
+ * API reports failure with a zero handle, so test for that instead.
+ */
+static int __get_bus_vote_client(struct platform_device *pdev,
+ struct msm_iommu_drvdata *drvdata)
+{
+ int ret = 0;
+#ifdef CONFIG_MSM_BUS_SCALING
+ struct msm_bus_scale_pdata *bs_table;
+ const char *dummy;
+
+ /* Check whether bus scaling has been specified for this node */
+ ret = of_property_read_string(pdev->dev.of_node, "qcom,msm-bus,name",
+ &dummy);
+ if (ret)
+ return 0;
+
+ bs_table = msm_bus_cl_get_pdata(pdev);
+ if (bs_table) {
+ drvdata->bus_client = msm_bus_scale_register_client(bs_table);
+ if (!drvdata->bus_client) {
+ pr_err("%s(): Bus client register failed.\n", __func__);
+ ret = -EINVAL;
+ }
+ }
+#endif
+ return ret;
+}
+
+/* Release the bus-scaling client (no-op without CONFIG_MSM_BUS_SCALING). */
+static void __put_bus_vote_client(struct msm_iommu_drvdata *drvdata)
+{
+#ifdef CONFIG_MSM_BUS_SCALING
+ msm_bus_scale_unregister_client(drvdata->bus_client);
+ drvdata->bus_client = 0;
+#endif
+}
+
+/*
+ * CONFIG_IOMMU_NON_SECURE allows us to override the secure
+ * designation of SMMUs in device tree. With this config enabled
+ * all SMMUs will be programmed by this driver.
+ */
+#ifdef CONFIG_IOMMU_NON_SECURE
+/* Override: leave sec_id at -1 so the SMMU is treated as non-secure. */
+static inline void get_secure_id(struct device_node *node,
+ struct msm_iommu_drvdata *drvdata)
+{
+}
+
+static inline void get_secure_ctx(struct device_node *node,
+ struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+ ctx_drvdata->secure_context = 0;
+}
+#else
+/* Only honor the DT secure-id when SCM calls are actually available. */
+static void get_secure_id(struct device_node *node,
+ struct msm_iommu_drvdata *drvdata)
+{
+ if (msm_iommu_get_scm_call_avail())
+ of_property_read_u32(node, "qcom,iommu-secure-id",
+ &drvdata->sec_id);
+}
+
+static void get_secure_ctx(struct device_node *node,
+ struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+ u32 secure_ctx = 0;
+
+ if (msm_iommu_get_scm_call_avail())
+ secure_ctx = of_property_read_bool(node, "qcom,secure-context");
+
+ ctx_drvdata->secure_context = secure_ctx;
+}
+#endif
+
+/*
+ * Parse per-IOMMU DT properties: bus-scaling client, BFB tables,
+ * context-bank count (one per available child node), label, secure id
+ * and halt capability; then publish the instance on the global list.
+ * On failure the bus client is released; other allocations are devm.
+ */
+static int msm_iommu_parse_dt(struct platform_device *pdev,
+ struct msm_iommu_drvdata *drvdata)
+{
+ struct device_node *child;
+ int ret;
+
+ drvdata->dev = &pdev->dev;
+
+ ret = __get_bus_vote_client(pdev, drvdata);
+ if (ret)
+ goto fail;
+
+ ret = msm_iommu_parse_bfb_settings(pdev, drvdata);
+ if (ret)
+ goto fail;
+
+ /* Each enabled child node is one context bank. */
+ for_each_available_child_of_node(pdev->dev.of_node, child)
+ drvdata->ncb++;
+
+ ret = of_property_read_string(pdev->dev.of_node, "label",
+ &drvdata->name);
+ if (ret)
+ goto fail;
+
+ /* -1 means non-secure; get_secure_id() may overwrite from DT. */
+ drvdata->sec_id = -1;
+ get_secure_id(pdev->dev.of_node, drvdata);
+
+ drvdata->halt_enabled = of_property_read_bool(pdev->dev.of_node,
+ "qcom,iommu-enable-halt");
+
+ msm_iommu_add_drv(drvdata);
+
+ return 0;
+
+fail:
+ __put_bus_vote_client(drvdata);
+ return ret;
+}
+
+/*
+ * Parse performance-monitor DT properties.  Without an event IRQ the
+ * pmon is unusable, so evt_irq is set to -1 and the irq value is
+ * returned (NOTE(review): if platform_get_irq() ever returned 0 this
+ * would read as success with evt_irq == -1 — confirm intent).
+ */
+static int msm_iommu_pmon_parse_dt(struct platform_device *pdev,
+ struct iommu_pmon *pmon_info)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+ unsigned int cls_prop_size;
+ int ret, irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0 && irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (irq <= 0) {
+ pmon_info->iommu.evt_irq = -1;
+ return irq;
+ }
+
+ pmon_info->iommu.evt_irq = irq;
+
+ ret = of_property_read_u32(np, "qcom,iommu-pmu-ngroups",
+ &pmon_info->num_groups);
+ if (ret) {
+ dev_err(dev, "Error reading qcom,iommu-pmu-ngroups\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "qcom,iommu-pmu-ncounters",
+ &pmon_info->num_counters);
+ if (ret) {
+ dev_err(dev, "Error reading qcom,iommu-pmu-ncounters\n");
+ return ret;
+ }
+
+ /* cls_prop_size is the property length in bytes. */
+ if (!of_get_property(np, "qcom,iommu-pmu-event-classes",
+ &cls_prop_size)) {
+ dev_err(dev, "Error reading qcom,iommu-pmu-event-classes\n");
+ return -EINVAL;
+ }
+
+ pmon_info->event_cls_supported = devm_kzalloc(dev, cls_prop_size,
+ GFP_KERNEL);
+ if (!pmon_info->event_cls_supported) {
+ dev_err(dev, "Unable to get memory for event class array\n");
+ return -ENOMEM;
+ }
+
+ pmon_info->nevent_cls_supported = cls_prop_size / sizeof(u32);
+
+ ret = of_property_read_u32_array(np, "qcom,iommu-pmu-event-classes",
+ pmon_info->event_cls_supported,
+ pmon_info->nevent_cls_supported);
+ if (ret) {
+ dev_err(dev, "Error reading qcom,iommu-pmu-event-classes\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/* SCM service id and secure page-table parameters; MAKE_VERSION packs
+ * major/minor/patch as 10/10/12 bits for feature-version comparison. */
+#define SCM_SVC_MP 0xc
+#define MAXIMUM_VIRT_SIZE (300 * SZ_1M)
+#define MAKE_VERSION(major, minor, patch) \
+ (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
+
+/*
+ * One-time allocation and hand-off of the secure page-table pool to
+ * the secure world via SCM.  Guarded by the 'allocated' static so it
+ * runs once across all probed IOMMUs; on success the buffer is owned
+ * by the secure world and intentionally never freed.
+ *
+ * Fix: the psize[1] error path printed 'ret', which is guaranteed to
+ * be 0 at that point — print the actual error value psize[1].
+ */
+static int msm_iommu_sec_ptbl_init(struct device *dev)
+{
+ int psize[2] = {0, 0};
+ unsigned int spare = 0;
+ int ret;
+ int version;
+ void *cpu_addr;
+ dma_addr_t paddr;
+ DEFINE_DMA_ATTRS(attrs);
+ static bool allocated = false;
+
+ if (allocated)
+ return 0;
+
+ version = qcom_scm_get_feat_version(SCM_SVC_MP);
+
+ if (version >= MAKE_VERSION(1, 1, 1)) {
+ ret = qcom_scm_iommu_set_cp_pool_size(MAXIMUM_VIRT_SIZE, 0);
+ if (ret) {
+ dev_err(dev, "failed setting max virtual size (%d)\n",
+ ret);
+ return ret;
+ }
+ }
+
+ ret = qcom_scm_iommu_secure_ptbl_size(spare, psize);
+ if (ret) {
+ dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
+ ret);
+ return ret;
+ }
+
+ /* psize[1] carries the firmware-side error code. */
+ if (psize[1]) {
+ dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
+ psize[1]);
+ return psize[1];
+ }
+
+ dev_info(dev, "iommu sec: pgtable size: %d\n", psize[0]);
+
+ /* The table is never touched by the kernel: skip the mapping. */
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+
+ cpu_addr = dma_alloc_attrs(dev, psize[0], &paddr, GFP_KERNEL, &attrs);
+ if (!cpu_addr) {
+ dev_err(dev, "failed to allocate %d bytes for pgtable\n",
+ psize[0]);
+ return -ENOMEM;
+ }
+
+ ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize[0], spare);
+ if (ret) {
+ dev_err(dev, "failed to init iommu pgtable (%d)\n", ret);
+ goto free_mem;
+ }
+
+ allocated = true;
+
+ return 0;
+
+free_mem:
+ dma_free_attrs(dev, psize[0], cpu_addr, paddr, &attrs);
+ return ret;
+}
+
+/*
+ * Probe one SMMU instance: map registers, acquire clocks, parse DT,
+ * optionally initialise the secure page-table pool and performance
+ * monitor, request the two global fault IRQs (both must be threaded:
+ * the handler takes a mutex), and finally create the child context
+ * devices from DT.
+ */
+static int msm_iommu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+ struct iommu_pmon *pmon_info;
+ struct msm_iommu_drvdata *drvdata;
+ struct resource *res;
+ int ret;
+ int global_cfg_irq, global_client_irq;
+ u32 temp;
+ unsigned long rate;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->dev = dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iommu_base");
+ drvdata->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(drvdata->base))
+ return PTR_ERR(drvdata->base);
+
+ drvdata->glb_base = drvdata->base;
+ drvdata->phys_base = res->start;
+
+ /* Optional region; NULL when absent or mapping fails. */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "smmu_local_base");
+ drvdata->smmu_local_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(drvdata->smmu_local_base) &&
+ PTR_ERR(drvdata->smmu_local_base) != -EPROBE_DEFER)
+ drvdata->smmu_local_base = NULL;
+
+ if (of_device_is_compatible(np, "qcom,msm-mmu-500"))
+ drvdata->model = MMU_500;
+
+ drvdata->iface = devm_clk_get(dev, "iface_clk");
+ if (IS_ERR(drvdata->iface))
+ return PTR_ERR(drvdata->iface);
+
+ drvdata->core = devm_clk_get(dev, "core_clk");
+ if (IS_ERR(drvdata->core))
+ return PTR_ERR(drvdata->core);
+
+ /* Context banks default to base + 0x8000 unless DT overrides. */
+ if (!of_property_read_u32(np, "qcom,cb-base-offset", &temp))
+ drvdata->cb_base = drvdata->base + temp;
+ else
+ drvdata->cb_base = drvdata->base + 0x8000;
+
+ /* Ensure the core clock has a usable (non-zero) rate. */
+ rate = clk_get_rate(drvdata->core);
+ if (!rate) {
+ rate = clk_round_rate(drvdata->core, 1000);
+ clk_set_rate(drvdata->core, rate);
+ }
+
+ dev_info(&pdev->dev, "iface: %lu, core: %lu\n",
+ clk_get_rate(drvdata->iface), clk_get_rate(drvdata->core));
+
+ ret = msm_iommu_parse_dt(pdev, drvdata);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "device %s (model: %d) mapped at %p, with %d ctx banks\n",
+ drvdata->name, drvdata->model, drvdata->base, drvdata->ncb);
+
+ if (drvdata->sec_id != -1) {
+ ret = msm_iommu_sec_ptbl_init(dev);
+ if (ret)
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, drvdata);
+
+ /* Performance monitor is best-effort: failures only log. */
+ pmon_info = msm_iommu_pm_alloc(dev);
+ if (pmon_info) {
+ ret = msm_iommu_pmon_parse_dt(pdev, pmon_info);
+ if (ret) {
+ msm_iommu_pm_free(dev);
+ dev_info(dev, "%s: pmon not available\n",
+ drvdata->name);
+ } else {
+ pmon_info->iommu.base = drvdata->base;
+ pmon_info->iommu.ops = msm_get_iommu_access_ops();
+ pmon_info->iommu.hw_ops = iommu_pm_get_hw_ops_v1();
+ pmon_info->iommu.iommu_name = drvdata->name;
+ ret = msm_iommu_pm_iommu_register(pmon_info);
+ if (ret) {
+ dev_err(dev, "%s iommu register fail\n",
+ drvdata->name);
+ msm_iommu_pm_free(dev);
+ } else {
+ dev_dbg(dev, "%s iommu registered for pmon\n",
+ pmon_info->iommu.iommu_name);
+ }
+ }
+ }
+
+ global_cfg_irq = platform_get_irq_byname(pdev, "global_cfg_NS_irq");
+ if (global_cfg_irq < 0 && global_cfg_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (global_cfg_irq > 0) {
+ ret = devm_request_threaded_irq(dev, global_cfg_irq,
+ NULL,
+ msm_iommu_global_fault_handler,
+ IRQF_ONESHOT | IRQF_SHARED |
+ IRQF_TRIGGER_RISING,
+ "msm_iommu_global_cfg_irq",
+ pdev);
+ if (ret < 0)
+ dev_err(dev, "Request Global CFG IRQ %d failed with ret=%d\n",
+ global_cfg_irq, ret);
+ }
+
+ global_client_irq =
+ platform_get_irq_byname(pdev, "global_client_NS_irq");
+ if (global_client_irq < 0 && global_client_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (global_client_irq > 0) {
+ ret = devm_request_threaded_irq(dev, global_client_irq,
+ NULL,
+ msm_iommu_global_fault_handler,
+ IRQF_ONESHOT | IRQF_SHARED |
+ IRQF_TRIGGER_RISING,
+ "msm_iommu_global_client_irq",
+ pdev);
+ if (ret < 0)
+ dev_err(dev, "Request Global Client IRQ %d failed with ret=%d\n",
+ global_client_irq, ret);
+ }
+
+ ret = of_platform_populate(np, msm_iommu_ctx_match_table, NULL, dev);
+ if (ret)
+ dev_err(dev, "Failed to create iommu context device\n");
+
+ return ret;
+}
+
+/*
+ * Tear down one SMMU instance: unregister the perf monitor, release
+ * the bus client and take the instance off the global list.  Register
+ * mappings and clocks are devm-managed.
+ */
+static int msm_iommu_remove(struct platform_device *pdev)
+{
+ struct msm_iommu_drvdata *drv;
+
+ msm_iommu_pm_iommu_unregister(&pdev->dev);
+ msm_iommu_pm_free(&pdev->dev);
+
+ drv = platform_get_drvdata(pdev);
+ if (drv) {
+ __put_bus_vote_client(drv);
+ msm_iommu_remove_drv(drv);
+ platform_set_drvdata(pdev, NULL);
+ }
+
+ return 0;
+}
+
+/*
+ * Parse one context-bank child node: request the (threaded) fault IRQ
+ * appropriate to its secure/non-secure designation, derive the bank
+ * number from its MMIO offset relative to the parent, and read the
+ * stream-ID list and optional SID mask.  Property lengths from
+ * of_get_property() are in bytes.
+ */
+static int msm_iommu_ctx_parse_dt(struct platform_device *pdev,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+ struct resource *r, rp;
+ int irq = 0, ret = 0;
+ struct msm_iommu_drvdata *drvdata;
+ u32 nsid;
+ u32 n_sid_mask;
+ unsigned long cb_offset;
+
+ drvdata = dev_get_drvdata(pdev->dev.parent);
+
+ get_secure_ctx(pdev->dev.of_node, drvdata, ctx_drvdata);
+
+ if (ctx_drvdata->secure_context) {
+ /* Secure banks use IRQ index 1. */
+ irq = platform_get_irq(pdev, 1);
+ if (irq < 0 && irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (irq > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ msm_iommu_secure_fault_handler_v2,
+ IRQF_ONESHOT | IRQF_SHARED,
+ "msm_iommu_secure_irq", pdev);
+ if (ret) {
+ pr_err("Request IRQ %d failed with ret=%d\n",
+ irq, ret);
+ return ret;
+ }
+ }
+ } else {
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0 && irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (irq > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ msm_iommu_fault_handler_v2,
+ IRQF_ONESHOT | IRQF_SHARED,
+ "msm_iommu_nonsecure_irq", pdev);
+ if (ret) {
+ pr_err("Request IRQ %d failed with ret=%d\n",
+ irq, ret);
+ goto out;
+ }
+ }
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = of_address_to_resource(pdev->dev.parent->of_node, 0, &rp);
+ if (ret)
+ goto out;
+
+ /* Calculate the context bank number using the base addresses.
+ * Typically CB0 base address is 0x8000 pages away if the number
+ * of CBs are <=8. So, assume the offset 0x8000 until mentioned
+ * explicitely.
+ */
+ cb_offset = drvdata->cb_base - drvdata->base;
+ ctx_drvdata->num = (r->start - rp.start - cb_offset) >> CTX_SHIFT;
+
+ if (of_property_read_string(pdev->dev.of_node, "label",
+ &ctx_drvdata->name))
+ ctx_drvdata->name = dev_name(&pdev->dev);
+
+ if (!of_get_property(pdev->dev.of_node, "qcom,iommu-ctx-sids", &nsid)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* nsid is a byte count; bound it against the sids[] array. */
+ if (nsid >= sizeof(ctx_drvdata->sids)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (of_property_read_u32_array(pdev->dev.of_node, "qcom,iommu-ctx-sids",
+ ctx_drvdata->sids,
+ nsid / sizeof(*ctx_drvdata->sids))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ctx_drvdata->nsid = nsid;
+ ctx_drvdata->asid = -1;
+
+ /* The SID mask is optional; default to all-zero masks. */
+ if (!of_get_property(pdev->dev.of_node, "qcom,iommu-sid-mask",
+ &n_sid_mask)) {
+ memset(ctx_drvdata->sid_mask, 0, MAX_NUM_SMR);
+ goto out;
+ }
+
+ /* When present it must pair one mask per SID. */
+ if (n_sid_mask != nsid) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (of_property_read_u32_array(pdev->dev.of_node, "qcom,iommu-sid-mask",
+ ctx_drvdata->sid_mask,
+ n_sid_mask / sizeof(*ctx_drvdata->sid_mask))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ctx_drvdata->n_sid_mask = n_sid_mask;
+
+out:
+ return ret;
+}
+
+/*
+ * Probe one context-bank child device.  Requires a parent (the IOMMU
+ * device created it via of_platform_populate); allocates the context
+ * drvdata, parses DT and publishes it via drvdata.
+ */
+static int msm_iommu_ctx_probe(struct platform_device *pdev)
+{
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ int ret;
+
+ if (!pdev->dev.parent)
+ return -EINVAL;
+
+ ctx_drvdata = devm_kzalloc(&pdev->dev, sizeof(*ctx_drvdata),
+ GFP_KERNEL);
+ if (!ctx_drvdata)
+ return -ENOMEM;
+
+ ctx_drvdata->pdev = pdev;
+ INIT_LIST_HEAD(&ctx_drvdata->attached_elm);
+
+ ret = msm_iommu_ctx_parse_dt(pdev, ctx_drvdata);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, ctx_drvdata);
+
+ dev_info(&pdev->dev, "context %s using bank %d\n",
+ ctx_drvdata->name, ctx_drvdata->num);
+
+ return 0;
+}
+
+/* Context-bank removal: drop drvdata; everything else is devm-managed. */
+static int msm_iommu_ctx_remove(struct platform_device *pdev)
+{
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+/* OF match tables and platform drivers for the IOMMU instances and
+ * their context-bank children. */
+static const struct of_device_id msm_iommu_match_table[] = {
+ { .compatible = "qcom,msm-smmu-v1", },
+ { .compatible = "qcom,msm-smmu-v2", },
+ {}
+};
+
+static struct platform_driver msm_iommu_driver = {
+ .driver = {
+ .name = "msm_iommu",
+ .of_match_table = msm_iommu_match_table,
+ },
+ .probe = msm_iommu_probe,
+ .remove = msm_iommu_remove,
+};
+
+static const struct of_device_id msm_iommu_ctx_match_table[] = {
+ { .compatible = "qcom,msm-smmu-v1-ctx", },
+ { .compatible = "qcom,msm-smmu-v2-ctx", },
+ {}
+};
+
+static struct platform_driver msm_iommu_ctx_driver = {
+ .driver = {
+ .name = "msm_iommu_ctx",
+ .of_match_table = msm_iommu_ctx_match_table,
+ },
+ .probe = msm_iommu_ctx_probe,
+ .remove = msm_iommu_ctx_remove,
+};
+
+/*
+ * Module init: probe SCM availability, install access-ops, then
+ * register parent and context drivers (unwinding the parent if the
+ * context driver fails).
+ */
+static int __init msm_iommu_driver_init(void)
+{
+ int ret;
+
+ msm_iommu_check_scm_call_avail();
+ msm_set_iommu_access_ops(&iommu_access_ops_v1);
+ msm_iommu_sec_set_access_ops(&iommu_access_ops_v1);
+
+ ret = platform_driver_register(&msm_iommu_driver);
+ if (ret) {
+ pr_err("Failed to register IOMMU driver\n");
+ return ret;
+ }
+
+ ret = platform_driver_register(&msm_iommu_ctx_driver);
+ if (ret) {
+ pr_err("Failed to register IOMMU context driver\n");
+ platform_driver_unregister(&msm_iommu_driver);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Module exit: unregister in reverse order (contexts before parents). */
+static void __exit msm_iommu_driver_exit(void)
+{
+ platform_driver_unregister(&msm_iommu_ctx_driver);
+ platform_driver_unregister(&msm_iommu_driver);
+}
+
+subsys_initcall(msm_iommu_driver_init);
+module_exit(msm_iommu_driver_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/qcom/msm_iommu_hw-v1.h b/drivers/iommu/qcom/msm_iommu_hw-v1.h
new file mode 100644
index 0000000000000..53e2f4874adb5
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_hw-v1.h
@@ -0,0 +1,2320 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_IOMMU_HW_V2_H
+#define __ARCH_ARM_MACH_MSM_IOMMU_HW_V2_H
+
+#define CTX_SHIFT 12
+
+#define CTX_REG(reg, base, ctx) \
+ ((base) + (reg) + ((ctx) << CTX_SHIFT))
+#define GLB_REG(reg, base) \
+ ((base) + (reg))
+#define GLB_REG_N(b, n, r) GLB_REG(b, ((r) + ((n) << 2)))
+#define GLB_FIELD(b, r) ((b) + (r))
+#define GLB_CTX_FIELD(b, c, r) (GLB_FIELD(b, r) + ((c) << CTX_SHIFT))
+#define GLB_FIELD_N(b, n, r) (GLB_FIELD(b, r) + ((n) << 2))
+
+
+#define GET_GLOBAL_REG(reg, base) (readl_relaxed(GLB_REG(reg, base)))
+#define GET_GLOBAL_REG_Q(reg, base) (readq_relaxed(GLB_REG(reg, base)))
+#define GET_CTX_REG(reg, base, ctx) (readl_relaxed(CTX_REG(reg, base, ctx)))
+#define GET_CTX_REG_Q(reg, base, ctx) (readq_relaxed(CTX_REG(reg, base, ctx)))
+
+#define SET_GLOBAL_REG(reg, base, val) writel_relaxed((val), GLB_REG(reg, base))
+#define SET_GLOBAL_REG_Q(reg, base, val) \
+ (writeq_relaxed((val), GLB_REG(reg, base)))
+
+#define SET_CTX_REG(reg, base, ctx, val) \
+ writel_relaxed((val), (CTX_REG(reg, base, ctx)))
+#define SET_CTX_REG_Q(reg, base, ctx, val) \
+ writeq_relaxed((val), CTX_REG(reg, base, ctx))
+
+/* Wrappers for numbered registers */
+#define SET_GLOBAL_REG_N(b, n, r, v) writel_relaxed(((v)), GLB_REG_N(b, n, r))
+#define GET_GLOBAL_REG_N(b, n, r) (readl_relaxed(GLB_REG_N(b, n, r)))
+
+/* Field wrappers */
+#define GET_GLOBAL_FIELD(b, r, F) \
+ GET_FIELD(GLB_FIELD(b, r), r##_##F##_MASK, r##_##F##_SHIFT)
+#define GET_CONTEXT_FIELD(b, c, r, F) \
+ GET_FIELD(GLB_CTX_FIELD(b, c, r), \
+ r##_##F##_MASK, r##_##F##_SHIFT)
+#define GET_CONTEXT_FIELD_Q(b, c, r, F) \
+ GET_FIELD_Q(GLB_CTX_FIELD(b, c, r), \
+ r##_##F##_MASK, r##_##F##_SHIFT)
+
+#define SET_GLOBAL_FIELD(b, r, F, v) \
+ SET_FIELD(GLB_FIELD(b, r), r##_##F##_MASK, r##_##F##_SHIFT, (v))
+#define SET_CONTEXT_FIELD(b, c, r, F, v) \
+ SET_FIELD(GLB_CTX_FIELD(b, c, r), \
+ r##_##F##_MASK, r##_##F##_SHIFT, (v))
+#define SET_CONTEXT_FIELD_Q(b, c, r, F, v) \
+ SET_FIELD_Q(GLB_CTX_FIELD(b, c, r), \
+ r##_##F##_MASK, r##_##F##_SHIFT, (v))
+
+/* Wrappers for numbered field registers */
+#define SET_GLOBAL_FIELD_N(b, n, r, F, v) \
+ SET_FIELD(GLB_FIELD_N(b, n, r), r##_##F##_MASK, r##_##F##_SHIFT, v)
+#define GET_GLOBAL_FIELD_N(b, n, r, F) \
+ GET_FIELD(GLB_FIELD_N(b, n, r), r##_##F##_MASK, r##_##F##_SHIFT)
+
+#define GET_FIELD(addr, mask, shift) ((readl_relaxed(addr) >> (shift)) & (mask))
+#define GET_FIELD_Q(addr, mask, shift) \
+ ((readq_relaxed(addr) >> (shift)) & (mask))
+
+#define SET_FIELD(addr, mask, shift, v) \
+do { \
+ int t = readl_relaxed(addr); \
+ writel_relaxed((t & ~((mask) << (shift))) + (((v) & \
+ (mask)) << (shift)), addr); \
+} while (0)
+
+#define SET_FIELD_Q(addr, mask, shift, v) \
+do { \
+ u64 t = readq_relaxed(addr); \
+ writeq_relaxed((t & ~(((u64) mask) << (shift))) + (((v) & \
+ ((u64) mask)) << (shift)), addr); \
+} while (0)
+
+
+/* Global register space 0 setters / getters */
+#define SET_CR0(b, v) SET_GLOBAL_REG(CR0, (b), (v))
+#define SET_SCR1(b, v) SET_GLOBAL_REG(SCR1, (b), (v))
+#define SET_CR2(b, v) SET_GLOBAL_REG(CR2, (b), (v))
+#define SET_ACR(b, v) SET_GLOBAL_REG(ACR, (b), (v))
+#define SET_IDR0(b, N, v) SET_GLOBAL_REG(IDR0, (b), (v))
+#define SET_IDR1(b, N, v) SET_GLOBAL_REG(IDR1, (b), (v))
+#define SET_IDR2(b, N, v) SET_GLOBAL_REG(IDR2, (b), (v))
+#define SET_IDR7(b, N, v) SET_GLOBAL_REG(IDR7, (b), (v))
+#define SET_GFAR(b, v) SET_GLOBAL_REG_Q(GFAR, (b), (v))
+#define SET_GFSR(b, v) SET_GLOBAL_REG(GFSR, (b), (v))
+#define SET_GFSRRESTORE(b, v) SET_GLOBAL_REG(GFSRRESTORE, (b), (v))
+#define SET_GFSYNR0(b, v) SET_GLOBAL_REG(GFSYNR0, (b), (v))
+#define SET_GFSYNR1(b, v) SET_GLOBAL_REG(GFSYNR1, (b), (v))
+#define SET_GFSYNR2(b, v) SET_GLOBAL_REG(GFSYNR2, (b), (v))
+#define SET_TLBIVMID(b, v) SET_GLOBAL_REG(TLBIVMID, (b), (v))
+#define SET_TLBIALLNSNH(b, v) SET_GLOBAL_REG(TLBIALLNSNH, (b), (v))
+#define SET_TLBIALLH(b, v) SET_GLOBAL_REG(TLBIALLH, (b), (v))
+#define SET_TLBGSYNC(b, v) SET_GLOBAL_REG(TLBGSYNC, (b), (v))
+#define SET_TLBGSTATUS(b, v) SET_GLOBAL_REG(TLBGSTATUS, (b), (v))
+#define SET_TLBIVAH(b, v) SET_GLOBAL_REG(TLBIVAH, (b), (v))
+#define SET_GATS1UR(b, v) SET_GLOBAL_REG(GATS1UR, (b), (v))
+#define SET_GATS1UW(b, v) SET_GLOBAL_REG(GATS1UW, (b), (v))
+#define SET_GATS1PR(b, v) SET_GLOBAL_REG(GATS1PR, (b), (v))
+#define SET_GATS1PW(b, v) SET_GLOBAL_REG(GATS1PW, (b), (v))
+#define SET_GATS12UR(b, v) SET_GLOBAL_REG(GATS12UR, (b), (v))
+#define SET_GATS12UW(b, v) SET_GLOBAL_REG(GATS12UW, (b), (v))
+#define SET_GATS12PR(b, v) SET_GLOBAL_REG(GATS12PR, (b), (v))
+#define SET_GATS12PW(b, v) SET_GLOBAL_REG(GATS12PW, (b), (v))
+#define SET_GPAR(b, v) SET_GLOBAL_REG(GPAR, (b), (v))
+#define SET_GATSR(b, v) SET_GLOBAL_REG(GATSR, (b), (v))
+#define SET_NSCR0(b, v) SET_GLOBAL_REG(NSCR0, (b), (v))
+#define SET_NSCR2(b, v) SET_GLOBAL_REG(NSCR2, (b), (v))
+#define SET_NSACR(b, v) SET_GLOBAL_REG(NSACR, (b), (v))
+#define SET_NSGFAR(b, v) SET_GLOBAL_REG(NSGFAR, (b), (v))
+#define SET_NSGFSRRESTORE(b, v) SET_GLOBAL_REG(NSGFSRRESTORE, (b), (v))
+#define SET_PMCR(b, v) SET_GLOBAL_REG(PMCR, (b), (v))
+#define SET_SMR_N(b, N, v) SET_GLOBAL_REG_N(SMR, N, (b), (v))
+#define SET_S2CR_N(b, N, v) SET_GLOBAL_REG_N(S2CR, N, (b), (v))
+
+#define GET_CR0(b) GET_GLOBAL_REG(CR0, (b))
+#define GET_SCR1(b) GET_GLOBAL_REG(SCR1, (b))
+#define GET_CR2(b) GET_GLOBAL_REG(CR2, (b))
+#define GET_ACR(b) GET_GLOBAL_REG(ACR, (b))
+#define GET_IDR0(b, N) GET_GLOBAL_REG(IDR0, (b))
+#define GET_IDR1(b, N) GET_GLOBAL_REG(IDR1, (b))
+#define GET_IDR2(b, N) GET_GLOBAL_REG(IDR2, (b))
+#define GET_IDR7(b, N) GET_GLOBAL_REG(IDR7, (b))
+#define GET_GFAR(b) GET_GLOBAL_REG_Q(GFAR, (b))
+#define GET_GFSR(b) GET_GLOBAL_REG(GFSR, (b))
+#define GET_GFSRRESTORE(b) GET_GLOBAL_REG(GFSRRESTORE, (b))
+#define GET_GFSYNR0(b) GET_GLOBAL_REG(GFSYNR0, (b))
+#define GET_GFSYNR1(b) GET_GLOBAL_REG(GFSYNR1, (b))
+#define GET_GFSYNR2(b) GET_GLOBAL_REG(GFSYNR2, (b))
+#define GET_TLBIVMID(b) GET_GLOBAL_REG(TLBIVMID, (b))
+#define GET_TLBIALLNSNH(b) GET_GLOBAL_REG(TLBIALLNSNH, (b))
+#define GET_TLBIALLH(b) GET_GLOBAL_REG(TLBIALLH, (b))
+#define GET_TLBGSYNC(b) GET_GLOBAL_REG(TLBGSYNC, (b))
+#define GET_TLBGSTATUS(b) GET_GLOBAL_REG(TLBGSTATUS, (b))
+#define GET_TLBIVAH(b) GET_GLOBAL_REG(TLBIVAH, (b))
+#define GET_GATS1UR(b) GET_GLOBAL_REG(GATS1UR, (b))
+#define GET_GATS1UW(b) GET_GLOBAL_REG(GATS1UW, (b))
+#define GET_GATS1PR(b) GET_GLOBAL_REG(GATS1PR, (b))
+#define GET_GATS1PW(b) GET_GLOBAL_REG(GATS1PW, (b))
+#define GET_GATS12UR(b) GET_GLOBAL_REG(GATS12UR, (b))
+#define GET_GATS12UW(b) GET_GLOBAL_REG(GATS12UW, (b))
+#define GET_GATS12PR(b) GET_GLOBAL_REG(GATS12PR, (b))
+#define GET_GATS12PW(b) GET_GLOBAL_REG(GATS12PW, (b))
+#define GET_GPAR(b) GET_GLOBAL_REG(GPAR, (b))
+#define GET_GATSR(b) GET_GLOBAL_REG(GATSR, (b))
+#define GET_NSCR0(b) GET_GLOBAL_REG(NSCR0, (b))
+#define GET_NSCR2(b) GET_GLOBAL_REG(NSCR2, (b))
+#define GET_NSACR(b) GET_GLOBAL_REG(NSACR, (b))
+#define GET_PMCR(b) GET_GLOBAL_REG(PMCR, (b))
+#define GET_SMR_N(b, N) GET_GLOBAL_REG_N(SMR, N, (b))
+#define GET_S2CR_N(b, N) GET_GLOBAL_REG_N(S2CR, N, (b))
+
+/* Global register space 1 setters / getters */
+#define SET_CBAR_N(b, N, v) SET_GLOBAL_REG_N(CBAR, N, (b), (v))
+#define SET_CBFRSYNRA_N(b, N, v) SET_GLOBAL_REG_N(CBFRSYNRA, N, (b), (v))
+
+#define GET_CBAR_N(b, N) GET_GLOBAL_REG_N(CBAR, N, (b))
+#define GET_CBFRSYNRA_N(b, N) GET_GLOBAL_REG_N(CBFRSYNRA, N, (b))
+
+/* Implementation defined register setters/getters */
+#define SET_MICRO_MMU_CTRL_HALT_REQ(b, v) \
+ SET_GLOBAL_FIELD(b, MICRO_MMU_CTRL, HALT_REQ, v)
+#define GET_MICRO_MMU_CTRL_IDLE(b) \
+ GET_GLOBAL_FIELD(b, MICRO_MMU_CTRL, IDLE)
+#define SET_MICRO_MMU_CTRL_RESERVED(b, v) \
+ SET_GLOBAL_FIELD(b, MICRO_MMU_CTRL, RESERVED, v)
+
+#define MMU_CTRL_IDLE (MICRO_MMU_CTRL_IDLE_MASK << MICRO_MMU_CTRL_IDLE_SHIFT)
+
+#define SET_PREDICTIONDIS0(b, v) SET_GLOBAL_REG(PREDICTIONDIS0, (b), (v))
+#define SET_PREDICTIONDIS1(b, v) SET_GLOBAL_REG(PREDICTIONDIS1, (b), (v))
+#define SET_S1L1BFBLP0(b, v) SET_GLOBAL_REG(S1L1BFBLP0, (b), (v))
+
+/* SSD register setters/getters */
+#define SET_SSDR_N(b, N, v) SET_GLOBAL_REG_N(SSDR_N, N, (b), (v))
+
+#define GET_SSDR_N(b, N) GET_GLOBAL_REG_N(SSDR_N, N, (b))
+
+/* Context bank register setters/getters */
+#define SET_SCTLR(b, c, v) SET_CTX_REG(CB_SCTLR, (b), (c), (v))
+#define SET_ACTLR(b, c, v) SET_CTX_REG(CB_ACTLR, (b), (c), (v))
+#define SET_RESUME(b, c, v) SET_CTX_REG(CB_RESUME, (b), (c), (v))
+#define SET_TTBCR(b, c, v) SET_CTX_REG(CB_TTBCR, (b), (c), (v))
+#define SET_CONTEXTIDR(b, c, v) SET_CTX_REG(CB_CONTEXTIDR, (b), (c), (v))
+#define SET_PRRR(b, c, v) SET_CTX_REG(CB_PRRR, (b), (c), (v))
+#define SET_NMRR(b, c, v) SET_CTX_REG(CB_NMRR, (b), (c), (v))
+#define SET_PAR(b, c, v) SET_CTX_REG(CB_PAR, (b), (c), (v))
+#define SET_FSR(b, c, v) SET_CTX_REG(CB_FSR, (b), (c), (v))
+#define SET_FSRRESTORE(b, c, v) SET_CTX_REG(CB_FSRRESTORE, (b), (c), (v))
+#define SET_FAR(b, c, v) SET_CTX_REG(CB_FAR, (b), (c), (v))
+#define SET_FSYNR0(b, c, v) SET_CTX_REG(CB_FSYNR0, (b), (c), (v))
+#define SET_FSYNR1(b, c, v) SET_CTX_REG(CB_FSYNR1, (b), (c), (v))
+#define SET_TLBIVA(b, c, v) SET_CTX_REG(CB_TLBIVA, (b), (c), (v))
+#define SET_TLBIVAA(b, c, v) SET_CTX_REG(CB_TLBIVAA, (b), (c), (v))
+#define SET_TLBIASID(b, c, v) SET_CTX_REG(CB_TLBIASID, (b), (c), (v))
+#define SET_TLBIALL(b, c, v) SET_CTX_REG(CB_TLBIALL, (b), (c), (v))
+#define SET_TLBIVAL(b, c, v) SET_CTX_REG(CB_TLBIVAL, (b), (c), (v))
+#define SET_TLBIVAAL(b, c, v) SET_CTX_REG(CB_TLBIVAAL, (b), (c), (v))
+#define SET_TLBSYNC(b, c, v) SET_CTX_REG(CB_TLBSYNC, (b), (c), (v))
+#define SET_TLBSTATUS(b, c, v) SET_CTX_REG(CB_TLBSTATUS, (b), (c), (v))
+#define SET_ATS1PR(b, c, v) SET_CTX_REG(CB_ATS1PR, (b), (c), (v))
+#define SET_ATS1PW(b, c, v) SET_CTX_REG(CB_ATS1PW, (b), (c), (v))
+#define SET_ATS1UR(b, c, v) SET_CTX_REG(CB_ATS1UR, (b), (c), (v))
+#define SET_ATS1UW(b, c, v) SET_CTX_REG(CB_ATS1UW, (b), (c), (v))
+#define SET_ATSR(b, c, v) SET_CTX_REG(CB_ATSR, (b), (c), (v))
+
+#define GET_SCTLR(b, c) GET_CTX_REG(CB_SCTLR, (b), (c))
+#define GET_ACTLR(b, c) GET_CTX_REG(CB_ACTLR, (b), (c))
+#define GET_RESUME(b, c) GET_CTX_REG(CB_RESUME, (b), (c))
+#define GET_TTBR0(b, c) GET_CTX_REG(CB_TTBR0, (b), (c))
+#define GET_TTBR1(b, c) GET_CTX_REG(CB_TTBR1, (b), (c))
+#define GET_TTBCR(b, c) GET_CTX_REG(CB_TTBCR, (b), (c))
+#define GET_CONTEXTIDR(b, c) GET_CTX_REG(CB_CONTEXTIDR, (b), (c))
+#define GET_PRRR(b, c) GET_CTX_REG(CB_PRRR, (b), (c))
+#define GET_NMRR(b, c) GET_CTX_REG(CB_NMRR, (b), (c))
+#define GET_PAR(b, c) GET_CTX_REG_Q(CB_PAR, (b), (c))
+#define GET_FSR(b, c) GET_CTX_REG(CB_FSR, (b), (c))
+#define GET_FSRRESTORE(b, c) GET_CTX_REG(CB_FSRRESTORE, (b), (c))
+#define GET_FAR(b, c) GET_CTX_REG_Q(CB_FAR, (b), (c))
+#define GET_FSYNR0(b, c) GET_CTX_REG(CB_FSYNR0, (b), (c))
+#define GET_FSYNR1(b, c) GET_CTX_REG(CB_FSYNR1, (b), (c))
+#define GET_TLBIVA(b, c) GET_CTX_REG(CB_TLBIVA, (b), (c))
+#define GET_TLBIVAA(b, c) GET_CTX_REG(CB_TLBIVAA, (b), (c))
+#define GET_TLBIASID(b, c) GET_CTX_REG(CB_TLBIASID, (b), (c))
+#define GET_TLBIALL(b, c) GET_CTX_REG(CB_TLBIALL, (b), (c))
+#define GET_TLBIVAL(b, c) GET_CTX_REG(CB_TLBIVAL, (b), (c))
+#define GET_TLBIVAAL(b, c) GET_CTX_REG(CB_TLBIVAAL, (b), (c))
+#define GET_TLBSYNC(b, c) GET_CTX_REG(CB_TLBSYNC, (b), (c))
+#define GET_TLBSTATUS(b, c) GET_CTX_REG(CB_TLBSTATUS, (b), (c))
+#define GET_ATS1PR(b, c) GET_CTX_REG(CB_ATS1PR, (b), (c))
+#define GET_ATS1PW(b, c) GET_CTX_REG(CB_ATS1PW, (b), (c))
+#define GET_ATS1UR(b, c) GET_CTX_REG(CB_ATS1UR, (b), (c))
+#define GET_ATS1UW(b, c) GET_CTX_REG(CB_ATS1UW, (b), (c))
+#define GET_ATSR(b, c) GET_CTX_REG(CB_ATSR, (b), (c))
+
+/* Global Register field setters / getters */
+/* Configuration Register: CR0/NSCR0 */
+#define SET_CR0_NSCFG(b, v) SET_GLOBAL_FIELD(b, CR0, NSCFG, v)
+#define SET_CR0_WACFG(b, v) SET_GLOBAL_FIELD(b, CR0, WACFG, v)
+#define SET_CR0_RACFG(b, v) SET_GLOBAL_FIELD(b, CR0, RACFG, v)
+#define SET_CR0_SHCFG(b, v) SET_GLOBAL_FIELD(b, CR0, SHCFG, v)
+#define SET_CR0_SMCFCFG(b, v) SET_GLOBAL_FIELD(b, CR0, SMCFCFG, v)
+#define SET_NSCR0_SMCFCFG(b, v) SET_GLOBAL_FIELD(b, NSCR0, SMCFCFG, v)
+#define SET_CR0_MTCFG(b, v) SET_GLOBAL_FIELD(b, CR0, MTCFG, v)
+#define SET_CR0_BSU(b, v) SET_GLOBAL_FIELD(b, CR0, BSU, v)
+#define SET_CR0_FB(b, v) SET_GLOBAL_FIELD(b, CR0, FB, v)
+#define SET_CR0_PTM(b, v) SET_GLOBAL_FIELD(b, CR0, PTM, v)
+#define SET_CR0_VMIDPNE(b, v) SET_GLOBAL_FIELD(b, CR0, VMIDPNE, v)
+#define SET_CR0_USFCFG(b, v) SET_GLOBAL_FIELD(b, CR0, USFCFG, v)
+#define SET_NSCR0_USFCFG(b, v) SET_GLOBAL_FIELD(b, NSCR0, USFCFG, v)
+#define SET_CR0_GSE(b, v) SET_GLOBAL_FIELD(b, CR0, GSE, v)
+#define SET_CR0_STALLD(b, v) SET_GLOBAL_FIELD(b, CR0, STALLD, v)
+#define SET_NSCR0_STALLD(b, v) SET_GLOBAL_FIELD(b, NSCR0, STALLD, v)
+#define SET_CR0_TRANSIENTCFG(b, v) SET_GLOBAL_FIELD(b, CR0, TRANSIENTCFG, v)
+#define SET_CR0_GCFGFIE(b, v) SET_GLOBAL_FIELD(b, CR0, GCFGFIE, v)
+#define SET_NSCR0_GCFGFIE(b, v) SET_GLOBAL_FIELD(b, NSCR0, GCFGFIE, v)
+#define SET_CR0_GCFGFRE(b, v) SET_GLOBAL_FIELD(b, CR0, GCFGFRE, v)
+#define SET_NSCR0_GCFGFRE(b, v) SET_GLOBAL_FIELD(b, NSCR0, GCFGFRE, v)
+#define SET_CR0_GFIE(b, v) SET_GLOBAL_FIELD(b, CR0, GFIE, v)
+#define SET_NSCR0_GFIE(b, v) SET_GLOBAL_FIELD(b, NSCR0, GFIE, v)
+#define SET_CR0_GFRE(b, v) SET_GLOBAL_FIELD(b, CR0, GFRE, v)
+#define SET_NSCR0_GFRE(b, v) SET_GLOBAL_FIELD(b, NSCR0, GFRE, v)
+#define SET_CR0_CLIENTPD(b, v) SET_GLOBAL_FIELD(b, CR0, CLIENTPD, v)
+#define SET_NSCR0_CLIENTPD(b, v) SET_GLOBAL_FIELD(b, NSCR0, CLIENTPD, v)
+
+#define SET_ACR_SMTNMC_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, ACR, SMTNMC_BPTLBEN, v)
+#define SET_ACR_MMUDIS_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, ACR, MMUDIS_BPTLBEN, v)
+#define SET_ACR_S2CR_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, ACR, S2CR_BPTLBEN, v)
+
+#define SET_NSACR_SMTNMC_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, NSACR, SMTNMC_BPTLBEN, v)
+#define SET_NSACR_MMUDIS_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, NSACR, MMUDIS_BPTLBEN, v)
+#define SET_NSACR_S2CR_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, NSACR, S2CR_BPTLBEN, v)
+
+#define GET_CR0_NSCFG(b) GET_GLOBAL_FIELD(b, CR0, NSCFG)
+#define GET_CR0_WACFG(b) GET_GLOBAL_FIELD(b, CR0, WACFG)
+#define GET_CR0_RACFG(b) GET_GLOBAL_FIELD(b, CR0, RACFG)
+#define GET_CR0_SHCFG(b) GET_GLOBAL_FIELD(b, CR0, SHCFG)
+#define GET_CR0_SMCFCFG(b) GET_GLOBAL_FIELD(b, CR0, SMCFCFG)
+#define GET_CR0_MTCFG(b) GET_GLOBAL_FIELD(b, CR0, MTCFG)
+#define GET_CR0_BSU(b) GET_GLOBAL_FIELD(b, CR0, BSU)
+#define GET_CR0_FB(b) GET_GLOBAL_FIELD(b, CR0, FB)
+#define GET_CR0_PTM(b) GET_GLOBAL_FIELD(b, CR0, PTM)
+#define GET_CR0_VMIDPNE(b) GET_GLOBAL_FIELD(b, CR0, VMIDPNE)
+#define GET_CR0_USFCFG(b) GET_GLOBAL_FIELD(b, CR0, USFCFG)
+#define GET_CR0_GSE(b) GET_GLOBAL_FIELD(b, CR0, GSE)
+#define GET_CR0_STALLD(b) GET_GLOBAL_FIELD(b, CR0, STALLD)
+#define GET_CR0_TRANSIENTCFG(b) GET_GLOBAL_FIELD(b, CR0, TRANSIENTCFG)
+#define GET_CR0_GCFGFIE(b) GET_GLOBAL_FIELD(b, CR0, GCFGFIE)
+#define GET_CR0_GCFGFRE(b) GET_GLOBAL_FIELD(b, CR0, GCFGFRE)
+#define GET_CR0_GFIE(b) GET_GLOBAL_FIELD(b, CR0, GFIE)
+#define GET_CR0_GFRE(b) GET_GLOBAL_FIELD(b, CR0, GFRE)
+#define GET_CR0_CLIENTPD(b) GET_GLOBAL_FIELD(b, CR0, CLIENTPD)
+
+/* Configuration Register: CR2 */
+#define SET_CR2_BPVMID(b, v) SET_GLOBAL_FIELD(b, CR2, BPVMID, v)
+
+#define GET_CR2_BPVMID(b) GET_GLOBAL_FIELD(b, CR2, BPVMID)
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define SET_GATS1PR_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS1PR, ADDR, v)
+#define SET_GATS1PR_NDX(b, v) SET_GLOBAL_FIELD(b, GATS1PR, NDX, v)
+
+#define GET_GATS1PR_ADDR(b) GET_GLOBAL_FIELD(b, GATS1PR, ADDR)
+#define GET_GATS1PR_NDX(b) GET_GLOBAL_FIELD(b, GATS1PR, NDX)
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define SET_GATS1PW_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS1PW, ADDR, v)
+#define SET_GATS1PW_NDX(b, v) SET_GLOBAL_FIELD(b, GATS1PW, NDX, v)
+
+#define GET_GATS1PW_ADDR(b) GET_GLOBAL_FIELD(b, GATS1PW, ADDR)
+#define GET_GATS1PW_NDX(b) GET_GLOBAL_FIELD(b, GATS1PW, NDX)
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define SET_GATS1UR_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS1UR, ADDR, v)
+#define SET_GATS1UR_NDX(b, v) SET_GLOBAL_FIELD(b, GATS1UR, NDX, v)
+
+#define GET_GATS1UR_ADDR(b) GET_GLOBAL_FIELD(b, GATS1UR, ADDR)
+#define GET_GATS1UR_NDX(b) GET_GLOBAL_FIELD(b, GATS1UR, NDX)
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define SET_GATS1UW_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS1UW, ADDR, v)
+#define SET_GATS1UW_NDX(b, v) SET_GLOBAL_FIELD(b, GATS1UW, NDX, v)
+
+#define GET_GATS1UW_ADDR(b) GET_GLOBAL_FIELD(b, GATS1UW, ADDR)
+#define GET_GATS1UW_NDX(b) GET_GLOBAL_FIELD(b, GATS1UW, NDX)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define SET_GATS12PR_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS12PR, ADDR, v)
+#define SET_GATS12PR_NDX(b, v) SET_GLOBAL_FIELD(b, GATS12PR, NDX, v)
+
+#define GET_GATS12PR_ADDR(b) GET_GLOBAL_FIELD(b, GATS12PR, ADDR)
+#define GET_GATS12PR_NDX(b) GET_GLOBAL_FIELD(b, GATS12PR, NDX)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define SET_GATS12PW_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS12PW, ADDR, v)
+#define SET_GATS12PW_NDX(b, v) SET_GLOBAL_FIELD(b, GATS12PW, NDX, v)
+
+#define GET_GATS12PW_ADDR(b) GET_GLOBAL_FIELD(b, GATS12PW, ADDR)
+#define GET_GATS12PW_NDX(b) GET_GLOBAL_FIELD(b, GATS12PW, NDX)
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define SET_GATS12UR_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS12UR, ADDR, v)
+#define SET_GATS12UR_NDX(b, v) SET_GLOBAL_FIELD(b, GATS12UR, NDX, v)
+
+#define GET_GATS12UR_ADDR(b) GET_GLOBAL_FIELD(b, GATS12UR, ADDR)
+#define GET_GATS12UR_NDX(b) GET_GLOBAL_FIELD(b, GATS12UR, NDX)
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define SET_GATS12UW_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS12UW, ADDR, v)
+#define SET_GATS12UW_NDX(b, v) SET_GLOBAL_FIELD(b, GATS12UW, NDX, v)
+
+#define GET_GATS12UW_ADDR(b) GET_GLOBAL_FIELD(b, GATS12UW, ADDR)
+#define GET_GATS12UW_NDX(b) GET_GLOBAL_FIELD(b, GATS12UW, NDX)
+
+/* Global Address Translation Status Register: GATSR */
+#define SET_GATSR_ACTIVE(b, v) SET_GLOBAL_FIELD(b, GATSR, ACTIVE, v)
+
+#define GET_GATSR_ACTIVE(b) GET_GLOBAL_FIELD(b, GATSR, ACTIVE)
+
+/* Global Fault Address Register: GFAR */
+#define SET_GFAR_FADDR(b, v) SET_GLOBAL_FIELD(b, GFAR, FADDR, v)
+
+#define GET_GFAR_FADDR(b) GET_GLOBAL_FIELD(b, GFAR, FADDR)
+
+/* Global Fault Status Register: GFSR */
+#define SET_GFSR_ICF(b, v) SET_GLOBAL_FIELD(b, GFSR, ICF, v)
+#define SET_GFSR_USF(b, v) SET_GLOBAL_FIELD(b, GFSR, USF, v)
+#define SET_GFSR_SMCF(b, v) SET_GLOBAL_FIELD(b, GFSR, SMCF, v)
+#define SET_GFSR_UCBF(b, v) SET_GLOBAL_FIELD(b, GFSR, UCBF, v)
+#define SET_GFSR_UCIF(b, v) SET_GLOBAL_FIELD(b, GFSR, UCIF, v)
+#define SET_GFSR_CAF(b, v) SET_GLOBAL_FIELD(b, GFSR, CAF, v)
+#define SET_GFSR_EF(b, v) SET_GLOBAL_FIELD(b, GFSR, EF, v)
+#define SET_GFSR_PF(b, v) SET_GLOBAL_FIELD(b, GFSR, PF, v)
+#define SET_GFSR_MULTI(b, v) SET_GLOBAL_FIELD(b, GFSR, MULTI, v)
+
+#define GET_GFSR_ICF(b) GET_GLOBAL_FIELD(b, GFSR, ICF)
+#define GET_GFSR_USF(b) GET_GLOBAL_FIELD(b, GFSR, USF)
+#define GET_GFSR_SMCF(b) GET_GLOBAL_FIELD(b, GFSR, SMCF)
+#define GET_GFSR_UCBF(b) GET_GLOBAL_FIELD(b, GFSR, UCBF)
+#define GET_GFSR_UCIF(b) GET_GLOBAL_FIELD(b, GFSR, UCIF)
+#define GET_GFSR_CAF(b) GET_GLOBAL_FIELD(b, GFSR, CAF)
+#define GET_GFSR_EF(b) GET_GLOBAL_FIELD(b, GFSR, EF)
+#define GET_GFSR_PF(b) GET_GLOBAL_FIELD(b, GFSR, PF)
+#define GET_GFSR_MULTI(b) GET_GLOBAL_FIELD(b, GFSR, MULTI)
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define SET_GFSYNR0_NESTED(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, NESTED, v)
+#define SET_GFSYNR0_WNR(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, WNR, v)
+#define SET_GFSYNR0_PNU(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, PNU, v)
+#define SET_GFSYNR0_IND(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, IND, v)
+#define SET_GFSYNR0_NSSTATE(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, NSSTATE, v)
+#define SET_GFSYNR0_NSATTR(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, NSATTR, v)
+
+#define GET_GFSYNR0_NESTED(b) GET_GLOBAL_FIELD(b, GFSYNR0, NESTED)
+#define GET_GFSYNR0_WNR(b) GET_GLOBAL_FIELD(b, GFSYNR0, WNR)
+#define GET_GFSYNR0_PNU(b) GET_GLOBAL_FIELD(b, GFSYNR0, PNU)
+#define GET_GFSYNR0_IND(b) GET_GLOBAL_FIELD(b, GFSYNR0, IND)
+#define GET_GFSYNR0_NSSTATE(b) GET_GLOBAL_FIELD(b, GFSYNR0, NSSTATE)
+#define GET_GFSYNR0_NSATTR(b) GET_GLOBAL_FIELD(b, GFSYNR0, NSATTR)
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define SET_GFSYNR1_SID(b, v) SET_GLOBAL_FIELD(b, GFSYNR1, SID, v)
+
+#define GET_GFSYNR1_SID(b) GET_GLOBAL_FIELD(b, GFSYNR1, SID)
+
+/* Global Physical Address Register: GPAR */
+#define SET_GPAR_F(b, v) SET_GLOBAL_FIELD(b, GPAR, F, v)
+#define SET_GPAR_SS(b, v) SET_GLOBAL_FIELD(b, GPAR, SS, v)
+#define SET_GPAR_OUTER(b, v) SET_GLOBAL_FIELD(b, GPAR, OUTER, v)
+#define SET_GPAR_INNER(b, v) SET_GLOBAL_FIELD(b, GPAR, INNER, v)
+#define SET_GPAR_SH(b, v) SET_GLOBAL_FIELD(b, GPAR, SH, v)
+#define SET_GPAR_NS(b, v) SET_GLOBAL_FIELD(b, GPAR, NS, v)
+#define SET_GPAR_NOS(b, v) SET_GLOBAL_FIELD(b, GPAR, NOS, v)
+#define SET_GPAR_PA(b, v) SET_GLOBAL_FIELD(b, GPAR, PA, v)
+#define SET_GPAR_TF(b, v) SET_GLOBAL_FIELD(b, GPAR, TF, v)
+#define SET_GPAR_AFF(b, v) SET_GLOBAL_FIELD(b, GPAR, AFF, v)
+#define SET_GPAR_PF(b, v) SET_GLOBAL_FIELD(b, GPAR, PF, v)
+#define SET_GPAR_EF(b, v) SET_GLOBAL_FIELD(b, GPAR, EF, v)
+#define SET_GPAR_TLCMCF(b, v) SET_GLOBAL_FIELD(b, GPAR, TLCMCF, v)
+#define SET_GPAR_TLBLKF(b, v) SET_GLOBAL_FIELD(b, GPAR, TLBLKF, v)
+#define SET_GPAR_UCBF(b, v) SET_GLOBAL_FIELD(b, GPAR, UCBF, v)
+
+#define GET_GPAR_F(b) GET_GLOBAL_FIELD(b, GPAR, F)
+#define GET_GPAR_SS(b) GET_GLOBAL_FIELD(b, GPAR, SS)
+#define GET_GPAR_OUTER(b) GET_GLOBAL_FIELD(b, GPAR, OUTER)
+#define GET_GPAR_INNER(b) GET_GLOBAL_FIELD(b, GPAR, INNER)
+#define GET_GPAR_SH(b) GET_GLOBAL_FIELD(b, GPAR, SH)
+#define GET_GPAR_NS(b) GET_GLOBAL_FIELD(b, GPAR, NS)
+#define GET_GPAR_NOS(b) GET_GLOBAL_FIELD(b, GPAR, NOS)
+#define GET_GPAR_PA(b) GET_GLOBAL_FIELD(b, GPAR, PA)
+#define GET_GPAR_TF(b) GET_GLOBAL_FIELD(b, GPAR, TF)
+#define GET_GPAR_AFF(b) GET_GLOBAL_FIELD(b, GPAR, AFF)
+#define GET_GPAR_PF(b) GET_GLOBAL_FIELD(b, GPAR, PF)
+#define GET_GPAR_EF(b) GET_GLOBAL_FIELD(b, GPAR, EF)
+#define GET_GPAR_TLCMCF(b) GET_GLOBAL_FIELD(b, GPAR, TLCMCF)
+#define GET_GPAR_TLBLKF(b) GET_GLOBAL_FIELD(b, GPAR, TLBLKF)
+#define GET_GPAR_UCBF(b) GET_GLOBAL_FIELD(b, GPAR, UCBF)
+
+/* Identification Register: IDR0 */
+#define SET_IDR0_NUMSMRG(b, v) SET_GLOBAL_FIELD(b, IDR0, NUMSMRG, v)
+#define SET_IDR0_NUMSIDB(b, v) SET_GLOBAL_FIELD(b, IDR0, NUMSIDB, v)
+#define SET_IDR0_BTM(b, v) SET_GLOBAL_FIELD(b, IDR0, BTM, v)
+#define SET_IDR0_CTTW(b, v) SET_GLOBAL_FIELD(b, IDR0, CTTW, v)
+#define SET_IDR0_NUMIRPT(b, v) SET_GLOBAL_FIELD(b, IDR0, NUMIRPT, v)
+#define SET_IDR0_PTFS(b, v) SET_GLOBAL_FIELD(b, IDR0, PTFS, v)
+#define SET_IDR0_SMS(b, v) SET_GLOBAL_FIELD(b, IDR0, SMS, v)
+#define SET_IDR0_NTS(b, v) SET_GLOBAL_FIELD(b, IDR0, NTS, v)
+#define SET_IDR0_S2TS(b, v) SET_GLOBAL_FIELD(b, IDR0, S2TS, v)
+#define SET_IDR0_S1TS(b, v) SET_GLOBAL_FIELD(b, IDR0, S1TS, v)
+#define SET_IDR0_SES(b, v) SET_GLOBAL_FIELD(b, IDR0, SES, v)
+
+#define GET_IDR0_NUMSMRG(b) GET_GLOBAL_FIELD(b, IDR0, NUMSMRG)
+#define GET_IDR0_NUMSIDB(b) GET_GLOBAL_FIELD(b, IDR0, NUMSIDB)
+#define GET_IDR0_BTM(b) GET_GLOBAL_FIELD(b, IDR0, BTM)
+#define GET_IDR0_CTTW(b) GET_GLOBAL_FIELD(b, IDR0, CTTW)
+#define GET_IDR0_NUMIRPT(b) GET_GLOBAL_FIELD(b, IDR0, NUMIRPT)
+#define GET_IDR0_PTFS(b) GET_GLOBAL_FIELD(b, IDR0, PTFS)
+#define GET_IDR0_SMS(b) GET_GLOBAL_FIELD(b, IDR0, SMS)
+#define GET_IDR0_NTS(b) GET_GLOBAL_FIELD(b, IDR0, NTS)
+#define GET_IDR0_S2TS(b) GET_GLOBAL_FIELD(b, IDR0, S2TS)
+#define GET_IDR0_S1TS(b) GET_GLOBAL_FIELD(b, IDR0, S1TS)
+#define GET_IDR0_SES(b) GET_GLOBAL_FIELD(b, IDR0, SES)
+
+/* Identification Register: IDR1 */
+#define SET_IDR1_NUMCB(b, v) SET_GLOBAL_FIELD(b, IDR1, NUMCB, v)
+#define SET_IDR1_NUMSSDNDXB(b, v) SET_GLOBAL_FIELD(b, IDR1, NUMSSDNDXB, v)
+#define SET_IDR1_SSDTP(b, v) SET_GLOBAL_FIELD(b, IDR1, SSDTP, v)
+#define SET_IDR1_SMCD(b, v) SET_GLOBAL_FIELD(b, IDR1, SMCD, v)
+#define SET_IDR1_NUMS2CB(b, v) SET_GLOBAL_FIELD(b, IDR1, NUMS2CB, v)
+#define SET_IDR1_NUMPAGENDXB(b, v) SET_GLOBAL_FIELD(b, IDR1, NUMPAGENDXB, v)
+#define SET_IDR1_PAGESIZE(b, v) SET_GLOBAL_FIELD(b, IDR1, PAGESIZE, v)
+
+#define GET_IDR1_NUMCB(b) GET_GLOBAL_FIELD(b, IDR1, NUMCB)
+#define GET_IDR1_NUMSSDNDXB(b) GET_GLOBAL_FIELD(b, IDR1, NUMSSDNDXB)
+#define GET_IDR1_SSDTP(b) GET_GLOBAL_FIELD(b, IDR1, SSDTP)
+#define GET_IDR1_SMCD(b) GET_GLOBAL_FIELD(b, IDR1, SMCD)
+#define GET_IDR1_NUMS2CB(b) GET_GLOBAL_FIELD(b, IDR1, NUMS2CB)
+#define GET_IDR1_NUMPAGENDXB(b) GET_GLOBAL_FIELD(b, IDR1, NUMPAGENDXB)
+#define GET_IDR1_PAGESIZE(b) GET_GLOBAL_FIELD(b, IDR1, PAGESIZE)
+
+/* Identification Register: IDR2 */
+#define SET_IDR2_IAS(b, v) SET_GLOBAL_FIELD(b, IDR2, IAS, v)
+#define SET_IDR2_OAS(b, v) SET_GLOBAL_FIELD(b, IDR2, OAS, v)
+
+#define GET_IDR2_IAS(b) GET_GLOBAL_FIELD(b, IDR2, IAS)
+#define GET_IDR2_OAS(b) GET_GLOBAL_FIELD(b, IDR2, OAS)
+
+/* Identification Register: IDR7 */
+#define SET_IDR7_MINOR(b, v) SET_GLOBAL_FIELD(b, IDR7, MINOR, v)
+#define SET_IDR7_MAJOR(b, v) SET_GLOBAL_FIELD(b, IDR7, MAJOR, v)
+
+#define GET_IDR7_MINOR(b) GET_GLOBAL_FIELD(b, IDR7, MINOR)
+#define GET_IDR7_MAJOR(b) GET_GLOBAL_FIELD(b, IDR7, MAJOR)
+
+/* Stream to Context Register: S2CR_N */
+#define SET_S2CR_CBNDX(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, CBNDX, v)
+#define SET_S2CR_SHCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, SHCFG, v)
+#define SET_S2CR_MTCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, MTCFG, v)
+#define SET_S2CR_MEMATTR(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, MEMATTR, v)
+#define SET_S2CR_TYPE(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, TYPE, v)
+#define SET_S2CR_NSCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, NSCFG, v)
+#define SET_S2CR_RACFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, RACFG, v)
+#define SET_S2CR_WACFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, WACFG, v)
+#define SET_S2CR_PRIVCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, PRIVCFG, v)
+#define SET_S2CR_INSTCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, INSTCFG, v)
+#define SET_S2CR_TRANSIENTCFG(b, n, v) \
+ SET_GLOBAL_FIELD_N(b, n, S2CR, TRANSIENTCFG, v)
+#define SET_S2CR_VMID(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, VMID, v)
+#define SET_S2CR_BSU(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, BSU, v)
+#define SET_S2CR_FB(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, FB, v)
+
+#define GET_S2CR_CBNDX(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, CBNDX)
+#define GET_S2CR_SHCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, SHCFG)
+#define GET_S2CR_MTCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, MTCFG)
+#define GET_S2CR_MEMATTR(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, MEMATTR)
+#define GET_S2CR_TYPE(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, TYPE)
+#define GET_S2CR_NSCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, NSCFG)
+#define GET_S2CR_RACFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, RACFG)
+#define GET_S2CR_WACFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, WACFG)
+#define GET_S2CR_PRIVCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, PRIVCFG)
+#define GET_S2CR_INSTCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, INSTCFG)
+#define GET_S2CR_TRANSIENTCFG(b, n) \
+ GET_GLOBAL_FIELD_N(b, n, S2CR, TRANSIENTCFG)
+#define GET_S2CR_VMID(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, VMID)
+#define GET_S2CR_BSU(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, BSU)
+#define GET_S2CR_FB(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, FB)
+
+/* Stream Match Register: SMR_N */
+#define SET_SMR_ID(b, n, v) SET_GLOBAL_FIELD_N(b, n, SMR, ID, v)
+#define SET_SMR_MASK(b, n, v) SET_GLOBAL_FIELD_N(b, n, SMR, MASK, v)
+#define SET_SMR_VALID(b, n, v) SET_GLOBAL_FIELD_N(b, n, SMR, VALID, v)
+
+#define GET_SMR_ID(b, n) GET_GLOBAL_FIELD_N(b, n, SMR, ID)
+#define GET_SMR_MASK(b, n) GET_GLOBAL_FIELD_N(b, n, SMR, MASK)
+#define GET_SMR_VALID(b, n) GET_GLOBAL_FIELD_N(b, n, SMR, VALID)
+
+/* Global TLB Status: TLBGSTATUS */
+#define SET_TLBGSTATUS_GSACTIVE(b, v) \
+ SET_GLOBAL_FIELD(b, TLBGSTATUS, GSACTIVE, v)
+
+#define GET_TLBGSTATUS_GSACTIVE(b) \
+ GET_GLOBAL_FIELD(b, TLBGSTATUS, GSACTIVE)
+
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define SET_TLBIVAH_ADDR(b, v) SET_GLOBAL_FIELD(b, TLBIVAH, ADDR, v)
+
+#define GET_TLBIVAH_ADDR(b) GET_GLOBAL_FIELD(b, TLBIVAH, ADDR)
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define SET_TLBIVMID_VMID(b, v) SET_GLOBAL_FIELD(b, TLBIVMID, VMID, v)
+
+#define GET_TLBIVMID_VMID(b) GET_GLOBAL_FIELD(b, TLBIVMID, VMID)
+
+/* Global Register Space 1 Field setters/getters*/
+/* Context Bank Attribute Register: CBAR_N */
+#define SET_CBAR_VMID(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, VMID, v)
+#define SET_CBAR_CBNDX(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, CBNDX, v)
+#define SET_CBAR_BPSHCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, BPSHCFG, v)
+#define SET_CBAR_HYPC(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, HYPC, v)
+#define SET_CBAR_FB(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, FB, v)
+#define SET_CBAR_MEMATTR(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, MEMATTR, v)
+#define SET_CBAR_TYPE(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, TYPE, v)
+#define SET_CBAR_BSU(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, BSU, v)
+#define SET_CBAR_RACFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, RACFG, v)
+#define SET_CBAR_WACFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, WACFG, v)
+#define SET_CBAR_IRPTNDX(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, IRPTNDX, v)
+
+#define GET_CBAR_VMID(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, VMID)
+#define GET_CBAR_CBNDX(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, CBNDX)
+#define GET_CBAR_BPSHCFG(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, BPSHCFG)
+#define GET_CBAR_HYPC(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, HYPC)
+#define GET_CBAR_FB(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, FB)
+#define GET_CBAR_MEMATTR(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, MEMATTR)
+#define GET_CBAR_TYPE(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, TYPE)
+#define GET_CBAR_BSU(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, BSU)
+#define GET_CBAR_RACFG(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, RACFG)
+#define GET_CBAR_WACFG(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, WACFG)
+#define GET_CBAR_IRPTNDX(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, IRPTNDX)
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA_N */
+#define SET_CBFRSYNRA_SID(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBFRSYNRA, SID, v)
+
+#define GET_CBFRSYNRA_SID(b, n) GET_GLOBAL_FIELD_N(b, n, CBFRSYNRA, SID)
+
+/* Stage 1 Context Bank Format Fields */
+#define SET_CB_ACTLR_REQPRIORITY(b, c, v) \
+	SET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITY, v)
+#define SET_CB_ACTLR_REQPRIORITYCFG(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITYCFG, v)
+#define SET_CB_ACTLR_PRIVCFG(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_ACTLR, PRIVCFG, v)
+#define SET_CB_ACTLR_BPRCOSH(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCOSH, v)
+#define SET_CB_ACTLR_BPRCISH(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCISH, v)
+#define SET_CB_ACTLR_BPRCNSH(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCNSH, v)
+
+#define GET_CB_ACTLR_REQPRIORITY(b, c) \
+	GET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITY)
+#define GET_CB_ACTLR_REQPRIORITYCFG(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITYCFG)
+#define GET_CB_ACTLR_PRIVCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_ACTLR, PRIVCFG)
+#define GET_CB_ACTLR_BPRCOSH(b, c) GET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCOSH)
+#define GET_CB_ACTLR_BPRCISH(b, c) GET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCISH)
+#define GET_CB_ACTLR_BPRCNSH(b, c) GET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCNSH)
+
+/*
+ * Address-translation operation registers (CB_ATS1PR/PW/UR/UW write a VA
+ * to translate), the translation status register (CB_ATSR), the context
+ * ID register (CB_CONTEXTIDR) and the fault address register (CB_FAR).
+ * All accessors take (base, context-bank[, value]).
+ */
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define SET_CB_ATS1PR_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1PR, ADDR, v)
+
+#define GET_CB_ATS1PR_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_ATS1PR, ADDR)
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define SET_CB_ATS1PW_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1PW, ADDR, v)
+
+#define GET_CB_ATS1PW_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_ATS1PW, ADDR)
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define SET_CB_ATS1UR_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1UR, ADDR, v)
+
+#define GET_CB_ATS1UR_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_ATS1UR, ADDR)
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define SET_CB_ATS1UW_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1UW, ADDR, v)
+
+#define GET_CB_ATS1UW_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_ATS1UW, ADDR)
+
+/* Address Translation Status Register: CB_ATSR */
+#define SET_CB_ATSR_ACTIVE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATSR, ACTIVE, v)
+
+#define GET_CB_ATSR_ACTIVE(b, c) GET_CONTEXT_FIELD(b, c, CB_ATSR, ACTIVE)
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define SET_CB_CONTEXTIDR_ASID(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, ASID, v)
+#define SET_CB_CONTEXTIDR_PROCID(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, PROCID, v)
+
+#define GET_CB_CONTEXTIDR_ASID(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, ASID)
+#define GET_CB_CONTEXTIDR_PROCID(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, PROCID)
+
+/* Fault Address Register: CB_FAR */
+#define SET_CB_FAR_FADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FAR, FADDR, v)
+
+#define GET_CB_FAR_FADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_FAR, FADDR)
+
+/*
+ * Fault Status (CB_FSR) and Fault Syndrome 0 (CB_FSYNR0) field accessors.
+ * FSR bits record which fault class occurred; FSYNR0 records details of
+ * the faulting transaction.
+ */
+/* Fault Status Register: CB_FSR */
+#define SET_CB_FSR_TF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, TF, v)
+#define SET_CB_FSR_AFF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, AFF, v)
+#define SET_CB_FSR_PF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, PF, v)
+#define SET_CB_FSR_EF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, EF, v)
+#define SET_CB_FSR_TLBMCF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, TLBMCF, v)
+#define SET_CB_FSR_TLBLKF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, TLBLKF, v)
+#define SET_CB_FSR_SS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, SS, v)
+#define SET_CB_FSR_MULTI(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, MULTI, v)
+
+#define GET_CB_FSR_TF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, TF)
+#define GET_CB_FSR_AFF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, AFF)
+#define GET_CB_FSR_PF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, PF)
+#define GET_CB_FSR_EF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, EF)
+#define GET_CB_FSR_TLBMCF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, TLBMCF)
+#define GET_CB_FSR_TLBLKF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, TLBLKF)
+#define GET_CB_FSR_SS(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, SS)
+#define GET_CB_FSR_MULTI(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, MULTI)
+
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define SET_CB_FSYNR0_PLVL(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, PLVL, v)
+#define SET_CB_FSYNR0_S1PTWF(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1PTWF, v)
+#define SET_CB_FSYNR0_WNR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, WNR, v)
+#define SET_CB_FSYNR0_PNU(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, PNU, v)
+#define SET_CB_FSYNR0_IND(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, IND, v)
+#define SET_CB_FSYNR0_NSSTATE(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSSTATE, v)
+#define SET_CB_FSYNR0_NSATTR(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSATTR, v)
+#define SET_CB_FSYNR0_ATOF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, ATOF, v)
+#define SET_CB_FSYNR0_PTWF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, PTWF, v)
+#define SET_CB_FSYNR0_AFR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, AFR, v)
+#define SET_CB_FSYNR0_S1CBNDX(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1CBNDX, v)
+
+#define GET_CB_FSYNR0_PLVL(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, PLVL)
+#define GET_CB_FSYNR0_S1PTWF(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1PTWF)
+#define GET_CB_FSYNR0_WNR(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, WNR)
+#define GET_CB_FSYNR0_PNU(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, PNU)
+#define GET_CB_FSYNR0_IND(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, IND)
+#define GET_CB_FSYNR0_NSSTATE(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSSTATE)
+#define GET_CB_FSYNR0_NSATTR(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSATTR)
+#define GET_CB_FSYNR0_ATOF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, ATOF)
+#define GET_CB_FSYNR0_PTWF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, PTWF)
+#define GET_CB_FSYNR0_AFR(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, AFR)
+#define GET_CB_FSYNR0_S1CBNDX(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1CBNDX)
+
+/*
+ * Normal Memory Remap Register (CB_NMRR) field accessors, inner (IR0-7)
+ * and outer (OR0-7) cacheability encodings.
+ *
+ * Fix: the getter table originally stopped at GET_CB_NMRR_OR5 although
+ * setters exist for OR0-OR7; GET_CB_NMRR_OR6 and GET_CB_NMRR_OR7 are
+ * added so the table is symmetric.
+ */
+/* Normal Memory Remap Register: CB_NMRR */
+#define SET_CB_NMRR_IR0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR0, v)
+#define SET_CB_NMRR_IR1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR1, v)
+#define SET_CB_NMRR_IR2(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR2, v)
+#define SET_CB_NMRR_IR3(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR3, v)
+#define SET_CB_NMRR_IR4(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR4, v)
+#define SET_CB_NMRR_IR5(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR5, v)
+#define SET_CB_NMRR_IR6(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR6, v)
+#define SET_CB_NMRR_IR7(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR7, v)
+#define SET_CB_NMRR_OR0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR0, v)
+#define SET_CB_NMRR_OR1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR1, v)
+#define SET_CB_NMRR_OR2(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR2, v)
+#define SET_CB_NMRR_OR3(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR3, v)
+#define SET_CB_NMRR_OR4(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR4, v)
+#define SET_CB_NMRR_OR5(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR5, v)
+#define SET_CB_NMRR_OR6(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR6, v)
+#define SET_CB_NMRR_OR7(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR7, v)
+
+#define GET_CB_NMRR_IR0(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR0)
+#define GET_CB_NMRR_IR1(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR1)
+#define GET_CB_NMRR_IR2(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR2)
+#define GET_CB_NMRR_IR3(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR3)
+#define GET_CB_NMRR_IR4(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR4)
+#define GET_CB_NMRR_IR5(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR5)
+#define GET_CB_NMRR_IR6(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR6)
+#define GET_CB_NMRR_IR7(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR7)
+#define GET_CB_NMRR_OR0(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR0)
+#define GET_CB_NMRR_OR1(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR1)
+#define GET_CB_NMRR_OR2(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR2)
+#define GET_CB_NMRR_OR3(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR3)
+#define GET_CB_NMRR_OR4(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR4)
+#define GET_CB_NMRR_OR5(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR5)
+#define GET_CB_NMRR_OR6(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR6)
+#define GET_CB_NMRR_OR7(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR7)
+
+/*
+ * Physical Address Register (CB_PAR) field accessors: result of an
+ * address-translation operation — either the physical address plus
+ * attributes, or fault syndrome bits when the F flag is set.
+ */
+/* Physical Address Register: CB_PAR */
+#define SET_CB_PAR_F(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, F, v)
+#define SET_CB_PAR_SS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, SS, v)
+#define SET_CB_PAR_OUTER(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, OUTER, v)
+#define SET_CB_PAR_INNER(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, INNER, v)
+#define SET_CB_PAR_SH(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, SH, v)
+#define SET_CB_PAR_NS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, NS, v)
+#define SET_CB_PAR_NOS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, NOS, v)
+#define SET_CB_PAR_PA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, PA, v)
+#define SET_CB_PAR_TF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, TF, v)
+#define SET_CB_PAR_AFF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, AFF, v)
+#define SET_CB_PAR_PF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, PF, v)
+#define SET_CB_PAR_TLBMCF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, TLBMCF, v)
+#define SET_CB_PAR_TLBLKF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, TLBLKF, v)
+#define SET_CB_PAR_ATOT(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, ATOT, v)
+#define SET_CB_PAR_PLVL(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, PLVL, v)
+#define SET_CB_PAR_STAGE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, STAGE, v)
+
+#define GET_CB_PAR_F(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, F)
+#define GET_CB_PAR_SS(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, SS)
+#define GET_CB_PAR_OUTER(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, OUTER)
+#define GET_CB_PAR_INNER(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, INNER)
+#define GET_CB_PAR_SH(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, SH)
+#define GET_CB_PAR_NS(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, NS)
+#define GET_CB_PAR_NOS(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, NOS)
+#define GET_CB_PAR_PA(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, PA)
+#define GET_CB_PAR_TF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, TF)
+#define GET_CB_PAR_AFF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, AFF)
+#define GET_CB_PAR_PF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, PF)
+#define GET_CB_PAR_TLBMCF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, TLBMCF)
+#define GET_CB_PAR_TLBLKF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, TLBLKF)
+#define GET_CB_PAR_ATOT(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, ATOT)
+#define GET_CB_PAR_PLVL(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, PLVL)
+#define GET_CB_PAR_STAGE(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, STAGE)
+
+/*
+ * Primary Region Remap Register (CB_PRRR) field accessors (memory-type
+ * remap TR0-7, device/shareable controls) and the Transaction Resume
+ * register (CB_RESUME) used to retry/terminate a stalled transaction.
+ */
+/* Primary Region Remap Register: CB_PRRR */
+#define SET_CB_PRRR_TR0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR0, v)
+#define SET_CB_PRRR_TR1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR1, v)
+#define SET_CB_PRRR_TR2(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR2, v)
+#define SET_CB_PRRR_TR3(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR3, v)
+#define SET_CB_PRRR_TR4(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR4, v)
+#define SET_CB_PRRR_TR5(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR5, v)
+#define SET_CB_PRRR_TR6(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR6, v)
+#define SET_CB_PRRR_TR7(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR7, v)
+#define SET_CB_PRRR_DS0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, DS0, v)
+#define SET_CB_PRRR_DS1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, DS1, v)
+#define SET_CB_PRRR_NS0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NS0, v)
+#define SET_CB_PRRR_NS1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NS1, v)
+#define SET_CB_PRRR_NOS0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS0, v)
+#define SET_CB_PRRR_NOS1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS1, v)
+#define SET_CB_PRRR_NOS2(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS2, v)
+#define SET_CB_PRRR_NOS3(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS3, v)
+#define SET_CB_PRRR_NOS4(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS4, v)
+#define SET_CB_PRRR_NOS5(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS5, v)
+#define SET_CB_PRRR_NOS6(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS6, v)
+#define SET_CB_PRRR_NOS7(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS7, v)
+
+#define GET_CB_PRRR_TR0(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR0)
+#define GET_CB_PRRR_TR1(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR1)
+#define GET_CB_PRRR_TR2(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR2)
+#define GET_CB_PRRR_TR3(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR3)
+#define GET_CB_PRRR_TR4(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR4)
+#define GET_CB_PRRR_TR5(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR5)
+#define GET_CB_PRRR_TR6(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR6)
+#define GET_CB_PRRR_TR7(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR7)
+#define GET_CB_PRRR_DS0(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, DS0)
+#define GET_CB_PRRR_DS1(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, DS1)
+#define GET_CB_PRRR_NS0(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NS0)
+#define GET_CB_PRRR_NS1(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NS1)
+#define GET_CB_PRRR_NOS0(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS0)
+#define GET_CB_PRRR_NOS1(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS1)
+#define GET_CB_PRRR_NOS2(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS2)
+#define GET_CB_PRRR_NOS3(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS3)
+#define GET_CB_PRRR_NOS4(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS4)
+#define GET_CB_PRRR_NOS5(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS5)
+#define GET_CB_PRRR_NOS6(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS6)
+#define GET_CB_PRRR_NOS7(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS7)
+
+/* Transaction Resume: CB_RESUME */
+#define SET_CB_RESUME_TNR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_RESUME, TNR, v)
+
+#define GET_CB_RESUME_TNR(b, c) GET_CONTEXT_FIELD(b, c, CB_RESUME, TNR)
+
+/*
+ * Context-bank System Control Register (CB_SCTLR) field accessors:
+ * translation enable (M), TEX remap (TRE), access-flag handling
+ * (AFE/AFFD), fault reporting/interrupt controls (CFRE/CFIE/CFCFG/HUPCF)
+ * and the various attribute-override fields.
+ */
+/* System Control Register: CB_SCTLR */
+#define SET_CB_SCTLR_M(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, M, v)
+#define SET_CB_SCTLR_TRE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, TRE, v)
+#define SET_CB_SCTLR_AFE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, AFE, v)
+#define SET_CB_SCTLR_AFFD(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, AFFD, v)
+#define SET_CB_SCTLR_E(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, E, v)
+#define SET_CB_SCTLR_CFRE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, CFRE, v)
+#define SET_CB_SCTLR_CFIE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, CFIE, v)
+#define SET_CB_SCTLR_CFCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, CFCFG, v)
+#define SET_CB_SCTLR_HUPCF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, HUPCF, v)
+#define SET_CB_SCTLR_WXN(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, WXN, v)
+#define SET_CB_SCTLR_UWXN(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, UWXN, v)
+#define SET_CB_SCTLR_ASIDPNE(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_SCTLR, ASIDPNE, v)
+#define SET_CB_SCTLR_TRANSIENTCFG(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_SCTLR, TRANSIENTCFG, v)
+#define SET_CB_SCTLR_MEMATTR(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_SCTLR, MEMATTR, v)
+#define SET_CB_SCTLR_MTCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, MTCFG, v)
+#define SET_CB_SCTLR_SHCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, SHCFG, v)
+#define SET_CB_SCTLR_RACFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, RACFG, v)
+#define SET_CB_SCTLR_WACFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, WACFG, v)
+#define SET_CB_SCTLR_NSCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, NSCFG, v)
+
+#define GET_CB_SCTLR_M(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, M)
+#define GET_CB_SCTLR_TRE(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, TRE)
+#define GET_CB_SCTLR_AFE(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, AFE)
+#define GET_CB_SCTLR_AFFD(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, AFFD)
+#define GET_CB_SCTLR_E(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, E)
+#define GET_CB_SCTLR_CFRE(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, CFRE)
+#define GET_CB_SCTLR_CFIE(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, CFIE)
+#define GET_CB_SCTLR_CFCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, CFCFG)
+#define GET_CB_SCTLR_HUPCF(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, HUPCF)
+#define GET_CB_SCTLR_WXN(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, WXN)
+#define GET_CB_SCTLR_UWXN(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, UWXN)
+#define GET_CB_SCTLR_ASIDPNE(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_SCTLR, ASIDPNE)
+#define GET_CB_SCTLR_TRANSIENTCFG(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_SCTLR, TRANSIENTCFG)
+#define GET_CB_SCTLR_MEMATTR(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_SCTLR, MEMATTR)
+#define GET_CB_SCTLR_MTCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, MTCFG)
+#define GET_CB_SCTLR_SHCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, SHCFG)
+#define GET_CB_SCTLR_RACFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, RACFG)
+#define GET_CB_SCTLR_WACFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, WACFG)
+#define GET_CB_SCTLR_NSCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, NSCFG)
+
+/*
+ * Per-context TLB invalidation operation registers (CB_TLBIASID,
+ * CB_TLBIVA*, CB_TLBIVAL/VAAL) and the TLB sync status register
+ * (CB_TLBSTATUS).  Writing these registers triggers the invalidation.
+ */
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define SET_CB_TLBIASID_ASID(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_TLBIASID, ASID, v)
+
+#define GET_CB_TLBIASID_ASID(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_TLBIASID, ASID)
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define SET_CB_TLBIVA_ASID(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVA, ASID, v)
+#define SET_CB_TLBIVA_VA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVA, VA, v)
+
+#define GET_CB_TLBIVA_ASID(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVA, ASID)
+#define GET_CB_TLBIVA_VA(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVA, VA)
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define SET_CB_TLBIVAA_VA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVAA, VA, v)
+
+#define GET_CB_TLBIVAA_VA(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVAA, VA)
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define SET_CB_TLBIVAAL_VA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVAAL, VA, v)
+
+#define GET_CB_TLBIVAAL_VA(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVAAL, VA)
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define SET_CB_TLBIVAL_ASID(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_TLBIVAL, ASID, v)
+#define SET_CB_TLBIVAL_VA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVAL, VA, v)
+
+#define GET_CB_TLBIVAL_ASID(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_TLBIVAL, ASID)
+#define GET_CB_TLBIVAL_VA(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVAL, VA)
+
+/* TLB Status: CB_TLBSTATUS */
+#define SET_CB_TLBSTATUS_SACTIVE(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_TLBSTATUS, SACTIVE, v)
+
+#define GET_CB_TLBSTATUS_SACTIVE(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_TLBSTATUS, SACTIVE)
+
+/* Translation Table Base Control Register: CB_TTBCR */
+/* These are shared between VMSA and LPAE */
+/* EAE selects the translation-table format (long-descriptor when set) */
+#define GET_CB_TTBCR_EAE(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, EAE)
+#define SET_CB_TTBCR_EAE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, EAE, v)
+
+#define SET_CB_TTBCR_NSCFG0(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG0, v)
+#define SET_CB_TTBCR_NSCFG1(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG1, v)
+
+#define GET_CB_TTBCR_NSCFG0(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG0)
+#define GET_CB_TTBCR_NSCFG1(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG1)
+
+#ifdef CONFIG_IOMMU_LPAE
+
+/* LPAE format */
+
+/* Translation Table Base Register 0: CB_TTBR */
+/* 64-bit (quad) register accessors in the LPAE case */
+#define SET_TTBR0(b, c, v) SET_CTX_REG_Q(CB_TTBR0, (b), (c), (v))
+#define SET_TTBR1(b, c, v) SET_CTX_REG_Q(CB_TTBR1, (b), (c), (v))
+
+#define SET_CB_TTBR0_ASID(b, c, v) SET_CONTEXT_FIELD_Q(b, c, CB_TTBR0, ASID, v)
+#define SET_CB_TTBR0_ADDR(b, c, v) SET_CONTEXT_FIELD_Q(b, c, CB_TTBR0, ADDR, v)
+
+#define GET_CB_TTBR0_ASID(b, c) GET_CONTEXT_FIELD_Q(b, c, CB_TTBR0, ASID)
+#define GET_CB_TTBR0_ADDR(b, c) GET_CONTEXT_FIELD_Q(b, c, CB_TTBR0, ADDR)
+#define GET_CB_TTBR0(b, c) GET_CTX_REG_Q(CB_TTBR0, (b), (c))
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define SET_CB_TTBCR_T0SZ(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, T0SZ, v)
+#define SET_CB_TTBCR_T1SZ(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, T1SZ, v)
+#define SET_CB_TTBCR_EPD0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, EPD0, v)
+#define SET_CB_TTBCR_EPD1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, EPD1, v)
+#define SET_CB_TTBCR_IRGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, IRGN0, v)
+#define SET_CB_TTBCR_IRGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, IRGN1, v)
+#define SET_CB_TTBCR_ORGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, ORGN0, v)
+#define SET_CB_TTBCR_ORGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, ORGN1, v)
+/*
+ * Note: duplicate SET_CB_TTBCR_NSCFG0/NSCFG1 definitions that used to
+ * appear here were removed; identical definitions already exist
+ * unconditionally in the shared VMSA/LPAE section above.
+ */
+
+#define SET_CB_TTBCR_SH0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, SH0, v)
+#define SET_CB_TTBCR_SH1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, SH1, v)
+#define SET_CB_TTBCR_A1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, A1, v)
+
+#define GET_CB_TTBCR_T0SZ(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, T0SZ)
+#define GET_CB_TTBCR_T1SZ(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, T1SZ)
+#define GET_CB_TTBCR_EPD0(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, EPD0)
+#define GET_CB_TTBCR_EPD1(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, EPD1)
+/*
+ * Fix: the four getters below originally declared a spurious third
+ * parameter 'v' (copy/paste from the setters); the parameter was never
+ * used in the expansion and made the conventional two-argument getter
+ * call fail to compile.  The unused parameter is removed.
+ */
+#define GET_CB_TTBCR_IRGN0(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, IRGN0)
+#define GET_CB_TTBCR_IRGN1(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, IRGN1)
+#define GET_CB_TTBCR_ORGN0(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, ORGN0)
+#define GET_CB_TTBCR_ORGN1(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, ORGN1)
+
+/* Memory Attribute Indirection Registers (LPAE only) */
+#define SET_CB_MAIR0(b, c, v) SET_CTX_REG(CB_MAIR0, (b), (c), (v))
+#define SET_CB_MAIR1(b, c, v) SET_CTX_REG(CB_MAIR1, (b), (c), (v))
+
+#define GET_CB_MAIR0(b, c) GET_CTX_REG(CB_MAIR0, (b), (c))
+#define GET_CB_MAIR1(b, c) GET_CTX_REG(CB_MAIR1, (b), (c))
+#else
+/* VMSA (short-descriptor) format: 32-bit TTBR accessors */
+#define SET_TTBR0(b, c, v) SET_CTX_REG(CB_TTBR0, (b), (c), (v))
+#define SET_TTBR1(b, c, v) SET_CTX_REG(CB_TTBR1, (b), (c), (v))
+
+#define SET_CB_TTBCR_PD0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, PD0, v)
+#define SET_CB_TTBCR_PD1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, PD1, v)
+
+#define SET_CB_TTBR0_IRGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN1, v)
+#define SET_CB_TTBR0_S(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, S, v)
+#define SET_CB_TTBR0_RGN(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, RGN, v)
+#define SET_CB_TTBR0_NOS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, NOS, v)
+#define SET_CB_TTBR0_IRGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN0, v)
+#define SET_CB_TTBR0_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, ADDR, v)
+
+#define GET_CB_TTBR0_IRGN1(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN1)
+#define GET_CB_TTBR0_S(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, S)
+#define GET_CB_TTBR0_RGN(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, RGN)
+#define GET_CB_TTBR0_NOS(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, NOS)
+#define GET_CB_TTBR0_IRGN0(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN0)
+#define GET_CB_TTBR0_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, ADDR)
+#endif
+
+/* Translation Table Base Register 1: CB_TTBR1 */
+/*
+ * Fix: the shareable-bit accessors were named with a digit zero
+ * ("_0S") instead of "_S" (compare SET_CB_TTBR0_S above).  Correctly
+ * named SET/GET_CB_TTBR1_S macros are added; the misspelled names are
+ * kept as aliases for backward compatibility with existing callers.
+ */
+#define SET_CB_TTBR1_IRGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN1, v)
+#define SET_CB_TTBR1_S(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, S, v)
+#define SET_CB_TTBR1_0S(b, c, v) SET_CB_TTBR1_S(b, c, v)
+#define SET_CB_TTBR1_RGN(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, RGN, v)
+#define SET_CB_TTBR1_NOS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, NOS, v)
+#define SET_CB_TTBR1_IRGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN0, v)
+#define SET_CB_TTBR1_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, ADDR, v)
+
+#define GET_CB_TTBR1_IRGN1(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN1)
+#define GET_CB_TTBR1_S(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, S)
+#define GET_CB_TTBR1_0S(b, c) GET_CB_TTBR1_S(b, c)
+#define GET_CB_TTBR1_RGN(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, RGN)
+#define GET_CB_TTBR1_NOS(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, NOS)
+#define GET_CB_TTBR1_IRGN0(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN0)
+#define GET_CB_TTBR1_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, ADDR)
+
+/* Global Register Space 0 */
+/* Byte offsets of global registers from the SMMU base address */
+#define CR0 (0x0000)
+#define SCR1 (0x0004)
+#define CR2 (0x0008)
+#define ACR (0x0010)
+#define IDR0 (0x0020)
+#define IDR1 (0x0024)
+#define IDR2 (0x0028)
+#define IDR7 (0x003C)
+#define GFAR (0x0040)
+#define GFSR (0x0048)
+#define GFSRRESTORE (0x004C)
+#define GFSYNR0 (0x0050)
+#define GFSYNR1 (0x0054)
+#define GFSYNR2 (0x0058)
+#define TLBIVMID (0x0064)
+#define TLBIALLNSNH (0x0068)
+#define TLBIALLH (0x006C)
+#define TLBGSYNC (0x0070)
+#define TLBGSTATUS (0x0074)
+#define TLBIVAH (0x0078)
+/* Global address-translation operation registers (64-bit, hence the 8-byte stride) */
+#define GATS1UR (0x0100)
+#define GATS1UW (0x0108)
+#define GATS1PR (0x0110)
+#define GATS1PW (0x0118)
+#define GATS12UR (0x0120)
+#define GATS12UW (0x0128)
+#define GATS12PR (0x0130)
+#define GATS12PW (0x0138)
+#define GPAR (0x0180)
+#define GATSR (0x0188)
+/* Non-secure aliases of the global configuration registers */
+#define NSCR0 (0x0400)
+#define NSCR2 (0x0408)
+#define NSACR (0x0410)
+#define NSGFAR (0x0440)
+#define NSGFSRRESTORE (0x044C)
+#define SMR (0x0800)
+#define S2CR (0x0C00)
+
+/* SMMU_LOCAL */
+#define SMMU_INTR_SEL_NS (0x2000)
+
+/* Global Register Space 1 */
+#define CBAR (0x1000)
+#define CBFRSYNRA (0x1400)
+
+/* Implementation defined Register Space */
+#define MICRO_MMU_CTRL (0x2000)
+#define PREDICTIONDIS0 (0x204C)
+#define PREDICTIONDIS1 (0x2050)
+#define S1L1BFBLP0 (0x215C)
+
+/* Performance Monitoring Register Space */
+/* _N registers are arrays indexed by counter/group number */
+#define PMEVCNTR_N (0x3000)
+#define PMEVTYPER_N (0x3400)
+#define PMCGCR_N (0x3800)
+#define PMCGSMR_N (0x3A00)
+#define PMCNTENSET_N (0x3C00)
+#define PMCNTENCLR_N (0x3C20)
+#define PMINTENSET_N (0x3C40)
+#define PMINTENCLR_N (0x3C60)
+#define PMOVSCLR_N (0x3C80)
+#define PMOVSSET_N (0x3CC0)
+#define PMCFGR (0x3E00)
+#define PMCR (0x3E04)
+#define PMCEID0 (0x3E20)
+#define PMCEID1 (0x3E24)
+#define PMAUTHSTATUS (0x3FB8)
+#define PMDEVTYPE (0x3FCC)
+
+/* Secure Status Determination Address Space */
+#define SSDR_N (0x4000)
+
+/* Stage 1 Context Bank Format */
+/* Byte offsets of per-context-bank registers from the context bank base */
+#define CB_SCTLR (0x000)
+#define CB_ACTLR (0x004)
+#define CB_RESUME (0x008)
+#define CB_TTBR0 (0x020)
+#define CB_TTBR1 (0x028)
+#define CB_TTBCR (0x030)
+#define CB_CONTEXTIDR (0x034)
+/*
+ * CB_PRRR/CB_MAIR0 and CB_NMRR/CB_MAIR1 intentionally share offsets —
+ * presumably which name applies depends on the translation-table format
+ * in use (cf. the CONFIG_IOMMU_LPAE split above).
+ */
+#define CB_PRRR (0x038)
+#define CB_MAIR0 (0x038)
+#define CB_NMRR (0x03C)
+#define CB_MAIR1 (0x03C)
+#define CB_PAR (0x050)
+#define CB_FSR (0x058)
+#define CB_FSRRESTORE (0x05C)
+#define CB_FAR (0x060)
+#define CB_FSYNR0 (0x068)
+#define CB_FSYNR1 (0x06C)
+#define CB_TLBIVA (0x600)
+#define CB_TLBIVAA (0x608)
+#define CB_TLBIASID (0x610)
+#define CB_TLBIALL (0x618)
+#define CB_TLBIVAL (0x620)
+#define CB_TLBIVAAL (0x628)
+#define CB_TLBSYNC (0x7F0)
+#define CB_TLBSTATUS (0x7F4)
+#define CB_ATS1PR (0x800)
+#define CB_ATS1PW (0x808)
+#define CB_ATS1UR (0x810)
+#define CB_ATS1UW (0x818)
+#define CB_ATSR (0x8F0)
+#define CB_PMXEVCNTR_N (0xE00)
+#define CB_PMXEVTYPER_N (0xE80)
+#define CB_PMCFGR (0xF00)
+#define CB_PMCR (0xF04)
+#define CB_PMCEID0 (0xF20)
+#define CB_PMCEID1 (0xF24)
+#define CB_PMCNTENSET (0xF40)
+#define CB_PMCNTENCLR (0xF44)
+#define CB_PMINTENSET (0xF48)
+#define CB_PMINTENCLR (0xF4C)
+#define CB_PMOVSCLR (0xF50)
+#define CB_PMOVSSET (0xF58)
+#define CB_PMAUTHSTATUS (0xFB8)
+
+/* Global Register Fields */
+/*
+ * Each FIELD macro below builds the in-place bit mask for that field from
+ * its *_MASK/*_SHIFT pair (defined elsewhere in this file).
+ */
+/* Configuration Register: CR0 */
+#define CR0_NSCFG (CR0_NSCFG_MASK << CR0_NSCFG_SHIFT)
+#define CR0_WACFG (CR0_WACFG_MASK << CR0_WACFG_SHIFT)
+#define CR0_RACFG (CR0_RACFG_MASK << CR0_RACFG_SHIFT)
+#define CR0_SHCFG (CR0_SHCFG_MASK << CR0_SHCFG_SHIFT)
+#define CR0_SMCFCFG (CR0_SMCFCFG_MASK << CR0_SMCFCFG_SHIFT)
+#define CR0_MTCFG (CR0_MTCFG_MASK << CR0_MTCFG_SHIFT)
+#define CR0_MEMATTR (CR0_MEMATTR_MASK << CR0_MEMATTR_SHIFT)
+#define CR0_BSU (CR0_BSU_MASK << CR0_BSU_SHIFT)
+#define CR0_FB (CR0_FB_MASK << CR0_FB_SHIFT)
+#define CR0_PTM (CR0_PTM_MASK << CR0_PTM_SHIFT)
+#define CR0_VMIDPNE (CR0_VMIDPNE_MASK << CR0_VMIDPNE_SHIFT)
+#define CR0_USFCFG (CR0_USFCFG_MASK << CR0_USFCFG_SHIFT)
+#define CR0_GSE (CR0_GSE_MASK << CR0_GSE_SHIFT)
+#define CR0_STALLD (CR0_STALLD_MASK << CR0_STALLD_SHIFT)
+#define CR0_TRANSIENTCFG (CR0_TRANSIENTCFG_MASK << CR0_TRANSIENTCFG_SHIFT)
+#define CR0_GCFGFIE (CR0_GCFGFIE_MASK << CR0_GCFGFIE_SHIFT)
+#define CR0_GCFGFRE (CR0_GCFGFRE_MASK << CR0_GCFGFRE_SHIFT)
+#define CR0_GFIE (CR0_GFIE_MASK << CR0_GFIE_SHIFT)
+#define CR0_GFRE (CR0_GFRE_MASK << CR0_GFRE_SHIFT)
+#define CR0_CLIENTPD (CR0_CLIENTPD_MASK << CR0_CLIENTPD_SHIFT)
+
+/* Configuration Register: CR2 */
+#define CR2_BPVMID (CR2_BPVMID_MASK << CR2_BPVMID_SHIFT)
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define GATS1PR_ADDR (GATS1PR_ADDR_MASK << GATS1PR_ADDR_SHIFT)
+#define GATS1PR_NDX (GATS1PR_NDX_MASK << GATS1PR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define GATS1PW_ADDR (GATS1PW_ADDR_MASK << GATS1PW_ADDR_SHIFT)
+#define GATS1PW_NDX (GATS1PW_NDX_MASK << GATS1PW_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define GATS1UR_ADDR (GATS1UR_ADDR_MASK << GATS1UR_ADDR_SHIFT)
+#define GATS1UR_NDX (GATS1UR_NDX_MASK << GATS1UR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define GATS1UW_ADDR (GATS1UW_ADDR_MASK << GATS1UW_ADDR_SHIFT)
+#define GATS1UW_NDX (GATS1UW_NDX_MASK << GATS1UW_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define GATS12PR_ADDR (GATS12PR_ADDR_MASK << GATS12PR_ADDR_SHIFT)
+#define GATS12PR_NDX (GATS12PR_NDX_MASK << GATS12PR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define GATS12PW_ADDR (GATS12PW_ADDR_MASK << GATS12PW_ADDR_SHIFT)
+#define GATS12PW_NDX (GATS12PW_NDX_MASK << GATS12PW_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define GATS12UR_ADDR (GATS12UR_ADDR_MASK << GATS12UR_ADDR_SHIFT)
+#define GATS12UR_NDX (GATS12UR_NDX_MASK << GATS12UR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define GATS12UW_ADDR (GATS12UW_ADDR_MASK << GATS12UW_ADDR_SHIFT)
+#define GATS12UW_NDX (GATS12UW_NDX_MASK << GATS12UW_NDX_SHIFT)
+
+/* Global Address Translation Status Register: GATSR */
+#define GATSR_ACTIVE (GATSR_ACTIVE_MASK << GATSR_ACTIVE_SHIFT)
+
+/* Global Fault Address Register: GFAR */
+#define GFAR_FADDR (GFAR_FADDR_MASK << GFAR_FADDR_SHIFT)
+
+/* Global Fault Status Register: GFSR */
+#define GFSR_ICF (GFSR_ICF_MASK << GFSR_ICF_SHIFT)
+#define GFSR_USF (GFSR_USF_MASK << GFSR_USF_SHIFT)
+#define GFSR_SMCF (GFSR_SMCF_MASK << GFSR_SMCF_SHIFT)
+#define GFSR_UCBF (GFSR_UCBF_MASK << GFSR_UCBF_SHIFT)
+#define GFSR_UCIF (GFSR_UCIF_MASK << GFSR_UCIF_SHIFT)
+#define GFSR_CAF (GFSR_CAF_MASK << GFSR_CAF_SHIFT)
+#define GFSR_EF (GFSR_EF_MASK << GFSR_EF_SHIFT)
+#define GFSR_PF (GFSR_PF_MASK << GFSR_PF_SHIFT)
+#define GFSR_MULTI (GFSR_MULTI_MASK << GFSR_MULTI_SHIFT)
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define GFSYNR0_NESTED (GFSYNR0_NESTED_MASK << GFSYNR0_NESTED_SHIFT)
+#define GFSYNR0_WNR (GFSYNR0_WNR_MASK << GFSYNR0_WNR_SHIFT)
+#define GFSYNR0_PNU (GFSYNR0_PNU_MASK << GFSYNR0_PNU_SHIFT)
+#define GFSYNR0_IND (GFSYNR0_IND_MASK << GFSYNR0_IND_SHIFT)
+#define GFSYNR0_NSSTATE (GFSYNR0_NSSTATE_MASK << GFSYNR0_NSSTATE_SHIFT)
+#define GFSYNR0_NSATTR (GFSYNR0_NSATTR_MASK << GFSYNR0_NSATTR_SHIFT)
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define GFSYNR1_SID (GFSYNR1_SID_MASK << GFSYNR1_SID_SHIFT)
+
+/* Global Physical Address Register: GPAR */
+#define GPAR_F (GPAR_F_MASK << GPAR_F_SHIFT)
+#define GPAR_SS (GPAR_SS_MASK << GPAR_SS_SHIFT)
+#define GPAR_OUTER (GPAR_OUTER_MASK << GPAR_OUTER_SHIFT)
+#define GPAR_INNER (GPAR_INNER_MASK << GPAR_INNER_SHIFT)
+#define GPAR_SH (GPAR_SH_MASK << GPAR_SH_SHIFT)
+#define GPAR_NS (GPAR_NS_MASK << GPAR_NS_SHIFT)
+#define GPAR_NOS (GPAR_NOS_MASK << GPAR_NOS_SHIFT)
+#define GPAR_PA (GPAR_PA_MASK << GPAR_PA_SHIFT)
+#define GPAR_TF (GPAR_TF_MASK << GPAR_TF_SHIFT)
+#define GPAR_AFF (GPAR_AFF_MASK << GPAR_AFF_SHIFT)
+#define GPAR_PF (GPAR_PF_MASK << GPAR_PF_SHIFT)
+#define GPAR_EF (GPAR_EF_MASK << GPAR_EF_SHIFT)
+/*
+ * NOTE(review): the macro name and mask say "TLBMCF" but the shift is
+ * spelled "TLCMCF" — one of the two is a typo.  Verify against the
+ * *_MASK/*_SHIFT definitions elsewhere in this file before renaming.
+ */
+#define GPAR_TLCMCF (GPAR_TLBMCF_MASK << GPAR_TLCMCF_SHIFT)
+#define GPAR_TLBLKF (GPAR_TLBLKF_MASK << GPAR_TLBLKF_SHIFT)
+/*
+ * NOTE(review): a GPAR mask is paired with a GFAR shift here — confirm
+ * GFAR_UCBF_SHIFT (rather than GPAR_UCBF_SHIFT) is really intended.
+ */
+#define GPAR_UCBF (GPAR_UCBF_MASK << GFAR_UCBF_SHIFT)
+
+/* Identification Register: IDR0 */
+/*
+ * NOTE(review): the shift macro below is spelled NUMSMGR while the field
+ * and mask are NUMSMRG — likely a transposition typo; verify against the
+ * *_SHIFT definitions elsewhere in this file.
+ */
+#define IDR0_NUMSMRG (IDR0_NUMSMRG_MASK << IDR0_NUMSMGR_SHIFT)
+#define IDR0_NUMSIDB (IDR0_NUMSIDB_MASK << IDR0_NUMSIDB_SHIFT)
+#define IDR0_BTM (IDR0_BTM_MASK << IDR0_BTM_SHIFT)
+#define IDR0_CTTW (IDR0_CTTW_MASK << IDR0_CTTW_SHIFT)
+/* NOTE(review): mask spelled NUMIPRT vs. field/shift NUMIRPT — confirm which is defined */
+#define IDR0_NUMIRPT (IDR0_NUMIPRT_MASK << IDR0_NUMIRPT_SHIFT)
+#define IDR0_PTFS (IDR0_PTFS_MASK << IDR0_PTFS_SHIFT)
+#define IDR0_SMS (IDR0_SMS_MASK << IDR0_SMS_SHIFT)
+#define IDR0_NTS (IDR0_NTS_MASK << IDR0_NTS_SHIFT)
+#define IDR0_S2TS (IDR0_S2TS_MASK << IDR0_S2TS_SHIFT)
+#define IDR0_S1TS (IDR0_S1TS_MASK << IDR0_S1TS_SHIFT)
+#define IDR0_SES (IDR0_SES_MASK << IDR0_SES_SHIFT)
+
+/* Identification Register: IDR1 */
+#define IDR1_NUMCB (IDR1_NUMCB_MASK << IDR1_NUMCB_SHIFT)
+#define IDR1_NUMSSDNDXB (IDR1_NUMSSDNDXB_MASK << IDR1_NUMSSDNDXB_SHIFT)
+#define IDR1_SSDTP (IDR1_SSDTP_MASK << IDR1_SSDTP_SHIFT)
+#define IDR1_SMCD (IDR1_SMCD_MASK << IDR1_SMCD_SHIFT)
+#define IDR1_NUMS2CB (IDR1_NUMS2CB_MASK << IDR1_NUMS2CB_SHIFT)
+#define IDR1_NUMPAGENDXB (IDR1_NUMPAGENDXB_MASK << IDR1_NUMPAGENDXB_SHIFT)
+#define IDR1_PAGESIZE (IDR1_PAGESIZE_MASK << IDR1_PAGESIZE_SHIFT)
+
+/* Identification Register: IDR2 */
+#define IDR2_IAS (IDR2_IAS_MASK << IDR2_IAS_SHIFT)
+/*
+ * Fix: the output-address-size field belongs to IDR2 (both the mask and
+ * shift are IDR2_OAS_*), but it was originally defined under the
+ * misnomer IDR1_OAS.  IDR2_OAS is the correct name; IDR1_OAS is kept as
+ * an alias for backward compatibility with existing callers.
+ */
+#define IDR2_OAS (IDR2_OAS_MASK << IDR2_OAS_SHIFT)
+#define IDR1_OAS IDR2_OAS
+
+/* Identification Register: IDR7 */
+#define IDR7_MINOR (IDR7_MINOR_MASK << IDR7_MINOR_SHIFT)
+#define IDR7_MAJOR (IDR7_MAJOR_MASK << IDR7_MAJOR_SHIFT)
+
+/* Stream to Context Register: S2CR */
+/*
+ * NOTE(review): the two shift names below contain lowercase letters
+ * ("S2cR", "s2CR") unlike every other all-caps macro in this file —
+ * almost certainly typos, but C macro names are case-sensitive, so
+ * verify the actual *_SHIFT spellings elsewhere in this file before
+ * renaming.
+ */
+#define S2CR_CBNDX (S2CR_CBNDX_MASK << S2cR_CBNDX_SHIFT)
+#define S2CR_SHCFG (S2CR_SHCFG_MASK << s2CR_SHCFG_SHIFT)
+#define S2CR_MTCFG (S2CR_MTCFG_MASK << S2CR_MTCFG_SHIFT)
+#define S2CR_MEMATTR (S2CR_MEMATTR_MASK << S2CR_MEMATTR_SHIFT)
+#define S2CR_TYPE (S2CR_TYPE_MASK << S2CR_TYPE_SHIFT)
+#define S2CR_NSCFG (S2CR_NSCFG_MASK << S2CR_NSCFG_SHIFT)
+#define S2CR_RACFG (S2CR_RACFG_MASK << S2CR_RACFG_SHIFT)
+#define S2CR_WACFG (S2CR_WACFG_MASK << S2CR_WACFG_SHIFT)
+#define S2CR_PRIVCFG (S2CR_PRIVCFG_MASK << S2CR_PRIVCFG_SHIFT)
+#define S2CR_INSTCFG (S2CR_INSTCFG_MASK << S2CR_INSTCFG_SHIFT)
+#define S2CR_TRANSIENTCFG (S2CR_TRANSIENTCFG_MASK << S2CR_TRANSIENTCFG_SHIFT)
+#define S2CR_VMID (S2CR_VMID_MASK << S2CR_VMID_SHIFT)
+#define S2CR_BSU (S2CR_BSU_MASK << S2CR_BSU_SHIFT)
+#define S2CR_FB (S2CR_FB_MASK << S2CR_FB_SHIFT)
+
+/* Stream Match Register: SMR */
+#define SMR_ID (SMR_ID_MASK << SMR_ID_SHIFT)
+#define SMR_MASK (SMR_MASK_MASK << SMR_MASK_SHIFT)
+#define SMR_VALID (SMR_VALID_MASK << SMR_VALID_SHIFT)
+
+/* Global TLB Status: TLBGSTATUS */
+#define TLBGSTATUS_GSACTIVE (TLBGSTATUS_GSACTIVE_MASK << \
+ TLBGSTATUS_GSACTIVE_SHIFT)
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define TLBIVAH_ADDR (TLBIVAH_ADDR_MASK << TLBIVAH_ADDR_SHIFT)
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define TLBIVMID_VMID (TLBIVMID_VMID_MASK << TLBIVMID_VMID_SHIFT)
+
+/* Context Bank Attribute Register: CBAR */
+#define CBAR_VMID (CBAR_VMID_MASK << CBAR_VMID_SHIFT)
+#define CBAR_CBNDX (CBAR_CBNDX_MASK << CBAR_CBNDX_SHIFT)
+#define CBAR_BPSHCFG (CBAR_BPSHCFG_MASK << CBAR_BPSHCFG_SHIFT)
+#define CBAR_HYPC (CBAR_HYPC_MASK << CBAR_HYPC_SHIFT)
+#define CBAR_FB (CBAR_FB_MASK << CBAR_FB_SHIFT)
+#define CBAR_MEMATTR (CBAR_MEMATTR_MASK << CBAR_MEMATTR_SHIFT)
+#define CBAR_TYPE (CBAR_TYPE_MASK << CBAR_TYPE_SHIFT)
+#define CBAR_BSU (CBAR_BSU_MASK << CBAR_BSU_SHIFT)
+#define CBAR_RACFG (CBAR_RACFG_MASK << CBAR_RACFG_SHIFT)
+#define CBAR_WACFG (CBAR_WACFG_MASK << CBAR_WACFG_SHIFT)
+#define CBAR_IRPTNDX (CBAR_IRPTNDX_MASK << CBAR_IRPTNDX_SHIFT)
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
+#define CBFRSYNRA_SID (CBFRSYNRA_SID_MASK << CBFRSYNRA_SID_SHIFT)
+
+/* Performance Monitoring Register Fields */
+
+/* Stage 1 Context Bank Format Fields */
+/* Auxiliary Control Register: CB_ACTLR */
+#define CB_ACTLR_REQPRIORITY \
+ (CB_ACTLR_REQPRIORITY_MASK << CB_ACTLR_REQPRIORITY_SHIFT)
+#define CB_ACTLR_REQPRIORITYCFG \
+ (CB_ACTLR_REQPRIORITYCFG_MASK << CB_ACTLR_REQPRIORITYCFG_SHIFT)
+#define CB_ACTLR_PRIVCFG (CB_ACTLR_PRIVCFG_MASK << CB_ACTLR_PRIVCFG_SHIFT)
+#define CB_ACTLR_BPRCOSH (CB_ACTLR_BPRCOSH_MASK << CB_ACTLR_BPRCOSH_SHIFT)
+#define CB_ACTLR_BPRCISH (CB_ACTLR_BPRCISH_MASK << CB_ACTLR_BPRCISH_SHIFT)
+#define CB_ACTLR_BPRCNSH (CB_ACTLR_BPRCNSH_MASK << CB_ACTLR_BPRCNSH_SHIFT)
+
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define CB_ATS1PR_ADDR (CB_ATS1PR_ADDR_MASK << CB_ATS1PR_ADDR_SHIFT)
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define CB_ATS1PW_ADDR (CB_ATS1PW_ADDR_MASK << CB_ATS1PW_ADDR_SHIFT)
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define CB_ATS1UR_ADDR (CB_ATS1UR_ADDR_MASK << CB_ATS1UR_ADDR_SHIFT)
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define CB_ATS1UW_ADDR (CB_ATS1UW_ADDR_MASK << CB_ATS1UW_ADDR_SHIFT)
+
+/* Address Translation Status Register: CB_ATSR */
+#define CB_ATSR_ACTIVE (CB_ATSR_ACTIVE_MASK << CB_ATSR_ACTIVE_SHIFT)
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define CB_CONTEXTIDR_ASID (CB_CONTEXTIDR_ASID_MASK << \
+ CB_CONTEXTIDR_ASID_SHIFT)
+#define CB_CONTEXTIDR_PROCID (CB_CONTEXTIDR_PROCID_MASK << \
+ CB_CONTEXTIDR_PROCID_SHIFT)
+
+/* Fault Address Register: CB_FAR */
+#define CB_FAR_FADDR (CB_FAR_FADDR_MASK << CB_FAR_FADDR_SHIFT)
+
+/* Fault Status Register: CB_FSR */
+#define CB_FSR_TF (CB_FSR_TF_MASK << CB_FSR_TF_SHIFT)
+#define CB_FSR_AFF (CB_FSR_AFF_MASK << CB_FSR_AFF_SHIFT)
+#define CB_FSR_PF (CB_FSR_PF_MASK << CB_FSR_PF_SHIFT)
+#define CB_FSR_EF (CB_FSR_EF_MASK << CB_FSR_EF_SHIFT)
+#define CB_FSR_TLBMCF (CB_FSR_TLBMCF_MASK << CB_FSR_TLBMCF_SHIFT)
+#define CB_FSR_TLBLKF (CB_FSR_TLBLKF_MASK << CB_FSR_TLBLKF_SHIFT)
+#define CB_FSR_SS (CB_FSR_SS_MASK << CB_FSR_SS_SHIFT)
+#define CB_FSR_MULTI (CB_FSR_MULTI_MASK << CB_FSR_MULTI_SHIFT)
+
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define CB_FSYNR0_PLVL (CB_FSYNR0_PLVL_MASK << CB_FSYNR0_PLVL_SHIFT)
+#define CB_FSYNR0_S1PTWF (CB_FSYNR0_S1PTWF_MASK << CB_FSYNR0_S1PTWF_SHIFT)
+#define CB_FSYNR0_WNR (CB_FSYNR0_WNR_MASK << CB_FSYNR0_WNR_SHIFT)
+#define CB_FSYNR0_PNU (CB_FSYNR0_PNU_MASK << CB_FSYNR0_PNU_SHIFT)
+#define CB_FSYNR0_IND (CB_FSYNR0_IND_MASK << CB_FSYNR0_IND_SHIFT)
+#define CB_FSYNR0_NSSTATE (CB_FSYNR0_NSSTATE_MASK << CB_FSYNR0_NSSTATE_SHIFT)
+#define CB_FSYNR0_NSATTR (CB_FSYNR0_NSATTR_MASK << CB_FSYNR0_NSATTR_SHIFT)
+#define CB_FSYNR0_ATOF (CB_FSYNR0_ATOF_MASK << CB_FSYNR0_ATOF_SHIFT)
+#define CB_FSYNR0_PTWF (CB_FSYNR0_PTWF_MASK << CB_FSYNR0_PTWF_SHIFT)
+#define CB_FSYNR0_AFR (CB_FSYNR0_AFR_MASK << CB_FSYNR0_AFR_SHIFT)
+#define CB_FSYNR0_S1CBNDX (CB_FSYNR0_S1CBNDX_MASK << CB_FSYNR0_S1CBNDX_SHIFT)
+
+/* Normal Memory Remap Register: CB_NMRR */
+#define CB_NMRR_IR0 (CB_NMRR_IR0_MASK << CB_NMRR_IR0_SHIFT)
+#define CB_NMRR_IR1 (CB_NMRR_IR1_MASK << CB_NMRR_IR1_SHIFT)
+#define CB_NMRR_IR2 (CB_NMRR_IR2_MASK << CB_NMRR_IR2_SHIFT)
+#define CB_NMRR_IR3 (CB_NMRR_IR3_MASK << CB_NMRR_IR3_SHIFT)
+#define CB_NMRR_IR4 (CB_NMRR_IR4_MASK << CB_NMRR_IR4_SHIFT)
+#define CB_NMRR_IR5 (CB_NMRR_IR5_MASK << CB_NMRR_IR5_SHIFT)
+#define CB_NMRR_IR6 (CB_NMRR_IR6_MASK << CB_NMRR_IR6_SHIFT)
+#define CB_NMRR_IR7 (CB_NMRR_IR7_MASK << CB_NMRR_IR7_SHIFT)
+#define CB_NMRR_OR0 (CB_NMRR_OR0_MASK << CB_NMRR_OR0_SHIFT)
+#define CB_NMRR_OR1 (CB_NMRR_OR1_MASK << CB_NMRR_OR1_SHIFT)
+#define CB_NMRR_OR2 (CB_NMRR_OR2_MASK << CB_NMRR_OR2_SHIFT)
+#define CB_NMRR_OR3 (CB_NMRR_OR3_MASK << CB_NMRR_OR3_SHIFT)
+#define CB_NMRR_OR4 (CB_NMRR_OR4_MASK << CB_NMRR_OR4_SHIFT)
+#define CB_NMRR_OR5 (CB_NMRR_OR5_MASK << CB_NMRR_OR5_SHIFT)
+#define CB_NMRR_OR6 (CB_NMRR_OR6_MASK << CB_NMRR_OR6_SHIFT)
+#define CB_NMRR_OR7 (CB_NMRR_OR7_MASK << CB_NMRR_OR7_SHIFT)
+
+/* Physical Address Register: CB_PAR */
+#define CB_PAR_F (CB_PAR_F_MASK << CB_PAR_F_SHIFT)
+#define CB_PAR_SS (CB_PAR_SS_MASK << CB_PAR_SS_SHIFT)
+#define CB_PAR_OUTER (CB_PAR_OUTER_MASK << CB_PAR_OUTER_SHIFT)
+#define CB_PAR_INNER (CB_PAR_INNER_MASK << CB_PAR_INNER_SHIFT)
+#define CB_PAR_SH (CB_PAR_SH_MASK << CB_PAR_SH_SHIFT)
+#define CB_PAR_NS (CB_PAR_NS_MASK << CB_PAR_NS_SHIFT)
+#define CB_PAR_NOS (CB_PAR_NOS_MASK << CB_PAR_NOS_SHIFT)
+#define CB_PAR_PA (CB_PAR_PA_MASK << CB_PAR_PA_SHIFT)
+#define CB_PAR_TF (CB_PAR_TF_MASK << CB_PAR_TF_SHIFT)
+#define CB_PAR_AFF (CB_PAR_AFF_MASK << CB_PAR_AFF_SHIFT)
+#define CB_PAR_PF (CB_PAR_PF_MASK << CB_PAR_PF_SHIFT)
+#define CB_PAR_EF (CB_PAR_EF_MASK << CB_PAR_EF_SHIFT)
+#define CB_PAR_TLBMCF (CB_PAR_TLBMCF_MASK << CB_PAR_TLBMCF_SHIFT)
+#define CB_PAR_TLBLKF (CB_PAR_TLBLKF_MASK << CB_PAR_TLBLKF_SHIFT)
+#define CB_PAR_ATOT (CB_PAR_ATOT_MASK << CB_PAR_ATOT_SHIFT)
+#define CB_PAR_PLVL (CB_PAR_PLVL_MASK << CB_PAR_PLVL_SHIFT)
+#define CB_PAR_STAGE (CB_PAR_STAGE_MASK << CB_PAR_STAGE_SHIFT)
+
+/* Primary Region Remap Register: CB_PRRR */
+#define CB_PRRR_TR0 (CB_PRRR_TR0_MASK << CB_PRRR_TR0_SHIFT)
+#define CB_PRRR_TR1 (CB_PRRR_TR1_MASK << CB_PRRR_TR1_SHIFT)
+#define CB_PRRR_TR2 (CB_PRRR_TR2_MASK << CB_PRRR_TR2_SHIFT)
+#define CB_PRRR_TR3 (CB_PRRR_TR3_MASK << CB_PRRR_TR3_SHIFT)
+#define CB_PRRR_TR4 (CB_PRRR_TR4_MASK << CB_PRRR_TR4_SHIFT)
+#define CB_PRRR_TR5 (CB_PRRR_TR5_MASK << CB_PRRR_TR5_SHIFT)
+#define CB_PRRR_TR6 (CB_PRRR_TR6_MASK << CB_PRRR_TR6_SHIFT)
+#define CB_PRRR_TR7 (CB_PRRR_TR7_MASK << CB_PRRR_TR7_SHIFT)
+#define CB_PRRR_DS0 (CB_PRRR_DS0_MASK << CB_PRRR_DS0_SHIFT)
+#define CB_PRRR_DS1 (CB_PRRR_DS1_MASK << CB_PRRR_DS1_SHIFT)
+#define CB_PRRR_NS0 (CB_PRRR_NS0_MASK << CB_PRRR_NS0_SHIFT)
+#define CB_PRRR_NS1 (CB_PRRR_NS1_MASK << CB_PRRR_NS1_SHIFT)
+#define CB_PRRR_NOS0 (CB_PRRR_NOS0_MASK << CB_PRRR_NOS0_SHIFT)
+#define CB_PRRR_NOS1 (CB_PRRR_NOS1_MASK << CB_PRRR_NOS1_SHIFT)
+#define CB_PRRR_NOS2 (CB_PRRR_NOS2_MASK << CB_PRRR_NOS2_SHIFT)
+#define CB_PRRR_NOS3 (CB_PRRR_NOS3_MASK << CB_PRRR_NOS3_SHIFT)
+#define CB_PRRR_NOS4 (CB_PRRR_NOS4_MASK << CB_PRRR_NOS4_SHIFT)
+#define CB_PRRR_NOS5 (CB_PRRR_NOS5_MASK << CB_PRRR_NOS5_SHIFT)
+#define CB_PRRR_NOS6 (CB_PRRR_NOS6_MASK << CB_PRRR_NOS6_SHIFT)
+#define CB_PRRR_NOS7 (CB_PRRR_NOS7_MASK << CB_PRRR_NOS7_SHIFT)
+
+/* Transaction Resume: CB_RESUME */
+#define CB_RESUME_TNR (CB_RESUME_TNR_MASK << CB_RESUME_TNR_SHIFT)
+
+/* System Control Register: CB_SCTLR */
+#define CB_SCTLR_M (CB_SCTLR_M_MASK << CB_SCTLR_M_SHIFT)
+#define CB_SCTLR_TRE (CB_SCTLR_TRE_MASK << CB_SCTLR_TRE_SHIFT)
+#define CB_SCTLR_AFE (CB_SCTLR_AFE_MASK << CB_SCTLR_AFE_SHIFT)
+#define CB_SCTLR_AFFD (CB_SCTLR_AFFD_MASK << CB_SCTLR_AFFD_SHIFT)
+#define CB_SCTLR_E (CB_SCTLR_E_MASK << CB_SCTLR_E_SHIFT)
+#define CB_SCTLR_CFRE (CB_SCTLR_CFRE_MASK << CB_SCTLR_CFRE_SHIFT)
+#define CB_SCTLR_CFIE (CB_SCTLR_CFIE_MASK << CB_SCTLR_CFIE_SHIFT)
+#define CB_SCTLR_CFCFG (CB_SCTLR_CFCFG_MASK << CB_SCTLR_CFCFG_SHIFT)
+#define CB_SCTLR_HUPCF (CB_SCTLR_HUPCF_MASK << CB_SCTLR_HUPCF_SHIFT)
+#define CB_SCTLR_WXN (CB_SCTLR_WXN_MASK << CB_SCTLR_WXN_SHIFT)
+#define CB_SCTLR_UWXN (CB_SCTLR_UWXN_MASK << CB_SCTLR_UWXN_SHIFT)
+#define CB_SCTLR_ASIDPNE (CB_SCTLR_ASIDPNE_MASK << CB_SCTLR_ASIDPNE_SHIFT)
+#define CB_SCTLR_TRANSIENTCFG (CB_SCTLR_TRANSIENTCFG_MASK << \
+ CB_SCTLR_TRANSIENTCFG_SHIFT)
+#define CB_SCTLR_MEMATTR (CB_SCTLR_MEMATTR_MASK << CB_SCTLR_MEMATTR_SHIFT)
+#define CB_SCTLR_MTCFG (CB_SCTLR_MTCFG_MASK << CB_SCTLR_MTCFG_SHIFT)
+#define CB_SCTLR_SHCFG (CB_SCTLR_SHCFG_MASK << CB_SCTLR_SHCFG_SHIFT)
+#define CB_SCTLR_RACFG (CB_SCTLR_RACFG_MASK << CB_SCTLR_RACFG_SHIFT)
+#define CB_SCTLR_WACFG (CB_SCTLR_WACFG_MASK << CB_SCTLR_WACFG_SHIFT)
+#define CB_SCTLR_NSCFG (CB_SCTLR_NSCFG_MASK << CB_SCTLR_NSCFG_SHIFT)
+
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define CB_TLBIASID_ASID (CB_TLBIASID_ASID_MASK << CB_TLBIASID_ASID_SHIFT)
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define CB_TLBIVA_ASID (CB_TLBIVA_ASID_MASK << CB_TLBIVA_ASID_SHIFT)
+#define CB_TLBIVA_VA (CB_TLBIVA_VA_MASK << CB_TLBIVA_VA_SHIFT)
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define CB_TLBIVAA_VA (CB_TLBIVAA_VA_MASK << CB_TLBIVAA_VA_SHIFT)
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define CB_TLBIVAAL_VA (CB_TLBIVAAL_VA_MASK << CB_TLBIVAAL_VA_SHIFT)
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define CB_TLBIVAL_ASID (CB_TLBIVAL_ASID_MASK << CB_TLBIVAL_ASID_SHIFT)
+#define CB_TLBIVAL_VA (CB_TLBIVAL_VA_MASK << CB_TLBIVAL_VA_SHIFT)
+
+/* TLB Status: CB_TLBSTATUS */
+#define CB_TLBSTATUS_SACTIVE (CB_TLBSTATUS_SACTIVE_MASK << \
+ CB_TLBSTATUS_SACTIVE_SHIFT)
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define CB_TTBCR_EAE (CB_TTBCR_EAE_MASK << CB_TTBCR_EAE_SHIFT)
+
+#define CB_TTBR0_ADDR (CB_TTBR0_ADDR_MASK << CB_TTBR0_ADDR_SHIFT)
+
+#ifdef CONFIG_IOMMU_LPAE
+/* Translation Table Base Register: CB_TTBR */
+#define CB_TTBR0_ASID (CB_TTBR0_ASID_MASK << CB_TTBR0_ASID_SHIFT)
+#define CB_TTBR1_ASID (CB_TTBR1_ASID_MASK << CB_TTBR1_ASID_SHIFT)
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define CB_TTBCR_T0SZ (CB_TTBCR_T0SZ_MASK << CB_TTBCR_T0SZ_SHIFT)
+#define CB_TTBCR_T1SZ (CB_TTBCR_T1SZ_MASK << CB_TTBCR_T1SZ_SHIFT)
+#define CB_TTBCR_EPD0 (CB_TTBCR_EPD0_MASK << CB_TTBCR_EPD0_SHIFT)
+#define CB_TTBCR_EPD1 (CB_TTBCR_EPD1_MASK << CB_TTBCR_EPD1_SHIFT)
+#define CB_TTBCR_IRGN0 (CB_TTBCR_IRGN0_MASK << CB_TTBCR_IRGN0_SHIFT)
+#define CB_TTBCR_IRGN1 (CB_TTBCR_IRGN1_MASK << CB_TTBCR_IRGN1_SHIFT)
+#define CB_TTBCR_ORGN0 (CB_TTBCR_ORGN0_MASK << CB_TTBCR_ORGN0_SHIFT)
+#define CB_TTBCR_ORGN1 (CB_TTBCR_ORGN1_MASK << CB_TTBCR_ORGN1_SHIFT)
+#define CB_TTBCR_NSCFG0 (CB_TTBCR_NSCFG0_MASK << CB_TTBCR_NSCFG0_SHIFT)
+#define CB_TTBCR_NSCFG1 (CB_TTBCR_NSCFG1_MASK << CB_TTBCR_NSCFG1_SHIFT)
+#define CB_TTBCR_SH0 (CB_TTBCR_SH0_MASK << CB_TTBCR_SH0_SHIFT)
+#define CB_TTBCR_SH1 (CB_TTBCR_SH1_MASK << CB_TTBCR_SH1_SHIFT)
+#define CB_TTBCR_A1 (CB_TTBCR_A1_MASK << CB_TTBCR_A1_SHIFT)
+
+#else
+
+/* Translation Table Base Register 0: CB_TTBR0 */
+#define CB_TTBR0_IRGN1 (CB_TTBR0_IRGN1_MASK << CB_TTBR0_IRGN1_SHIFT)
+#define CB_TTBR0_S (CB_TTBR0_S_MASK << CB_TTBR0_S_SHIFT)
+#define CB_TTBR0_RGN (CB_TTBR0_RGN_MASK << CB_TTBR0_RGN_SHIFT)
+#define CB_TTBR0_NOS (CB_TTBR0_NOS_MASK << CB_TTBR0_NOS_SHIFT)
+#define CB_TTBR0_IRGN0 (CB_TTBR0_IRGN0_MASK << CB_TTBR0_IRGN0_SHIFT)
+
+/* Translation Table Base Register 1: CB_TTBR1 */
+#define CB_TTBR1_IRGN1 (CB_TTBR1_IRGN1_MASK << CB_TTBR1_IRGN1_SHIFT)
+#define CB_TTBR1_S (CB_TTBR1_S_MASK << CB_TTBR1_S_SHIFT)
+#define CB_TTBR1_RGN (CB_TTBR1_RGN_MASK << CB_TTBR1_RGN_SHIFT)
+#define CB_TTBR1_NOS (CB_TTBR1_NOS_MASK << CB_TTBR1_NOS_SHIFT)
+#define CB_TTBR1_IRGN0 (CB_TTBR1_IRGN0_MASK << CB_TTBR1_IRGN0_SHIFT)
+#endif
+
+/* Global Register Masks */
+/* Configuration Register 0 */
+#define CR0_NSCFG_MASK 0x03
+#define CR0_WACFG_MASK 0x03
+#define CR0_RACFG_MASK 0x03
+#define CR0_SHCFG_MASK 0x03
+#define CR0_SMCFCFG_MASK 0x01
+#define NSCR0_SMCFCFG_MASK 0x01
+#define CR0_MTCFG_MASK 0x01
+#define CR0_MEMATTR_MASK 0x0F
+#define CR0_BSU_MASK 0x03
+#define CR0_FB_MASK 0x01
+#define CR0_PTM_MASK 0x01
+#define CR0_VMIDPNE_MASK 0x01
+#define CR0_USFCFG_MASK 0x01
+#define NSCR0_USFCFG_MASK 0x01
+#define CR0_GSE_MASK 0x01
+#define CR0_STALLD_MASK 0x01
+#define NSCR0_STALLD_MASK 0x01
+#define CR0_TRANSIENTCFG_MASK 0x03
+#define CR0_GCFGFIE_MASK 0x01
+#define NSCR0_GCFGFIE_MASK 0x01
+#define CR0_GCFGFRE_MASK 0x01
+#define NSCR0_GCFGFRE_MASK 0x01
+#define CR0_GFIE_MASK 0x01
+#define NSCR0_GFIE_MASK 0x01
+#define CR0_GFRE_MASK 0x01
+#define NSCR0_GFRE_MASK 0x01
+#define CR0_CLIENTPD_MASK 0x01
+#define NSCR0_CLIENTPD_MASK 0x01
+
+/* ACR */
+#define ACR_SMTNMC_BPTLBEN_MASK 0x01
+#define ACR_MMUDIS_BPTLBEN_MASK 0x01
+#define ACR_S2CR_BPTLBEN_MASK 0x01
+
+/* NSACR */
+#define NSACR_SMTNMC_BPTLBEN_MASK 0x01
+#define NSACR_MMUDIS_BPTLBEN_MASK 0x01
+#define NSACR_S2CR_BPTLBEN_MASK 0x01
+
+/* Configuration Register 2 */
+#define CR2_BPVMID_MASK 0xFF
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define GATS1PR_ADDR_MASK 0xFFFFF
+#define GATS1PR_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define GATS1PW_ADDR_MASK 0xFFFFF
+#define GATS1PW_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define GATS1UR_ADDR_MASK 0xFFFFF
+#define GATS1UR_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define GATS1UW_ADDR_MASK 0xFFFFF
+#define GATS1UW_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define GATS12PR_ADDR_MASK 0xFFFFF
+#define GATS12PR_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define GATS12PW_ADDR_MASK 0xFFFFF
+#define GATS12PW_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define GATS12UR_ADDR_MASK 0xFFFFF
+#define GATS12UR_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define GATS12UW_ADDR_MASK 0xFFFFF
+#define GATS12UW_NDX_MASK 0xFF
+
+/* Global Address Translation Status Register: GATSR */
+#define GATSR_ACTIVE_MASK 0x01
+
+/* Global Fault Address Register: GFAR */
+#define GFAR_FADDR_MASK 0xFFFFFFFF
+
+/* Global Fault Status Register: GFSR */
+#define GFSR_ICF_MASK 0x01
+#define GFSR_USF_MASK 0x01
+#define GFSR_SMCF_MASK 0x01
+#define GFSR_UCBF_MASK 0x01
+#define GFSR_UCIF_MASK 0x01
+#define GFSR_CAF_MASK 0x01
+#define GFSR_EF_MASK 0x01
+#define GFSR_PF_MASK 0x01
+#define GFSR_MULTI_MASK 0x01
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define GFSYNR0_NESTED_MASK 0x01
+#define GFSYNR0_WNR_MASK 0x01
+#define GFSYNR0_PNU_MASK 0x01
+#define GFSYNR0_IND_MASK 0x01
+#define GFSYNR0_NSSTATE_MASK 0x01
+#define GFSYNR0_NSATTR_MASK 0x01
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define GFSYNR1_SID_MASK 0x7FFF
+#define GFSYNR1_SSD_IDX_MASK 0x7FFF
+
+/* Global Physical Address Register: GPAR */
+#define GPAR_F_MASK 0x01
+#define GPAR_SS_MASK 0x01
+#define GPAR_OUTER_MASK 0x03
+#define GPAR_INNER_MASK 0x03
+#define GPAR_SH_MASK 0x01
+#define GPAR_NS_MASK 0x01
+#define GPAR_NOS_MASK 0x01
+#define GPAR_PA_MASK 0xFFFFF
+#define GPAR_TF_MASK 0x01
+#define GPAR_AFF_MASK 0x01
+#define GPAR_PF_MASK 0x01
+#define GPAR_EF_MASK 0x01
+#define GPAR_TLBMCF_MASK 0x01
+#define GPAR_TLBLKF_MASK 0x01
+#define GPAR_UCBF_MASK 0x01
+
+/* Identification Register: IDR0 */
+#define IDR0_NUMSMRG_MASK 0xFF
+#define IDR0_NUMSIDB_MASK 0x0F
+#define IDR0_BTM_MASK 0x01
+#define IDR0_CTTW_MASK 0x01
+#define IDR0_NUMIPRT_MASK 0xFF
+#define IDR0_PTFS_MASK 0x01
+#define IDR0_SMS_MASK 0x01
+#define IDR0_NTS_MASK 0x01
+#define IDR0_S2TS_MASK 0x01
+#define IDR0_S1TS_MASK 0x01
+#define IDR0_SES_MASK 0x01
+
+/* Identification Register: IDR1 */
+#define IDR1_NUMCB_MASK 0xFF
+#define IDR1_NUMSSDNDXB_MASK 0x0F
+#define IDR1_SSDTP_MASK 0x01
+#define IDR1_SMCD_MASK 0x01
+#define IDR1_NUMS2CB_MASK 0xFF
+#define IDR1_NUMPAGENDXB_MASK 0x07
+#define IDR1_PAGESIZE_MASK 0x01
+
+/* Identification Register: IDR2 */
+#define IDR2_IAS_MASK 0x0F
+#define IDR2_OAS_MASK 0x0F
+
+/* Identification Register: IDR7 */
+#define IDR7_MINOR_MASK 0x0F
+#define IDR7_MAJOR_MASK 0x0F
+
+/* Stream to Context Register: S2CR */
+#define S2CR_CBNDX_MASK 0xFF
+#define S2CR_SHCFG_MASK 0x03
+#define S2CR_MTCFG_MASK 0x01
+#define S2CR_MEMATTR_MASK 0x0F
+#define S2CR_TYPE_MASK 0x03
+#define S2CR_NSCFG_MASK 0x03
+#define S2CR_RACFG_MASK 0x03
+#define S2CR_WACFG_MASK 0x03
+#define S2CR_PRIVCFG_MASK 0x03
+#define S2CR_INSTCFG_MASK 0x03
+#define S2CR_TRANSIENTCFG_MASK 0x03
+#define S2CR_VMID_MASK 0xFF
+#define S2CR_BSU_MASK 0x03
+#define S2CR_FB_MASK 0x01
+
+/* Stream Match Register: SMR */
+#define SMR_ID_MASK 0x7FFF
+#define SMR_MASK_MASK 0x7FFF
+#define SMR_VALID_MASK 0x01
+
+/* Global TLB Status: TLBGSTATUS */
+#define TLBGSTATUS_GSACTIVE_MASK 0x01
+
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define TLBIVAH_ADDR_MASK 0xFFFFF
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define TLBIVMID_VMID_MASK 0xFF
+
+/* Global Register Space 1 Mask */
+/* Context Bank Attribute Register: CBAR */
+#define CBAR_VMID_MASK 0xFF
+#define CBAR_CBNDX_MASK 0x03
+#define CBAR_BPSHCFG_MASK 0x03
+#define CBAR_HYPC_MASK 0x01
+#define CBAR_FB_MASK 0x01
+#define CBAR_MEMATTR_MASK 0x0F
+#define CBAR_TYPE_MASK 0x03
+#define CBAR_BSU_MASK 0x03
+#define CBAR_RACFG_MASK 0x03
+#define CBAR_WACFG_MASK 0x03
+#define CBAR_IRPTNDX_MASK 0xFF
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
+#define CBFRSYNRA_SID_MASK 0x7FFF
+
+/* Implementation defined register space masks */
+#define MICRO_MMU_CTRL_RESERVED_MASK 0x03
+#define MICRO_MMU_CTRL_HALT_REQ_MASK 0x01
+#define MICRO_MMU_CTRL_IDLE_MASK 0x01
+
+/* Stage 1 Context Bank Format Masks */
+/* Auxiliary Control Register: CB_ACTLR */
+#define CB_ACTLR_REQPRIORITY_MASK 0x3
+#define CB_ACTLR_REQPRIORITYCFG_MASK 0x1
+#define CB_ACTLR_PRIVCFG_MASK 0x3
+#define CB_ACTLR_BPRCOSH_MASK 0x1
+#define CB_ACTLR_BPRCISH_MASK 0x1
+#define CB_ACTLR_BPRCNSH_MASK 0x1
+
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define CB_ATS1PR_ADDR_MASK 0xFFFFF
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define CB_ATS1PW_ADDR_MASK 0xFFFFF
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define CB_ATS1UR_ADDR_MASK 0xFFFFF
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define CB_ATS1UW_ADDR_MASK 0xFFFFF
+
+/* Address Translation Status Register: CB_ATSR */
+#define CB_ATSR_ACTIVE_MASK 0x01
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define CB_CONTEXTIDR_ASID_MASK 0xFF
+#define CB_CONTEXTIDR_PROCID_MASK 0xFFFFFF
+
+/* Fault Address Register: CB_FAR */
+#define CB_FAR_FADDR_MASK 0xFFFFFFFF
+
+/* Fault Status Register: CB_FSR */
+#define CB_FSR_TF_MASK 0x01
+#define CB_FSR_AFF_MASK 0x01
+#define CB_FSR_PF_MASK 0x01
+#define CB_FSR_EF_MASK 0x01
+#define CB_FSR_TLBMCF_MASK 0x01
+#define CB_FSR_TLBLKF_MASK 0x01
+#define CB_FSR_SS_MASK 0x01
+#define CB_FSR_MULTI_MASK 0x01
+
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define CB_FSYNR0_PLVL_MASK 0x03
+#define CB_FSYNR0_S1PTWF_MASK 0x01
+#define CB_FSYNR0_WNR_MASK 0x01
+#define CB_FSYNR0_PNU_MASK 0x01
+#define CB_FSYNR0_IND_MASK 0x01
+#define CB_FSYNR0_NSSTATE_MASK 0x01
+#define CB_FSYNR0_NSATTR_MASK 0x01
+#define CB_FSYNR0_ATOF_MASK 0x01
+#define CB_FSYNR0_PTWF_MASK 0x01
+#define CB_FSYNR0_AFR_MASK 0x01
+#define CB_FSYNR0_S1CBNDX_MASK 0xFF
+
+/* Normal Memory Remap Register: CB_NMRR */
+#define CB_NMRR_IR0_MASK 0x03
+#define CB_NMRR_IR1_MASK 0x03
+#define CB_NMRR_IR2_MASK 0x03
+#define CB_NMRR_IR3_MASK 0x03
+#define CB_NMRR_IR4_MASK 0x03
+#define CB_NMRR_IR5_MASK 0x03
+#define CB_NMRR_IR6_MASK 0x03
+#define CB_NMRR_IR7_MASK 0x03
+#define CB_NMRR_OR0_MASK 0x03
+#define CB_NMRR_OR1_MASK 0x03
+#define CB_NMRR_OR2_MASK 0x03
+#define CB_NMRR_OR3_MASK 0x03
+#define CB_NMRR_OR4_MASK 0x03
+#define CB_NMRR_OR5_MASK 0x03
+#define CB_NMRR_OR6_MASK 0x03
+#define CB_NMRR_OR7_MASK 0x03
+
+/* Physical Address Register: CB_PAR */
+#define CB_PAR_F_MASK 0x01
+#define CB_PAR_SS_MASK 0x01
+#define CB_PAR_OUTER_MASK 0x03
+#define CB_PAR_INNER_MASK 0x07
+#define CB_PAR_SH_MASK 0x01
+#define CB_PAR_NS_MASK 0x01
+#define CB_PAR_NOS_MASK 0x01
+#define CB_PAR_PA_MASK 0xFFFFF
+#define CB_PAR_TF_MASK 0x01
+#define CB_PAR_AFF_MASK 0x01
+#define CB_PAR_PF_MASK 0x01
+#define CB_PAR_EF_MASK 0x01
+#define CB_PAR_TLBMCF_MASK 0x01
+#define CB_PAR_TLBLKF_MASK 0x01
+#define CB_PAR_ATOT_MASK 0x01ULL
+#define CB_PAR_PLVL_MASK 0x03ULL
+#define CB_PAR_STAGE_MASK 0x01ULL
+
+/* Primary Region Remap Register: CB_PRRR */
+#define CB_PRRR_TR0_MASK 0x03
+#define CB_PRRR_TR1_MASK 0x03
+#define CB_PRRR_TR2_MASK 0x03
+#define CB_PRRR_TR3_MASK 0x03
+#define CB_PRRR_TR4_MASK 0x03
+#define CB_PRRR_TR5_MASK 0x03
+#define CB_PRRR_TR6_MASK 0x03
+#define CB_PRRR_TR7_MASK 0x03
+#define CB_PRRR_DS0_MASK 0x01
+#define CB_PRRR_DS1_MASK 0x01
+#define CB_PRRR_NS0_MASK 0x01
+#define CB_PRRR_NS1_MASK 0x01
+#define CB_PRRR_NOS0_MASK 0x01
+#define CB_PRRR_NOS1_MASK 0x01
+#define CB_PRRR_NOS2_MASK 0x01
+#define CB_PRRR_NOS3_MASK 0x01
+#define CB_PRRR_NOS4_MASK 0x01
+#define CB_PRRR_NOS5_MASK 0x01
+#define CB_PRRR_NOS6_MASK 0x01
+#define CB_PRRR_NOS7_MASK 0x01
+
+/* Transaction Resume: CB_RESUME */
+#define CB_RESUME_TNR_MASK 0x01
+
+/* System Control Register: CB_SCTLR */
+#define CB_SCTLR_M_MASK 0x01
+#define CB_SCTLR_TRE_MASK 0x01
+#define CB_SCTLR_AFE_MASK 0x01
+#define CB_SCTLR_AFFD_MASK 0x01
+#define CB_SCTLR_E_MASK 0x01
+#define CB_SCTLR_CFRE_MASK 0x01
+#define CB_SCTLR_CFIE_MASK 0x01
+#define CB_SCTLR_CFCFG_MASK 0x01
+#define CB_SCTLR_HUPCF_MASK 0x01
+#define CB_SCTLR_WXN_MASK 0x01
+#define CB_SCTLR_UWXN_MASK 0x01
+#define CB_SCTLR_ASIDPNE_MASK 0x01
+#define CB_SCTLR_TRANSIENTCFG_MASK 0x03
+#define CB_SCTLR_MEMATTR_MASK 0x0F
+#define CB_SCTLR_MTCFG_MASK 0x01
+#define CB_SCTLR_SHCFG_MASK 0x03
+#define CB_SCTLR_RACFG_MASK 0x03
+#define CB_SCTLR_WACFG_MASK 0x03
+#define CB_SCTLR_NSCFG_MASK 0x03
+
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define CB_TLBIASID_ASID_MASK 0xFF
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define CB_TLBIVA_ASID_MASK 0xFF
+#define CB_TLBIVA_VA_MASK 0xFFFFF
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define CB_TLBIVAA_VA_MASK 0xFFFFF
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define CB_TLBIVAAL_VA_MASK 0xFFFFF
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define CB_TLBIVAL_ASID_MASK 0xFF
+#define CB_TLBIVAL_VA_MASK 0xFFFFF
+
+/* TLB Status: CB_TLBSTATUS */
+#define CB_TLBSTATUS_SACTIVE_MASK 0x01
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define CB_TTBCR_T0SZ_MASK 0x07
+#define CB_TTBCR_T1SZ_MASK 0x07
+#define CB_TTBCR_EPD0_MASK 0x01
+#define CB_TTBCR_EPD1_MASK 0x01
+#define CB_TTBCR_IRGN0_MASK 0x03
+#define CB_TTBCR_IRGN1_MASK 0x03
+#define CB_TTBCR_ORGN0_MASK 0x03
+#define CB_TTBCR_ORGN1_MASK 0x03
+#define CB_TTBCR_NSCFG0_MASK 0x01
+#define CB_TTBCR_NSCFG1_MASK 0x01
+#define CB_TTBCR_SH0_MASK 0x03
+#define CB_TTBCR_SH1_MASK 0x03
+#define CB_TTBCR_A1_MASK 0x01
+#define CB_TTBCR_EAE_MASK 0x01
+
+/* Translation Table Base Register 0/1: CB_TTBR */
+#ifdef CONFIG_IOMMU_LPAE
+#define CB_TTBR0_ADDR_MASK 0x7FFFFFFFFULL
+#define CB_TTBR0_ASID_MASK 0xFF
+#define CB_TTBR1_ASID_MASK 0xFF
+#else
+#define CB_TTBR0_IRGN1_MASK 0x01
+#define CB_TTBR0_S_MASK 0x01
+#define CB_TTBR0_RGN_MASK 0x01
+#define CB_TTBR0_NOS_MASK 0x01
+#define CB_TTBR0_IRGN0_MASK 0x01
+#define CB_TTBR0_ADDR_MASK 0xFFFFFF
+
+#define CB_TTBR1_IRGN1_MASK 0x1
+#define CB_TTBR1_S_MASK 0x1
+#define CB_TTBR1_RGN_MASK 0x1
+#define CB_TTBR1_NOS_MASK 0x1
+#define CB_TTBR1_IRGN0_MASK 0x1
+#endif
+
+/* Global Register Shifts */
+/* Configuration Register: CR0 */
+#define CR0_NSCFG_SHIFT 28
+#define CR0_WACFG_SHIFT 26
+#define CR0_RACFG_SHIFT 24
+#define CR0_SHCFG_SHIFT 22
+#define CR0_SMCFCFG_SHIFT 21
+#define NSCR0_SMCFCFG_SHIFT 21
+#define CR0_MTCFG_SHIFT 20
+#define CR0_MEMATTR_SHIFT 16
+#define CR0_BSU_SHIFT 14
+#define CR0_FB_SHIFT 13
+#define CR0_PTM_SHIFT 12
+#define CR0_VMIDPNE_SHIFT 11
+#define CR0_USFCFG_SHIFT 10
+#define NSCR0_USFCFG_SHIFT 10
+#define CR0_GSE_SHIFT 9
+#define CR0_STALLD_SHIFT 8
+#define NSCR0_STALLD_SHIFT 8
+#define CR0_TRANSIENTCFG_SHIFT 6
+#define CR0_GCFGFIE_SHIFT 5
+#define NSCR0_GCFGFIE_SHIFT 5
+#define CR0_GCFGFRE_SHIFT 4
+#define NSCR0_GCFGFRE_SHIFT 4
+#define CR0_GFIE_SHIFT 2
+#define NSCR0_GFIE_SHIFT 2
+#define CR0_GFRE_SHIFT 1
+#define NSCR0_GFRE_SHIFT 1
+#define CR0_CLIENTPD_SHIFT 0
+#define NSCR0_CLIENTPD_SHIFT 0
+
+/* ACR */
+#define ACR_SMTNMC_BPTLBEN_SHIFT 8
+#define ACR_MMUDIS_BPTLBEN_SHIFT 9
+#define ACR_S2CR_BPTLBEN_SHIFT 10
+
+/* NSACR */
+#define NSACR_SMTNMC_BPTLBEN_SHIFT 8
+#define NSACR_MMUDIS_BPTLBEN_SHIFT 9
+#define NSACR_S2CR_BPTLBEN_SHIFT 10
+
+/* Configuration Register: CR2 */
+#define CR2_BPVMID_SHIFT 0
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define GATS1PR_ADDR_SHIFT 12
+#define GATS1PR_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define GATS1PW_ADDR_SHIFT 12
+#define GATS1PW_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define GATS1UR_ADDR_SHIFT 12
+#define GATS1UR_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define GATS1UW_ADDR_SHIFT 12
+#define GATS1UW_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define GATS12PR_ADDR_SHIFT 12
+#define GATS12PR_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define GATS12PW_ADDR_SHIFT 12
+#define GATS12PW_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define GATS12UR_ADDR_SHIFT 12
+#define GATS12UR_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define GATS12UW_ADDR_SHIFT 12
+#define GATS12UW_NDX_SHIFT 0
+
+/* Global Address Translation Status Register: GATSR */
+#define GATSR_ACTIVE_SHIFT 0
+
+/* Global Fault Address Register: GFAR */
+#define GFAR_FADDR_SHIFT 0
+
+/* Global Fault Status Register: GFSR */
+#define GFSR_ICF_SHIFT 0
+#define GFSR_USF_SHIFT 1
+#define GFSR_SMCF_SHIFT 2
+#define GFSR_UCBF_SHIFT 3
+#define GFSR_UCIF_SHIFT 4
+#define GFSR_CAF_SHIFT 5
+#define GFSR_EF_SHIFT 6
+#define GFSR_PF_SHIFT 7
+#define GFSR_MULTI_SHIFT 31
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define GFSYNR0_NESTED_SHIFT 0
+#define GFSYNR0_WNR_SHIFT 1
+#define GFSYNR0_PNU_SHIFT 2
+#define GFSYNR0_IND_SHIFT 3
+#define GFSYNR0_NSSTATE_SHIFT 4
+#define GFSYNR0_NSATTR_SHIFT 5
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define GFSYNR1_SID_SHIFT 0
+
+/* Global Physical Address Register: GPAR */
+#define GPAR_F_SHIFT 0
+#define GPAR_SS_SHIFT 1
+#define GPAR_OUTER_SHIFT 2
+#define GPAR_INNER_SHIFT 4
+#define GPAR_SH_SHIFT 7
+#define GPAR_NS_SHIFT 9
+#define GPAR_NOS_SHIFT 10
+#define GPAR_PA_SHIFT 12
+#define GPAR_TF_SHIFT 1
+#define GPAR_AFF_SHIFT 2
+#define GPAR_PF_SHIFT 3
+#define GPAR_EF_SHIFT 4
+#define GPAR_TLCMCF_SHIFT 5
+#define GPAR_TLBLKF_SHIFT 6
+#define GFAR_UCBF_SHIFT 30
+
+/* Identification Register: IDR0 */
+#define IDR0_NUMSMRG_SHIFT 0
+#define IDR0_NUMSIDB_SHIFT 9
+#define IDR0_BTM_SHIFT 13
+#define IDR0_CTTW_SHIFT 14
+#define IDR0_NUMIRPT_SHIFT 16
+#define IDR0_PTFS_SHIFT 24
+#define IDR0_SMS_SHIFT 27
+#define IDR0_NTS_SHIFT 28
+#define IDR0_S2TS_SHIFT 29
+#define IDR0_S1TS_SHIFT 30
+#define IDR0_SES_SHIFT 31
+
+/* Identification Register: IDR1 */
+#define IDR1_NUMCB_SHIFT 0
+#define IDR1_NUMSSDNDXB_SHIFT 8
+#define IDR1_SSDTP_SHIFT 12
+#define IDR1_SMCD_SHIFT 15
+#define IDR1_NUMS2CB_SHIFT 16
+#define IDR1_NUMPAGENDXB_SHIFT 28
+#define IDR1_PAGESIZE_SHIFT 31
+
+/* Identification Register: IDR2 */
+#define IDR2_IAS_SHIFT 0
+#define IDR2_OAS_SHIFT 4
+
+/* Identification Register: IDR7 */
+#define IDR7_MINOR_SHIFT 0
+#define IDR7_MAJOR_SHIFT 4
+
+/* Stream to Context Register: S2CR */
+#define S2CR_CBNDX_SHIFT 0
+#define s2CR_SHCFG_SHIFT 8
+#define S2CR_MTCFG_SHIFT 11
+#define S2CR_MEMATTR_SHIFT 12
+#define S2CR_TYPE_SHIFT 16
+#define S2CR_NSCFG_SHIFT 18
+#define S2CR_RACFG_SHIFT 20
+#define S2CR_WACFG_SHIFT 22
+#define S2CR_PRIVCFG_SHIFT 24
+#define S2CR_INSTCFG_SHIFT 26
+#define S2CR_TRANSIENTCFG_SHIFT 28
+#define S2CR_VMID_SHIFT 0
+#define S2CR_BSU_SHIFT 24
+#define S2CR_FB_SHIFT 26
+
+/* Stream Match Register: SMR */
+#define SMR_ID_SHIFT 0
+#define SMR_MASK_SHIFT 16
+#define SMR_VALID_SHIFT 31
+
+/* Global TLB Status: TLBGSTATUS */
+#define TLBGSTATUS_GSACTIVE_SHIFT 0
+
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define TLBIVAH_ADDR_SHIFT 12
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define TLBIVMID_VMID_SHIFT 0
+
+/* Context Bank Attribute Register: CBAR */
+#define CBAR_VMID_SHIFT 0
+#define CBAR_CBNDX_SHIFT 8
+#define CBAR_BPSHCFG_SHIFT 8
+#define CBAR_HYPC_SHIFT 10
+#define CBAR_FB_SHIFT 11
+#define CBAR_MEMATTR_SHIFT 12
+#define CBAR_TYPE_SHIFT 16
+#define CBAR_BSU_SHIFT 18
+#define CBAR_RACFG_SHIFT 20
+#define CBAR_WACFG_SHIFT 22
+#define CBAR_IRPTNDX_SHIFT 24
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
+#define CBFRSYNRA_SID_SHIFT 0
+
+/* Implementation defined register space shift */
+#define MICRO_MMU_CTRL_RESERVED_SHIFT 0x00
+#define MICRO_MMU_CTRL_HALT_REQ_SHIFT 0x02
+#define MICRO_MMU_CTRL_IDLE_SHIFT 0x03
+
+/* Stage 1 Context Bank Format Shifts */
+/* Auxiliary Control Register: CB_ACTLR */
+#define CB_ACTLR_REQPRIORITY_SHIFT 0
+#define CB_ACTLR_REQPRIORITYCFG_SHIFT 4
+#define CB_ACTLR_PRIVCFG_SHIFT 8
+#define CB_ACTLR_BPRCOSH_SHIFT 28
+#define CB_ACTLR_BPRCISH_SHIFT 29
+#define CB_ACTLR_BPRCNSH_SHIFT 30
+
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define CB_ATS1PR_ADDR_SHIFT 12
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define CB_ATS1PW_ADDR_SHIFT 12
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define CB_ATS1UR_ADDR_SHIFT 12
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define CB_ATS1UW_ADDR_SHIFT 12
+
+/* Address Translation Status Register: CB_ATSR */
+#define CB_ATSR_ACTIVE_SHIFT 0
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define CB_CONTEXTIDR_ASID_SHIFT 0
+#define CB_CONTEXTIDR_PROCID_SHIFT 8
+
+/* Fault Address Register: CB_FAR */
+#define CB_FAR_FADDR_SHIFT 0
+
+/* Fault Status Register: CB_FSR */
+#define CB_FSR_TF_SHIFT 1
+#define CB_FSR_AFF_SHIFT 2
+#define CB_FSR_PF_SHIFT 3
+#define CB_FSR_EF_SHIFT 4
+#define CB_FSR_TLBMCF_SHIFT 5
+#define CB_FSR_TLBLKF_SHIFT 6
+#define CB_FSR_SS_SHIFT 30
+#define CB_FSR_MULTI_SHIFT 31
+
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define CB_FSYNR0_PLVL_SHIFT 0
+#define CB_FSYNR0_S1PTWF_SHIFT 3
+#define CB_FSYNR0_WNR_SHIFT 4
+#define CB_FSYNR0_PNU_SHIFT 5
+#define CB_FSYNR0_IND_SHIFT 6
+#define CB_FSYNR0_NSSTATE_SHIFT 7
+#define CB_FSYNR0_NSATTR_SHIFT 8
+#define CB_FSYNR0_ATOF_SHIFT 9
+#define CB_FSYNR0_PTWF_SHIFT 10
+#define CB_FSYNR0_AFR_SHIFT 11
+#define CB_FSYNR0_S1CBNDX_SHIFT 16
+
+/* Normal Memory Remap Register: CB_NMRR */
+#define CB_NMRR_IR0_SHIFT 0
+#define CB_NMRR_IR1_SHIFT 2
+#define CB_NMRR_IR2_SHIFT 4
+#define CB_NMRR_IR3_SHIFT 6
+#define CB_NMRR_IR4_SHIFT 8
+#define CB_NMRR_IR5_SHIFT 10
+#define CB_NMRR_IR6_SHIFT 12
+#define CB_NMRR_IR7_SHIFT 14
+#define CB_NMRR_OR0_SHIFT 16
+#define CB_NMRR_OR1_SHIFT 18
+#define CB_NMRR_OR2_SHIFT 20
+#define CB_NMRR_OR3_SHIFT 22
+#define CB_NMRR_OR4_SHIFT 24
+#define CB_NMRR_OR5_SHIFT 26
+#define CB_NMRR_OR6_SHIFT 28
+#define CB_NMRR_OR7_SHIFT 30
+
+/* Physical Address Register: CB_PAR */
+#define CB_PAR_F_SHIFT 0
+#define CB_PAR_SS_SHIFT 1
+#define CB_PAR_OUTER_SHIFT 2
+#define CB_PAR_INNER_SHIFT 4
+#define CB_PAR_SH_SHIFT 7
+#define CB_PAR_NS_SHIFT 9
+#define CB_PAR_NOS_SHIFT 10
+#define CB_PAR_PA_SHIFT 12
+#define CB_PAR_TF_SHIFT 1
+#define CB_PAR_AFF_SHIFT 2
+#define CB_PAR_PF_SHIFT 3
+#define CB_PAR_EF_SHIFT 4
+#define CB_PAR_TLBMCF_SHIFT 5
+#define CB_PAR_TLBLKF_SHIFT 6
+#define CB_PAR_ATOT_SHIFT 31
+#define CB_PAR_PLVL_SHIFT 32
+#define CB_PAR_STAGE_SHIFT 35
+
+/* Primary Region Remap Register: CB_PRRR */
+#define CB_PRRR_TR0_SHIFT 0
+#define CB_PRRR_TR1_SHIFT 2
+#define CB_PRRR_TR2_SHIFT 4
+#define CB_PRRR_TR3_SHIFT 6
+#define CB_PRRR_TR4_SHIFT 8
+#define CB_PRRR_TR5_SHIFT 10
+#define CB_PRRR_TR6_SHIFT 12
+#define CB_PRRR_TR7_SHIFT 14
+#define CB_PRRR_DS0_SHIFT 16
+#define CB_PRRR_DS1_SHIFT 17
+#define CB_PRRR_NS0_SHIFT 18
+#define CB_PRRR_NS1_SHIFT 19
+#define CB_PRRR_NOS0_SHIFT 24
+#define CB_PRRR_NOS1_SHIFT 25
+#define CB_PRRR_NOS2_SHIFT 26
+#define CB_PRRR_NOS3_SHIFT 27
+#define CB_PRRR_NOS4_SHIFT 28
+#define CB_PRRR_NOS5_SHIFT 29
+#define CB_PRRR_NOS6_SHIFT 30
+#define CB_PRRR_NOS7_SHIFT 31
+
+/* Transaction Resume: CB_RESUME */
+#define CB_RESUME_TNR_SHIFT 0
+
+/* System Control Register: CB_SCTLR */
+#define CB_SCTLR_M_SHIFT 0
+#define CB_SCTLR_TRE_SHIFT 1
+#define CB_SCTLR_AFE_SHIFT 2
+#define CB_SCTLR_AFFD_SHIFT 3
+#define CB_SCTLR_E_SHIFT 4
+#define CB_SCTLR_CFRE_SHIFT 5
+#define CB_SCTLR_CFIE_SHIFT 6
+#define CB_SCTLR_CFCFG_SHIFT 7
+#define CB_SCTLR_HUPCF_SHIFT 8
+#define CB_SCTLR_WXN_SHIFT 9
+#define CB_SCTLR_UWXN_SHIFT 10
+#define CB_SCTLR_ASIDPNE_SHIFT 12
+#define CB_SCTLR_TRANSIENTCFG_SHIFT 14
+#define CB_SCTLR_MEMATTR_SHIFT 16
+#define CB_SCTLR_MTCFG_SHIFT 20
+#define CB_SCTLR_SHCFG_SHIFT 22
+#define CB_SCTLR_RACFG_SHIFT 24
+#define CB_SCTLR_WACFG_SHIFT 26
+#define CB_SCTLR_NSCFG_SHIFT 28
+
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define CB_TLBIASID_ASID_SHIFT 0
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define CB_TLBIVA_ASID_SHIFT 0
+#define CB_TLBIVA_VA_SHIFT 12
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define CB_TLBIVAA_VA_SHIFT 12
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define CB_TLBIVAAL_VA_SHIFT 12
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define CB_TLBIVAL_ASID_SHIFT 0
+#define CB_TLBIVAL_VA_SHIFT 12
+
+/* TLB Status: CB_TLBSTATUS */
+#define CB_TLBSTATUS_SACTIVE_SHIFT 0
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define CB_TTBCR_T0SZ_SHIFT 0
+#define CB_TTBCR_T1SZ_SHIFT 16
+#define CB_TTBCR_EPD0_SHIFT 4
+#define CB_TTBCR_EPD1_SHIFT 5
+#define CB_TTBCR_NSCFG0_SHIFT 14
+#define CB_TTBCR_NSCFG1_SHIFT 30
+#define CB_TTBCR_EAE_SHIFT 31
+#define CB_TTBCR_IRGN0_SHIFT 8
+#define CB_TTBCR_IRGN1_SHIFT 24
+#define CB_TTBCR_ORGN0_SHIFT 10
+#define CB_TTBCR_ORGN1_SHIFT 26
+#define CB_TTBCR_A1_SHIFT 22
+#define CB_TTBCR_SH0_SHIFT 12
+#define CB_TTBCR_SH1_SHIFT 28
+
+/* Translation Table Base Register 0/1: CB_TTBR */
+#ifdef CONFIG_IOMMU_LPAE
+#define CB_TTBR0_ADDR_SHIFT 5
+#define CB_TTBR0_ASID_SHIFT 48
+#define CB_TTBR1_ASID_SHIFT 48
+#else
+#define CB_TTBR0_IRGN1_SHIFT 0
+#define CB_TTBR0_S_SHIFT 1
+#define CB_TTBR0_RGN_SHIFT 3
+#define CB_TTBR0_NOS_SHIFT 5
+#define CB_TTBR0_IRGN0_SHIFT 6
+#define CB_TTBR0_ADDR_SHIFT 14
+
+#define CB_TTBR1_IRGN1_SHIFT 0
+#define CB_TTBR1_S_SHIFT 1
+#define CB_TTBR1_RGN_SHIFT 3
+#define CB_TTBR1_NOS_SHIFT 5
+#define CB_TTBR1_IRGN0_SHIFT 6
+#define CB_TTBR1_ADDR_SHIFT 14
+#endif
+
+#endif
diff --git a/drivers/iommu/qcom/msm_iommu_pagetable.c b/drivers/iommu/qcom/msm_iommu_pagetable.c
new file mode 100644
index 0000000000000..1f11abb9db7bf
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_pagetable.c
@@ -0,0 +1,645 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/scatterlist.h>
+
+#include <asm/cacheflush.h>
+
+#include <linux/qcom_iommu.h>
+#include "msm_iommu_priv.h"
+#include <trace/events/kmem.h>
+#include "msm_iommu_pagetable.h"
+
+#define NUM_FL_PTE 4096
+#define NUM_SL_PTE 256
+#define GUARD_PTE 2
+#define NUM_TEX_CLASS 8
+
+/* First-level page table bits */
+#define FL_BASE_MASK 0xFFFFFC00
+#define FL_TYPE_TABLE (1 << 0)
+#define FL_TYPE_SECT (2 << 0)
+#define FL_SUPERSECTION (1 << 18)
+#define FL_AP0 (1 << 10)
+#define FL_AP1 (1 << 11)
+#define FL_AP2 (1 << 15)
+#define FL_SHARED (1 << 16)
+#define FL_BUFFERABLE (1 << 2)
+#define FL_CACHEABLE (1 << 3)
+#define FL_TEX0 (1 << 12)
+#define FL_OFFSET(va) (((va) & 0xFFF00000) >> 20)
+#define FL_NG (1 << 17)
+
+/* Second-level page table bits */
+#define SL_BASE_MASK_LARGE 0xFFFF0000
+#define SL_BASE_MASK_SMALL 0xFFFFF000
+#define SL_TYPE_LARGE (1 << 0)
+#define SL_TYPE_SMALL (2 << 0)
+#define SL_AP0 (1 << 4)
+#define SL_AP1 (2 << 4)
+#define SL_AP2 (1 << 9)
+#define SL_SHARED (1 << 10)
+#define SL_BUFFERABLE (1 << 2)
+#define SL_CACHEABLE (1 << 3)
+#define SL_TEX0 (1 << 6)
+#define SL_OFFSET(va) (((va) & 0xFF000) >> 12)
+#define SL_NG (1 << 11)
+
+/* Memory type and cache policy attributes */
+#define MT_SO 0
+#define MT_DEV 1
+#define MT_IOMMU_NORMAL 2
+#define CP_NONCACHED 0
+#define CP_WB_WA 1
+#define CP_WT 2
+#define CP_WB_NWA 3
+
+/* Sharability attributes of MSM IOMMU mappings */
+#define MSM_IOMMU_ATTR_NON_SH 0x0
+#define MSM_IOMMU_ATTR_SH 0x4
+
+/* Cacheability attributes of MSM IOMMU mappings */
+#define MSM_IOMMU_ATTR_NONCACHED 0x0
+#define MSM_IOMMU_ATTR_CACHED_WB_WA 0x1
+#define MSM_IOMMU_ATTR_CACHED_WB_NWA 0x2
+#define MSM_IOMMU_ATTR_CACHED_WT 0x3
+
+static int msm_iommu_tex_class[4];
+
+/* TEX Remap Registers */
+#define NMRR_ICP(nmrr, n) (((nmrr) & (3 << ((n) * 2))) >> ((n) * 2))
+#define NMRR_OCP(nmrr, n) (((nmrr) & (3 << ((n) * 2 + 16))) >> ((n) * 2 + 16))
+
+#define PRRR_NOS(prrr, n) ((prrr) & (1 << ((n) + 24)) ? 1 : 0)
+#define PRRR_MT(prrr, n) ((((prrr) & (3 << ((n) * 2))) >> ((n) * 2)))
+
+/*
+ * Flush a range of page-table entries from the data cache so the IOMMU
+ * hardware sees them.  Skipped when L2 redirect is enabled (redirect != 0),
+ * since then the walker fetches through the cache and no flush is needed.
+ */
+static inline void clean_pte(u32 *start, u32 *end, int redirect)
+{
+ if (!redirect)
+ dmac_flush_range(start, end);
+}
+
+/*
+ * Allocate the 16KB first-level page table (NUM_FL_PTE = 4096 u32 entries)
+ * and its same-sized shadow copy (see msm_iommu_pt docs for the shadow's
+ * role).  Both are zeroed; only the real table is cache-cleaned, as the
+ * hardware never reads the shadow.  Returns 0 on success or -ENOMEM.
+ */
+int msm_iommu_pagetable_alloc(struct msm_iommu_pt *pt)
+{
+ pt->fl_table = (u32 *)__get_free_pages(GFP_KERNEL, get_order(SZ_16K));
+ if (!pt->fl_table)
+ return -ENOMEM;
+
+ pt->fl_table_shadow = (u32 *)__get_free_pages(GFP_KERNEL,
+ get_order(SZ_16K));
+ if (!pt->fl_table_shadow) {
+ /* Unwind the first allocation so we don't leak on failure */
+ free_pages((unsigned long)pt->fl_table, get_order(SZ_16K));
+ return -ENOMEM;
+ }
+
+ memset(pt->fl_table, 0, SZ_16K);
+ memset(pt->fl_table_shadow, 0, SZ_16K);
+ clean_pte(pt->fl_table, pt->fl_table + NUM_FL_PTE, pt->redirect);
+
+ return 0;
+}
+
+/*
+ * Free the first-level table, any second-level tables still installed in
+ * it (entries whose low two bits equal FL_TYPE_TABLE), and the shadow
+ * table.  Pointers in @pt are cleared afterwards.
+ */
+void msm_iommu_pagetable_free(struct msm_iommu_pt *pt)
+{
+ u32 *fl_table;
+ u32 *fl_table_shadow;
+ int i;
+
+ fl_table = pt->fl_table;
+ fl_table_shadow = pt->fl_table_shadow;
+ for (i = 0; i < NUM_FL_PTE; i++)
+ if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
+ free_page((unsigned long) __va(((fl_table[i]) &
+ FL_BASE_MASK)))
+ free_pages((unsigned long)fl_table, get_order(SZ_16K));
+ pt->fl_table = 0;
+
+ free_pages((unsigned long)fl_table_shadow, get_order(SZ_16K));
+ pt->fl_table_shadow = 0;
+}
+
+/*
+ * Free second-level tables covering [va, va + len) whose shadow
+ * use-count (low 9 bits of the shadow entry) has dropped to zero.
+ * Per the msm_iommu_pt documentation, this is intended to run after a
+ * TLB invalidate so the hardware can no longer walk the freed tables.
+ */
+void msm_iommu_pagetable_free_tables(struct msm_iommu_pt *pt, unsigned long va,
+ size_t len)
+{
+ /*
+ * Adding 2 for worst case. We could be spanning 3 second level pages
+ * if we unmapped just over 1MB.
+ */
+ u32 n_entries = len / SZ_1M + 2;
+ u32 fl_offset = FL_OFFSET(va);
+ u32 i;
+
+ for (i = 0; i < n_entries && fl_offset < NUM_FL_PTE; ++i) {
+ u32 *fl_pte_shadow = pt->fl_table_shadow + fl_offset;
+ /* Shadow entry: table address in the upper bits, count below */
+ void *sl_table_va = __va(((*fl_pte_shadow) & ~0x1FF));
+ u32 sl_table = *fl_pte_shadow;
+
+ /* Free only if a table exists and its use count is zero */
+ if (sl_table && !(sl_table & 0x1FF)) {
+ free_pages((unsigned long) sl_table_va,
+ get_order(SZ_4K));
+ *fl_pte_shadow = 0;
+ }
+ ++fl_offset;
+ }
+}
+
+/*
+ * Translate IOMMU_* protection flags into first-level (1M/16M) or
+ * second-level (4K/64K) descriptor bits, selected by @len.  Returns 0 if
+ * the TEX class is out of range; 0 is unambiguous as an error because a
+ * valid result always has FL_SHARED/SL_SHARED set (callers rely on this).
+ */
+static int __get_pgprot(int prot, int len)
+{
+ unsigned int pgprot;
+ int tex;
+
+ /* No access bits at all: warn and default to read/write */
+ if (!(prot & (IOMMU_READ | IOMMU_WRITE))) {
+ prot |= IOMMU_READ | IOMMU_WRITE;
+ WARN_ONCE(1, "No attributes in iommu mapping; assuming RW\n");
+ }
+
+ /* Write-only cannot be expressed; promote to read/write */
+ if ((prot & IOMMU_WRITE) && !(prot & IOMMU_READ)) {
+ prot |= IOMMU_READ;
+ WARN_ONCE(1, "Write-only unsupported; falling back to RW\n");
+ }
+
+ if (prot & IOMMU_CACHE)
+ tex = (pgprot_val(PAGE_KERNEL) >> 2) & 0x07;
+ else
+ tex = msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED];
+
+ if (tex < 0 || tex > NUM_TEX_CLASS - 1)
+ return 0;
+
+ if (len == SZ_16M || len == SZ_1M) {
+ /* Section/supersection: first-level attribute encoding */
+ pgprot = FL_SHARED;
+ pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
+ pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
+ pgprot |= tex & 0x04 ? FL_TEX0 : 0;
+ pgprot |= prot & IOMMU_PRIV ? FL_AP0 :
+ (FL_AP0 | FL_AP1);
+ pgprot |= prot & IOMMU_WRITE ? 0 : FL_AP2;
+ } else {
+ /* Small/large page: second-level attribute encoding */
+ pgprot = SL_SHARED;
+ pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
+ pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
+ pgprot |= tex & 0x04 ? SL_TEX0 : 0;
+ pgprot |= prot & IOMMU_PRIV ? SL_AP0 :
+ (SL_AP0 | SL_AP1);
+ pgprot |= prot & IOMMU_WRITE ? 0 : SL_AP2;
+ }
+
+ return pgprot;
+}
+
+/*
+ * Allocate and install a zeroed 4KB second-level table.  The real
+ * first-level entry gets the table address plus FL_TYPE_TABLE; the shadow
+ * entry gets the same value with the low 9 bits cleared so they can serve
+ * as the mapped-entry use count.  Returns the table, or NULL on OOM.
+ */
+static u32 *make_second_level(struct msm_iommu_pt *pt, u32 *fl_pte,
+ u32 *fl_pte_shadow)
+{
+ u32 *sl;
+ sl = (u32 *) __get_free_pages(GFP_KERNEL,
+ get_order(SZ_4K));
+
+ if (!sl) {
+ pr_debug("Could not allocate second level table\n");
+ goto fail;
+ }
+ memset(sl, 0, SZ_4K);
+ /* Clean the table plus its guard entries before the HW can walk it */
+ clean_pte(sl, sl + NUM_SL_PTE + GUARD_PTE, pt->redirect);
+
+ *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | \
+ FL_TYPE_TABLE);
+ *fl_pte_shadow = *fl_pte & ~0x1FF;
+
+ clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+fail:
+ return sl;
+}
+
+/*
+ * Write a single 4K small-page entry.  Returns -EBUSY if the slot is
+ * already populated, 0 on success.
+ */
+static int sl_4k(u32 *sl_pte, phys_addr_t pa, unsigned int pgprot)
+{
+ int ret = 0;
+
+ if (*sl_pte) {
+ ret = -EBUSY;
+ goto fail;
+ }
+
+ *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
+ | SL_TYPE_SMALL | pgprot;
+fail:
+ return ret;
+}
+
+/*
+ * Write a 64K large-page mapping, which occupies 16 consecutive
+ * second-level slots with identical entries.  All 16 slots must be free
+ * or -EBUSY is returned with nothing written.
+ */
+static int sl_64k(u32 *sl_pte, phys_addr_t pa, unsigned int pgprot)
+{
+ int ret = 0;
+
+ int i;
+
+ for (i = 0; i < 16; i++)
+ if (*(sl_pte+i)) {
+ ret = -EBUSY;
+ goto fail;
+ }
+
+ for (i = 0; i < 16; i++)
+ *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
+ | SL_SHARED | SL_TYPE_LARGE | pgprot;
+
+fail:
+ return ret;
+}
+
+/*
+ * Write a single 1MB section entry at the first level.  Returns -EBUSY
+ * if the slot is already populated, 0 on success.
+ */
+static inline int fl_1m(u32 *fl_pte, phys_addr_t pa, int pgprot)
+{
+ if (*fl_pte)
+ return -EBUSY;
+
+ *fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT | FL_SHARED
+ | pgprot;
+
+ return 0;
+}
+
+/*
+ * Write a 16MB supersection, which occupies 16 consecutive first-level
+ * slots with identical entries.  All 16 slots must be free or -EBUSY is
+ * returned with nothing written.
+ */
+static inline int fl_16m(u32 *fl_pte, phys_addr_t pa, int pgprot)
+{
+ int i;
+ int ret = 0;
+ for (i = 0; i < 16; i++)
+ if (*(fl_pte+i)) {
+ ret = -EBUSY;
+ goto fail;
+ }
+ for (i = 0; i < 16; i++)
+ *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION
+ | FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
+fail:
+ return ret;
+}
+
+/*
+ * Map a single physically-contiguous region.  @len must be exactly one
+ * of the supported page sizes (4K/64K/1M/16M); the work is delegated to
+ * msm_iommu_pagetable_map_range() via a one-entry scatterlist.
+ * Returns 0 or a negative errno.
+ */
+int msm_iommu_pagetable_map(struct msm_iommu_pt *pt, unsigned long va,
+ phys_addr_t pa, size_t len, int prot)
+{
+ int ret;
+ struct scatterlist sg;
+
+ if (len != SZ_16M && len != SZ_1M &&
+ len != SZ_64K && len != SZ_4K) {
+ pr_debug("Bad size: %zd\n", len);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ sg_init_table(&sg, 1);
+ sg_dma_address(&sg) = pa;
+ sg.length = len;
+
+ ret = msm_iommu_pagetable_map_range(pt, va, &sg, len, prot);
+
+fail:
+ return ret;
+}
+
+/*
+ * Unmap [va, va + len); thin wrapper around
+ * msm_iommu_pagetable_unmap_range().  Always reports the full @len as
+ * unmapped, matching the iommu_ops unmap convention.
+ */
+size_t msm_iommu_pagetable_unmap(struct msm_iommu_pt *pt, unsigned long va,
+ size_t len)
+{
+ msm_iommu_pagetable_unmap_range(pt, va, len);
+ return len;
+}
+
+/* Resolve the physical address backing a scatterlist entry. */
+static phys_addr_t get_phys_addr(struct scatterlist *sg)
+{
+ /*
+ * Try sg_dma_address first so that we can
+ * map carveout regions that do not have a
+ * struct page associated with them.
+ */
+ phys_addr_t pa = sg_dma_address(sg);
+ if (pa == 0)
+ pa = sg_phys(sg);
+ return pa;
+}
+
+/*
+ * For debugging we may want to force mappings to be 4K only
+ */
+#ifdef CONFIG_IOMMU_FORCE_4K_MAPPINGS
+/*
+ * Debug variant: report alignment only for 4K, so the mapper never
+ * promotes a chunk to a larger page size.
+ */
+static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
+ int align)
+{
+ if (align == SZ_4K) {
+ return IS_ALIGNED(va, align) && IS_ALIGNED(pa, align)
+ && (len >= align);
+ } else {
+ return 0;
+ }
+}
+#else
+/*
+ * True if both va and pa are aligned to @align and at least @align bytes
+ * remain, i.e. a page of that size can be used here.
+ */
+static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
+ int align)
+{
+ return IS_ALIGNED(va, align) && IS_ALIGNED(pa, align)
+ && (len >= align);
+}
+#endif
+
+/*
+ * Map a scatterlist into [va, va + len), greedily choosing the largest
+ * page size (16M > 1M > 64K > 4K) whose alignment and remaining chunk
+ * length allow it.  @len must be 4K-aligned.  On any failure every entry
+ * installed so far is rolled back via unmap_range.  Returns 0 or a
+ * negative errno.
+ */
+int msm_iommu_pagetable_map_range(struct msm_iommu_pt *pt, unsigned int va,
+ struct scatterlist *sg, unsigned int len, int prot)
+{
+ phys_addr_t pa;
+ unsigned int start_va = va;
+ unsigned int offset = 0;
+ u32 *fl_pte;
+ u32 *fl_pte_shadow;
+ u32 fl_offset;
+ u32 *sl_table = NULL;
+ u32 sl_offset, sl_start;
+ unsigned int chunk_size, chunk_offset = 0;
+ int ret = 0;
+ unsigned int pgprot4k, pgprot64k, pgprot1m, pgprot16m;
+
+ BUG_ON(len & (SZ_4K - 1));
+
+ /* Precompute descriptor bits for every page size we may emit */
+ pgprot4k = __get_pgprot(prot, SZ_4K);
+ pgprot64k = __get_pgprot(prot, SZ_64K);
+ pgprot1m = __get_pgprot(prot, SZ_1M);
+ pgprot16m = __get_pgprot(prot, SZ_16M);
+ if (!pgprot4k || !pgprot64k || !pgprot1m || !pgprot16m) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ fl_offset = FL_OFFSET(va); /* Upper 12 bits */
+ fl_pte = pt->fl_table + fl_offset; /* int pointers, 4 bytes */
+ fl_pte_shadow = pt->fl_table_shadow + fl_offset;
+ pa = get_phys_addr(sg);
+
+ while (offset < len) {
+ chunk_size = SZ_4K;
+
+ if (is_fully_aligned(va, pa, sg->length - chunk_offset,
+ SZ_16M))
+ chunk_size = SZ_16M;
+ else if (is_fully_aligned(va, pa, sg->length - chunk_offset,
+ SZ_1M))
+ chunk_size = SZ_1M;
+ /* 64k or 4k determined later */
+
+// trace_iommu_map_range(va, pa, sg->length, chunk_size);
+
+ /* for 1M and 16M, only first level entries are required */
+ if (chunk_size >= SZ_1M) {
+ if (chunk_size == SZ_16M) {
+ ret = fl_16m(fl_pte, pa, pgprot16m);
+ if (ret)
+ goto fail;
+ clean_pte(fl_pte, fl_pte + 16, pt->redirect);
+ fl_pte += 16;
+ fl_pte_shadow += 16;
+ } else if (chunk_size == SZ_1M) {
+ ret = fl_1m(fl_pte, pa, pgprot1m);
+ if (ret)
+ goto fail;
+ clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+ fl_pte++;
+ fl_pte_shadow++;
+ }
+
+ offset += chunk_size;
+ chunk_offset += chunk_size;
+ va += chunk_size;
+ pa += chunk_size;
+
+ /* Current sg entry exhausted: advance to the next one */
+ if (chunk_offset >= sg->length && offset < len) {
+ chunk_offset = 0;
+ sg = sg_next(sg);
+ pa = get_phys_addr(sg);
+ }
+ continue;
+ }
+ /* for 4K or 64K, make sure there is a second level table */
+ if (*fl_pte == 0) {
+ if (!make_second_level(pt, fl_pte, fl_pte_shadow)) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ }
+ /* Slot occupied by a section mapping: cannot nest pages under it */
+ if (!(*fl_pte & FL_TYPE_TABLE)) {
+ ret = -EBUSY;
+ goto fail;
+ }
+ sl_table = __va(((*fl_pte) & FL_BASE_MASK));
+ sl_offset = SL_OFFSET(va);
+ /* Keep track of initial position so we
+ * don't clean more than we have to
+ */
+ sl_start = sl_offset;
+
+ /* Build the 2nd level page table */
+ while (offset < len && sl_offset < NUM_SL_PTE) {
+ /* Map a large 64K page if the chunk is large enough and
+ * the pa and va are aligned
+ */
+
+ if (is_fully_aligned(va, pa, sg->length - chunk_offset,
+ SZ_64K))
+ chunk_size = SZ_64K;
+ else
+ chunk_size = SZ_4K;
+
+// trace_iommu_map_range(va, pa, sg->length,
+// chunk_size);
+
+ if (chunk_size == SZ_4K) {
+ sl_4k(&sl_table[sl_offset], pa, pgprot4k);
+ sl_offset++;
+ /* Increment map count */
+ (*fl_pte_shadow)++;
+ } else {
+ BUG_ON(sl_offset + 16 > NUM_SL_PTE);
+ sl_64k(&sl_table[sl_offset], pa, pgprot64k);
+ sl_offset += 16;
+ /* Increment map count */
+ *fl_pte_shadow += 16;
+ }
+
+ offset += chunk_size;
+ chunk_offset += chunk_size;
+ va += chunk_size;
+ pa += chunk_size;
+
+ if (chunk_offset >= sg->length && offset < len) {
+ chunk_offset = 0;
+ sg = sg_next(sg);
+ pa = get_phys_addr(sg);
+ }
+ }
+
+ /* Clean only the span of SL entries actually written */
+ clean_pte(sl_table + sl_start, sl_table + sl_offset,
+ pt->redirect);
+ fl_pte++;
+ fl_pte_shadow++;
+ sl_offset = 0;
+ }
+
+fail:
+ /* Roll back any partial mapping so the table stays consistent */
+ if (ret && offset > 0)
+ msm_iommu_pagetable_unmap_range(pt, start_va, offset);
+
+ return ret;
+}
+
+/*
+ * Clear all entries covering [va, va + len); @len must be 4K-aligned.
+ * Second-level tables are emptied and their shadow use counts decremented;
+ * the tables themselves are freed later by
+ * msm_iommu_pagetable_free_tables() once the count reaches zero.
+ */
+void msm_iommu_pagetable_unmap_range(struct msm_iommu_pt *pt, unsigned int va,
+ unsigned int len)
+{
+ unsigned int offset = 0;
+ u32 *fl_pte;
+ u32 *fl_pte_shadow;
+ u32 fl_offset;
+ u32 *sl_table;
+ u32 sl_start, sl_end;
+ int used;
+
+ BUG_ON(len & (SZ_4K - 1));
+
+ fl_offset = FL_OFFSET(va); /* Upper 12 bits */
+ fl_pte = pt->fl_table + fl_offset; /* int pointers, 4 bytes */
+ fl_pte_shadow = pt->fl_table_shadow + fl_offset;
+
+ while (offset < len) {
+ if (*fl_pte & FL_TYPE_TABLE) {
+ unsigned int n_entries;
+
+ sl_start = SL_OFFSET(va);
+ sl_table = __va(((*fl_pte) & FL_BASE_MASK));
+ /* Clamp to this table; the loop handles the rest */
+ sl_end = ((len - offset) / SZ_4K) + sl_start;
+
+ if (sl_end > NUM_SL_PTE)
+ sl_end = NUM_SL_PTE;
+ n_entries = sl_end - sl_start;
+
+ memset(sl_table + sl_start, 0, n_entries * 4);
+ clean_pte(sl_table + sl_start, sl_table + sl_end,
+ pt->redirect);
+
+ offset += n_entries * SZ_4K;
+ va += n_entries * SZ_4K;
+
+ /* Catch unbalanced unmaps: count must cover n_entries */
+ BUG_ON((*fl_pte_shadow & 0x1FF) < n_entries);
+
+ /* Decrement map count */
+ *fl_pte_shadow -= n_entries;
+ used = *fl_pte_shadow & 0x1FF;
+
+ /* Table now empty: detach it from the real FL entry */
+ if (!used) {
+ *fl_pte = 0;
+ clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+ }
+
+ sl_start = 0;
+ } else {
+ /* Section (1M) entry: clear it directly */
+ *fl_pte = 0;
+ *fl_pte_shadow = 0;
+
+ clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+ va += SZ_1M;
+ offset += SZ_1M;
+ sl_start = 0;
+ }
+ fl_pte++;
+ fl_pte_shadow++;
+ }
+}
+
+/*
+ * Software page-table walk: translate IOVA @va to a physical address by
+ * reading the tables directly instead of using hardware ATS.  Handles all
+ * four mapping sizes; returns 0 if @va is unmapped or no table exists.
+ */
+phys_addr_t msm_iommu_iova_to_phys_soft(struct iommu_domain *domain,
+ phys_addr_t va)
+{
+ struct msm_iommu_priv *priv = to_msm_priv(domain);
+ struct msm_iommu_pt *pt = &priv->pt;
+ u32 *fl_pte;
+ u32 fl_offset;
+ u32 *sl_table = NULL;
+ u32 sl_offset;
+ u32 *sl_pte;
+
+ if (!pt->fl_table) {
+ pr_err("Page table doesn't exist\n");
+ return 0;
+ }
+
+ fl_offset = FL_OFFSET(va);
+ fl_pte = pt->fl_table + fl_offset;
+
+ if (*fl_pte & FL_TYPE_TABLE) {
+ sl_table = __va(((*fl_pte) & FL_BASE_MASK));
+ sl_offset = SL_OFFSET(va);
+ sl_pte = sl_table + sl_offset;
+ /* 64 KB section */
+ if (*sl_pte & SL_TYPE_LARGE)
+ return (*sl_pte & 0xFFFF0000) | (va & ~0xFFFF0000);
+ /* 4 KB section */
+ if (*sl_pte & SL_TYPE_SMALL)
+ return (*sl_pte & 0xFFFFF000) | (va & ~0xFFFFF000);
+ } else {
+ /* 16 MB section */
+ if (*fl_pte & FL_SUPERSECTION)
+ return (*fl_pte & 0xFF000000) | (va & ~0xFF000000);
+ /* 1 MB section */
+ if (*fl_pte & FL_TYPE_SECT)
+ return (*fl_pte & 0xFFF00000) | (va & ~0xFFF00000);
+ }
+ /* Unmapped */
+ return 0;
+}
+
+/*
+ * Search the NUM_TEX_CLASS TEX-remap classes (decoded from the PRRR and
+ * NMRR register values) for one matching the requested inner cache policy
+ * @icp, outer cache policy @ocp, memory type @mt, and outer-shareable
+ * attribute @nos.  Returns the class index, or -ENODEV if none matches.
+ */
+static int __init get_tex_class(int icp, int ocp, int mt, int nos)
+{
+ int i = 0;
+ unsigned int prrr;
+ unsigned int nmrr;
+ int c_icp, c_ocp, c_mt, c_nos;
+
+ prrr = msm_iommu_get_prrr();
+ nmrr = msm_iommu_get_nmrr();
+
+ for (i = 0; i < NUM_TEX_CLASS; i++) {
+ c_nos = PRRR_NOS(prrr, i);
+ c_mt = PRRR_MT(prrr, i);
+ c_icp = NMRR_ICP(nmrr, i);
+ c_ocp = NMRR_OCP(nmrr, i);
+
+ if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
+ return i;
+ }
+
+ return -ENODEV;
+}
+
+/*
+ * Populate msm_iommu_tex_class[] with the TEX class index for each MSM
+ * cacheability attribute.  A failed lookup stores -ENODEV, which
+ * __get_pgprot() later rejects via its range check.
+ */
+static void __init setup_iommu_tex_classes(void)
+{
+ msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
+ get_tex_class(CP_NONCACHED, CP_NONCACHED,
+ MT_IOMMU_NORMAL, 1);
+
+ msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
+ get_tex_class(CP_WB_WA, CP_WB_WA, MT_IOMMU_NORMAL, 1);
+
+ msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
+ get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_IOMMU_NORMAL, 1);
+
+ msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
+ get_tex_class(CP_WT, CP_WT, MT_IOMMU_NORMAL, 1);
+}
+
+/* One-time init: derive the TEX class table from the live PRRR/NMRR. */
+void __init msm_iommu_pagetable_init(void)
+{
+ setup_iommu_tex_classes();
+}
diff --git a/drivers/iommu/qcom/msm_iommu_pagetable.h b/drivers/iommu/qcom/msm_iommu_pagetable.h
new file mode 100644
index 0000000000000..12a8d274f95e7
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_pagetable.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_IOMMU_PAGETABLE_H
+#define __ARCH_ARM_MACH_MSM_IOMMU_PAGETABLE_H
+
+struct msm_iommu_pt;
+
+/* One-time setup of TEX remap classes; call before any mapping. */
+void msm_iommu_pagetable_init(void);
+/* Allocate / free the first-level table and its shadow. */
+int msm_iommu_pagetable_alloc(struct msm_iommu_pt *pt);
+void msm_iommu_pagetable_free(struct msm_iommu_pt *pt);
+/* Single-page map/unmap (len must be 4K, 64K, 1M or 16M). */
+int msm_iommu_pagetable_map(struct msm_iommu_pt *pt, unsigned long va,
+ phys_addr_t pa, size_t len, int prot);
+size_t msm_iommu_pagetable_unmap(struct msm_iommu_pt *pt, unsigned long va,
+ size_t len);
+/* Scatterlist-based range map/unmap (len must be 4K-aligned). */
+int msm_iommu_pagetable_map_range(struct msm_iommu_pt *pt, unsigned int va,
+ struct scatterlist *sg, unsigned int len, int prot);
+void msm_iommu_pagetable_unmap_range(struct msm_iommu_pt *pt, unsigned int va,
+ unsigned int len);
+/* Software table walk; returns 0 for unmapped addresses. */
+phys_addr_t msm_iommu_iova_to_phys_soft(struct iommu_domain *domain,
+ phys_addr_t va);
+/* Free empty second-level tables after a TLB invalidate. */
+void msm_iommu_pagetable_free_tables(struct msm_iommu_pt *pt, unsigned long va,
+ size_t len);
+#endif
diff --git a/drivers/iommu/qcom/msm_iommu_perfmon.h b/drivers/iommu/qcom/msm_iommu_perfmon.h
new file mode 100644
index 0000000000000..45683f4ebd886
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_perfmon.h
@@ -0,0 +1,233 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/irqreturn.h>
+
+#ifndef MSM_IOMMU_PERFMON_H
+#define MSM_IOMMU_PERFMON_H
+
+/**
+ * struct iommu_pmon_counter - container for a performance counter.
+ * @counter_no: counter number within the group
+ * @absolute_counter_no: counter number within IOMMU PMU
+ * @value: cached counter value
+ * @overflow_count: no of times counter has overflowed
+ * @enabled: indicates whether counter is enabled or not
+ * @current_event_class: current selected event class, or
+ * MSM_IOMMU_PMU_NO_EVENT_CLASS (-1) if none
+ * @counter_dir: debugfs directory for this counter
+ * @cnt_group: group this counter belongs to
+ */
+struct iommu_pmon_counter {
+ unsigned int counter_no;
+ unsigned int absolute_counter_no;
+ unsigned long value;
+ unsigned long overflow_count;
+ unsigned int enabled;
+ int current_event_class;
+ struct dentry *counter_dir;
+ struct iommu_pmon_cnt_group *cnt_group;
+};
+
+/**
+ * struct iommu_pmon_cnt_group - container for a perf mon counter group.
+ * @grp_no: group number
+ * @num_counters: number of counters in this group
+ * @counters: array of @num_counters counters belonging to this group
+ * @group_dir: debugfs directory for this group
+ * @pmon: pointer to the iommu_pmon object this group belongs to
+ */
+struct iommu_pmon_cnt_group {
+ unsigned int grp_no;
+ unsigned int num_counters;
+ struct iommu_pmon_counter *counters;
+ struct dentry *group_dir;
+ struct iommu_pmon *pmon;
+};
+
+/**
+ * struct iommu_info - container for a perf mon iommu info.
+ * @iommu_name: name of the iommu from device tree
+ * @base: virtual base address for this iommu (presumably ioremapped
+ * register space -- NOTE(review): confirm against the registration site)
+ * @evt_irq: irq number for event overflow interrupt
+ * @iommu_dev: pointer to iommu device
+ * @ops: iommu access operations pointer.
+ * @hw_ops: iommu pm hw access operations pointer.
+ * @always_on: 1 if iommu is always on, 0 otherwise.
+ */
+struct iommu_info {
+ const char *iommu_name;
+ void *base;
+ int evt_irq;
+ struct device *iommu_dev;
+ struct iommu_access_ops *ops;
+ struct iommu_pm_hw_ops *hw_ops;
+ unsigned int always_on;
+};
+
+/**
+ * struct iommu_pmon - main container for a perf mon data.
+ * @iommu_dir: debugfs directory for this iommu
+ * @iommu: iommu_info instance
+ * @iommu_list: iommu_list head
+ * @cnt_grp: list of counter groups
+ * @num_groups: number of counter groups
+ * @num_counters: number of counters per group
+ * @event_cls_supported: an array of event classes supported for this PMU
+ * @nevent_cls_supported: number of event classes supported.
+ * @enabled: Indicates whether perf. mon is enabled or not
+ * @iommu_attach_count: attach count; nonzero while the iommu is attached
+ * @lock: mutex used to synchronize access to shared data
+ */
+struct iommu_pmon {
+ struct dentry *iommu_dir;
+ struct iommu_info iommu;
+ struct list_head iommu_list;
+ struct iommu_pmon_cnt_group *cnt_grp;
+ u32 num_groups;
+ u32 num_counters;
+ u32 *event_cls_supported;
+ u32 nevent_cls_supported;
+ unsigned int enabled;
+ unsigned int iommu_attach_count;
+ struct mutex lock;
+};
+
+/**
+ * struct iommu_pm_hw_ops - Callbacks for accessing IOMMU HW
+ * @initialize_hw: Call to do any initialization before enabling ovf interrupts
+ * @is_hw_access_OK: Returns 1 if we can access HW, 0 otherwise
+ * @grp_enable: Call to enable a counter group
+ * @grp_disable: Call to disable a counter group
+ * @enable_pm: Call to enable PM
+ * @disable_pm: Call to disable PM
+ * @reset_counters: Call to reset counters
+ * @check_for_overflow: Call to check for overflow
+ * @evt_ovfl_int_handler: Overflow interrupt handler callback
+ * @counter_enable: Call to enable counters
+ * @counter_disable: Call to disable counters
+ * @ovfl_int_enable: Call to enable overflow interrupts
+ * @ovfl_int_disable: Call to disable overflow interrupts
+ * @set_event_class: Call to set event class
+ * @read_counter: Call to read a counter value
+ */
+struct iommu_pm_hw_ops {
+ void (*initialize_hw)(const struct iommu_pmon *);
+ unsigned int (*is_hw_access_OK)(const struct iommu_pmon *);
+ void (*grp_enable)(struct iommu_info *, unsigned int);
+ void (*grp_disable)(struct iommu_info *, unsigned int);
+ void (*enable_pm)(struct iommu_info *);
+ void (*disable_pm)(struct iommu_info *);
+ void (*reset_counters)(const struct iommu_info *);
+ void (*check_for_overflow)(struct iommu_pmon *);
+ irqreturn_t (*evt_ovfl_int_handler)(int, void *);
+ void (*counter_enable)(struct iommu_info *,
+ struct iommu_pmon_counter *);
+ void (*counter_disable)(struct iommu_info *,
+ struct iommu_pmon_counter *);
+ void (*ovfl_int_enable)(struct iommu_info *,
+ const struct iommu_pmon_counter *);
+ void (*ovfl_int_disable)(struct iommu_info *,
+ const struct iommu_pmon_counter *);
+ void (*set_event_class)(struct iommu_pmon *pmon, unsigned int,
+ unsigned int);
+ unsigned int (*read_counter)(struct iommu_pmon_counter *);
+};
+
+#define MSM_IOMMU_PMU_NO_EVENT_CLASS -1
+
+#ifdef CONFIG_MSM_IOMMU_PMON
+
+/**
+ * Get pointer to PMU hardware access functions for IOMMUv0 PMU
+ */
+struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v0(void);
+
+/**
+ * Get pointer to PMU hardware access functions for IOMMUv1 PMU
+ */
+struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v1(void);
+
+/**
+ * Allocate memory for performance monitor structure. Must
+ * be called before iommu_pm_iommu_register
+ */
+struct iommu_pmon *msm_iommu_pm_alloc(struct device *iommu_dev);
+
+/**
+ * Free memory previously allocated with iommu_pm_alloc
+ */
+void msm_iommu_pm_free(struct device *iommu_dev);
+
+/**
+ * Register iommu with the performance monitor module.
+ */
+int msm_iommu_pm_iommu_register(struct iommu_pmon *info);
+
+/**
+ * Unregister iommu with the performance monitor module.
+ */
+void msm_iommu_pm_iommu_unregister(struct device *dev);
+
+/**
+ * Called by iommu driver when attaching is complete
+ * Must NOT be called with IOMMU mutexes held.
+ * @param iommu_dev IOMMU device that is attached
+ */
+void msm_iommu_attached(struct device *dev);
+
+/**
+ * Called by iommu driver before detaching.
+ * Must NOT be called with IOMMU mutexes held.
+ * @param iommu_dev IOMMU device that is going to be detached
+ */
+void msm_iommu_detached(struct device *dev);
+#else
+/* CONFIG_MSM_IOMMU_PMON disabled: no-op stubs so callers link unchanged. */
+static inline struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v0(void)
+{
+ return NULL;
+}
+
+static inline struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v1(void)
+{
+ return NULL;
+}
+
+static inline struct iommu_pmon *msm_iommu_pm_alloc(struct device *iommu_dev)
+{
+ return NULL;
+}
+
+static inline void msm_iommu_pm_free(struct device *iommu_dev)
+{
+ return;
+}
+
+static inline int msm_iommu_pm_iommu_register(struct iommu_pmon *info)
+{
+ return -EIO;
+}
+
+static inline void msm_iommu_pm_iommu_unregister(struct device *dev)
+{
+}
+
+static inline void msm_iommu_attached(struct device *dev)
+{
+}
+
+static inline void msm_iommu_detached(struct device *dev)
+{
+}
+#endif
+#endif
diff --git a/drivers/iommu/qcom/msm_iommu_priv.h b/drivers/iommu/qcom/msm_iommu_priv.h
new file mode 100644
index 0000000000000..4de0d7ef19e63
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_priv.h
@@ -0,0 +1,71 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_IOMMU_PRIV_H
+#define MSM_IOMMU_PRIV_H
+
+/**
+ * struct msm_iommu_pt - Container for first level page table and its
+ * attributes.
+ * fl_table: Pointer to the first level page table.
+ * redirect: Set to 1 if L2 redirect for page tables are enabled, 0 otherwise.
+ * unaligned_fl_table: Original address of memory for the page table.
+ * fl_table is manually aligned (as per spec) but we need the original address
+ * to free the table.
+ * fl_table_shadow: This is "copy" of the fl_table with some differences.
+ * It stores the same information as fl_table except that instead of storing
+ * second level page table address + page table entry descriptor bits it
+ * stores the second level page table address and the number of used second
+ * level page tables entries. This is used to check whether we need to free
+ * the second level page table which allows us to also free the second level
+ * page table after doing a TLB invalidate which should catch bugs with
+ * clients trying to unmap an address that is being used.
+ * fl_table_shadow will use the lower 9 bits for the use count and the upper
+ * bits for the second level page table address.
+ * sl_table_shadow uses the same concept as fl_table_shadow but for LPAE 2nd
+ * level page tables.
+ */
+#ifdef CONFIG_IOMMU_LPAE
+/* LPAE (long-descriptor) layout: 64-bit entries. */
+struct msm_iommu_pt {
+ u64 *fl_table;
+ u64 **sl_table_shadow;
+ int redirect;
+ u64 *unaligned_fl_table;
+};
+#else
+/* Short-descriptor layout: 32-bit entries, 16KB/4096-entry FL table. */
+struct msm_iommu_pt {
+ u32 *fl_table;
+ int redirect;
+ u32 *fl_table_shadow;
+};
+#endif
+/**
+ * struct msm_iommu_priv - Container for page table attributes and other
+ * private iommu domain information.
+ * pt: Page table attribute structure
+ * list_attached: List of devices (contexts) attached to this domain.
+ * domain: Embedded generic iommu_domain (used by to_msm_priv()).
+ * client_name: Name of the domain client.
+ */
+struct msm_iommu_priv {
+ struct msm_iommu_pt pt;
+ struct list_head list_attached;
+ struct iommu_domain domain;
+ const char *client_name;
+};
+
+/* Convert a generic iommu_domain into its containing msm_iommu_priv. */
+static inline struct msm_iommu_priv *to_msm_priv(struct iommu_domain *dom)
+{
+ return container_of(dom, struct msm_iommu_priv, domain);
+}
+
+#endif
diff --git a/drivers/iommu/qcom/msm_iommu_sec.c b/drivers/iommu/qcom/msm_iommu_sec.c
new file mode 100644
index 0000000000000..573245d4a57ef
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_sec.c
@@ -0,0 +1,795 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/clk.h>
+#include <linux/scatterlist.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/kmemleak.h>
+#include <linux/dma-mapping.h>
+#include <linux/qcom_scm.h>
+
+#include <asm/cacheflush.h>
+#include <asm/sizes.h>
+
+#include "msm_iommu_perfmon.h"
+#include "msm_iommu_hw-v1.h"
+#include "msm_iommu_priv.h"
+#include <linux/qcom_iommu.h>
+#include <trace/events/kmem.h>
+
+/* bitmap of the page sizes currently supported */
+#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+
+/* commands for SCM_SVC_MP */
+#define IOMMU_SECURE_CFG 2
+#define IOMMU_SECURE_PTBL_SIZE 3
+#define IOMMU_SECURE_PTBL_INIT 4
+#define IOMMU_SET_CP_POOL_SIZE 5
+#define IOMMU_SECURE_MAP 6
+#define IOMMU_SECURE_UNMAP 7
+#define IOMMU_SECURE_MAP2 0x0B
+#define IOMMU_SECURE_MAP2_FLAT 0x12
+#define IOMMU_SECURE_UNMAP2 0x0C
+#define IOMMU_SECURE_UNMAP2_FLAT 0x13
+#define IOMMU_TLBINVAL_FLAG 0x00000001
+
+/* commands for SCM_SVC_UTIL */
+#define IOMMU_DUMP_SMMU_FAULT_REGS	0x0C
+#define SCM_SVC_MP 0xc
+
+static struct iommu_access_ops *iommu_access_ops;
+static int is_secure;
+
+static const struct of_device_id msm_smmu_list[] = {
+ { .compatible = "qcom,msm-smmu-v1", },
+ { .compatible = "qcom,msm-smmu-v2", },
+ { }
+};
+
+struct msm_scm_paddr_list {
+ unsigned int list;
+ unsigned int list_size;
+ unsigned int size;
+};
+
+struct msm_scm_mapping_info {
+ unsigned int id;
+ unsigned int ctx_id;
+ unsigned int va;
+ unsigned int size;
+};
+
+struct msm_scm_map2_req {
+ struct msm_scm_paddr_list plist;
+ struct msm_scm_mapping_info info;
+ unsigned int flags;
+};
+
+struct msm_scm_unmap2_req {
+ struct msm_scm_mapping_info info;
+ unsigned int flags;
+};
+
+struct msm_cp_pool_size {
+ uint32_t size;
+ uint32_t spare;
+};
+
+#define NUM_DUMP_REGS 14
+/*
+ * some space to allow the number of registers returned by the secure
+ * environment to grow
+ */
+#define WIGGLE_ROOM (NUM_DUMP_REGS * 2)
+/* Each entry is a (reg_addr, reg_val) pair, hence the * 2 */
+#define SEC_DUMP_SIZE ((NUM_DUMP_REGS * 2) + WIGGLE_ROOM)
+
+struct msm_scm_fault_regs_dump {
+ uint32_t dump_size;
+ uint32_t dump_data[SEC_DUMP_SIZE];
+} __aligned(PAGE_SIZE);
+
+void msm_iommu_sec_set_access_ops(struct iommu_access_ops *access_ops)
+{
+ iommu_access_ops = access_ops;
+}
+
+static int msm_iommu_dump_fault_regs(int smmu_id, int cb_num,
+ struct msm_scm_fault_regs_dump *regs)
+{
+ int ret;
+
+ dmac_clean_range(regs, regs + 1);
+
+ ret = qcom_scm_iommu_dump_fault_regs(smmu_id, cb_num,
+ virt_to_phys(regs), sizeof(*regs));
+
+ dmac_inv_range(regs, regs + 1);
+
+ return ret;
+}
+
+static int msm_iommu_reg_dump_to_regs(
+ struct msm_iommu_context_reg ctx_regs[],
+ struct msm_scm_fault_regs_dump *dump, struct msm_iommu_drvdata *drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+ int i, j, ret = 0;
+	const uint32_t nvals = min_t(uint32_t, dump->dump_size / sizeof(uint32_t), 1 + SEC_DUMP_SIZE);
+ uint32_t *it = (uint32_t *) dump->dump_data;
+ const uint32_t * const end = ((uint32_t *) dump) + nvals;
+ phys_addr_t phys_base = drvdata->phys_base;
+ int ctx = ctx_drvdata->num;
+
+ if (!nvals)
+ return -EINVAL;
+
+ for (i = 1; it < end; it += 2, i += 2) {
+ unsigned int reg_offset;
+ uint32_t addr = *it;
+ uint32_t val = *(it + 1);
+ struct msm_iommu_context_reg *reg = NULL;
+ if (addr < phys_base) {
+ pr_err("Bogus-looking register (0x%x) for Iommu with base at %pa. Skipping.\n",
+ addr, &phys_base);
+ continue;
+ }
+ reg_offset = addr - phys_base;
+
+ for (j = 0; j < MAX_DUMP_REGS; ++j) {
+ struct dump_regs_tbl_entry dump_reg = dump_regs_tbl[j];
+ void *test_reg;
+ unsigned int test_offset;
+ switch (dump_reg.dump_reg_type) {
+ case DRT_CTX_REG:
+ test_reg = CTX_REG(dump_reg.reg_offset,
+ drvdata->cb_base, ctx);
+ break;
+ case DRT_GLOBAL_REG:
+ test_reg = GLB_REG(
+ dump_reg.reg_offset, drvdata->glb_base);
+ break;
+ case DRT_GLOBAL_REG_N:
+ test_reg = GLB_REG_N(
+ drvdata->glb_base, ctx,
+ dump_reg.reg_offset);
+ break;
+ default:
+ pr_err("Unknown dump_reg_type: 0x%x\n",
+ dump_reg.dump_reg_type);
+ BUG();
+ break;
+ }
+ test_offset = test_reg - drvdata->glb_base;
+ if (test_offset == reg_offset) {
+ reg = &ctx_regs[j];
+ break;
+ }
+ }
+
+ if (reg == NULL) {
+ pr_debug("Unknown register in secure CB dump: %x\n",
+ addr);
+ continue;
+ }
+
+ if (reg->valid) {
+ WARN(1, "Invalid (repeated?) register in CB dump: %x\n",
+ addr);
+ continue;
+ }
+
+ reg->val = val;
+ reg->valid = true;
+ }
+
+ if (i != nvals) {
+		pr_err("Invalid dump! %d != %u\n", i, nvals);
+ ret = 1;
+ }
+
+ for (i = 0; i < MAX_DUMP_REGS; ++i) {
+ if (!ctx_regs[i].valid) {
+ if (dump_regs_tbl[i].must_be_present) {
+ pr_err("Register missing from dump for ctx %d: %s, 0x%x\n",
+ ctx,
+ dump_regs_tbl[i].name,
+ dump_regs_tbl[i].reg_offset);
+ ret = 1;
+ }
+ ctx_regs[i].val = 0xd00dfeed;
+ }
+ }
+
+ return ret;
+}
+
+irqreturn_t msm_iommu_secure_fault_handler_v2(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ struct msm_iommu_drvdata *drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ struct msm_scm_fault_regs_dump *regs;
+ int tmp, ret = IRQ_HANDLED;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ BUG_ON(!pdev);
+
+ drvdata = dev_get_drvdata(pdev->dev.parent);
+ BUG_ON(!drvdata);
+
+ ctx_drvdata = dev_get_drvdata(&pdev->dev);
+ BUG_ON(!ctx_drvdata);
+
+ regs = kzalloc(sizeof(*regs), GFP_KERNEL);
+ if (!regs) {
+ pr_err("%s: Couldn't allocate memory\n", __func__);
+ goto lock_release;
+ }
+
+ if (!drvdata->ctx_attach_count) {
+ pr_err("Unexpected IOMMU page fault from secure context bank!\n");
+ pr_err("name = %s\n", drvdata->name);
+ pr_err("Power is OFF. Unable to read page fault information\n");
+ /*
+ * We cannot determine which context bank caused the issue so
+ * we just return handled here to ensure IRQ handler code is
+ * happy
+ */
+ goto free_regs;
+ }
+
+ iommu_access_ops->iommu_clk_on(drvdata);
+ tmp = msm_iommu_dump_fault_regs(drvdata->sec_id,
+ ctx_drvdata->num, regs);
+ iommu_access_ops->iommu_clk_off(drvdata);
+
+ if (tmp) {
+ pr_err("%s: Couldn't dump fault registers (%d) %s, ctx: %d\n",
+ __func__, tmp, drvdata->name, ctx_drvdata->num);
+ goto free_regs;
+ } else {
+ struct msm_iommu_context_reg ctx_regs[MAX_DUMP_REGS];
+ memset(ctx_regs, 0, sizeof(ctx_regs));
+ tmp = msm_iommu_reg_dump_to_regs(
+ ctx_regs, regs, drvdata, ctx_drvdata);
+ if (tmp < 0) {
+ ret = IRQ_NONE;
+ pr_err("Incorrect response from secure environment\n");
+ goto free_regs;
+ }
+
+ if (ctx_regs[DUMP_REG_FSR].val) {
+ if (tmp)
+ pr_err("Incomplete fault register dump. Printout will be incomplete.\n");
+ if (!ctx_drvdata->attached_domain) {
+ pr_err("Bad domain in interrupt handler\n");
+ tmp = -ENOSYS;
+ } else {
+ tmp = report_iommu_fault(
+ ctx_drvdata->attached_domain,
+ &ctx_drvdata->pdev->dev,
+ COMBINE_DUMP_REG(
+ ctx_regs[DUMP_REG_FAR1].val,
+ ctx_regs[DUMP_REG_FAR0].val),
+ 0);
+ }
+
+ /* if the fault wasn't handled by someone else: */
+ if (tmp == -ENOSYS) {
+ pr_err("Unexpected IOMMU page fault from secure context bank!\n");
+ pr_err("name = %s\n", drvdata->name);
+ pr_err("context = %s (%d)\n", ctx_drvdata->name,
+ ctx_drvdata->num);
+ pr_err("Interesting registers:\n");
+ print_ctx_regs(ctx_regs);
+ }
+ } else {
+ ret = IRQ_NONE;
+ }
+ }
+free_regs:
+ kfree(regs);
+lock_release:
+ iommu_access_ops->iommu_lock_release(0);
+ return ret;
+}
+
+int msm_iommu_sec_program_iommu(struct msm_iommu_drvdata *drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+ if (drvdata->smmu_local_base) {
+ writel_relaxed(0xFFFFFFFF,
+ drvdata->smmu_local_base + SMMU_INTR_SEL_NS);
+		mb(); /* ensure the interrupt-select write completes before the SCM call */
+ }
+
+ return qcom_scm_restore_sec_cfg(drvdata->sec_id, ctx_drvdata->num);
+}
+
+static int msm_iommu_sec_map2(struct msm_scm_map2_req *map)
+{
+ u32 flags;
+
+#ifdef CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP
+ flags = IOMMU_TLBINVAL_FLAG;
+#else
+ flags = 0;
+#endif
+
+ return qcom_scm_iommu_secure_map(map->plist.list,
+ map->plist.list_size,
+ map->plist.size,
+ map->info.id,
+ map->info.ctx_id,
+ map->info.va,
+ map->info.size,
+ flags);
+}
+
+static int msm_iommu_sec_ptbl_map(struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata,
+ unsigned long va, phys_addr_t pa, size_t len)
+{
+ struct msm_scm_map2_req map;
+ void *flush_va, *flush_va_end;
+ int ret = 0;
+
+ if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M) ||
+ !IS_ALIGNED(pa, SZ_1M))
+ return -EINVAL;
+ map.plist.list = virt_to_phys(&pa);
+ map.plist.list_size = 1;
+ map.plist.size = len;
+ map.info.id = iommu_drvdata->sec_id;
+ map.info.ctx_id = ctx_drvdata->num;
+ map.info.va = va;
+ map.info.size = len;
+
+ flush_va = &pa;
+ flush_va_end = (void *)
+ (((unsigned long) flush_va) + sizeof(phys_addr_t));
+
+ /*
+ * Ensure that the buffer is in RAM by the time it gets to TZ
+ */
+ dmac_clean_range(flush_va, flush_va_end);
+
+ ret = msm_iommu_sec_map2(&map);
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+static unsigned int get_phys_addr(struct scatterlist *sg)
+{
+ /*
+ * Try sg_dma_address first so that we can
+ * map carveout regions that do not have a
+ * struct page associated with them.
+ */
+ unsigned int pa = sg_dma_address(sg);
+ if (pa == 0)
+ pa = sg_phys(sg);
+ return pa;
+}
+
+static int msm_iommu_sec_ptbl_map_range(struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata,
+ unsigned long va, struct scatterlist *sg, size_t len)
+{
+ struct scatterlist *sgiter;
+ struct msm_scm_map2_req map;
+	unsigned int *pa_list = NULL;
+ unsigned int pa, cnt;
+ void *flush_va, *flush_va_end;
+ unsigned int offset = 0, chunk_offset = 0;
+ int ret;
+
+ if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
+ return -EINVAL;
+
+ map.info.id = iommu_drvdata->sec_id;
+ map.info.ctx_id = ctx_drvdata->num;
+ map.info.va = va;
+ map.info.size = len;
+
+ if (sg->length == len) {
+ /*
+ * physical address for secure mapping needs
+ * to be 1MB aligned
+ */
+ pa = get_phys_addr(sg);
+ if (!IS_ALIGNED(pa, SZ_1M))
+ return -EINVAL;
+ map.plist.list = virt_to_phys(&pa);
+ map.plist.list_size = 1;
+ map.plist.size = len;
+ flush_va = &pa;
+ } else {
+ sgiter = sg;
+ if (!IS_ALIGNED(sgiter->length, SZ_1M))
+ return -EINVAL;
+ cnt = sg->length / SZ_1M;
+ while ((sgiter = sg_next(sgiter))) {
+ if (!IS_ALIGNED(sgiter->length, SZ_1M))
+ return -EINVAL;
+ cnt += sgiter->length / SZ_1M;
+ }
+
+ pa_list = kmalloc(cnt * sizeof(*pa_list), GFP_KERNEL);
+ if (!pa_list)
+ return -ENOMEM;
+
+ sgiter = sg;
+ cnt = 0;
+ pa = get_phys_addr(sgiter);
+ if (!IS_ALIGNED(pa, SZ_1M)) {
+ kfree(pa_list);
+ return -EINVAL;
+ }
+ while (offset < len) {
+ pa += chunk_offset;
+ pa_list[cnt] = pa;
+ chunk_offset += SZ_1M;
+ offset += SZ_1M;
+ cnt++;
+
+ if (chunk_offset >= sgiter->length && offset < len) {
+ chunk_offset = 0;
+ sgiter = sg_next(sgiter);
+ pa = get_phys_addr(sgiter);
+ }
+ }
+
+ map.plist.list = virt_to_phys(pa_list);
+ map.plist.list_size = cnt;
+ map.plist.size = SZ_1M;
+ flush_va = pa_list;
+ }
+
+ /*
+ * Ensure that the buffer is in RAM by the time it gets to TZ
+ */
+ flush_va_end = (void *) (((unsigned long) flush_va) +
+ (map.plist.list_size * sizeof(*pa_list)));
+ dmac_clean_range(flush_va, flush_va_end);
+
+ ret = msm_iommu_sec_map2(&map);
+ kfree(pa_list);
+
+ return ret;
+}
+
+static int msm_iommu_sec_ptbl_unmap(struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata,
+ unsigned long va, size_t len)
+{
+ if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
+ return -EINVAL;
+
+ return qcom_scm_iommu_secure_unmap(iommu_drvdata->sec_id,
+ ctx_drvdata->num,
+ va,
+ len,
+ IOMMU_TLBINVAL_FLAG);
+}
+
+static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
+{
+ struct msm_iommu_priv *priv;
+ struct iommu_domain *domain;
+
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return NULL;
+
+ INIT_LIST_HEAD(&priv->list_attached);
+ domain = &priv->domain;
+ return domain;
+}
+
+static void msm_iommu_domain_free(struct iommu_domain *domain)
+{
+ struct msm_iommu_priv *priv;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+ priv = to_msm_priv(domain);
+
+ kfree(priv);
+ iommu_access_ops->iommu_lock_release(0);
+}
+
+static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+ struct msm_iommu_priv *priv;
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ struct msm_iommu_ctx_drvdata *tmp_drvdata;
+ int ret = 0;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ priv = to_msm_priv(domain);
+ if (!priv || !dev) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ iommu_drvdata = dev_get_drvdata(dev->parent);
+ ctx_drvdata = dev_get_drvdata(dev);
+ if (!iommu_drvdata || !ctx_drvdata) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (!list_empty(&ctx_drvdata->attached_elm)) {
+ ret = -EBUSY;
+ goto fail;
+ }
+
+ list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
+ if (tmp_drvdata == ctx_drvdata) {
+ ret = -EBUSY;
+ goto fail;
+ }
+
+ ret = iommu_access_ops->iommu_power_on(iommu_drvdata);
+ if (ret)
+ goto fail;
+
+ /* We can only do this once */
+ if (!iommu_drvdata->ctx_attach_count) {
+ ret = iommu_access_ops->iommu_clk_on(iommu_drvdata);
+ if (ret) {
+ iommu_access_ops->iommu_power_off(iommu_drvdata);
+ goto fail;
+ }
+
+ ret = msm_iommu_sec_program_iommu(iommu_drvdata,
+ ctx_drvdata);
+
+ /* bfb settings are always programmed by HLOS */
+ program_iommu_bfb_settings(iommu_drvdata->base,
+ iommu_drvdata->bfb_settings);
+
+ iommu_access_ops->iommu_clk_off(iommu_drvdata);
+ if (ret) {
+ iommu_access_ops->iommu_power_off(iommu_drvdata);
+ goto fail;
+ }
+ }
+
+ list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
+ ctx_drvdata->attached_domain = domain;
+ ++iommu_drvdata->ctx_attach_count;
+
+ iommu_access_ops->iommu_lock_release(0);
+
+ msm_iommu_attached(dev->parent);
+ return ret;
+fail:
+ iommu_access_ops->iommu_lock_release(0);
+ return ret;
+}
+
+static void msm_iommu_detach_dev(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+
+ if (!dev)
+ return;
+
+ msm_iommu_detached(dev->parent);
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ iommu_drvdata = dev_get_drvdata(dev->parent);
+ ctx_drvdata = dev_get_drvdata(dev);
+ if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
+ goto fail;
+
+ list_del_init(&ctx_drvdata->attached_elm);
+ ctx_drvdata->attached_domain = NULL;
+
+ iommu_access_ops->iommu_power_off(iommu_drvdata);
+ BUG_ON(iommu_drvdata->ctx_attach_count == 0);
+ --iommu_drvdata->ctx_attach_count;
+fail:
+ iommu_access_ops->iommu_lock_release(0);
+}
+
+static int get_drvdata(struct iommu_domain *domain,
+ struct msm_iommu_drvdata **iommu_drvdata,
+ struct msm_iommu_ctx_drvdata **ctx_drvdata)
+{
+ struct msm_iommu_priv *priv = to_msm_priv(domain);
+ struct msm_iommu_ctx_drvdata *ctx;
+
+	list_for_each_entry(ctx, &priv->list_attached, attached_elm) {
+		if (ctx->attached_domain == domain) {
+			*ctx_drvdata = ctx;
+			*iommu_drvdata =
+				dev_get_drvdata(ctx->pdev->dev.parent);
+			return 0;
+		}
+	}
+
+	/* no context attached: don't dereference the list-head sentinel */
+	return -EINVAL;
+}
+
+static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
+ phys_addr_t pa, size_t len, int prot)
+{
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ int ret = 0;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
+ if (ret)
+ goto fail;
+
+ iommu_access_ops->iommu_clk_on(iommu_drvdata);
+ ret = msm_iommu_sec_ptbl_map(iommu_drvdata, ctx_drvdata,
+ va, pa, len);
+ iommu_access_ops->iommu_clk_off(iommu_drvdata);
+fail:
+ iommu_access_ops->iommu_lock_release(0);
+ return ret;
+}
+
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+ size_t len)
+{
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+	int ret;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
+ if (ret)
+ goto fail;
+
+ iommu_access_ops->iommu_clk_on(iommu_drvdata);
+ ret = msm_iommu_sec_ptbl_unmap(iommu_drvdata, ctx_drvdata,
+ va, len);
+ iommu_access_ops->iommu_clk_off(iommu_drvdata);
+fail:
+ iommu_access_ops->iommu_lock_release(0);
+
+ /* the IOMMU API requires us to return how many bytes were unmapped */
+ len = ret ? 0 : len;
+ return len;
+}
+
+static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
+ struct scatterlist *sg, unsigned int len,
+ int prot)
+{
+ int ret;
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
+ if (ret)
+ goto fail;
+ iommu_access_ops->iommu_clk_on(iommu_drvdata);
+ ret = msm_iommu_sec_ptbl_map_range(iommu_drvdata, ctx_drvdata,
+ va, sg, len);
+ iommu_access_ops->iommu_clk_off(iommu_drvdata);
+fail:
+ iommu_access_ops->iommu_lock_release(0);
+ return ret;
+}
+
+
+static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
+ unsigned int len)
+{
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+	int ret;
+
+ if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
+ return -EINVAL;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
+ if (ret)
+ goto fail;
+
+ iommu_access_ops->iommu_clk_on(iommu_drvdata);
+ ret = msm_iommu_sec_ptbl_unmap(iommu_drvdata, ctx_drvdata, va, len);
+ iommu_access_ops->iommu_clk_off(iommu_drvdata);
+
+fail:
+ iommu_access_ops->iommu_lock_release(0);
+ return ret ? ret : 0;
+}
+
+static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
+ phys_addr_t va)
+{
+ return 0;
+}
+
+void msm_iommu_check_scm_call_avail(void)
+{
+ is_secure = qcom_scm_is_call_available(SCM_SVC_MP, IOMMU_SECURE_CFG);
+}
+
+int msm_iommu_get_scm_call_avail(void)
+{
+ return is_secure;
+}
+
+static struct iommu_ops msm_iommu_ops = {
+ .domain_alloc = msm_iommu_domain_alloc,
+ .domain_free = msm_iommu_domain_free,
+ .attach_dev = msm_iommu_attach_dev,
+ .detach_dev = msm_iommu_detach_dev,
+ .map = msm_iommu_map,
+ .unmap = msm_iommu_unmap,
+/* .map_range = msm_iommu_map_range,*/
+ .map_sg = default_iommu_map_sg,
+/* .unmap_range = msm_iommu_unmap_range,*/
+ .iova_to_phys = msm_iommu_iova_to_phys,
+ .pgsize_bitmap = MSM_IOMMU_PGSIZES,
+};
+
+static int __init msm_iommu_sec_init(void)
+{
+ int ret;
+
+ ret = bus_register(&msm_iommu_sec_bus_type);
+ if (ret)
+ return ret;
+
+ ret = bus_set_iommu(&msm_iommu_sec_bus_type, &msm_iommu_ops);
+ if (ret) {
+ bus_unregister(&msm_iommu_sec_bus_type);
+ return ret;
+ }
+
+ return 0;
+}
+
+subsys_initcall(msm_iommu_sec_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM SMMU Secure Driver");
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index cfc493c2e30a7..c4e41c26649e9 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -3,7 +3,6 @@ menu "EEPROM support"
config EEPROM_AT24
tristate "I2C EEPROMs / RAMs / ROMs from most vendors"
depends on I2C && SYSFS
- select REGMAP
select NVMEM
help
Enable this driver to get read/write support to most I2C EEPROMs
@@ -32,7 +31,6 @@ config EEPROM_AT24
config EEPROM_AT25
tristate "SPI EEPROMs from most vendors"
depends on SPI && SYSFS
- select REGMAP
select NVMEM
help
Enable this driver to get read/write support to most SPI EEPROMs,
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 089d6943f68ab..de550a605f7dc 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -23,7 +23,6 @@
#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/nvmem-provider.h>
-#include <linux/regmap.h>
#include <linux/platform_data/at24.h>
/*
@@ -69,7 +68,6 @@ struct at24_data {
unsigned write_max;
unsigned num_addresses;
- struct regmap_config regmap_config;
struct nvmem_config nvmem_config;
struct nvmem_device *nvmem;
@@ -252,10 +250,10 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
return -ETIMEDOUT;
}
-static ssize_t at24_read(struct at24_data *at24,
- char *buf, loff_t off, size_t count)
+static int at24_read(void *priv, unsigned int off, void *val, size_t count)
{
- ssize_t retval = 0;
+ struct at24_data *at24 = priv;
+ char *buf = val;
if (unlikely(!count))
return count;
@@ -267,23 +265,21 @@ static ssize_t at24_read(struct at24_data *at24,
mutex_lock(&at24->lock);
while (count) {
- ssize_t status;
+ int status;
status = at24_eeprom_read(at24, buf, off, count);
- if (status <= 0) {
- if (retval == 0)
- retval = status;
- break;
+ if (status < 0) {
+ mutex_unlock(&at24->lock);
+ return status;
}
buf += status;
off += status;
count -= status;
- retval += status;
}
mutex_unlock(&at24->lock);
- return retval;
+ return 0;
}
/*
@@ -372,13 +368,13 @@ static ssize_t at24_eeprom_write(struct at24_data *at24, const char *buf,
return -ETIMEDOUT;
}
-static ssize_t at24_write(struct at24_data *at24, const char *buf, loff_t off,
- size_t count)
+static int at24_write(void *priv, unsigned int off, void *val, size_t count)
{
- ssize_t retval = 0;
+ struct at24_data *at24 = priv;
+ char *buf = val;
if (unlikely(!count))
- return count;
+ return -EINVAL;
/*
* Write data to chip, protecting against concurrent updates
@@ -387,70 +383,23 @@ static ssize_t at24_write(struct at24_data *at24, const char *buf, loff_t off,
mutex_lock(&at24->lock);
while (count) {
- ssize_t status;
+ int status;
status = at24_eeprom_write(at24, buf, off, count);
- if (status <= 0) {
- if (retval == 0)
- retval = status;
- break;
+ if (status < 0) {
+ mutex_unlock(&at24->lock);
+ return status;
}
buf += status;
off += status;
count -= status;
- retval += status;
}
mutex_unlock(&at24->lock);
- return retval;
-}
-
-/*-------------------------------------------------------------------------*/
-
-/*
- * Provide a regmap interface, which is registered with the NVMEM
- * framework
-*/
-static int at24_regmap_read(void *context, const void *reg, size_t reg_size,
- void *val, size_t val_size)
-{
- struct at24_data *at24 = context;
- off_t offset = *(u32 *)reg;
- int err;
-
- err = at24_read(at24, val, offset, val_size);
- if (err)
- return err;
- return 0;
-}
-
-static int at24_regmap_write(void *context, const void *data, size_t count)
-{
- struct at24_data *at24 = context;
- const char *buf;
- u32 offset;
- size_t len;
- int err;
-
- memcpy(&offset, data, sizeof(offset));
- buf = (const char *)data + sizeof(offset);
- len = count - sizeof(offset);
-
- err = at24_write(at24, buf, offset, len);
- if (err)
- return err;
return 0;
}
-static const struct regmap_bus at24_regmap_bus = {
- .read = at24_regmap_read,
- .write = at24_regmap_write,
- .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
-};
-
-/*-------------------------------------------------------------------------*/
-
#ifdef CONFIG_OF
static void at24_get_ofdata(struct i2c_client *client,
struct at24_platform_data *chip)
@@ -482,7 +431,6 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
struct at24_data *at24;
int err;
unsigned i, num_addresses;
- struct regmap *regmap;
if (client->dev.platform_data) {
chip = *(struct at24_platform_data *)client->dev.platform_data;
@@ -612,19 +560,6 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
}
- at24->regmap_config.reg_bits = 32;
- at24->regmap_config.val_bits = 8;
- at24->regmap_config.reg_stride = 1;
- at24->regmap_config.max_register = chip.byte_len - 1;
-
- regmap = devm_regmap_init(&client->dev, &at24_regmap_bus, at24,
- &at24->regmap_config);
- if (IS_ERR(regmap)) {
- dev_err(&client->dev, "regmap init failed\n");
- err = PTR_ERR(regmap);
- goto err_clients;
- }
-
at24->nvmem_config.name = dev_name(&client->dev);
at24->nvmem_config.dev = &client->dev;
at24->nvmem_config.read_only = !writable;
@@ -632,6 +567,12 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id)
at24->nvmem_config.owner = THIS_MODULE;
at24->nvmem_config.compat = true;
at24->nvmem_config.base_dev = &client->dev;
+ at24->nvmem_config.reg_read = at24_read;
+ at24->nvmem_config.reg_write = at24_write;
+ at24->nvmem_config.priv = at24;
+ at24->nvmem_config.stride = 4;
+ at24->nvmem_config.word_size = 1;
+ at24->nvmem_config.size = chip.byte_len;
at24->nvmem = nvmem_register(&at24->nvmem_config);
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index fa36a6e37084d..a2858b33585e3 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -17,7 +17,6 @@
#include <linux/sched.h>
#include <linux/nvmem-provider.h>
-#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/eeprom.h>
#include <linux/property.h>
@@ -34,7 +33,6 @@ struct at25_data {
struct mutex lock;
struct spi_eeprom chip;
unsigned addrlen;
- struct regmap_config regmap_config;
struct nvmem_config nvmem_config;
struct nvmem_device *nvmem;
};
@@ -65,14 +63,11 @@ struct at25_data {
#define io_limit PAGE_SIZE /* bytes */
-static ssize_t
-at25_ee_read(
- struct at25_data *at25,
- char *buf,
- unsigned offset,
- size_t count
-)
+static int at25_ee_read(void *priv, unsigned int offset,
+ void *val, size_t count)
{
+ struct at25_data *at25 = priv;
+ char *buf = val;
u8 command[EE_MAXADDRLEN + 1];
u8 *cp;
ssize_t status;
@@ -81,11 +76,11 @@ at25_ee_read(
u8 instr;
if (unlikely(offset >= at25->chip.byte_len))
- return 0;
+ return -EINVAL;
if ((offset + count) > at25->chip.byte_len)
count = at25->chip.byte_len - offset;
if (unlikely(!count))
- return count;
+ return -EINVAL;
cp = command;
@@ -131,28 +126,14 @@ at25_ee_read(
count, offset, (int) status);
mutex_unlock(&at25->lock);
- return status ? status : count;
+ return status;
}
-static int at25_regmap_read(void *context, const void *reg, size_t reg_size,
- void *val, size_t val_size)
+static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count)
{
- struct at25_data *at25 = context;
- off_t offset = *(u32 *)reg;
- int err;
-
- err = at25_ee_read(at25, val, offset, val_size);
- if (err)
- return err;
- return 0;
-}
-
-static ssize_t
-at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
- size_t count)
-{
- ssize_t status = 0;
- unsigned written = 0;
+ struct at25_data *at25 = priv;
+ const char *buf = val;
+ int status = 0;
unsigned buf_size;
u8 *bounce;
@@ -161,7 +142,7 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
if ((off + count) > at25->chip.byte_len)
count = at25->chip.byte_len - off;
if (unlikely(!count))
- return count;
+ return -EINVAL;
/* Temp buffer starts with command and address */
buf_size = at25->chip.page_size;
@@ -256,40 +237,15 @@ at25_ee_write(struct at25_data *at25, const char *buf, loff_t off,
off += segment;
buf += segment;
count -= segment;
- written += segment;
} while (count > 0);
mutex_unlock(&at25->lock);
kfree(bounce);
- return written ? written : status;
+ return status;
}
-static int at25_regmap_write(void *context, const void *data, size_t count)
-{
- struct at25_data *at25 = context;
- const char *buf;
- u32 offset;
- size_t len;
- int err;
-
- memcpy(&offset, data, sizeof(offset));
- buf = (const char *)data + sizeof(offset);
- len = count - sizeof(offset);
-
- err = at25_ee_write(at25, buf, offset, len);
- if (err)
- return err;
- return 0;
-}
-
-static const struct regmap_bus at25_regmap_bus = {
- .read = at25_regmap_read,
- .write = at25_regmap_write,
- .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
-};
-
/*-------------------------------------------------------------------------*/
static int at25_fw_to_chip(struct device *dev, struct spi_eeprom *chip)
@@ -349,7 +305,6 @@ static int at25_probe(struct spi_device *spi)
{
struct at25_data *at25 = NULL;
struct spi_eeprom chip;
- struct regmap *regmap;
int err;
int sr;
int addrlen;
@@ -394,18 +349,6 @@ static int at25_probe(struct spi_device *spi)
spi_set_drvdata(spi, at25);
at25->addrlen = addrlen;
- at25->regmap_config.reg_bits = 32;
- at25->regmap_config.val_bits = 8;
- at25->regmap_config.reg_stride = 1;
- at25->regmap_config.max_register = chip.byte_len - 1;
-
- regmap = devm_regmap_init(&spi->dev, &at25_regmap_bus, at25,
- &at25->regmap_config);
- if (IS_ERR(regmap)) {
- dev_err(&spi->dev, "regmap init failed\n");
- return PTR_ERR(regmap);
- }
-
at25->nvmem_config.name = dev_name(&spi->dev);
at25->nvmem_config.dev = &spi->dev;
at25->nvmem_config.read_only = chip.flags & EE_READONLY;
@@ -413,6 +356,12 @@ static int at25_probe(struct spi_device *spi)
at25->nvmem_config.owner = THIS_MODULE;
at25->nvmem_config.compat = true;
at25->nvmem_config.base_dev = &spi->dev;
+ at25->nvmem_config.reg_read = at25_ee_read;
+ at25->nvmem_config.reg_write = at25_ee_write;
+ at25->nvmem_config.priv = at25;
+ at25->nvmem_config.stride = 4;
+ at25->nvmem_config.word_size = 1;
+ at25->nvmem_config.size = chip.byte_len;
at25->nvmem = nvmem_register(&at25->nvmem_config);
if (IS_ERR(at25->nvmem))
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 426fe2fd5238c..5004d72c9f42f 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -20,7 +20,6 @@
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/nvmem-provider.h>
-#include <linux/regmap.h>
#include <linux/eeprom_93xx46.h>
#define OP_START 0x4
@@ -43,7 +42,6 @@ struct eeprom_93xx46_dev {
struct spi_device *spi;
struct eeprom_93xx46_platform_data *pdata;
struct mutex lock;
- struct regmap_config regmap_config;
struct nvmem_config nvmem_config;
struct nvmem_device *nvmem;
int addrlen;
@@ -60,11 +58,12 @@ static inline bool has_quirk_instruction_length(struct eeprom_93xx46_dev *edev)
return edev->pdata->quirks & EEPROM_93XX46_QUIRK_INSTRUCTION_LENGTH;
}
-static ssize_t
-eeprom_93xx46_read(struct eeprom_93xx46_dev *edev, char *buf,
- unsigned off, size_t count)
+static int eeprom_93xx46_read(void *priv, unsigned int off,
+ void *val, size_t count)
{
- ssize_t ret = 0;
+ struct eeprom_93xx46_dev *edev = priv;
+ char *buf = val;
+ int err = 0;
if (unlikely(off >= edev->size))
return 0;
@@ -84,7 +83,6 @@ eeprom_93xx46_read(struct eeprom_93xx46_dev *edev, char *buf,
u16 cmd_addr = OP_READ << edev->addrlen;
size_t nbytes = count;
int bits;
- int err;
if (edev->addrlen == 7) {
cmd_addr |= off & 0x7f;
@@ -120,21 +118,20 @@ eeprom_93xx46_read(struct eeprom_93xx46_dev *edev, char *buf,
if (err) {
dev_err(&edev->spi->dev, "read %zu bytes at %d: err. %d\n",
nbytes, (int)off, err);
- ret = err;
break;
}
buf += nbytes;
off += nbytes;
count -= nbytes;
- ret += nbytes;
}
if (edev->pdata->finish)
edev->pdata->finish(edev);
mutex_unlock(&edev->lock);
- return ret;
+
+ return err;
}
static int eeprom_93xx46_ew(struct eeprom_93xx46_dev *edev, int is_on)
@@ -230,10 +227,11 @@ eeprom_93xx46_write_word(struct eeprom_93xx46_dev *edev,
return ret;
}
-static ssize_t
-eeprom_93xx46_write(struct eeprom_93xx46_dev *edev, const char *buf,
- loff_t off, size_t count)
+static int eeprom_93xx46_write(void *priv, unsigned int off,
+ void *val, size_t count)
{
+ struct eeprom_93xx46_dev *edev = priv;
+ char *buf = val;
int i, ret, step = 1;
if (unlikely(off >= edev->size))
@@ -275,52 +273,9 @@ eeprom_93xx46_write(struct eeprom_93xx46_dev *edev, const char *buf,
/* erase/write disable */
eeprom_93xx46_ew(edev, 0);
- return ret ? : count;
-}
-
-/*
- * Provide a regmap interface, which is registered with the NVMEM
- * framework
-*/
-static int eeprom_93xx46_regmap_read(void *context, const void *reg,
- size_t reg_size, void *val,
- size_t val_size)
-{
- struct eeprom_93xx46_dev *eeprom_93xx46 = context;
- off_t offset = *(u32 *)reg;
- int err;
-
- err = eeprom_93xx46_read(eeprom_93xx46, val, offset, val_size);
- if (err)
- return err;
- return 0;
-}
-
-static int eeprom_93xx46_regmap_write(void *context, const void *data,
- size_t count)
-{
- struct eeprom_93xx46_dev *eeprom_93xx46 = context;
- const char *buf;
- u32 offset;
- size_t len;
- int err;
-
- memcpy(&offset, data, sizeof(offset));
- buf = (const char *)data + sizeof(offset);
- len = count - sizeof(offset);
-
- err = eeprom_93xx46_write(eeprom_93xx46, buf, offset, len);
- if (err)
- return err;
- return 0;
+ return ret;
}
-static const struct regmap_bus eeprom_93xx46_regmap_bus = {
- .read = eeprom_93xx46_regmap_read,
- .write = eeprom_93xx46_regmap_write,
- .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
-};
-
static int eeprom_93xx46_eral(struct eeprom_93xx46_dev *edev)
{
struct eeprom_93xx46_platform_data *pd = edev->pdata;
@@ -480,7 +435,6 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
{
struct eeprom_93xx46_platform_data *pd;
struct eeprom_93xx46_dev *edev;
- struct regmap *regmap;
int err;
if (spi->dev.of_node) {
@@ -515,20 +469,6 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
edev->pdata = pd;
edev->size = 128;
-
- edev->regmap_config.reg_bits = 32;
- edev->regmap_config.val_bits = 8;
- edev->regmap_config.reg_stride = 1;
- edev->regmap_config.max_register = edev->size - 1;
-
- regmap = devm_regmap_init(&spi->dev, &eeprom_93xx46_regmap_bus, edev,
- &edev->regmap_config);
- if (IS_ERR(regmap)) {
- dev_err(&spi->dev, "regmap init failed\n");
- err = PTR_ERR(regmap);
- goto fail;
- }
-
edev->nvmem_config.name = dev_name(&spi->dev);
edev->nvmem_config.dev = &spi->dev;
edev->nvmem_config.read_only = pd->flags & EE_READONLY;
@@ -536,6 +476,12 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
edev->nvmem_config.owner = THIS_MODULE;
edev->nvmem_config.compat = true;
edev->nvmem_config.base_dev = &spi->dev;
+ edev->nvmem_config.reg_read = eeprom_93xx46_read;
+ edev->nvmem_config.reg_write = eeprom_93xx46_write;
+ edev->nvmem_config.priv = edev;
+ edev->nvmem_config.stride = 4;
+ edev->nvmem_config.word_size = 1;
+ edev->nvmem_config.size = edev->size;
edev->nvmem = nvmem_register(&edev->nvmem_config);
if (IS_ERR(edev->nvmem)) {
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index 4c33d7690f2f6..250f223aaa803 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -1,3 +1,24 @@
#
# MMC core configuration
#
+config PWRSEQ_EMMC
+ tristate "HW reset support for eMMC"
+ default y
+ depends on OF
+ help
+ This selects hardware reset support aka pwrseq-emmc for eMMC
+ devices. By default this option is set to y.
+
+ This driver can also be built as a module. If so, the module
+ will be called pwrseq_emmc.
+
+config PWRSEQ_SIMPLE
+ tristate "Simple HW reset support for MMC"
+ default y
+ depends on OF
+ help
+ This selects simple hardware reset support aka pwrseq-simple for MMC
+ devices. By default this option is set to y.
+
+ This driver can also be built as a module. If so, the module
+ will be called pwrseq_simple.
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 2c25138f28b73..f007151dfdc6a 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -8,5 +8,7 @@ mmc_core-y := core.o bus.o host.o \
sdio.o sdio_ops.o sdio_bus.o \
sdio_cis.o sdio_io.o sdio_irq.o \
quirks.o slot-gpio.o
-mmc_core-$(CONFIG_OF) += pwrseq.o pwrseq_simple.o pwrseq_emmc.o
+mmc_core-$(CONFIG_OF) += pwrseq.o
+obj-$(CONFIG_PWRSEQ_SIMPLE) += pwrseq_simple.o
+obj-$(CONFIG_PWRSEQ_EMMC) += pwrseq_emmc.o
mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/mmc/core/pwrseq.c b/drivers/mmc/core/pwrseq.c
index 4c1d1757dbf97..9386c4771814b 100644
--- a/drivers/mmc/core/pwrseq.c
+++ b/drivers/mmc/core/pwrseq.c
@@ -8,88 +8,55 @@
* MMC power sequence management
*/
#include <linux/kernel.h>
-#include <linux/platform_device.h>
#include <linux/err.h>
+#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <linux/mmc/host.h>
#include "pwrseq.h"
-struct mmc_pwrseq_match {
- const char *compatible;
- struct mmc_pwrseq *(*alloc)(struct mmc_host *host, struct device *dev);
-};
-
-static struct mmc_pwrseq_match pwrseq_match[] = {
- {
- .compatible = "mmc-pwrseq-simple",
- .alloc = mmc_pwrseq_simple_alloc,
- }, {
- .compatible = "mmc-pwrseq-emmc",
- .alloc = mmc_pwrseq_emmc_alloc,
- },
-};
-
-static struct mmc_pwrseq_match *mmc_pwrseq_find(struct device_node *np)
-{
- struct mmc_pwrseq_match *match = ERR_PTR(-ENODEV);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(pwrseq_match); i++) {
- if (of_device_is_compatible(np, pwrseq_match[i].compatible)) {
- match = &pwrseq_match[i];
- break;
- }
- }
-
- return match;
-}
+static DEFINE_MUTEX(pwrseq_list_mutex);
+static LIST_HEAD(pwrseq_list);
int mmc_pwrseq_alloc(struct mmc_host *host)
{
- struct platform_device *pdev;
struct device_node *np;
- struct mmc_pwrseq_match *match;
- struct mmc_pwrseq *pwrseq;
- int ret = 0;
+ struct mmc_pwrseq *p;
np = of_parse_phandle(host->parent->of_node, "mmc-pwrseq", 0);
if (!np)
return 0;
- pdev = of_find_device_by_node(np);
- if (!pdev) {
- ret = -ENODEV;
- goto err;
- }
+ mutex_lock(&pwrseq_list_mutex);
+ list_for_each_entry(p, &pwrseq_list, pwrseq_node) {
+ if (p->dev->of_node == np) {
+ if (!try_module_get(p->owner))
+ dev_err(host->parent,
+ "increasing module refcount failed\n");
+ else
+ host->pwrseq = p;
- match = mmc_pwrseq_find(np);
- if (IS_ERR(match)) {
- ret = PTR_ERR(match);
- goto err;
+ break;
+ }
}
- pwrseq = match->alloc(host, &pdev->dev);
- if (IS_ERR(pwrseq)) {
- ret = PTR_ERR(pwrseq);
- goto err;
- }
+ of_node_put(np);
+ mutex_unlock(&pwrseq_list_mutex);
+
+ if (!host->pwrseq)
+ return -EPROBE_DEFER;
- host->pwrseq = pwrseq;
dev_info(host->parent, "allocated mmc-pwrseq\n");
-err:
- of_node_put(np);
- return ret;
+ return 0;
}
void mmc_pwrseq_pre_power_on(struct mmc_host *host)
{
struct mmc_pwrseq *pwrseq = host->pwrseq;
- if (pwrseq && pwrseq->ops && pwrseq->ops->pre_power_on)
+ if (pwrseq && pwrseq->ops->pre_power_on)
pwrseq->ops->pre_power_on(host);
}
@@ -97,7 +64,7 @@ void mmc_pwrseq_post_power_on(struct mmc_host *host)
{
struct mmc_pwrseq *pwrseq = host->pwrseq;
- if (pwrseq && pwrseq->ops && pwrseq->ops->post_power_on)
+ if (pwrseq && pwrseq->ops->post_power_on)
pwrseq->ops->post_power_on(host);
}
@@ -105,7 +72,7 @@ void mmc_pwrseq_power_off(struct mmc_host *host)
{
struct mmc_pwrseq *pwrseq = host->pwrseq;
- if (pwrseq && pwrseq->ops && pwrseq->ops->power_off)
+ if (pwrseq && pwrseq->ops->power_off)
pwrseq->ops->power_off(host);
}
@@ -113,8 +80,31 @@ void mmc_pwrseq_free(struct mmc_host *host)
{
struct mmc_pwrseq *pwrseq = host->pwrseq;
- if (pwrseq && pwrseq->ops && pwrseq->ops->free)
- pwrseq->ops->free(host);
+ if (pwrseq) {
+ module_put(pwrseq->owner);
+ host->pwrseq = NULL;
+ }
+}
+
+int mmc_pwrseq_register(struct mmc_pwrseq *pwrseq)
+{
+ if (!pwrseq || !pwrseq->ops || !pwrseq->dev)
+ return -EINVAL;
- host->pwrseq = NULL;
+ mutex_lock(&pwrseq_list_mutex);
+ list_add(&pwrseq->pwrseq_node, &pwrseq_list);
+ mutex_unlock(&pwrseq_list_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mmc_pwrseq_register);
+
+void mmc_pwrseq_unregister(struct mmc_pwrseq *pwrseq)
+{
+ if (pwrseq) {
+ mutex_lock(&pwrseq_list_mutex);
+ list_del(&pwrseq->pwrseq_node);
+ mutex_unlock(&pwrseq_list_mutex);
+ }
}
+EXPORT_SYMBOL_GPL(mmc_pwrseq_unregister);
diff --git a/drivers/mmc/core/pwrseq.h b/drivers/mmc/core/pwrseq.h
index 133de04266878..d69e751f148b8 100644
--- a/drivers/mmc/core/pwrseq.h
+++ b/drivers/mmc/core/pwrseq.h
@@ -8,32 +8,39 @@
#ifndef _MMC_CORE_PWRSEQ_H
#define _MMC_CORE_PWRSEQ_H
+#include <linux/mmc/host.h>
+
struct mmc_pwrseq_ops {
void (*pre_power_on)(struct mmc_host *host);
void (*post_power_on)(struct mmc_host *host);
void (*power_off)(struct mmc_host *host);
- void (*free)(struct mmc_host *host);
};
struct mmc_pwrseq {
const struct mmc_pwrseq_ops *ops;
+ struct device *dev;
+ struct list_head pwrseq_node;
+ struct module *owner;
};
#ifdef CONFIG_OF
+int mmc_pwrseq_register(struct mmc_pwrseq *pwrseq);
+void mmc_pwrseq_unregister(struct mmc_pwrseq *pwrseq);
+
int mmc_pwrseq_alloc(struct mmc_host *host);
void mmc_pwrseq_pre_power_on(struct mmc_host *host);
void mmc_pwrseq_post_power_on(struct mmc_host *host);
void mmc_pwrseq_power_off(struct mmc_host *host);
void mmc_pwrseq_free(struct mmc_host *host);
-struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host,
- struct device *dev);
-struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,
- struct device *dev);
-
#else
+static inline int mmc_pwrseq_register(struct mmc_pwrseq *pwrseq)
+{
+ return -ENOSYS;
+}
+static inline void mmc_pwrseq_unregister(struct mmc_pwrseq *pwrseq) {}
static inline int mmc_pwrseq_alloc(struct mmc_host *host) { return 0; }
static inline void mmc_pwrseq_pre_power_on(struct mmc_host *host) {}
static inline void mmc_pwrseq_post_power_on(struct mmc_host *host) {}
diff --git a/drivers/mmc/core/pwrseq_emmc.c b/drivers/mmc/core/pwrseq_emmc.c
index 4a82bc77fe497..adc9c0c614fb1 100644
--- a/drivers/mmc/core/pwrseq_emmc.c
+++ b/drivers/mmc/core/pwrseq_emmc.c
@@ -9,6 +9,9 @@
*/
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -25,6 +28,8 @@ struct mmc_pwrseq_emmc {
struct gpio_desc *reset_gpio;
};
+#define to_pwrseq_emmc(p) container_of(p, struct mmc_pwrseq_emmc, pwrseq)
+
static void __mmc_pwrseq_emmc_reset(struct mmc_pwrseq_emmc *pwrseq)
{
gpiod_set_value(pwrseq->reset_gpio, 1);
@@ -35,27 +40,11 @@ static void __mmc_pwrseq_emmc_reset(struct mmc_pwrseq_emmc *pwrseq)
static void mmc_pwrseq_emmc_reset(struct mmc_host *host)
{
- struct mmc_pwrseq_emmc *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_emmc, pwrseq);
+ struct mmc_pwrseq_emmc *pwrseq = to_pwrseq_emmc(host->pwrseq);
__mmc_pwrseq_emmc_reset(pwrseq);
}
-static void mmc_pwrseq_emmc_free(struct mmc_host *host)
-{
- struct mmc_pwrseq_emmc *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_emmc, pwrseq);
-
- unregister_restart_handler(&pwrseq->reset_nb);
- gpiod_put(pwrseq->reset_gpio);
- kfree(pwrseq);
-}
-
-static const struct mmc_pwrseq_ops mmc_pwrseq_emmc_ops = {
- .post_power_on = mmc_pwrseq_emmc_reset,
- .free = mmc_pwrseq_emmc_free,
-};
-
static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
unsigned long mode, void *cmd)
{
@@ -66,21 +55,22 @@ static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
return NOTIFY_DONE;
}
-struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,
- struct device *dev)
+static const struct mmc_pwrseq_ops mmc_pwrseq_emmc_ops = {
+ .post_power_on = mmc_pwrseq_emmc_reset,
+};
+
+static int mmc_pwrseq_emmc_probe(struct platform_device *pdev)
{
struct mmc_pwrseq_emmc *pwrseq;
- int ret = 0;
+ struct device *dev = &pdev->dev;
- pwrseq = kzalloc(sizeof(struct mmc_pwrseq_emmc), GFP_KERNEL);
+ pwrseq = devm_kzalloc(dev, sizeof(*pwrseq), GFP_KERNEL);
if (!pwrseq)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
- pwrseq->reset_gpio = gpiod_get(dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(pwrseq->reset_gpio)) {
- ret = PTR_ERR(pwrseq->reset_gpio);
- goto free;
- }
+ pwrseq->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(pwrseq->reset_gpio))
+ return PTR_ERR(pwrseq->reset_gpio);
/*
* register reset handler to ensure emmc reset also from
@@ -92,9 +82,38 @@ struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,
register_restart_handler(&pwrseq->reset_nb);
pwrseq->pwrseq.ops = &mmc_pwrseq_emmc_ops;
+ pwrseq->pwrseq.dev = dev;
+ pwrseq->pwrseq.owner = THIS_MODULE;
+ platform_set_drvdata(pdev, pwrseq);
+
+ return mmc_pwrseq_register(&pwrseq->pwrseq);
+}
+
+static int mmc_pwrseq_emmc_remove(struct platform_device *pdev)
+{
+ struct mmc_pwrseq_emmc *pwrseq = platform_get_drvdata(pdev);
+
+ unregister_restart_handler(&pwrseq->reset_nb);
+ mmc_pwrseq_unregister(&pwrseq->pwrseq);
- return &pwrseq->pwrseq;
-free:
- kfree(pwrseq);
- return ERR_PTR(ret);
+ return 0;
}
+
+static const struct of_device_id mmc_pwrseq_emmc_of_match[] = {
+ { .compatible = "mmc-pwrseq-emmc",},
+ {/* sentinel */},
+};
+
+MODULE_DEVICE_TABLE(of, mmc_pwrseq_emmc_of_match);
+
+static struct platform_driver mmc_pwrseq_emmc_driver = {
+ .probe = mmc_pwrseq_emmc_probe,
+ .remove = mmc_pwrseq_emmc_remove,
+ .driver = {
+ .name = "pwrseq_emmc",
+ .of_match_table = mmc_pwrseq_emmc_of_match,
+ },
+};
+
+module_platform_driver(mmc_pwrseq_emmc_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index bc173e18b71cd..450d907c6e6c6 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -8,7 +8,10 @@
* Simple MMC power sequence management
*/
#include <linux/clk.h>
+#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -25,6 +28,8 @@ struct mmc_pwrseq_simple {
struct gpio_descs *reset_gpios;
};
+#define to_pwrseq_simple(p) container_of(p, struct mmc_pwrseq_simple, pwrseq)
+
static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
int value)
{
@@ -44,8 +49,7 @@ static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
static void mmc_pwrseq_simple_pre_power_on(struct mmc_host *host)
{
- struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_simple, pwrseq);
+ struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
if (!IS_ERR(pwrseq->ext_clk) && !pwrseq->clk_enabled) {
clk_prepare_enable(pwrseq->ext_clk);
@@ -57,16 +61,14 @@ static void mmc_pwrseq_simple_pre_power_on(struct mmc_host *host)
static void mmc_pwrseq_simple_post_power_on(struct mmc_host *host)
{
- struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_simple, pwrseq);
+ struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
mmc_pwrseq_simple_set_gpios_value(pwrseq, 0);
}
static void mmc_pwrseq_simple_power_off(struct mmc_host *host)
{
- struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_simple, pwrseq);
+ struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
mmc_pwrseq_simple_set_gpios_value(pwrseq, 1);
@@ -76,59 +78,64 @@ static void mmc_pwrseq_simple_power_off(struct mmc_host *host)
}
}
-static void mmc_pwrseq_simple_free(struct mmc_host *host)
-{
- struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_simple, pwrseq);
-
- if (!IS_ERR(pwrseq->reset_gpios))
- gpiod_put_array(pwrseq->reset_gpios);
-
- if (!IS_ERR(pwrseq->ext_clk))
- clk_put(pwrseq->ext_clk);
-
- kfree(pwrseq);
-}
-
static const struct mmc_pwrseq_ops mmc_pwrseq_simple_ops = {
.pre_power_on = mmc_pwrseq_simple_pre_power_on,
.post_power_on = mmc_pwrseq_simple_post_power_on,
.power_off = mmc_pwrseq_simple_power_off,
- .free = mmc_pwrseq_simple_free,
};
-struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host,
- struct device *dev)
+static const struct of_device_id mmc_pwrseq_simple_of_match[] = {
+ { .compatible = "mmc-pwrseq-simple",},
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, mmc_pwrseq_simple_of_match);
+
+static int mmc_pwrseq_simple_probe(struct platform_device *pdev)
{
struct mmc_pwrseq_simple *pwrseq;
- int ret = 0;
+ struct device *dev = &pdev->dev;
- pwrseq = kzalloc(sizeof(*pwrseq), GFP_KERNEL);
+ pwrseq = devm_kzalloc(dev, sizeof(*pwrseq), GFP_KERNEL);
if (!pwrseq)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
- pwrseq->ext_clk = clk_get(dev, "ext_clock");
- if (IS_ERR(pwrseq->ext_clk) &&
- PTR_ERR(pwrseq->ext_clk) != -ENOENT) {
- ret = PTR_ERR(pwrseq->ext_clk);
- goto free;
- }
+ pwrseq->ext_clk = devm_clk_get(dev, "ext_clock");
+ if (IS_ERR(pwrseq->ext_clk) && PTR_ERR(pwrseq->ext_clk) != -ENOENT)
+ return PTR_ERR(pwrseq->ext_clk);
- pwrseq->reset_gpios = gpiod_get_array(dev, "reset", GPIOD_OUT_HIGH);
+ pwrseq->reset_gpios = devm_gpiod_get_array(dev, "reset",
+ GPIOD_OUT_HIGH);
if (IS_ERR(pwrseq->reset_gpios) &&
PTR_ERR(pwrseq->reset_gpios) != -ENOENT &&
PTR_ERR(pwrseq->reset_gpios) != -ENOSYS) {
- ret = PTR_ERR(pwrseq->reset_gpios);
- goto clk_put;
+ return PTR_ERR(pwrseq->reset_gpios);
}
+ pwrseq->pwrseq.dev = dev;
pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops;
+ pwrseq->pwrseq.owner = THIS_MODULE;
+ platform_set_drvdata(pdev, pwrseq);
- return &pwrseq->pwrseq;
-clk_put:
- if (!IS_ERR(pwrseq->ext_clk))
- clk_put(pwrseq->ext_clk);
-free:
- kfree(pwrseq);
- return ERR_PTR(ret);
+ return mmc_pwrseq_register(&pwrseq->pwrseq);
}
+
+static int mmc_pwrseq_simple_remove(struct platform_device *pdev)
+{
+ struct mmc_pwrseq_simple *pwrseq = platform_get_drvdata(pdev);
+
+ mmc_pwrseq_unregister(&pwrseq->pwrseq);
+
+ return 0;
+}
+
+static struct platform_driver mmc_pwrseq_simple_driver = {
+ .probe = mmc_pwrseq_simple_probe,
+ .remove = mmc_pwrseq_simple_remove,
+ .driver = {
+ .name = "pwrseq_simple",
+ .of_match_table = mmc_pwrseq_simple_of_match,
+ },
+};
+
+module_platform_driver(mmc_pwrseq_simple_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 2e6c96845c9a6..b45dd09da2d54 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -77,6 +77,7 @@ static unsigned int fmax = 515633;
* @qcom_fifo: enables qcom specific fifo pio read logic.
* @qcom_dml: enables qcom specific dma glue for dma transfers.
* @reversed_irq_handling: handle data irq before cmd irq.
+ * @any_blksize: true if any block sizes are supported
*/
struct variant_data {
unsigned int clkreg;
@@ -103,6 +104,7 @@ struct variant_data {
bool qcom_fifo;
bool qcom_dml;
bool reversed_irq_handling;
+ bool any_blksize;
};
static struct variant_data variant_arm = {
@@ -200,6 +202,7 @@ static struct variant_data variant_ux500v2 = {
.pwrreg_clkgate = true,
.busy_detect = true,
.pwrreg_nopower = true,
+ .any_blksize = true,
};
static struct variant_data variant_qcom = {
@@ -218,6 +221,7 @@ static struct variant_data variant_qcom = {
.explicit_mclk_control = true,
.qcom_fifo = true,
.qcom_dml = true,
+ .any_blksize = true,
};
static int mmci_card_busy(struct mmc_host *mmc)
@@ -245,10 +249,11 @@ static int mmci_card_busy(struct mmc_host *mmc)
static int mmci_validate_data(struct mmci_host *host,
struct mmc_data *data)
{
+ struct variant_data *variant = host->variant;
+
if (!data)
return 0;
-
- if (!is_power_of_2(data->blksz)) {
+ if (!is_power_of_2(data->blksz) && !variant->any_blksize) {
dev_err(mmc_dev(host->mmc),
"unsupported block size (%d bytes)\n", data->blksz);
return -EINVAL;
@@ -804,7 +809,6 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
writel(host->size, base + MMCIDATALENGTH);
blksz_bits = ffs(data->blksz) - 1;
- BUG_ON(1 << blksz_bits != data->blksz);
if (variant->blksz_datactrl16)
datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index ca52952d850f6..c4b124ec163dc 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -1,6 +1,5 @@
menuconfig NVMEM
tristate "NVMEM Support"
- select REGMAP
help
Support for NVMEM(Non Volatile Memory) devices like EEPROM, EFUSES...
@@ -49,7 +48,7 @@ config NVMEM_MXS_OCOTP
config MTK_EFUSE
tristate "Mediatek SoCs EFUSE support"
depends on ARCH_MEDIATEK || COMPILE_TEST
- select REGMAP_MMIO
+ depends on HAS_IOMEM
help
This is a driver to access hardware related data like sensor
calibration, HDMI impedance etc.
@@ -61,7 +60,6 @@ config QCOM_QFPROM
tristate "QCOM QFPROM Support"
depends on ARCH_QCOM || COMPILE_TEST
depends on HAS_IOMEM
- select REGMAP_MMIO
help
Say y here to enable QFPROM support. The QFPROM provides access
functions for QFPROM data to rest of the drivers via nvmem interface.
@@ -83,7 +81,6 @@ config ROCKCHIP_EFUSE
config NVMEM_SUNXI_SID
tristate "Allwinner SoCs SID support"
depends on ARCH_SUNXI
- select REGMAP_MMIO
help
This is a driver for the 'security ID' available on various Allwinner
devices.
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 0de3d878c4393..bb4ea123547f1 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -23,12 +23,10 @@
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
-#include <linux/regmap.h>
#include <linux/slab.h>
struct nvmem_device {
const char *name;
- struct regmap *regmap;
struct module *owner;
struct device dev;
int stride;
@@ -41,6 +39,9 @@ struct nvmem_device {
int flags;
struct bin_attribute eeprom;
struct device *base_dev;
+ nvmem_reg_read_t reg_read;
+ nvmem_reg_write_t reg_write;
+ void *priv;
};
#define FLAG_COMPAT BIT(0)
@@ -66,6 +67,23 @@ static struct lock_class_key eeprom_lock_key;
#endif
#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
+static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
+ void *val, size_t bytes)
+{
+ if (nvmem->reg_read)
+ return nvmem->reg_read(nvmem->priv, offset, val, bytes);
+
+ return -EINVAL;
+}
+
+static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
+ void *val, size_t bytes)
+{
+ if (nvmem->reg_write)
+ return nvmem->reg_write(nvmem->priv, offset, val, bytes);
+
+ return -EINVAL;
+}
static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
@@ -93,7 +111,7 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
count = round_down(count, nvmem->word_size);
- rc = regmap_raw_read(nvmem->regmap, pos, buf, count);
+ rc = nvmem_reg_read(nvmem, pos, buf, count);
if (IS_ERR_VALUE(rc))
return rc;
@@ -127,7 +145,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
count = round_down(count, nvmem->word_size);
- rc = regmap_raw_write(nvmem->regmap, pos, buf, count);
+ rc = nvmem_reg_write(nvmem, pos, buf, count);
if (IS_ERR_VALUE(rc))
return rc;
@@ -421,18 +439,11 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
struct nvmem_device *nvmem;
struct device_node *np;
- struct regmap *rm;
int rval;
if (!config->dev)
return ERR_PTR(-EINVAL);
- rm = dev_get_regmap(config->dev, NULL);
- if (!rm) {
- dev_err(config->dev, "Regmap not found\n");
- return ERR_PTR(-EINVAL);
- }
-
nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
if (!nvmem)
return ERR_PTR(-ENOMEM);
@@ -444,14 +455,16 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
}
nvmem->id = rval;
- nvmem->regmap = rm;
nvmem->owner = config->owner;
- nvmem->stride = regmap_get_reg_stride(rm);
- nvmem->word_size = regmap_get_val_bytes(rm);
- nvmem->size = regmap_get_max_register(rm) + nvmem->stride;
+ nvmem->stride = config->stride;
+ nvmem->word_size = config->word_size;
+ nvmem->size = config->size;
nvmem->dev.type = &nvmem_provider_type;
nvmem->dev.bus = &nvmem_bus_type;
nvmem->dev.parent = config->dev;
+ nvmem->priv = config->priv;
+ nvmem->reg_read = config->reg_read;
+ nvmem->reg_write = config->reg_write;
np = config->dev->of_node;
nvmem->dev.of_node = np;
dev_set_name(&nvmem->dev, "%s%d",
@@ -948,7 +961,7 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem,
{
int rc;
- rc = regmap_raw_read(nvmem->regmap, cell->offset, buf, cell->bytes);
+ rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);
if (IS_ERR_VALUE(rc))
return rc;
@@ -977,7 +990,7 @@ void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
u8 *buf;
int rc;
- if (!nvmem || !nvmem->regmap)
+ if (!nvmem)
return ERR_PTR(-EINVAL);
buf = kzalloc(cell->bytes, GFP_KERNEL);
@@ -1014,7 +1027,7 @@ static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
*b <<= bit_offset;
/* setup the first byte with lsb bits from nvmem */
- rc = regmap_raw_read(nvmem->regmap, cell->offset, &v, 1);
+ rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
*b++ |= GENMASK(bit_offset - 1, 0) & v;
/* setup rest of the byte if any */
@@ -1031,7 +1044,7 @@ static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
/* if it's not end on byte boundary */
if ((nbits + bit_offset) % BITS_PER_BYTE) {
/* setup the last byte with msb bits from nvmem */
- rc = regmap_raw_read(nvmem->regmap,
+ rc = nvmem_reg_read(nvmem,
cell->offset + cell->bytes - 1, &v, 1);
*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
@@ -1054,7 +1067,7 @@ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
struct nvmem_device *nvmem = cell->nvmem;
int rc;
- if (!nvmem || !nvmem->regmap || nvmem->read_only ||
+ if (!nvmem || nvmem->read_only ||
(cell->bit_offset == 0 && len != cell->bytes))
return -EINVAL;
@@ -1064,7 +1077,7 @@ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
return PTR_ERR(buf);
}
- rc = regmap_raw_write(nvmem->regmap, cell->offset, buf, cell->bytes);
+ rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);
/* free the tmp buffer */
if (cell->bit_offset || cell->nbits)
@@ -1094,7 +1107,7 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
int rc;
ssize_t len;
- if (!nvmem || !nvmem->regmap)
+ if (!nvmem)
return -EINVAL;
rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
@@ -1124,7 +1137,7 @@ int nvmem_device_cell_write(struct nvmem_device *nvmem,
struct nvmem_cell cell;
int rc;
- if (!nvmem || !nvmem->regmap)
+ if (!nvmem)
return -EINVAL;
rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
@@ -1152,10 +1165,10 @@ int nvmem_device_read(struct nvmem_device *nvmem,
{
int rc;
- if (!nvmem || !nvmem->regmap)
+ if (!nvmem)
return -EINVAL;
- rc = regmap_raw_read(nvmem->regmap, offset, buf, bytes);
+ rc = nvmem_reg_read(nvmem, offset, buf, bytes);
if (IS_ERR_VALUE(rc))
return rc;
@@ -1180,10 +1193,10 @@ int nvmem_device_write(struct nvmem_device *nvmem,
{
int rc;
- if (!nvmem || !nvmem->regmap)
+ if (!nvmem)
return -EINVAL;
- rc = regmap_raw_write(nvmem->regmap, offset, buf, bytes);
+ rc = nvmem_reg_write(nvmem, offset, buf, bytes);
if (IS_ERR_VALUE(rc))
return rc;
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index d7796eb5421ff..75e66ef5b0ecb 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -22,7 +22,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
-#include <linux/regmap.h>
#include <linux/slab.h>
struct ocotp_priv {
@@ -31,59 +30,34 @@ struct ocotp_priv {
unsigned int nregs;
};
-static int imx_ocotp_read(void *context, const void *reg, size_t reg_size,
- void *val, size_t val_size)
+static int imx_ocotp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
{
struct ocotp_priv *priv = context;
- unsigned int offset = *(u32 *)reg;
unsigned int count;
+ u32 *buf = val;
int i;
u32 index;
index = offset >> 2;
- count = val_size >> 2;
+ count = bytes >> 2;
if (count > (priv->nregs - index))
count = priv->nregs - index;
- for (i = index; i < (index + count); i++) {
- *(u32 *)val = readl(priv->base + 0x400 + i * 0x10);
- val += 4;
- }
+ for (i = index; i < (index + count); i++)
+ *buf++ = readl(priv->base + 0x400 + i * 0x10);
return 0;
}
-static int imx_ocotp_write(void *context, const void *data, size_t count)
-{
- /* Not implemented */
- return 0;
-}
-
-static struct regmap_bus imx_ocotp_bus = {
- .read = imx_ocotp_read,
- .write = imx_ocotp_write,
- .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
- .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
-};
-
-static bool imx_ocotp_writeable_reg(struct device *dev, unsigned int reg)
-{
- return false;
-}
-
-static struct regmap_config imx_ocotp_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
- .writeable_reg = imx_ocotp_writeable_reg,
- .name = "imx-ocotp",
-};
-
static struct nvmem_config imx_ocotp_nvmem_config = {
.name = "imx-ocotp",
.read_only = true,
+ .word_size = 4,
+ .stride = 4,
.owner = THIS_MODULE,
+ .reg_read = imx_ocotp_read,
};
static const struct of_device_id imx_ocotp_dt_ids[] = {
@@ -99,7 +73,6 @@ static int imx_ocotp_probe(struct platform_device *pdev)
const struct of_device_id *of_id;
struct device *dev = &pdev->dev;
struct resource *res;
- struct regmap *regmap;
struct ocotp_priv *priv;
struct nvmem_device *nvmem;
@@ -114,15 +87,9 @@ static int imx_ocotp_probe(struct platform_device *pdev)
of_id = of_match_device(imx_ocotp_dt_ids, dev);
priv->nregs = (unsigned int)of_id->data;
- imx_ocotp_regmap_config.max_register = 4 * priv->nregs - 4;
-
- regmap = devm_regmap_init(dev, &imx_ocotp_bus, priv,
- &imx_ocotp_regmap_config);
- if (IS_ERR(regmap)) {
- dev_err(dev, "regmap init failed\n");
- return PTR_ERR(regmap);
- }
+ imx_ocotp_nvmem_config.size = 4 * priv->nregs;
imx_ocotp_nvmem_config.dev = dev;
+ imx_ocotp_nvmem_config.priv = priv;
nvmem = nvmem_register(&imx_ocotp_nvmem_config);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
diff --git a/drivers/nvmem/lpc18xx_eeprom.c b/drivers/nvmem/lpc18xx_eeprom.c
index 878fce789341c..c81ae4c6da74c 100644
--- a/drivers/nvmem/lpc18xx_eeprom.c
+++ b/drivers/nvmem/lpc18xx_eeprom.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
-#include <linux/regmap.h>
#include <linux/reset.h>
/* Registers */
@@ -51,12 +50,7 @@ struct lpc18xx_eeprom_dev {
struct nvmem_device *nvmem;
unsigned reg_bytes;
unsigned val_bytes;
-};
-
-static struct regmap_config lpc18xx_regmap_config = {
- .reg_bits = 32,
- .reg_stride = 4,
- .val_bits = 32,
+ int size;
};
static inline void lpc18xx_eeprom_writel(struct lpc18xx_eeprom_dev *eeprom,
@@ -95,30 +89,35 @@ static int lpc18xx_eeprom_busywait_until_prog(struct lpc18xx_eeprom_dev *eeprom)
return -ETIMEDOUT;
}
-static int lpc18xx_eeprom_gather_write(void *context, const void *reg,
- size_t reg_size, const void *val,
- size_t val_size)
+static int lpc18xx_eeprom_gather_write(void *context, unsigned int reg,
+ void *val, size_t bytes)
{
struct lpc18xx_eeprom_dev *eeprom = context;
- unsigned int offset = *(u32 *)reg;
+ unsigned int offset = reg;
int ret;
- if (offset % lpc18xx_regmap_config.reg_stride)
+ /*
+ * The last page contains the EEPROM initialization data and is not
+ * writable.
+ */
+ if ((reg > eeprom->size - LPC18XX_EEPROM_PAGE_SIZE) ||
+ (reg + bytes > eeprom->size - LPC18XX_EEPROM_PAGE_SIZE))
return -EINVAL;
+
lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
LPC18XX_EEPROM_PWRDWN_NO);
/* Wait 100 us while the EEPROM wakes up */
usleep_range(100, 200);
- while (val_size) {
+ while (bytes) {
writel(*(u32 *)val, eeprom->mem_base + offset);
ret = lpc18xx_eeprom_busywait_until_prog(eeprom);
if (ret < 0)
return ret;
- val_size -= eeprom->val_bytes;
+ bytes -= eeprom->val_bytes;
val += eeprom->val_bytes;
offset += eeprom->val_bytes;
}
@@ -129,23 +128,10 @@ static int lpc18xx_eeprom_gather_write(void *context, const void *reg,
return 0;
}
-static int lpc18xx_eeprom_write(void *context, const void *data, size_t count)
+static int lpc18xx_eeprom_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
{
struct lpc18xx_eeprom_dev *eeprom = context;
- unsigned int offset = eeprom->reg_bytes;
-
- if (count <= offset)
- return -EINVAL;
-
- return lpc18xx_eeprom_gather_write(context, data, eeprom->reg_bytes,
- data + offset, count - offset);
-}
-
-static int lpc18xx_eeprom_read(void *context, const void *reg, size_t reg_size,
- void *val, size_t val_size)
-{
- struct lpc18xx_eeprom_dev *eeprom = context;
- unsigned int offset = *(u32 *)reg;
lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
LPC18XX_EEPROM_PWRDWN_NO);
@@ -153,9 +139,9 @@ static int lpc18xx_eeprom_read(void *context, const void *reg, size_t reg_size,
/* Wait 100 us while the EEPROM wakes up */
usleep_range(100, 200);
- while (val_size) {
+ while (bytes) {
*(u32 *)val = readl(eeprom->mem_base + offset);
- val_size -= eeprom->val_bytes;
+ bytes -= eeprom->val_bytes;
val += eeprom->val_bytes;
offset += eeprom->val_bytes;
}
@@ -166,31 +152,13 @@ static int lpc18xx_eeprom_read(void *context, const void *reg, size_t reg_size,
return 0;
}
-static struct regmap_bus lpc18xx_eeprom_bus = {
- .write = lpc18xx_eeprom_write,
- .gather_write = lpc18xx_eeprom_gather_write,
- .read = lpc18xx_eeprom_read,
- .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
- .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
-};
-
-static bool lpc18xx_eeprom_writeable_reg(struct device *dev, unsigned int reg)
-{
- /*
- * The last page contains the EEPROM initialization data and is not
- * writable.
- */
- return reg <= lpc18xx_regmap_config.max_register -
- LPC18XX_EEPROM_PAGE_SIZE;
-}
-
-static bool lpc18xx_eeprom_readable_reg(struct device *dev, unsigned int reg)
-{
- return reg <= lpc18xx_regmap_config.max_register;
-}
static struct nvmem_config lpc18xx_nvmem_config = {
.name = "lpc18xx-eeprom",
+ .stride = 4,
+ .word_size = 4,
+ .reg_read = lpc18xx_eeprom_read,
+ .reg_write = lpc18xx_eeprom_gather_write,
.owner = THIS_MODULE,
};
@@ -200,7 +168,6 @@ static int lpc18xx_eeprom_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct reset_control *rst;
unsigned long clk_rate;
- struct regmap *regmap;
struct resource *res;
int ret;
@@ -243,8 +210,8 @@ static int lpc18xx_eeprom_probe(struct platform_device *pdev)
goto err_clk;
}
- eeprom->val_bytes = lpc18xx_regmap_config.val_bits / BITS_PER_BYTE;
- eeprom->reg_bytes = lpc18xx_regmap_config.reg_bits / BITS_PER_BYTE;
+ eeprom->val_bytes = 4;
+ eeprom->reg_bytes = 4;
/*
* Clock rate is generated by dividing the system bus clock by the
@@ -264,19 +231,10 @@ static int lpc18xx_eeprom_probe(struct platform_device *pdev)
lpc18xx_eeprom_writel(eeprom, LPC18XX_EEPROM_PWRDWN,
LPC18XX_EEPROM_PWRDWN_YES);
- lpc18xx_regmap_config.max_register = resource_size(res) - 1;
- lpc18xx_regmap_config.writeable_reg = lpc18xx_eeprom_writeable_reg;
- lpc18xx_regmap_config.readable_reg = lpc18xx_eeprom_readable_reg;
-
- regmap = devm_regmap_init(dev, &lpc18xx_eeprom_bus, eeprom,
- &lpc18xx_regmap_config);
- if (IS_ERR(regmap)) {
- dev_err(dev, "regmap init failed: %ld\n", PTR_ERR(regmap));
- ret = PTR_ERR(regmap);
- goto err_clk;
- }
-
+ eeprom->size = resource_size(res);
+ lpc18xx_nvmem_config.size = resource_size(res);
lpc18xx_nvmem_config.dev = dev;
+ lpc18xx_nvmem_config.priv = eeprom;
eeprom->nvmem = nvmem_register(&lpc18xx_nvmem_config);
if (IS_ERR(eeprom->nvmem)) {
diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c
index 9c49369beea56..32fd572e18c54 100644
--- a/drivers/nvmem/mtk-efuse.c
+++ b/drivers/nvmem/mtk-efuse.c
@@ -14,15 +14,35 @@
#include <linux/device.h>
#include <linux/module.h>
+#include <linux/io.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
-#include <linux/regmap.h>
-static struct regmap_config mtk_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
-};
+static int mtk_reg_read(void *context,
+ unsigned int reg, void *_val, size_t bytes)
+{
+ void __iomem *base = context;
+ u32 *val = _val;
+ int i = 0, words = bytes / 4;
+
+ while (words--)
+ *val++ = readl(base + reg + (i++ * 4));
+
+ return 0;
+}
+
+static int mtk_reg_write(void *context,
+ unsigned int reg, void *_val, size_t bytes)
+{
+ void __iomem *base = context;
+ u32 *val = _val;
+ int i = 0, words = bytes / 4;
+
+ while (words--)
+ writel(*val++, base + reg + (i++ * 4));
+
+ return 0;
+}
static int mtk_efuse_probe(struct platform_device *pdev)
{
@@ -30,7 +50,6 @@ static int mtk_efuse_probe(struct platform_device *pdev)
struct resource *res;
struct nvmem_device *nvmem;
struct nvmem_config *econfig;
- struct regmap *regmap;
void __iomem *base;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -42,14 +61,12 @@ static int mtk_efuse_probe(struct platform_device *pdev)
if (!econfig)
return -ENOMEM;
- mtk_regmap_config.max_register = resource_size(res) - 1;
-
- regmap = devm_regmap_init_mmio(dev, base, &mtk_regmap_config);
- if (IS_ERR(regmap)) {
- dev_err(dev, "regmap init failed\n");
- return PTR_ERR(regmap);
- }
-
+ econfig->stride = 4;
+ econfig->word_size = 4;
+ econfig->reg_read = mtk_reg_read;
+ econfig->reg_write = mtk_reg_write;
+ econfig->size = resource_size(res);
+ econfig->priv = base;
econfig->dev = dev;
econfig->owner = THIS_MODULE;
nvmem = nvmem_register(econfig);
diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
index 2bb3c5799ac4b..d26dd03cec80f 100644
--- a/drivers/nvmem/mxs-ocotp.c
+++ b/drivers/nvmem/mxs-ocotp.c
@@ -25,7 +25,6 @@
#include <linux/nvmem-provider.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
-#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/stmp_device.h>
@@ -66,11 +65,10 @@ static int mxs_ocotp_wait(struct mxs_ocotp *otp)
return 0;
}
-static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
- void *val, size_t val_size)
+static int mxs_ocotp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
{
struct mxs_ocotp *otp = context;
- unsigned int offset = *(u32 *)reg;
u32 *buf = val;
int ret;
@@ -94,17 +92,16 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
if (ret)
goto close_banks;
- while (val_size >= reg_size) {
+ while (bytes) {
if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) {
/* fill up non-data register */
- *buf = 0;
+ *buf++ = 0;
} else {
- *buf = readl(otp->base + offset);
+ *buf++ = readl(otp->base + offset);
}
- buf++;
- val_size -= reg_size;
- offset += reg_size;
+ bytes -= 4;
+ offset += 4;
}
close_banks:
@@ -117,57 +114,29 @@ disable_clk:
return ret;
}
-static int mxs_ocotp_write(void *context, const void *data, size_t count)
-{
- /* We don't want to support writing */
- return 0;
-}
-
-static bool mxs_ocotp_writeable_reg(struct device *dev, unsigned int reg)
-{
- return false;
-}
-
static struct nvmem_config ocotp_config = {
.name = "mxs-ocotp",
+ .stride = 16,
+ .word_size = 4,
.owner = THIS_MODULE,
+ .reg_read = mxs_ocotp_read,
};
-static const struct regmap_range imx23_ranges[] = {
- regmap_reg_range(OCOTP_DATA_OFFSET, 0x210),
-};
-
-static const struct regmap_access_table imx23_access = {
- .yes_ranges = imx23_ranges,
- .n_yes_ranges = ARRAY_SIZE(imx23_ranges),
-};
-
-static const struct regmap_range imx28_ranges[] = {
- regmap_reg_range(OCOTP_DATA_OFFSET, 0x290),
-};
-
-static const struct regmap_access_table imx28_access = {
- .yes_ranges = imx28_ranges,
- .n_yes_ranges = ARRAY_SIZE(imx28_ranges),
+struct mxs_data {
+ int size;
};
-static struct regmap_bus mxs_ocotp_bus = {
- .read = mxs_ocotp_read,
- .write = mxs_ocotp_write, /* make regmap_init() happy */
- .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
- .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
+static const struct mxs_data imx23_data = {
+ .size = 0x220,
};
-static struct regmap_config mxs_ocotp_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 16,
- .writeable_reg = mxs_ocotp_writeable_reg,
+static const struct mxs_data imx28_data = {
+ .size = 0x2a0,
};
static const struct of_device_id mxs_ocotp_match[] = {
- { .compatible = "fsl,imx23-ocotp", .data = &imx23_access },
- { .compatible = "fsl,imx28-ocotp", .data = &imx28_access },
+ { .compatible = "fsl,imx23-ocotp", .data = &imx23_data },
+ { .compatible = "fsl,imx28-ocotp", .data = &imx28_data },
{ /* sentinel */},
};
MODULE_DEVICE_TABLE(of, mxs_ocotp_match);
@@ -175,11 +144,10 @@ MODULE_DEVICE_TABLE(of, mxs_ocotp_match);
static int mxs_ocotp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ const struct mxs_data *data;
struct mxs_ocotp *otp;
struct resource *res;
const struct of_device_id *match;
- struct regmap *regmap;
- const struct regmap_access_table *access;
int ret;
match = of_match_device(dev->driver->of_match_table, dev);
@@ -205,17 +173,10 @@ static int mxs_ocotp_probe(struct platform_device *pdev)
return ret;
}
- access = match->data;
- mxs_ocotp_config.rd_table = access;
- mxs_ocotp_config.max_register = access->yes_ranges[0].range_max;
-
- regmap = devm_regmap_init(dev, &mxs_ocotp_bus, otp, &mxs_ocotp_config);
- if (IS_ERR(regmap)) {
- dev_err(dev, "regmap init failed\n");
- ret = PTR_ERR(regmap);
- goto err_clk;
- }
+ data = match->data;
+ ocotp_config.size = data->size;
+ ocotp_config.priv = otp;
ocotp_config.dev = dev;
otp->nvmem = nvmem_register(&ocotp_config);
if (IS_ERR(otp->nvmem)) {
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index 3829e5fbf8c36..b5305f08b1848 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -13,21 +13,35 @@
#include <linux/device.h>
#include <linux/module.h>
+#include <linux/io.h>
#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
-#include <linux/regmap.h>
-static struct regmap_config qfprom_regmap_config = {
- .reg_bits = 32,
- .val_bits = 8,
- .reg_stride = 1,
- .val_format_endian = REGMAP_ENDIAN_LITTLE,
-};
+static int qfprom_reg_read(void *context,
+ unsigned int reg, void *_val, size_t bytes)
+{
+ void __iomem *base = context;
+ u32 *val = _val;
+ int i = 0, words = bytes / 4;
-static struct nvmem_config econfig = {
- .name = "qfprom",
- .owner = THIS_MODULE,
-};
+ while (words--)
+ *val++ = readl(base + reg + (i++ * 4));
+
+ return 0;
+}
+
+static int qfprom_reg_write(void *context,
+ unsigned int reg, void *_val, size_t bytes)
+{
+ void __iomem *base = context;
+ u32 *val = _val;
+ int i = 0, words = bytes / 4;
+
+ while (words--)
+ writel(*val++, base + reg + (i++ * 4));
+
+ return 0;
+}
static int qfprom_remove(struct platform_device *pdev)
{
@@ -36,12 +50,20 @@ static int qfprom_remove(struct platform_device *pdev)
return nvmem_unregister(nvmem);
}
+static struct nvmem_config econfig = {
+ .name = "qfprom",
+ .owner = THIS_MODULE,
+ .stride = 4,
+ .word_size = 1,
+ .reg_read = qfprom_reg_read,
+ .reg_write = qfprom_reg_write,
+};
+
static int qfprom_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct nvmem_device *nvmem;
- struct regmap *regmap;
void __iomem *base;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -49,14 +71,10 @@ static int qfprom_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- qfprom_regmap_config.max_register = resource_size(res) - 1;
-
- regmap = devm_regmap_init_mmio(dev, base, &qfprom_regmap_config);
- if (IS_ERR(regmap)) {
- dev_err(dev, "regmap init failed\n");
- return PTR_ERR(regmap);
- }
+ econfig.size = resource_size(res);
econfig.dev = dev;
+ econfig.priv = base;
+
nvmem = nvmem_register(&econfig);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c
index a009795111e98..4d3f391f0a0bb 100644
--- a/drivers/nvmem/rockchip-efuse.c
+++ b/drivers/nvmem/rockchip-efuse.c
@@ -23,7 +23,6 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/regmap.h>
#define EFUSE_A_SHIFT 6
#define EFUSE_A_MASK 0x3ff
@@ -41,17 +40,9 @@ struct rockchip_efuse_chip {
struct clk *clk;
};
-static int rockchip_efuse_write(void *context, const void *data, size_t count)
+static int rockchip_efuse_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
{
- /* Nothing TBD, Read-Only */
- return 0;
-}
-
-static int rockchip_efuse_read(void *context,
- const void *reg, size_t reg_size,
- void *val, size_t val_size)
-{
- unsigned int offset = *(u32 *)reg;
struct rockchip_efuse_chip *efuse = context;
u8 *buf = val;
int ret;
@@ -64,12 +55,12 @@ static int rockchip_efuse_read(void *context,
writel(EFUSE_LOAD | EFUSE_PGENB, efuse->base + REG_EFUSE_CTRL);
udelay(1);
- while (val_size) {
+ while (bytes--) {
writel(readl(efuse->base + REG_EFUSE_CTRL) &
(~(EFUSE_A_MASK << EFUSE_A_SHIFT)),
efuse->base + REG_EFUSE_CTRL);
writel(readl(efuse->base + REG_EFUSE_CTRL) |
- ((offset & EFUSE_A_MASK) << EFUSE_A_SHIFT),
+ ((offset++ & EFUSE_A_MASK) << EFUSE_A_SHIFT),
efuse->base + REG_EFUSE_CTRL);
udelay(1);
writel(readl(efuse->base + REG_EFUSE_CTRL) |
@@ -79,9 +70,6 @@ static int rockchip_efuse_read(void *context,
writel(readl(efuse->base + REG_EFUSE_CTRL) &
(~EFUSE_STROBE), efuse->base + REG_EFUSE_CTRL);
udelay(1);
-
- val_size -= 1;
- offset += 1;
}
/* Switch to standby mode */
@@ -92,22 +80,11 @@ static int rockchip_efuse_read(void *context,
return 0;
}
-static struct regmap_bus rockchip_efuse_bus = {
- .read = rockchip_efuse_read,
- .write = rockchip_efuse_write,
- .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
- .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
-};
-
-static struct regmap_config rockchip_efuse_regmap_config = {
- .reg_bits = 32,
- .reg_stride = 1,
- .val_bits = 8,
-};
-
static struct nvmem_config econfig = {
.name = "rockchip-efuse",
.owner = THIS_MODULE,
+ .stride = 1,
+ .word_size = 1,
.read_only = true,
};
@@ -121,7 +98,6 @@ static int rockchip_efuse_probe(struct platform_device *pdev)
{
struct resource *res;
struct nvmem_device *nvmem;
- struct regmap *regmap;
struct rockchip_efuse_chip *efuse;
efuse = devm_kzalloc(&pdev->dev, sizeof(struct rockchip_efuse_chip),
@@ -139,16 +115,9 @@ static int rockchip_efuse_probe(struct platform_device *pdev)
return PTR_ERR(efuse->clk);
efuse->dev = &pdev->dev;
-
- rockchip_efuse_regmap_config.max_register = resource_size(res) - 1;
-
- regmap = devm_regmap_init(efuse->dev, &rockchip_efuse_bus,
- efuse, &rockchip_efuse_regmap_config);
- if (IS_ERR(regmap)) {
- dev_err(efuse->dev, "regmap init failed\n");
- return PTR_ERR(regmap);
- }
-
+ econfig.size = resource_size(res);
+ econfig.reg_read = rockchip_efuse_read;
+ econfig.priv = efuse;
econfig.dev = efuse->dev;
nvmem = nvmem_register(&econfig);
if (IS_ERR(nvmem))
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
index bc88b40840552..1567ccca8de3f 100644
--- a/drivers/nvmem/sunxi_sid.c
+++ b/drivers/nvmem/sunxi_sid.c
@@ -21,13 +21,14 @@
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/random.h>
static struct nvmem_config econfig = {
.name = "sunxi-sid",
.read_only = true,
+ .stride = 4,
+ .word_size = 1,
.owner = THIS_MODULE,
};
@@ -51,54 +52,23 @@ static u8 sunxi_sid_read_byte(const struct sunxi_sid *sid,
return sid_key; /* Only return the last byte */
}
-static int sunxi_sid_read(void *context,
- const void *reg, size_t reg_size,
- void *val, size_t val_size)
+static int sunxi_sid_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
{
struct sunxi_sid *sid = context;
- unsigned int offset = *(u32 *)reg;
u8 *buf = val;
- while (val_size) {
- *buf++ = sunxi_sid_read_byte(sid, offset);
- val_size--;
- offset++;
- }
-
- return 0;
-}
+ while (bytes--)
+ *buf++ = sunxi_sid_read_byte(sid, offset++);
-static int sunxi_sid_write(void *context, const void *data, size_t count)
-{
- /* Unimplemented, dummy to keep regmap core happy */
return 0;
}
-static struct regmap_bus sunxi_sid_bus = {
- .read = sunxi_sid_read,
- .write = sunxi_sid_write,
- .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
- .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
-};
-
-static bool sunxi_sid_writeable_reg(struct device *dev, unsigned int reg)
-{
- return false;
-}
-
-static struct regmap_config sunxi_sid_regmap_config = {
- .reg_bits = 32,
- .val_bits = 8,
- .reg_stride = 1,
- .writeable_reg = sunxi_sid_writeable_reg,
-};
-
static int sunxi_sid_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct nvmem_device *nvmem;
- struct regmap *regmap;
struct sunxi_sid *sid;
int ret, i, size;
char *randomness;
@@ -113,16 +83,10 @@ static int sunxi_sid_probe(struct platform_device *pdev)
return PTR_ERR(sid->base);
size = resource_size(res) - 1;
- sunxi_sid_regmap_config.max_register = size;
-
- regmap = devm_regmap_init(dev, &sunxi_sid_bus, sid,
- &sunxi_sid_regmap_config);
- if (IS_ERR(regmap)) {
- dev_err(dev, "regmap init failed\n");
- return PTR_ERR(regmap);
- }
-
+ econfig.size = resource_size(res);
econfig.dev = dev;
+ econfig.reg_read = sunxi_sid_read;
+ econfig.priv = sid;
nvmem = nvmem_register(&econfig);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
diff --git a/drivers/nvmem/vf610-ocotp.c b/drivers/nvmem/vf610-ocotp.c
index 8641319efeda7..72e4faabce299 100644
--- a/drivers/nvmem/vf610-ocotp.c
+++ b/drivers/nvmem/vf610-ocotp.c
@@ -25,7 +25,6 @@
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/regmap.h>
#include <linux/slab.h>
/* OCOTP Register Offsets */
@@ -152,23 +151,16 @@ static int vf610_get_fuse_address(int base_addr_offset)
return -EINVAL;
}
-static int vf610_ocotp_write(void *context, const void *data, size_t count)
-{
- return 0;
-}
-
-static int vf610_ocotp_read(void *context,
- const void *off, size_t reg_size,
- void *val, size_t val_size)
+static int vf610_ocotp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
{
struct vf610_ocotp *ocotp = context;
void __iomem *base = ocotp->base;
- unsigned int offset = *(u32 *)off;
u32 reg, *buf = val;
int fuse_addr;
int ret;
- while (val_size > 0) {
+ while (bytes > 0) {
fuse_addr = vf610_get_fuse_address(offset);
if (fuse_addr > 0) {
writel(ocotp->timing, base + OCOTP_TIMING);
@@ -205,29 +197,19 @@ static int vf610_ocotp_read(void *context,
}
buf++;
- val_size--;
- offset += reg_size;
+ bytes -= 4;
+ offset += 4;
}
return 0;
}
-static struct regmap_bus vf610_ocotp_bus = {
- .read = vf610_ocotp_read,
- .write = vf610_ocotp_write,
- .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
- .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
-};
-
-static struct regmap_config ocotp_regmap_config = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
-};
-
static struct nvmem_config ocotp_config = {
.name = "ocotp",
.owner = THIS_MODULE,
+ .stride = 4,
+ .word_size = 4,
+ .reg_read = vf610_ocotp_read,
};
static const struct of_device_id ocotp_of_match[] = {
@@ -247,7 +229,6 @@ static int vf610_ocotp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
- struct regmap *regmap;
struct vf610_ocotp *ocotp_dev;
ocotp_dev = devm_kzalloc(&pdev->dev,
@@ -267,13 +248,8 @@ static int vf610_ocotp_probe(struct platform_device *pdev)
return PTR_ERR(ocotp_dev->clk);
}
- ocotp_regmap_config.max_register = resource_size(res);
- regmap = devm_regmap_init(dev,
- &vf610_ocotp_bus, ocotp_dev, &ocotp_regmap_config);
- if (IS_ERR(regmap)) {
- dev_err(dev, "regmap init failed\n");
- return PTR_ERR(regmap);
- }
+ ocotp_config.size = resource_size(res);
+ ocotp_config.priv = ocotp_dev;
ocotp_config.dev = dev;
ocotp_dev->nvmem = nvmem_register(&ocotp_config);
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 91a469d55b8fb..c162a111673fe 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -931,8 +931,8 @@ EXPORT_SYMBOL(of_io_request_and_map);
* CPU addr (phys_addr_t) : pna cells
* size : nsize cells
*
- * It returns -ENODEV if "dma-ranges" property was not found
- * for this device in DT.
+ * Return 0 on success, -ENODEV if the "dma-ranges" property was not found for
+ * this device in DT, or -EINVAL if the CPU address or size is invalid.
*/
int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size)
{
@@ -993,6 +993,22 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz
*dma_addr = dmaaddr;
*size = of_read_number(ranges + naddr + pna, nsize);
+ /*
+ * DT nodes sometimes incorrectly set the size as a mask. Work around
+ * those incorrect DT by computing the size as mask + 1.
+ */
+ if (*size & 1) {
+ pr_warn("%s: size 0x%llx for dma-range in node(%s) set as mask\n",
+ __func__, *size, np->full_name);
+ *size = *size + 1;
+ }
+
+ if (!*size) {
+ pr_err("%s: invalid size zero for dma-range in node(%s)\n",
+ __func__, np->full_name);
+ ret = -EINVAL;
+ goto out;
+ }
pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
*dma_addr, *paddr, *size);
diff --git a/drivers/of/device.c b/drivers/of/device.c
index e5f47cec75f34..57a5f2d9070c5 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -71,7 +71,7 @@ int of_device_add(struct platform_device *ofdev)
}
/**
- * of_dma_configure - Setup DMA configuration
+ * of_dma_configure_masks - Setup DMA masks and offset
* @dev: Device to apply DMA configuration
* @np: Pointer to OF node having DMA configuration
*
@@ -82,13 +82,11 @@ int of_device_add(struct platform_device *ofdev)
* can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events
* to fix up DMA configuration.
*/
-void of_dma_configure(struct device *dev, struct device_node *np)
+void of_dma_configure_masks(struct device *dev, struct device_node *np)
{
- u64 dma_addr, paddr, size;
- int ret;
- bool coherent;
+ u64 dma_addr, paddr, size, range_mask;
unsigned long offset;
- struct iommu_ops *iommu;
+ int ret;
/*
* Set default coherent_dma_mask to 32 bit. Drivers are expected to
@@ -106,25 +104,11 @@ void of_dma_configure(struct device *dev, struct device_node *np)
ret = of_dma_get_range(np, &dma_addr, &paddr, &size);
if (ret < 0) {
- dma_addr = offset = 0;
- size = dev->coherent_dma_mask + 1;
+ range_mask = dev->coherent_dma_mask + 1;
+ offset = 0;
} else {
+ range_mask = DMA_BIT_MASK(ilog2(dma_addr + size));
offset = PFN_DOWN(paddr - dma_addr);
-
- /*
- * Add a work around to treat the size as mask + 1 in case
- * it is defined in DT as a mask.
- */
- if (size & 1) {
- dev_warn(dev, "Invalid size 0x%llx for dma-range\n",
- size);
- size = size + 1;
- }
-
- if (!size) {
- dev_err(dev, "Adjusted size 0x%llx invalid\n", size);
- return;
- }
dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset);
}
@@ -134,22 +118,59 @@ void of_dma_configure(struct device *dev, struct device_node *np)
* Limit coherent and dma mask based on size and default mask
* set by the driver.
*/
- dev->coherent_dma_mask = min(dev->coherent_dma_mask,
- DMA_BIT_MASK(ilog2(dma_addr + size)));
- *dev->dma_mask = min((*dev->dma_mask),
- DMA_BIT_MASK(ilog2(dma_addr + size)));
+ dev->coherent_dma_mask = min(dev->coherent_dma_mask, range_mask);
+ *dev->dma_mask = min((*dev->dma_mask), range_mask);
+}
+EXPORT_SYMBOL_GPL(of_dma_configure_masks);
+
+/**
+ * of_dma_configure_ops - Setup DMA operations
+ * @dev: Device to apply DMA configuration
+ * @np: Pointer to OF node having DMA configuration
+ *
+ * Try to get the device's DMA configuration from DT and update it
+ * accordingly.
+ */
+int of_dma_configure_ops(struct device *dev, struct device_node *np)
+{
+ u64 dma_addr, paddr, size;
+ struct iommu_ops *iommu;
+ bool coherent;
+ int ret;
+
+ ret = of_dma_get_range(np, &dma_addr, &paddr, &size);
+ if (ret < 0) {
+ dma_addr = 0;
+ size = dev->coherent_dma_mask + 1;
+ }
coherent = of_dma_is_coherent(np);
dev_dbg(dev, "device is%sdma coherent\n",
coherent ? " " : " not ");
iommu = of_iommu_configure(dev, np);
+ if (IS_ERR(iommu))
+ return PTR_ERR(iommu);
dev_dbg(dev, "device is%sbehind an iommu\n",
iommu ? " " : " not ");
arch_setup_dma_ops(dev, dma_addr, size, iommu, coherent);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_dma_configure_ops);
+
+/**
+ * of_dma_deconfigure - Clean up DMA configuration
+ * @dev: Device for which to clean up DMA configuration
+ *
+ * Clean up all configuration performed by of_dma_configure_ops() and free all
+ * resources that have been allocated.
+ */
+void of_dma_deconfigure(struct device *dev)
+{
+ arch_teardown_dma_ops(dev);
}
-EXPORT_SYMBOL_GPL(of_dma_configure);
int of_device_register(struct platform_device *pdev)
{
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index e7bfc175b8e1e..0c9118d849ee1 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
+#include <linux/of_pci.h>
#include <linux/string.h>
#include <linux/slab.h>
@@ -586,13 +587,7 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
u32 rid_in)
{
struct device *parent_dev;
- struct device_node *msi_controller_node;
- struct device_node *msi_np = *np;
- u32 map_mask, masked_rid, rid_base, msi_base, rid_len, phandle;
- int msi_map_len;
- bool matched;
u32 rid_out = rid_in;
- const __be32 *msi_map = NULL;
/*
* Walk up the device parent links looking for one with a
@@ -602,71 +597,14 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
if (!parent_dev->of_node)
continue;
- msi_map = of_get_property(parent_dev->of_node,
- "msi-map", &msi_map_len);
- if (!msi_map)
+ if (!of_property_read_bool(parent_dev->of_node, "msi-map"))
continue;
- if (msi_map_len % (4 * sizeof(__be32))) {
- dev_err(parent_dev, "Error: Bad msi-map length: %d\n",
- msi_map_len);
- return rid_out;
- }
/* We have a good parent_dev and msi_map, let's use them. */
+ of_pci_map_rid(parent_dev->of_node, "msi-map", rid_in, np,
+ &rid_out);
break;
}
- if (!msi_map)
- return rid_out;
-
- /* The default is to select all bits. */
- map_mask = 0xffffffff;
-
- /*
- * Can be overridden by "msi-map-mask" property. If
- * of_property_read_u32() fails, the default is used.
- */
- of_property_read_u32(parent_dev->of_node, "msi-map-mask", &map_mask);
-
- masked_rid = map_mask & rid_in;
- matched = false;
- while (!matched && msi_map_len >= 4 * sizeof(__be32)) {
- rid_base = be32_to_cpup(msi_map + 0);
- phandle = be32_to_cpup(msi_map + 1);
- msi_base = be32_to_cpup(msi_map + 2);
- rid_len = be32_to_cpup(msi_map + 3);
-
- if (rid_base & ~map_mask) {
- dev_err(parent_dev,
- "Invalid msi-map translation - msi-map-mask (0x%x) ignores rid-base (0x%x)\n",
- map_mask, rid_base);
- return rid_out;
- }
-
- msi_controller_node = of_find_node_by_phandle(phandle);
-
- matched = (masked_rid >= rid_base &&
- masked_rid < rid_base + rid_len);
- if (msi_np)
- matched &= msi_np == msi_controller_node;
-
- if (matched && !msi_np) {
- *np = msi_np = msi_controller_node;
- break;
- }
-
- of_node_put(msi_controller_node);
- msi_map_len -= 4 * sizeof(__be32);
- msi_map += 4;
- }
- if (!matched)
- return rid_out;
-
- rid_out = masked_rid - rid_base + msi_base;
- dev_dbg(dev,
- "msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n",
- dev_name(parent_dev), map_mask, rid_base, msi_base,
- rid_len, rid_in, rid_out);
-
return rid_out;
}
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index 13f4fed380485..20bf5a0c57fd1 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -306,3 +306,105 @@ struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node)
EXPORT_SYMBOL_GPL(of_pci_find_msi_chip_by_node);
#endif /* CONFIG_PCI_MSI */
+
+#define MASK_NAME_LEN 32 /* Safely longer than "iommu-map-mask" */
+
+/**
+ * of_pci_map_rid - Translate a requester ID through a downstream mapping.
+ * @np: root complex device node.
+ * @map_name: property name of the map to use.
+ * @target: optional pointer to a target device node.
+ * @rid_out: optional pointer to receive the translated ID.
+ *
+ * Given a PCI requester ID, look up the appropriate implementation-defined
+ * platform ID and/or the target device which receives transactions on that
+ * ID, as per the "iommu-map" and "msi-map" bindings. @target or @rid_out may
+ * be NULL if not required. If @target points to a device node pointer, only
+ * entries targeting that node will be matched; if it points to a NULL
+ * value, it will receive the device node for the first matching target entry,
+ * with a reference held.
+ *
+ * Return: 0 on success or a standard error code on failure.
+ */
+int of_pci_map_rid(struct device_node *np, const char *map_name, u32 rid_in,
+ struct device_node **target, u32 *rid_out)
+{
+ u32 map_mask, masked_rid;
+ int map_len;
+ const __be32 *map = NULL;
+ char mask_name[MASK_NAME_LEN];
+
+ if (!np || !map_name || (!target && !rid_out))
+ return -EINVAL;
+
+ map = of_get_property(np, map_name, &map_len);
+ if (!map) {
+ if (target)
+ return -ENODEV;
+ /* Otherwise, no map implies no translation */
+ *rid_out = rid_in;
+ return 0;
+ }
+
+ if (!map_len || map_len % (4 * sizeof(*map))) {
+ pr_err("%s: Error: Bad %s length: %d\n", np->full_name,
+ map_name, map_len);
+ return -EINVAL;
+ }
+
+ /* The default is to select all bits. */
+ map_mask = 0xffffffff;
+
+ /*
+ * Can be overridden by "{iommu,msi}-map-mask" property.
+ * If of_property_read_u32() fails, the default is used.
+ */
+ snprintf(mask_name, MASK_NAME_LEN, "%s-mask", map_name);
+ of_property_read_u32(np, mask_name, &map_mask);
+
+ masked_rid = map_mask & rid_in;
+ for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
+ struct device_node *phandle_node;
+ u32 rid_base = be32_to_cpup(map + 0);
+ u32 phandle = be32_to_cpup(map + 1);
+ u32 out_base = be32_to_cpup(map + 2);
+ u32 rid_len = be32_to_cpup(map + 3);
+
+ if (rid_base & ~map_mask) {
+ pr_err("%s: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n",
+ np->full_name, map_name, map_name,
+ map_mask, rid_base);
+ return -EINVAL;
+ }
+
+ if (masked_rid < rid_base || masked_rid >= rid_base + rid_len)
+ continue;
+
+ phandle_node = of_find_node_by_phandle(phandle);
+ if (!phandle_node)
+ return -ENODEV;
+
+ if (target) {
+ if (*target)
+ of_node_put(phandle_node);
+ else
+ *target = phandle_node;
+
+ if (*target != phandle_node)
+ continue;
+ }
+
+ if (rid_out)
+ *rid_out = masked_rid - rid_base + out_base;
+
+ pr_debug("%s: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
+ np->full_name, map_name, map_mask, rid_base, out_base,
+ rid_len, rid_in, *rid_out);
+ return 0;
+ }
+
+ pr_err("%s: Invalid %s translation - no match for rid 0x%x on %s\n",
+ np->full_name, map_name, rid_in,
+ target && *target ? (*target)->full_name : "any target");
+ return -EINVAL;
+}
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 8d103e4968be4..12bbc8e167d04 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -151,11 +151,6 @@ struct platform_device *of_device_alloc(struct device_node *np,
}
EXPORT_SYMBOL(of_device_alloc);
-static void of_dma_deconfigure(struct device *dev)
-{
- arch_teardown_dma_ops(dev);
-}
-
/**
* of_platform_device_create_pdata - Alloc, initialize and register an of_device
* @np: pointer to node to create device for
@@ -184,11 +179,10 @@ static struct platform_device *of_platform_device_create_pdata(
dev->dev.bus = &platform_bus_type;
dev->dev.platform_data = platform_data;
- of_dma_configure(&dev->dev, dev->dev.of_node);
+ of_dma_configure_masks(&dev->dev, dev->dev.of_node);
of_msi_configure(&dev->dev, dev->dev.of_node);
if (of_device_add(dev) != 0) {
- of_dma_deconfigure(&dev->dev);
platform_device_put(dev);
goto err_clear_flag;
}
@@ -248,7 +242,8 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
dev_set_name(&dev->dev, "%s", bus_id);
else
of_device_make_bus_id(&dev->dev);
- of_dma_configure(&dev->dev, dev->dev.of_node);
+ of_dma_configure_masks(&dev->dev, dev->dev.of_node);
+ of_dma_configure_ops(&dev->dev, dev->dev.of_node);
/* Allow the HW Peripheral ID to be overridden */
prop = of_get_property(node, "arm,primecell-periphid", NULL);
@@ -484,11 +479,12 @@ static int of_platform_device_destroy(struct device *dev, void *data)
if (dev->bus == &platform_bus_type)
platform_device_unregister(to_platform_device(dev));
#ifdef CONFIG_ARM_AMBA
- else if (dev->bus == &amba_bustype)
+ else if (dev->bus == &amba_bustype) {
amba_device_unregister(to_amba_device(dev));
+ of_dma_deconfigure(dev);
+ }
#endif
- of_dma_deconfigure(dev);
of_node_clear_flag(dev->of_node, OF_POPULATED);
of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
return 0;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 8004f67c57ec6..9aa86aa9f62c3 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1716,7 +1716,8 @@ static void pci_dma_configure(struct pci_dev *dev)
if (IS_ENABLED(CONFIG_OF) &&
bridge->parent && bridge->parent->of_node) {
- of_dma_configure(&dev->dev, bridge->parent->of_node);
+ of_dma_configure_masks(&dev->dev, bridge->parent->of_node);
+ of_dma_configure_ops(&dev->dev, bridge->parent->of_node);
} else if (has_acpi_companion(bridge)) {
struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
enum dev_dma_attr attr = acpi_get_dma_attr(adev);
diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c
index 107cb57c3513c..73feef7e11ffe 100644
--- a/drivers/phy/phy-qcom-ufs.c
+++ b/drivers/phy/phy-qcom-ufs.c
@@ -186,16 +186,27 @@ ufs_qcom_phy_init_clks(struct phy *generic_phy,
struct ufs_qcom_phy *phy_common)
{
int err;
+ struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
err = ufs_qcom_phy_clk_get(generic_phy, "tx_iface_clk",
&phy_common->tx_iface_clk);
+ /*
+ * tx_iface_clk does not exist in newer versions of the UFS PHY
+ * hardware, so do not return an error if it is not found
+ */
if (err)
- goto out;
+ dev_dbg(phy->dev, "%s: failed to get tx_iface_clk\n",
+ __func__);
err = ufs_qcom_phy_clk_get(generic_phy, "rx_iface_clk",
&phy_common->rx_iface_clk);
+ /*
+ * rx_iface_clk does not exist in newer versions of the UFS PHY
+ * hardware, so do not return an error if it is not found
+ */
if (err)
- goto out;
+ dev_dbg(phy->dev, "%s: failed to get rx_iface_clk\n",
+ __func__);
err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk_src",
&phy_common->ref_clk_src);
@@ -536,6 +547,9 @@ int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
if (phy->is_iface_clk_enabled)
goto out;
+ if (!phy->tx_iface_clk)
+ goto out;
+
ret = clk_prepare_enable(phy->tx_iface_clk);
if (ret) {
dev_err(phy->dev, "%s: tx_iface_clk enable failed %d\n",
@@ -561,6 +575,9 @@ void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy)
{
struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
+ if (!phy->tx_iface_clk)
+ return;
+
if (phy->is_iface_clk_enabled) {
clk_disable_unprepare(phy->tx_iface_clk);
clk_disable_unprepare(phy->rx_iface_clk);
diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig
index a67eeace6a89b..67e77e4e86a95 100644
--- a/drivers/power/avs/Kconfig
+++ b/drivers/power/avs/Kconfig
@@ -11,6 +11,20 @@ menuconfig POWER_AVS
Say Y here to enable Adaptive Voltage Scaling class support.
+config QCOM_CPR
+ tristate "QCOM Core Power Reduction (CPR) support"
+ select PM_OPP
+ help
+ Say Y here to enable support for the CPR hardware found on Qualcomm
+ SoCs like MSM8916.
+
+ This driver populates CPU OPPs tables and makes adjustments to the
+ tables based on feedback from the CPR hardware. If you want to do
+ CPU frequency scaling, say Y here.
+
+ To compile this driver as a module, choose M here: the module will
+ be called qcom-cpr.
+
config ROCKCHIP_IODOMAIN
tristate "Rockchip IO domain support"
depends on POWER_AVS && ARCH_ROCKCHIP && OF
diff --git a/drivers/power/avs/Makefile b/drivers/power/avs/Makefile
index ba4c7bc692253..88f4d5d49cba2 100644
--- a/drivers/power/avs/Makefile
+++ b/drivers/power/avs/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_POWER_AVS_OMAP) += smartreflex.o
obj-$(CONFIG_ROCKCHIP_IODOMAIN) += rockchip-io-domain.o
+obj-$(CONFIG_QCOM_CPR) += qcom-cpr.o
diff --git a/drivers/power/avs/qcom-cpr.c b/drivers/power/avs/qcom-cpr.c
new file mode 100644
index 0000000000000..3daa2153d742d
--- /dev/null
+++ b/drivers/power/avs/qcom-cpr.c
@@ -0,0 +1,1988 @@
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/cpufreq.h>
+#include <linux/bitops.h>
+#include <linux/regulator/qcom_smd-regulator.h>
+
+/* Register Offsets for RB-CPR and Bit Definitions */
+
+/* RBCPR Version Register */
+#define REG_RBCPR_VERSION 0
+#define RBCPR_VER_2 0x02
+
+/* RBCPR Gate Count and Target Registers */
+#define REG_RBCPR_GCNT_TARGET(n) (0x60 + 4 * n)
+
+#define RBCPR_GCNT_TARGET_TARGET_SHIFT 0
+#define RBCPR_GCNT_TARGET_TARGET_MASK GENMASK(11, 0)
+#define RBCPR_GCNT_TARGET_GCNT_SHIFT 12
+#define RBCPR_GCNT_TARGET_GCNT_MASK GENMASK(9, 0)
+
+/* RBCPR Timer Control */
+#define REG_RBCPR_TIMER_INTERVAL 0x44
+#define REG_RBIF_TIMER_ADJUST 0x4c
+
+#define RBIF_TIMER_ADJ_CONS_UP_MASK GENMASK(3, 0)
+#define RBIF_TIMER_ADJ_CONS_UP_SHIFT 0
+#define RBIF_TIMER_ADJ_CONS_DOWN_MASK GENMASK(3, 0)
+#define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT 4
+#define RBIF_TIMER_ADJ_CLAMP_INT_MASK GENMASK(7, 0)
+#define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT 8
+
+/* RBCPR Config Register */
+#define REG_RBIF_LIMIT 0x48
+#define RBIF_LIMIT_CEILING_MASK GENMASK(5, 0)
+#define RBIF_LIMIT_CEILING_SHIFT 6
+#define RBIF_LIMIT_FLOOR_BITS 6
+#define RBIF_LIMIT_FLOOR_MASK GENMASK(5, 0)
+
+#define RBIF_LIMIT_CEILING_DEFAULT RBIF_LIMIT_CEILING_MASK
+#define RBIF_LIMIT_FLOOR_DEFAULT 0
+
+#define REG_RBIF_SW_VLEVEL 0x94
+#define RBIF_SW_VLEVEL_DEFAULT 0x20
+
+#define REG_RBCPR_STEP_QUOT 0x80
+#define RBCPR_STEP_QUOT_STEPQUOT_MASK GENMASK(7, 0)
+#define RBCPR_STEP_QUOT_IDLE_CLK_MASK GENMASK(3, 0)
+#define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT 8
+
+/* RBCPR Control Register */
+#define REG_RBCPR_CTL 0x90
+
+#define RBCPR_CTL_LOOP_EN BIT(0)
+#define RBCPR_CTL_TIMER_EN BIT(3)
+#define RBCPR_CTL_SW_AUTO_CONT_ACK_EN BIT(5)
+#define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN BIT(6)
+#define RBCPR_CTL_COUNT_MODE BIT(10)
+#define RBCPR_CTL_UP_THRESHOLD_MASK GENMASK(3, 0)
+#define RBCPR_CTL_UP_THRESHOLD_SHIFT 24
+#define RBCPR_CTL_DN_THRESHOLD_MASK GENMASK(3, 0)
+#define RBCPR_CTL_DN_THRESHOLD_SHIFT 28
+
+/* RBCPR Ack/Nack Response */
+#define REG_RBIF_CONT_ACK_CMD 0x98
+#define REG_RBIF_CONT_NACK_CMD 0x9c
+
+/* RBCPR Result status Register */
+#define REG_RBCPR_RESULT_0 0xa0
+
+#define RBCPR_RESULT0_BUSY_SHIFT 19
+#define RBCPR_RESULT0_BUSY_MASK BIT(RBCPR_RESULT0_BUSY_SHIFT)
+#define RBCPR_RESULT0_ERROR_LT0_SHIFT 18
+#define RBCPR_RESULT0_ERROR_SHIFT 6
+#define RBCPR_RESULT0_ERROR_MASK GENMASK(11, 0)
+#define RBCPR_RESULT0_ERROR_STEPS_SHIFT 2
+#define RBCPR_RESULT0_ERROR_STEPS_MASK GENMASK(3, 0)
+#define RBCPR_RESULT0_STEP_UP_SHIFT 1
+
+/* RBCPR Interrupt Control Register */
+#define REG_RBIF_IRQ_EN(n) (0x100 + 4 * n)
+#define REG_RBIF_IRQ_CLEAR 0x110
+#define REG_RBIF_IRQ_STATUS 0x114
+
+#define CPR_INT_DONE BIT(0)
+#define CPR_INT_MIN BIT(1)
+#define CPR_INT_DOWN BIT(2)
+#define CPR_INT_MID BIT(3)
+#define CPR_INT_UP BIT(4)
+#define CPR_INT_MAX BIT(5)
+#define CPR_INT_CLAMP BIT(6)
+#define CPR_INT_ALL (CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \
+ CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP)
+#define CPR_INT_DEFAULT (CPR_INT_UP | CPR_INT_DOWN)
+
+#define CPR_NUM_RING_OSC 8
+
+/* RBCPR Clock Control Register */
+#define RBCPR_CLK_SEL_MASK BIT(-1)
+#define RBCPR_CLK_SEL_19P2_MHZ 0
+#define RBCPR_CLK_SEL_AHB_CLK BIT(0)
+
+/* CPR eFuse parameters */
+#define CPR_FUSE_TARGET_QUOT_BITS_MASK GENMASK(11, 0)
+
+#define CPR_FUSE_MIN_QUOT_DIFF 50
+
+#define SPEED_BIN_NONE UINT_MAX
+
+#define FUSE_REVISION_UNKNOWN (-1)
+#define FUSE_MAP_NO_MATCH (-1)
+#define FUSE_PARAM_MATCH_ANY 0xffffffff
+
+/**
+ * enum vdd_mx_vmin_method - Method to determine vmin for vdd-mx
+ * @VDD_MX_VMIN_APC: Use APC voltage
+ * @VDD_MX_VMIN_APC_CORNER_CEILING: Use PVS corner ceiling voltage
+ * @VDD_MX_VMIN_APC_SLOW_CORNER_CEILING: Use slow speed corner ceiling
+ * @VDD_MX_VMIN_MX_VMAX: Use specified vdd-mx-vmax voltage
+ * @VDD_MX_VMIN_APC_CORNER_MAP: Use APC corner mapped MX voltage
+ */
+enum vdd_mx_vmin_method {
+ VDD_MX_VMIN_APC,
+ VDD_MX_VMIN_APC_CORNER_CEILING,
+ VDD_MX_VMIN_APC_SLOW_CORNER_CEILING,
+ VDD_MX_VMIN_MX_VMAX,
+ VDD_MX_VMIN_APC_CORNER_MAP,
+};
+/* TODO: Trim this enum down to only the methods that are actually used */
+
+enum voltage_change_dir {
+ NO_CHANGE,
+ DOWN,
+ UP,
+};
+
+struct qfprom_offset {
+ u16 offset;
+ u8 width;
+ u8 shift;
+};
+
+struct cpr_fuse {
+ struct qfprom_offset ring_osc;
+ struct qfprom_offset init_voltage;
+ struct qfprom_offset quotient;
+ struct qfprom_offset quotient_offset;
+};
+
+struct fuse_corner_data {
+ int ref_uV;
+ int max_uV;
+ int min_uV;
+ int max_quot_scale;
+ int quot_offset;
+ int quot_scale;
+ int max_volt_scale;
+ int vdd_mx_req;
+};
+
+struct cpr_fuses {
+ struct qfprom_offset redundant;
+ u8 redundant_value;
+ int init_voltage_step;
+ struct fuse_corner_data *fuse_corner_data;
+ struct cpr_fuse *cpr_fuse;
+ struct qfprom_offset *disable;
+};
+
+struct pvs_bin {
+ int *uV;
+};
+
+struct pvs_fuses {
+ struct qfprom_offset redundant;
+ u8 redundant_value;
+ struct qfprom_offset *pvs_fuse;
+ struct pvs_bin *pvs_bins;
+};
+
+struct corner_data {
+ unsigned int fuse_corner;
+ unsigned long freq;
+};
+
+struct freq_plan {
+ u32 speed_bin;
+ u32 pvs_version;
+ const struct corner_data **plan;
+};
+
+struct fuse_conditional_min_volt {
+ struct qfprom_offset redundant;
+ u8 expected;
+ int min_uV;
+};
+
+struct fuse_uplift_wa {
+ struct qfprom_offset redundant;
+ u8 expected;
+ int uV;
+ int *quot;
+ int max_uV;
+ int speed_bin;
+};
+
+struct corner_override {
+ u32 speed_bin;
+ u32 pvs_version;
+ int *max_uV;
+ int *min_uV;
+};
+
+struct corner_adjustment {
+ u32 speed_bin;
+ u32 pvs_version;
+ u32 cpr_rev;
+ u8 *ring_osc_idx;
+ int *fuse_quot;
+ int *fuse_quot_diff;
+ int *fuse_quot_min;
+ int *fuse_quot_offset;
+ int *fuse_init_uV;
+ int *quot;
+ int *init_uV;
+ bool disable_closed_loop;
+};
+
+struct cpr_desc {
+ unsigned int num_fuse_corners;
+ unsigned int num_corners;
+ enum vdd_mx_vmin_method vdd_mx_vmin_method;
+ int vdd_mx_vmax;
+ int min_diff_quot;
+ int *step_quot;
+ struct cpr_fuses cpr_fuses;
+ struct qfprom_offset fuse_revision;
+ struct qfprom_offset speed_bin;
+ struct qfprom_offset pvs_version;
+ struct corner_data *corner_data;
+ struct freq_plan *freq_plans;
+ size_t num_freq_plans;
+ struct pvs_fuses *pvs_fuses;
+ struct fuse_conditional_min_volt *min_volt_fuse;
+ struct fuse_uplift_wa *uplift_wa;
+ struct corner_override *corner_overrides;
+ size_t num_corner_overrides;
+ struct corner_adjustment *adjustments;
+ size_t num_adjustments;
+ bool reduce_to_fuse_uV;
+ bool reduce_to_corner_uV;
+};
+
+struct acc_desc {
+ unsigned int enable_reg;
+ u32 enable_mask;
+
+ struct reg_sequence *settings;
+ struct reg_sequence *override_settings;
+ int num_regs_per_fuse;
+
+ struct qfprom_offset override;
+ u8 override_value;
+};
+
+struct fuse_corner {
+ int min_uV;
+ int max_uV;
+ int uV;
+ int quot;
+ int step_quot;
+ const struct reg_sequence *accs;
+ int num_accs;
+ int vdd_mx_req;
+ unsigned long max_freq;
+ u8 ring_osc_idx;
+};
+
+struct corner {
+ int min_uV;
+ int max_uV;
+ int uV;
+ int last_uV;
+ int quot_adjust;
+ u32 save_ctl;
+ u32 save_irq;
+ unsigned long freq;
+ struct fuse_corner *fuse_corner;
+};
+
+struct cpr_drv {
+ unsigned int num_fuse_corners;
+ unsigned int num_corners;
+
+ unsigned int nb_count;
+ struct notifier_block cpufreq_nb;
+ bool switching_opp;
+ struct notifier_block reg_nb;
+
+ unsigned int ref_clk_khz;
+ unsigned int timer_delay_us;
+ unsigned int timer_cons_up;
+ unsigned int timer_cons_down;
+ unsigned int up_threshold;
+ unsigned int down_threshold;
+ unsigned int idle_clocks;
+ unsigned int gcnt_us;
+ unsigned int vdd_apc_step_up_limit;
+ unsigned int vdd_apc_step_down_limit;
+ unsigned int clamp_timer_interval;
+ int ceiling_max;
+ enum vdd_mx_vmin_method vdd_mx_vmin_method;
+ int vdd_mx_vmax;
+
+ struct mutex lock;
+ void __iomem *base;
+ struct corner *corner;
+ struct regulator *vdd_apc;
+ struct regulator *vdd_mx;
+ struct clk *cpu_clk;
+ struct device *cpu_dev;
+ struct regmap *tcsr;
+ bool loop_disabled;
+ bool suspended;
+ u32 gcnt;
+ unsigned long flags;
+#define FLAGS_IGNORE_1ST_IRQ_STATUS BIT(0)
+
+ struct fuse_corner *fuse_corners;
+ struct corner *corners;
+};
+
+static bool cpr_is_allowed(struct cpr_drv *drv)
+{
+ if (drv->loop_disabled) /* || disabled in software */
+ return false;
+ else
+ return true;
+}
+
+static void cpr_write(struct cpr_drv *drv, u32 offset, u32 value)
+{
+ writel_relaxed(value, drv->base + offset);
+}
+
+static u32 cpr_read(struct cpr_drv *drv, u32 offset)
+{
+ return readl_relaxed(drv->base + offset);
+}
+
+static void
+cpr_masked_write(struct cpr_drv *drv, u32 offset, u32 mask, u32 value)
+{
+ u32 val;
+
+ val = readl_relaxed(drv->base + offset);
+ val &= ~mask;
+ val |= value & mask;
+ writel_relaxed(val, drv->base + offset);
+}
+
+static void cpr_irq_clr(struct cpr_drv *drv)
+{
+ cpr_write(drv, REG_RBIF_IRQ_CLEAR, CPR_INT_ALL);
+}
+
+static void cpr_irq_clr_nack(struct cpr_drv *drv)
+{
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
+}
+
+static void cpr_irq_clr_ack(struct cpr_drv *drv)
+{
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
+}
+
+static void cpr_irq_set(struct cpr_drv *drv, u32 int_bits)
+{
+ cpr_write(drv, REG_RBIF_IRQ_EN(0), int_bits);
+}
+
+static void cpr_ctl_modify(struct cpr_drv *drv, u32 mask, u32 value)
+{
+ cpr_masked_write(drv, REG_RBCPR_CTL, mask, value);
+}
+
+static void cpr_ctl_enable(struct cpr_drv *drv, struct corner *corner)
+{
+ u32 val, mask;
+
+ if (drv->suspended)
+ return;
+
+ /* Program Consecutive Up & Down */
+ val = drv->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
+ val |= drv->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
+ mask = RBIF_TIMER_ADJ_CONS_UP_MASK | RBIF_TIMER_ADJ_CONS_DOWN_MASK;
+ cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST, mask, val);
+ cpr_masked_write(drv, REG_RBCPR_CTL,
+ RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+ RBCPR_CTL_SW_AUTO_CONT_ACK_EN,
+ corner->save_ctl);
+ cpr_irq_set(drv, corner->save_irq);
+
+ if (cpr_is_allowed(drv) /*&& drv->vreg_enabled */ &&
+ corner->max_uV > corner->min_uV)
+ val = RBCPR_CTL_LOOP_EN;
+ else
+ val = 0;
+ cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, val);
+}
+
+static void cpr_ctl_disable(struct cpr_drv *drv)
+{
+ if (drv->suspended)
+ return;
+
+ cpr_irq_set(drv, 0);
+ cpr_ctl_modify(drv, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+ RBCPR_CTL_SW_AUTO_CONT_ACK_EN, 0);
+ cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST,
+ RBIF_TIMER_ADJ_CONS_UP_MASK |
+ RBIF_TIMER_ADJ_CONS_DOWN_MASK, 0);
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
+ cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
+ cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, 0);
+}
+
+static bool cpr_ctl_is_enabled(struct cpr_drv *drv)
+{
+ u32 reg_val;
+
+ reg_val = cpr_read(drv, REG_RBCPR_CTL);
+ return reg_val & RBCPR_CTL_LOOP_EN;
+}
+
+static bool cpr_ctl_is_busy(struct cpr_drv *drv)
+{
+ u32 reg_val;
+
+ reg_val = cpr_read(drv, REG_RBCPR_RESULT_0);
+ return reg_val & RBCPR_RESULT0_BUSY_MASK;
+}
+
+static void cpr_corner_save(struct cpr_drv *drv, struct corner *corner)
+{
+ corner->save_ctl = cpr_read(drv, REG_RBCPR_CTL);
+ corner->save_irq = cpr_read(drv, REG_RBIF_IRQ_EN(0));
+}
+
+static void cpr_corner_restore(struct cpr_drv *drv, struct corner *corner)
+{
+ u32 gcnt, ctl, irq, ro_sel, step_quot;
+ struct fuse_corner *fuse = corner->fuse_corner;
+ int i;
+
+ ro_sel = fuse->ring_osc_idx;
+ gcnt = drv->gcnt;
+ gcnt |= fuse->quot - corner->quot_adjust;
+
+ /* Program the step quotient and idle clocks */
+ step_quot = drv->idle_clocks << RBCPR_STEP_QUOT_IDLE_CLK_SHIFT;
+ step_quot |= fuse->step_quot;
+ cpr_write(drv, REG_RBCPR_STEP_QUOT, step_quot);
+
+ /* Clear the target quotient value and gate count of all ROs */
+ for (i = 0; i < CPR_NUM_RING_OSC; i++)
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);
+
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(ro_sel), gcnt);
+ ctl = corner->save_ctl;
+ cpr_write(drv, REG_RBCPR_CTL, ctl);
+ irq = corner->save_irq;
+ cpr_irq_set(drv, irq);
+ pr_debug("gcnt = 0x%08x, ctl = 0x%08x, irq = 0x%08x\n", gcnt, ctl, irq);
+}
+
+static void cpr_corner_switch(struct cpr_drv *drv, struct corner *corner)
+{
+ if (drv->corner == corner)
+ return;
+
+ cpr_corner_restore(drv, corner);
+}
+
+static int
+cpr_mx_get(struct cpr_drv *drv, struct fuse_corner *fuse, int apc_volt)
+{
+ int vdd_mx;
+ struct fuse_corner *highest_fuse;
+
+ highest_fuse = &drv->fuse_corners[drv->num_fuse_corners - 1];
+
+ switch (drv->vdd_mx_vmin_method) {
+ case VDD_MX_VMIN_APC:
+ vdd_mx = apc_volt;
+ break;
+ case VDD_MX_VMIN_APC_CORNER_CEILING:
+ vdd_mx = fuse->max_uV;
+ break;
+ case VDD_MX_VMIN_APC_SLOW_CORNER_CEILING:
+ vdd_mx = highest_fuse->max_uV;
+ break;
+ case VDD_MX_VMIN_MX_VMAX:
+ vdd_mx = drv->vdd_mx_vmax;
+ break;
+ case VDD_MX_VMIN_APC_CORNER_MAP:
+ vdd_mx = fuse->vdd_mx_req;
+ break;
+ default:
+ BUG();
+ }
+
+ return vdd_mx;
+}
+
+static void cpr_set_acc(struct regmap *tcsr, struct fuse_corner *f,
+ struct fuse_corner *end)
+{
+ if (f < end) {
+ for (f += 1; f <= end; f++)
+ regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
+ } else {
+ for (f -= 1; f >= end; f--)
+ regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
+ }
+}
+
+static int cpr_pre_voltage(struct cpr_drv *drv,
+ struct fuse_corner *fuse_corner,
+ enum voltage_change_dir dir, int vdd_mx_vmin)
+{
+ int ret = 0;
+ struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;
+
+ if (drv->tcsr && dir == DOWN)
+ cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);
+
+ if (vdd_mx_vmin && dir == UP)
+ ret = qcom_rpm_set_corner(drv->vdd_mx, vdd_mx_vmin);
+
+ return ret;
+}
+
+static int cpr_post_voltage(struct cpr_drv *drv,
+ struct fuse_corner *fuse_corner,
+ enum voltage_change_dir dir, int vdd_mx_vmin)
+{
+ int ret = 0;
+ struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;
+
+ if (drv->tcsr && dir == UP)
+ cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);
+
+ if (vdd_mx_vmin && dir == DOWN)
+ ret = qcom_rpm_set_corner(drv->vdd_mx, vdd_mx_vmin);
+
+ return ret;
+}
+
+static int cpr_regulator_notifier(struct notifier_block *nb,
+ unsigned long event, void *d)
+{
+ struct cpr_drv *drv = container_of(nb, struct cpr_drv, reg_nb);
+ u32 val, mask;
+ int last_uV, new_uV;
+
+ switch (event) {
+ case REGULATOR_EVENT_VOLTAGE_CHANGE:
+ new_uV = (int)(uintptr_t)d;
+ break;
+ default:
+ return 0;
+ }
+
+ mutex_lock(&drv->lock);
+
+ last_uV = drv->corner->last_uV;
+
+ if (drv->switching_opp) {
+ goto unlock;
+ } else if (last_uV < new_uV) {
+ /* Disable auto nack down */
+ mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+ val = 0;
+ } else if (last_uV > new_uV) {
+ /* Restore default threshold for UP */
+ mask = RBCPR_CTL_UP_THRESHOLD_MASK;
+ mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val = drv->up_threshold;
+ val <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ } else { /* Somehow it's the same? */
+ goto unlock;
+ }
+
+ cpr_ctl_modify(drv, mask, val);
+
+ /* Re-enable default interrupts */
+ cpr_irq_set(drv, CPR_INT_DEFAULT);
+
+ /* Ack */
+ cpr_irq_clr_ack(drv);
+
+ /* Save register values for the corner */
+ cpr_corner_save(drv, drv->corner);
+ drv->corner->last_uV = new_uV;
+unlock:
+ mutex_unlock(&drv->lock);
+
+ return 0;
+}
+
+static int cpr_scale(struct cpr_drv *drv, enum voltage_change_dir dir)
+{
+ u32 val, error_steps, reg_mask;
+ int last_uV, new_uV, step_uV;
+ struct corner *corner;
+ struct fuse_corner *fuse;
+
+ //step_uV = regulator_get_linear_step(drv->vdd_apc);
+ step_uV = 12500; /* TODO: get the regulator step voltage here */
+ corner = drv->corner;
+ fuse = corner->fuse_corner;
+
+ val = cpr_read(drv, REG_RBCPR_RESULT_0);
+
+ error_steps = val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT;
+ error_steps &= RBCPR_RESULT0_ERROR_STEPS_MASK;
+ last_uV = corner->last_uV;
+
+ if (dir == UP) {
+ if (drv->clamp_timer_interval &&
+ error_steps < drv->up_threshold) {
+ /*
+ * Handle the case where another measurement started
+ * after the interrupt was triggered due to a core
+ * exiting from power collapse.
+ */
+ error_steps = max(drv->up_threshold,
+ drv->vdd_apc_step_up_limit);
+ }
+
+ if (last_uV >= corner->max_uV) {
+ cpr_irq_clr_nack(drv);
+
+ /* Maximize the UP threshold */
+ reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
+ reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val = reg_mask;
+ cpr_ctl_modify(drv, reg_mask, val);
+
+ /* Disable UP interrupt */
+ cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_UP);
+
+ return 0;
+ }
+
+ if (error_steps > drv->vdd_apc_step_up_limit)
+ error_steps = drv->vdd_apc_step_up_limit;
+
+ /* Calculate new voltage */
+ new_uV = last_uV + error_steps * step_uV;
+ if (new_uV > corner->max_uV)
+ new_uV = corner->max_uV;
+ } else if (dir == DOWN) {
+ if (drv->clamp_timer_interval
+ && error_steps < drv->down_threshold) {
+ /*
+ * Handle the case where another measurement started
+ * after the interrupt was triggered due to a core
+ * exiting from power collapse.
+ */
+ error_steps = max(drv->down_threshold,
+ drv->vdd_apc_step_down_limit);
+ }
+
+ if (last_uV <= corner->min_uV) {
+ cpr_irq_clr_nack(drv);
+
+ /* Enable auto nack down */
+ reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+ val = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+
+ cpr_ctl_modify(drv, reg_mask, val);
+
+ /* Disable DOWN interrupt */
+ cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_DOWN);
+
+ return 0;
+ }
+
+ if (error_steps > drv->vdd_apc_step_down_limit)
+ error_steps = drv->vdd_apc_step_down_limit;
+
+ /* Calculate new voltage */
+ new_uV = last_uV - error_steps * step_uV;
+ if (new_uV < corner->min_uV)
+ new_uV = corner->min_uV;
+ }
+
+ return new_uV;
+}
+
+static irqreturn_t cpr_irq_handler(int irq, void *dev)
+{
+ struct cpr_drv *drv = dev;
+ u32 val;
+ int new_uV = 0;
+ struct corner *corner;
+
+ mutex_lock(&drv->lock);
+
+ val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+ if (drv->flags & FLAGS_IGNORE_1ST_IRQ_STATUS)
+ val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+
+ pr_debug("IRQ_STATUS = %#02x\n", val);
+
+ if (!cpr_ctl_is_enabled(drv)) {
+ pr_debug("CPR is disabled\n");
+ goto unlock;
+ } else if (cpr_ctl_is_busy(drv) && !drv->clamp_timer_interval) {
+ pr_debug("CPR measurement is not ready\n");
+ goto unlock;
+ } else if (!cpr_is_allowed(drv)) {
+ val = cpr_read(drv, REG_RBCPR_CTL);
+ pr_err_ratelimited("Interrupt broken? RBCPR_CTL = %#02x\n",
+ val);
+ goto unlock;
+ }
+
+ /* Following sequence of handling is as per each IRQ's priority */
+ if (val & CPR_INT_UP) {
+ new_uV = cpr_scale(drv, UP);
+ } else if (val & CPR_INT_DOWN) {
+ new_uV = cpr_scale(drv, DOWN);
+ } else if (val & CPR_INT_MIN) {
+ cpr_irq_clr_nack(drv);
+ } else if (val & CPR_INT_MAX) {
+ cpr_irq_clr_nack(drv);
+ } else if (val & CPR_INT_MID) {
+ /* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */
+ pr_debug("IRQ occurred for Mid Flag\n");
+ } else {
+ pr_debug("IRQ occurred for unknown flag (%#08x)\n", val);
+ }
+
+ /* Save register values for the corner */
+ corner = drv->corner;
+ cpr_corner_save(drv, corner);
+unlock:
+ mutex_unlock(&drv->lock);
+
+ if (new_uV)
+ dev_pm_opp_adjust_voltage(drv->cpu_dev, corner->freq, new_uV);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * TODO: Register for hotplug notifier and turn on/off CPR when CPUs are offline
+ */
+static int cpr_enable(struct cpr_drv *drv)
+{
+ int ret;
+
+ /* Enable dependency power before vdd_apc */
+ if (drv->vdd_mx) {
+ ret = regulator_enable(drv->vdd_mx);
+ if (ret)
+ return ret;
+ }
+
+ ret = regulator_enable(drv->vdd_apc);
+ if (ret)
+ return ret;
+
+ mutex_lock(&drv->lock);
+ //drv->vreg_enabled = true;
+ if (cpr_is_allowed(drv) && drv->corner) {
+ cpr_irq_clr(drv);
+ cpr_corner_restore(drv, drv->corner);
+ cpr_ctl_enable(drv, drv->corner);
+ }
+ mutex_unlock(&drv->lock);
+ pr_info("CPR is enabled!\n");
+
+ return 0;
+}
+
+static int cpr_disable(struct cpr_drv *drv)
+{
+ int ret;
+
+ ret = regulator_disable(drv->vdd_apc);
+ if (ret)
+ return ret;
+
+ if (drv->vdd_mx)
+ ret = regulator_disable(drv->vdd_mx);
+ if (ret)
+ return ret;
+
+ mutex_lock(&drv->lock);
+ //drv->vreg_enabled = false;
+ if (cpr_is_allowed(drv))
+ cpr_ctl_disable(drv);
+ mutex_unlock(&drv->lock);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cpr_suspend(struct device *dev)
+{
+ struct cpr_drv *drv = platform_get_drvdata(to_platform_device(dev));
+
+ if (cpr_is_allowed(drv)) {
+ mutex_lock(&drv->lock);
+ cpr_ctl_disable(drv);
+ cpr_irq_clr(drv);
+ drv->suspended = true;
+ mutex_unlock(&drv->lock);
+ }
+
+ return 0;
+}
+
+static int cpr_resume(struct device *dev)
+{
+ struct cpr_drv *drv = platform_get_drvdata(to_platform_device(dev));
+
+ if (cpr_is_allowed(drv)) {
+ mutex_lock(&drv->lock);
+ drv->suspended = false;
+ cpr_irq_clr(drv);
+ cpr_ctl_enable(drv, drv->corner);
+ mutex_unlock(&drv->lock);
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cpr_pm_ops, cpr_suspend, cpr_resume);
+
+static int cpr_config(struct cpr_drv *drv)
+{
+ int i;
+ u32 val, gcnt;
+ struct corner *corner;
+
+ /* Disable interrupt and CPR */
+ cpr_write(drv, REG_RBIF_IRQ_EN(0), 0);
+ cpr_write(drv, REG_RBCPR_CTL, 0);
+
+ /* Program the default HW Ceiling, Floor and vlevel */
+ val = RBIF_LIMIT_CEILING_DEFAULT << RBIF_LIMIT_CEILING_SHIFT;
+ val |= RBIF_LIMIT_FLOOR_DEFAULT;
+ cpr_write(drv, REG_RBIF_LIMIT, val);
+ cpr_write(drv, REG_RBIF_SW_VLEVEL, RBIF_SW_VLEVEL_DEFAULT);
+
+ /* Clear the target quotient value and gate count of all ROs */
+ for (i = 0; i < CPR_NUM_RING_OSC; i++)
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);
+
+ /* Init and save gcnt */
+ gcnt = (drv->ref_clk_khz * drv->gcnt_us) / 1000;
+ gcnt = gcnt & RBCPR_GCNT_TARGET_GCNT_MASK;
+ gcnt <<= RBCPR_GCNT_TARGET_GCNT_SHIFT;
+ drv->gcnt = gcnt;
+
+ /* Program the delay count for the timer */
+ val = (drv->ref_clk_khz * drv->timer_delay_us) / 1000;
+ cpr_write(drv, REG_RBCPR_TIMER_INTERVAL, val);
+ pr_debug("Timer count: 0x%0x (for %d us)\n", val, drv->timer_delay_us);
+
+ /* Program Consecutive Up & Down */
+ val = drv->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
+ val |= drv->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
+ val |= drv->clamp_timer_interval << RBIF_TIMER_ADJ_CLAMP_INT_SHIFT;
+ cpr_write(drv, REG_RBIF_TIMER_ADJUST, val);
+
+ /* Program the control register */
+ val = drv->up_threshold << RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val |= drv->down_threshold << RBCPR_CTL_DN_THRESHOLD_SHIFT;
+ val |= RBCPR_CTL_TIMER_EN | RBCPR_CTL_COUNT_MODE;
+ val |= RBCPR_CTL_SW_AUTO_CONT_ACK_EN;
+ cpr_write(drv, REG_RBCPR_CTL, val);
+
+ for (i = 0; i < drv->num_corners; i++) {
+ corner = &drv->corners[i];
+ corner->save_ctl = val;
+ corner->save_irq = CPR_INT_DEFAULT;
+ }
+
+ cpr_irq_set(drv, CPR_INT_DEFAULT);
+
+ val = cpr_read(drv, REG_RBCPR_VERSION);
+ if (val <= RBCPR_VER_2)
+ drv->flags |= FLAGS_IGNORE_1ST_IRQ_STATUS;
+
+ return 0;
+}
+
+/* Called twice for each CPU in policy, one pre and one post event */
+static int
+cpr_cpufreq_notifier(struct notifier_block *nb, unsigned long event, void *f)
+{
+ struct cpr_drv *drv = container_of(nb, struct cpr_drv, cpufreq_nb);
+ struct cpufreq_freqs *freqs = f;
+ unsigned long old = freqs->old * 1000;
+ unsigned long new = freqs->new * 1000;
+ struct corner *corner, *end;
+ enum voltage_change_dir dir;
+ int ret = 0, new_uV;
+ int vdd_mx_vmin = 0;
+ struct fuse_corner *fuse_corner;
+
+ /* Determine direction */
+ if (old > new)
+ dir = DOWN;
+ else if (old < new)
+ dir = UP;
+ else
+ dir = NO_CHANGE;
+
+ /* Determine new corner we're going to */
+ corner = drv->corners;
+ end = &corner[drv->num_corners - 1];
+ for (; corner <= end; corner++)
+ if (corner->freq == new)
+ break;
+
+ if (corner > end)
+ return -EINVAL;
+
+ fuse_corner = corner->fuse_corner;
+
+ if (cpr_is_allowed(drv)) {
+ new_uV = corner->last_uV;
+ } else {
+ new_uV = corner->uV;
+ }
+
+ if (dir != NO_CHANGE && drv->vdd_mx)
+ vdd_mx_vmin = cpr_mx_get(drv, fuse_corner, new_uV);
+
+ mutex_lock(&drv->lock);
+ if (event == CPUFREQ_PRECHANGE) {
+ if (drv->nb_count++)
+ goto unlock;
+
+ pr_debug("Pre change [%ld] %p @ %lu?\n", corner - drv->corners,
+ corner, corner->freq);
+ if (cpr_is_allowed(drv))
+ cpr_ctl_disable(drv);
+
+ ret = cpr_pre_voltage(drv, fuse_corner, dir, vdd_mx_vmin);
+ if (ret)
+ goto unlock;
+
+ drv->switching_opp = true;
+ }
+
+ if (event == CPUFREQ_POSTCHANGE) {
+ if (--drv->nb_count)
+ goto unlock;
+
+ pr_debug("Post change [%ld] %p @ %lu?\n", corner - drv->corners,
+ corner, corner->freq);
+
+ ret = cpr_post_voltage(drv, fuse_corner, dir, vdd_mx_vmin);
+ if (ret)
+ goto unlock;
+
+ if (cpr_is_allowed(drv) /* && drv->vreg_enabled */) {
+ cpr_irq_clr(drv);
+ cpr_corner_switch(drv, corner);
+ cpr_ctl_enable(drv, corner);
+ }
+
+ drv->corner = corner;
+ drv->switching_opp = false;
+ }
+unlock:
+ mutex_unlock(&drv->lock);
+
+ return ret;
+}
+
+static u32 cpr_read_efuse(void __iomem *prom, const struct qfprom_offset *efuse)
+{
+ u64 buffer = 0;
+ u8 val;
+ int i, num_bytes;
+
+ num_bytes = DIV_ROUND_UP(efuse->width + efuse->shift, BITS_PER_BYTE);
+
+ for (i = 0; i < num_bytes; i++) {
+ val = readb_relaxed(prom + efuse->offset + i);
+ buffer |= val << (i * BITS_PER_BYTE);
+ }
+
+ buffer >>= efuse->shift;
+ buffer &= BIT(efuse->width) - 1;
+
+ return buffer;
+}
+
+static void
+cpr_populate_ring_osc_idx(const struct cpr_fuse *fuses, struct cpr_drv *drv,
+ void __iomem *prom)
+{
+ struct fuse_corner *fuse = drv->fuse_corners;
+ struct fuse_corner *end = fuse + drv->num_fuse_corners;
+
+ for (; fuse < end; fuse++, fuses++)
+ fuse->ring_osc_idx = cpr_read_efuse(prom, &fuses->ring_osc);
+}
+
+
+static const struct corner_adjustment *cpr_find_adjustment(u32 speed_bin,
+ u32 pvs_version, u32 cpr_rev, const struct cpr_desc *desc,
+ const struct cpr_drv *drv)
+{
+ int i, j;
+ u32 val, ro;
+ struct corner_adjustment *a;
+
+ for (i = 0; i < desc->num_adjustments; i++) {
+ a = &desc->adjustments[i];
+
+ if (a->speed_bin != speed_bin &&
+ a->speed_bin != FUSE_PARAM_MATCH_ANY)
+ continue;
+ if (a->pvs_version != pvs_version &&
+ a->pvs_version != FUSE_PARAM_MATCH_ANY)
+ continue;
+ if (a->cpr_rev != cpr_rev &&
+ a->cpr_rev != FUSE_PARAM_MATCH_ANY)
+ continue;
+ for (j = 0; j < drv->num_fuse_corners; j++) {
+ val = a->ring_osc_idx[j];
+ ro = drv->fuse_corners[j].ring_osc_idx;
+ if (val != ro && val != FUSE_PARAM_MATCH_ANY)
+ break;
+ }
+ if (j == drv->num_fuse_corners)
+ return a;
+ }
+
+ return NULL;
+}
+
+/*
+ * cpr_fuse_corner_init() - populate drv->fuse_corners from eFuse data.
+ *
+ * Determines each fuse corner's initial voltage (either from dedicated
+ * per-corner init-voltage fuses or from a PVS-bin lookup table), unpacks
+ * the target quotients, applies the optional conditional-min-voltage and
+ * "uplift" workarounds plus per-chip adjustments, attaches the ACC
+ * register sequences, and clamps every voltage into [min_uV, max_uV].
+ * Finally caches the overall ceiling in drv->ceiling_max.
+ */
+static void cpr_fuse_corner_init(struct cpr_drv *drv,
+ const struct cpr_desc *desc,
+ void __iomem *qfprom,
+ const struct cpr_fuse *fuses, u32 speed,
+ const struct corner_adjustment *adjustments,
+ const struct acc_desc *acc_desc)
+{
+ int i;
+ unsigned int idx = 0;
+ unsigned int step_volt;
+ int steps, step_size_uv;
+ const struct fuse_corner_data *fdata;
+ struct fuse_corner *fuse, *end, *prev;
+ const struct qfprom_offset *pvs_efuse;
+ const struct qfprom_offset *init_v_efuse;
+ const struct qfprom_offset *redun;
+ const struct fuse_conditional_min_volt *min_v;
+ const struct fuse_uplift_wa *up;
+ bool do_min_v = false, do_uplift = false;
+ const int *pvs_uV = NULL;
+ const int *adj_uV, *adj_quot, *adj_min, *min_diff_quot;
+ const int *step_quot;
+ int uV, diff;
+ u32 bits, bin;
+ u32 min_uV;
+ u8 expected;
+ const struct reg_sequence *accs;
+
+ /* Pick normal or override ACC settings based on the override fuse. */
+ redun = &acc_desc->override;
+ expected = acc_desc->override_value;
+ if (redun->width && cpr_read_efuse(qfprom, redun) == expected)
+ accs = acc_desc->override_settings;
+ else
+ accs = acc_desc->settings;
+
+ /* Figure out if we should apply workarounds */
+ min_v = desc->min_volt_fuse;
+ do_min_v = min_v &&
+ cpr_read_efuse(qfprom, &min_v->redundant) == min_v->expected;
+ if (do_min_v)
+ min_uV = min_v->min_uV;
+
+ /* Uplift workaround applies only when the min-voltage one does not. */
+ up = desc->uplift_wa;
+ if (!do_min_v && up)
+ if (cpr_read_efuse(qfprom, &up->redundant) == up->expected)
+ do_uplift = up->speed_bin == speed;
+
+ /* Optional per-chip adjustment tables; all NULL when not provided. */
+ adj_uV = adjustments ? adjustments->fuse_init_uV : NULL;
+ adj_quot = adjustments ? adjustments->fuse_quot : NULL;
+ adj_min = adjustments ? adjustments->fuse_quot_min : NULL;
+ min_diff_quot = adjustments ? adjustments->fuse_quot_diff : NULL;
+ fuse = drv->fuse_corners;
+ end = &fuse[drv->num_fuse_corners - 1];
+ fdata = desc->cpr_fuses.fuse_corner_data;
+ step_quot = desc->step_quot;
+
+ /*
+ * The initial voltage for each fuse corner may be determined by one of
+ * two ways. Either initial voltages are encoded for each fuse corner
+ * in a dedicated fuse per fuse corner (fuses::init_voltage), or we
+ * use the PVS bin fuse to use a table of initial voltages (pvs_uV).
+ */
+ if (fuses->init_voltage.width) {
+ //step_volt = regulator_get_linear_step(drv->vdd_apc);
+ step_volt = 12500; /* TODO: Replace with ^ when apc_reg ready */
+ step_size_uv = desc->cpr_fuses.init_voltage_step;
+ } else {
+ /* The PVS bin fuse itself may have a redundant copy. */
+ redun = &desc->pvs_fuses->redundant;
+ expected = desc->pvs_fuses->redundant_value;
+ if (redun->width)
+ idx = !!(cpr_read_efuse(qfprom, redun) == expected);
+
+ pvs_efuse = &desc->pvs_fuses->pvs_fuse[idx];
+ bin = cpr_read_efuse(qfprom, pvs_efuse);
+ pvs_uV = desc->pvs_fuses->pvs_bins[bin].uV;
+ }
+
+ /* Populate fuse_corner voltage and ring_osc_idx members */
+ prev = NULL;
+ for (i = 0; fuse <= end; fuse++, fuses++, i++) {
+ if (pvs_uV) {
+ uV = pvs_uV[i];
+ } else {
+ init_v_efuse = &fuses->init_voltage;
+ bits = cpr_read_efuse(qfprom, init_v_efuse);
+ /* Not two's complement.. instead highest bit is sign */
+ steps = bits & BIT(init_v_efuse->width - 1) ? -1 : 1;
+ steps *= bits & ~BIT(init_v_efuse->width - 1);
+
+ /* Round up to the regulator's step granularity. */
+ uV = fdata[i].ref_uV + steps * step_size_uv;
+ uV = DIV_ROUND_UP(uV, step_volt) * step_volt;
+ }
+
+ if (adj_uV)
+ uV += adj_uV[i];
+
+ fuse->min_uV = fdata[i].min_uV;
+ fuse->max_uV = fdata[i].max_uV;
+
+ /* Raise floor (and ceiling if needed) to the fused minimum. */
+ if (do_min_v) {
+ if (fuse->max_uV < min_uV) {
+ fuse->max_uV = min_uV;
+ fuse->min_uV = min_uV;
+ } else if (fuse->min_uV < min_uV) {
+ fuse->min_uV = min_uV;
+ }
+ }
+
+ fuse->uV = clamp(uV, fuse->min_uV, fuse->max_uV);
+
+ if (fuse == end) {
+ if (do_uplift) {
+ end->uV += up->uV;
+ end->uV = clamp(end->uV, 0, up->max_uV);
+ }
+ /*
+ * Allow the highest fuse corner's PVS voltage to
+ * define the ceiling voltage for that corner in order
+ * to support SoC's in which variable ceiling values
+ * are required.
+ */
+ end->max_uV = max(end->max_uV, end->uV);
+ }
+
+ /* Unpack the target quotient by scaling. */
+ fuse->quot = cpr_read_efuse(qfprom, &fuses->quotient);
+ fuse->quot *= fdata[i].quot_scale;
+ fuse->quot += fdata[i].quot_offset;
+
+ /*
+ * NOTE(review): 'prev' is only advanced inside this branch, so
+ * the minimum-quotient-difference enforcement never runs unless
+ * quotient adjustments are supplied — confirm this is intended.
+ */
+ if (adj_quot) {
+ fuse->quot += adj_quot[i];
+
+ if (prev && min_diff_quot) {
+ diff = min_diff_quot[i];
+ if (fuse->quot - prev->quot <= diff)
+ fuse->quot = prev->quot + adj_min[i];
+ }
+ prev = fuse;
+ }
+
+ if (do_uplift)
+ fuse->quot += up->quot[i];
+
+ /* Step quotient is selected by this corner's ring oscillator. */
+ fuse->step_quot = step_quot[fuse->ring_osc_idx];
+
+ /* Each fuse corner consumes a fixed slice of the ACC sequence. */
+ fuse->accs = accs;
+ fuse->num_accs = acc_desc->num_regs_per_fuse;
+ accs += acc_desc->num_regs_per_fuse;
+
+ fuse->vdd_mx_req = fdata[i].vdd_mx_req;
+ }
+
+ /*
+ * Restrict all fuse corner PVS voltages based upon per corner
+ * ceiling and floor voltages.
+ */
+ for (fuse = drv->fuse_corners, i = 0; fuse <= end; fuse++, i++) {
+ if (fuse->uV > fuse->max_uV)
+ fuse->uV = fuse->max_uV;
+ else if (fuse->uV < fuse->min_uV)
+ fuse->uV = fuse->min_uV;
+
+ pr_debug("fuse corner %d: [%d %d %d] RO%d quot %d squot %d\n", i,
+ fuse->min_uV, fuse->uV, fuse->max_uV,
+ fuse->ring_osc_idx, fuse->quot,
+ fuse->step_quot);
+ }
+
+ drv->ceiling_max = end->max_uV;
+}
+
+/*
+ * cpr_populate_opps() - add an OPP per planned corner for every CPU named
+ * in the "qcom,cpr-cpus" phandle list.
+ *
+ * The first listed CPU's device, its "cpu" supply regulator and its clock
+ * are cached in @drv so voltage/frequency changes can be tracked later.
+ * Returns 0 on success or a negative errno.
+ *
+ * Fix: the original dereferenced np->name in the error message AFTER
+ * dropping the node reference with of_node_put() — a use-after-put.
+ * The reference is now released only after the name has been printed.
+ */
+static int cpr_populate_opps(struct device_node *of_node, struct cpr_drv *drv,
+        const struct corner_data **plan)
+{
+    int i, j, ret, cpu;
+    struct device *cpu_dev;
+    struct device_node *np;
+    struct corner *corner;
+    const struct corner_data *p;
+
+    for (i = 0; (np = of_parse_phandle(of_node, "qcom,cpr-cpus", i)); i++) {
+        for_each_possible_cpu(cpu)
+            if (arch_find_n_match_cpu_physical_id(np, cpu, NULL))
+                break;
+
+        if (cpu >= nr_cpu_ids) {
+            /* Print before dropping the reference; np->name must
+             * not be read after of_node_put(). */
+            pr_err("Failed to find logical CPU for %s\n", np->name);
+            of_node_put(np);
+            return -EINVAL;
+        }
+        of_node_put(np);
+
+        cpu_dev = get_cpu_device(cpu);
+        if (!cpu_dev)
+            return -EINVAL;
+
+        /*
+         * Keep cpu_dev and its regulator and clock for monitoring
+         * voltage changes and updating OPPs later.
+         */
+        if (i == 0) {
+            drv->cpu_dev = cpu_dev;
+            drv->vdd_apc = devm_regulator_get(cpu_dev, "cpu");
+            if (IS_ERR(drv->vdd_apc))
+                return PTR_ERR(drv->vdd_apc);
+            drv->cpu_clk = devm_clk_get(cpu_dev, NULL);
+            if (IS_ERR(drv->cpu_clk))
+                return PTR_ERR(drv->cpu_clk);
+        }
+
+        /* One OPP per corner in the selected frequency plan. */
+        for (j = 0, corner = drv->corners; plan[j]; j++, corner++) {
+            p = plan[j];
+            ret = dev_pm_opp_add(cpu_dev, p->freq, corner->uV);
+            if (ret)
+                return ret;
+            corner->freq = p->freq;
+        }
+    }
+
+    return 0;
+}
+
+/*
+ * find_freq_plan() - look up the frequency plan matching the fused
+ * speed bin and PVS version.
+ *
+ * A plan entry may use FUSE_PARAM_MATCH_ANY as a wildcard for either
+ * key.  Returns the matching plan's corner list, or NULL when the part
+ * has no supported plan.
+ */
+static const struct corner_data **
+find_freq_plan(const struct cpr_desc *desc, u32 speed_bin, u32 pvs_version)
+{
+    const struct freq_plan *plan = desc->freq_plans;
+    const struct freq_plan *last = plan + desc->num_freq_plans;
+    bool bin_ok, pvs_ok;
+
+    for (; plan < last; plan++) {
+        bin_ok = plan->speed_bin == speed_bin ||
+             plan->speed_bin == FUSE_PARAM_MATCH_ANY;
+        pvs_ok = plan->pvs_version == pvs_version ||
+             plan->pvs_version == FUSE_PARAM_MATCH_ANY;
+        if (bin_ok && pvs_ok)
+            return plan->plan;
+    }
+
+    return NULL;
+}
+
+/*
+ * find_corner_override() - find per-corner ceiling/floor overrides for
+ * the fused speed bin and PVS version, honouring FUSE_PARAM_MATCH_ANY
+ * wildcards.  Returns NULL when no override table applies.
+ */
+static struct corner_override *find_corner_override(const struct cpr_desc *desc,
+        u32 speed_bin, u32 pvs_version)
+{
+    struct corner_override *ovr = desc->corner_overrides;
+    struct corner_override *last = ovr + desc->num_corner_overrides;
+
+    while (ovr < last) {
+        if ((ovr->speed_bin == speed_bin ||
+             ovr->speed_bin == FUSE_PARAM_MATCH_ANY) &&
+            (ovr->pvs_version == pvs_version ||
+             ovr->pvs_version == FUSE_PARAM_MATCH_ANY))
+            return ovr;
+        ovr++;
+    }
+
+    return NULL;
+}
+
+/*
+ * cpr_corner_init() - populate drv->corners from the fuse corners and the
+ * selected frequency plan.
+ *
+ * Each (virtual) corner maps to one fuse corner; corners below a fuse
+ * corner's maximum frequency get interpolated quotient and voltage
+ * adjustments ("scaling").  Per-corner overrides and per-chip adjustments
+ * are applied last, and each corner's voltage is clamped into its
+ * [min_uV, max_uV] window.
+ */
+static void cpr_corner_init(struct cpr_drv *drv, const struct cpr_desc *desc,
+ const struct cpr_fuse *fuses, u32 speed_bin,
+ u32 pvs_version, void __iomem *qfprom,
+ const struct corner_adjustment *adjustments,
+ const struct corner_data **plan)
+{
+ int i, fnum, quot_diff, scaling;
+ struct fuse_corner *fuse, *prev_fuse;
+ struct corner *corner, *end;
+ const struct corner_data *cdata, *p;
+ const struct fuse_corner_data *fdata;
+ bool apply_scaling = false;
+ const int *adj_quot, *adj_volt, *adj_quot_offset;
+ const struct qfprom_offset *quot_offset;
+ unsigned long freq_corner, freq_diff, freq_diff_mhz;
+ unsigned long freq_high, freq_low;
+ int volt_high;
+ u64 temp, temp_limit;
+ int step_volt = 12500; /* TODO: Get from regulator APIs */
+ const struct corner_override *override;
+
+ corner = drv->corners;
+ end = &corner[drv->num_corners - 1];
+ cdata = desc->corner_data;
+ fdata = desc->cpr_fuses.fuse_corner_data;
+ adj_quot = adjustments ? adjustments->quot : NULL;
+ adj_volt = adjustments ? adjustments->init_uV : NULL;
+ adj_quot_offset = adjustments ? adjustments->fuse_quot_offset : NULL;
+
+ override = find_corner_override(desc, speed_bin, pvs_version);
+
+ /*
+ * Store maximum frequency for each fuse corner based on the frequency
+ * plan
+ */
+ for (i = 0; plan[i]; i++) {
+ p = plan[i];
+ freq_corner = p->freq;
+ fnum = p->fuse_corner;
+ fuse = &drv->fuse_corners[fnum];
+ if (freq_corner > fuse->max_freq)
+ fuse->max_freq = freq_corner;
+
+ }
+
+ /*
+ * Get the quotient adjustment scaling factor, according to:
+ *
+ * scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1))
+ * / (freq(corner_N) - freq(corner_N-1)), max_factor)
+ *
+ * QUOT(corner_N): quotient read from fuse for fuse corner N
+ * QUOT(corner_N-1): quotient read from fuse for fuse corner (N - 1)
+ * freq(corner_N): max frequency in MHz supported by fuse corner N
+ * freq(corner_N-1): max frequency in MHz supported by fuse corner
+ * (N - 1)
+ *
+ * Then walk through the corners mapped to each fuse corner
+ * and calculate the quotient adjustment for each one using the
+ * following formula:
+ *
+ * quot_adjust = (freq_max - freq_corner) * scaling / 1000
+ *
+ * freq_max: max frequency in MHz supported by the fuse corner
+ * freq_corner: frequency in MHz corresponding to the corner
+ * scaling: calculated from above equation
+ *
+ *
+ * + +
+ * | v |
+ * q | f c o | f c
+ * u | c l | c
+ * o | f t | f
+ * t | c a | c
+ * | c f g | c f
+ * | e |
+ * +--------------- +----------------
+ * 0 1 2 3 4 5 6 0 1 2 3 4 5 6
+ * corner corner
+ *
+ * c = corner
+ * f = fuse corner
+ *
+ */
+ for (apply_scaling = false, i = 0; corner <= end; corner++, i++) {
+ freq_corner = cdata[i].freq;
+ fnum = cdata[i].fuse_corner;
+ fuse = &drv->fuse_corners[fnum];
+ if (fnum)
+ prev_fuse = &drv->fuse_corners[fnum - 1];
+ else
+ prev_fuse = NULL;
+
+ corner->fuse_corner = fuse;
+ corner->uV = fuse->uV;
+ /* First corner of a new fuse corner: derive the scaling factor. */
+ if (prev_fuse && cdata[i - 1].freq == prev_fuse->max_freq) {
+ quot_offset = &fuses[fnum].quotient_offset;
+ if (quot_offset->width) {
+ quot_diff = cpr_read_efuse(qfprom, quot_offset);
+ /*
+ * NOTE(review): fdata->quot_scale uses fuse corner 0's
+ * scale; surrounding code indexes fdata[fnum] — verify
+ * this should be fdata[fnum].quot_scale.
+ */
+ quot_diff *= fdata->quot_scale;
+ if (adj_quot_offset)
+ quot_diff += adj_quot_offset[fnum];
+ } else {
+ quot_diff = fuse->quot - prev_fuse->quot;
+ }
+
+ freq_diff = fuse->max_freq - prev_fuse->max_freq;
+ freq_diff /= 1000000; /* Convert to MHz */
+ scaling = 1000 * quot_diff / freq_diff;
+ scaling = min(scaling, fdata[fnum].max_quot_scale);
+
+ apply_scaling = true;
+ } else if (freq_corner == fuse->max_freq) {
+ /* This is a fuse corner; don't scale anything */
+ apply_scaling = false;
+ }
+
+ if (apply_scaling) {
+ freq_diff = fuse->max_freq - freq_corner;
+ freq_diff_mhz = freq_diff / 1000000;
+ corner->quot_adjust = scaling * freq_diff_mhz / 1000;
+
+ /*
+ * NOTE(review): freq_high and freq_low are both set to
+ * fuse->max_freq, so do_div() below divides by zero on
+ * every scaled corner.  The disabled guard below refers
+ * to volt_low — presumably freq_low should come from the
+ * previous fuse corner (prev_fuse->max_freq, matching the
+ * prev_fuse->uV used as the low voltage).  Must be fixed
+ * before this path can run.
+ */
+ freq_high = fuse->max_freq;
+ freq_low = fuse->max_freq;
+ volt_high = fuse->uV;
+
+ /*
+ if (freq_high > freq_low && volt_high > volt_low &&
+ freq_high > freq_corner)
+ */
+
+ temp = freq_diff * (fuse->uV - prev_fuse->uV);
+ do_div(temp, freq_high - freq_low);
+
+ /*
+ * max_volt_scale has units of uV/MHz while freq values
+ * have units of Hz. Divide by 1000000 to convert to.
+ */
+ temp_limit = freq_diff * fdata[fnum].max_volt_scale;
+ do_div(temp_limit, 1000000);
+
+ corner->uV = volt_high - min(temp, temp_limit);
+ corner->uV = roundup(corner->uV, step_volt);
+ }
+
+ if (adj_quot)
+ corner->quot_adjust -= adj_quot[i];
+
+ if (adj_volt)
+ corner->uV += adj_volt[i];
+
+ /* Load per corner ceiling and floor voltages if they exist. */
+ if (override) {
+ corner->max_uV = override->max_uV[i];
+ corner->min_uV = override->min_uV[i];
+ } else {
+ corner->max_uV = fuse->max_uV;
+ corner->min_uV = fuse->min_uV;
+ }
+
+ if (drv->ceiling_max < corner->max_uV)
+ drv->ceiling_max = corner->max_uV;
+
+ corner->uV = clamp(corner->uV, corner->min_uV, corner->max_uV);
+ corner->last_uV = corner->uV;
+
+ /* Reduce the ceiling voltage if needed */
+ if (desc->reduce_to_corner_uV && corner->uV < corner->max_uV)
+ corner->max_uV = corner->uV;
+ else if (desc->reduce_to_fuse_uV && fuse->uV < corner->max_uV)
+ corner->max_uV = max(corner->min_uV, fuse->uV);
+
+ pr_debug("corner %d: [%d %d %d] quot %d\n", i,
+ corner->min_uV, corner->uV, corner->max_uV,
+ fuse->quot - corner->quot_adjust);
+ }
+}
+
+/*
+ * cpr_get_fuses() - select the primary or redundant CPR fuse set.
+ *
+ * When the redundancy selector fuse is present and reads the expected
+ * value, the redundant set (stored immediately after the primary
+ * per-fuse-corner entries) is used instead of the primary one.
+ */
+static const struct cpr_fuse *
+cpr_get_fuses(const struct cpr_desc *desc, void __iomem *qfprom)
+{
+    const struct qfprom_offset *sel = &desc->cpr_fuses.redundant;
+    unsigned int redundant = 0;
+
+    if (sel->width &&
+        cpr_read_efuse(qfprom, sel) == desc->cpr_fuses.redundant_value)
+        redundant = 1;
+
+    return &desc->cpr_fuses.cpr_fuse[redundant * desc->num_fuse_corners];
+}
+
+/*
+ * cpr_is_close_loop_disabled() - decide whether closed-loop CPR must be
+ * kept off.
+ *
+ * Closed loop stays disabled when a per-chip adjustment says so, when
+ * the dedicated disable fuse is blown, or when no quotient-offset fuse
+ * exists and the two highest fuse corners' target quotients differ by
+ * less than desc->min_diff_quot.
+ */
+static bool cpr_is_close_loop_disabled(struct cpr_drv *drv,
+        const struct cpr_desc *desc, void __iomem *qfprom,
+        const struct cpr_fuse *fuses,
+        const struct corner_adjustment *adj)
+{
+    const struct qfprom_offset *disable;
+    unsigned int set;
+    struct fuse_corner *top, *below;
+
+    if (adj && adj->disable_closed_loop)
+        return true;
+
+    if (!desc->cpr_fuses.disable)
+        return false;
+
+    /*
+     * Select the disable fuse matching the fuse set in use; comparing
+     * pointers avoids re-reading the redundancy selector fuse.
+     */
+    set = (fuses == desc->cpr_fuses.cpr_fuse) ? 1 : 0;
+    disable = &desc->cpr_fuses.disable[set];
+    if (cpr_read_efuse(qfprom, disable))
+        return true;
+
+    if (fuses->quotient_offset.width)
+        return false;
+
+    /*
+     * No quotient-offset fuse: check whether the target quotients for
+     * the highest two fuse corners are too close together.
+     */
+    top = &drv->fuse_corners[drv->num_fuse_corners - 1];
+    below = top - 1;
+
+    return top->quot - below->quot < desc->min_diff_quot;
+}
+
+/*
+ * cpr_init_parameters() - read CPR loop tuning parameters from DT.
+ *
+ * All "qcom,cpr-*" properties below are mandatory except
+ * "qcom,cpr-clamp-timer-interval" (absence means 0).  Each value is
+ * masked/clamped to the width of its hardware register field.
+ * Returns 0 on success or the of_property_read_u32() error.
+ *
+ * Fix: the result of of_property_read_u32() is now checked BEFORE the
+ * mask is applied for every field (the original masked up_threshold,
+ * down_threshold and idle_clocks before checking for a read error,
+ * inconsistently with timer_cons_down).
+ */
+static int cpr_init_parameters(struct platform_device *pdev,
+        struct cpr_drv *drv)
+{
+    struct device_node *of_node = pdev->dev.of_node;
+    int ret;
+
+    ret = of_property_read_u32(of_node, "qcom,cpr-ref-clk",
+            &drv->ref_clk_khz);
+    if (ret)
+        return ret;
+    ret = of_property_read_u32(of_node, "qcom,cpr-timer-delay-us",
+            &drv->timer_delay_us);
+    if (ret)
+        return ret;
+    ret = of_property_read_u32(of_node, "qcom,cpr-timer-cons-up",
+            &drv->timer_cons_up);
+    if (ret)
+        return ret;
+    ret = of_property_read_u32(of_node, "qcom,cpr-timer-cons-down",
+            &drv->timer_cons_down);
+    if (ret)
+        return ret;
+    drv->timer_cons_down &= RBIF_TIMER_ADJ_CONS_DOWN_MASK;
+
+    ret = of_property_read_u32(of_node, "qcom,cpr-up-threshold",
+            &drv->up_threshold);
+    if (ret)
+        return ret;
+    drv->up_threshold &= RBCPR_CTL_UP_THRESHOLD_MASK;
+
+    ret = of_property_read_u32(of_node, "qcom,cpr-down-threshold",
+            &drv->down_threshold);
+    if (ret)
+        return ret;
+    drv->down_threshold &= RBCPR_CTL_DN_THRESHOLD_MASK;
+
+    ret = of_property_read_u32(of_node, "qcom,cpr-idle-clocks",
+            &drv->idle_clocks);
+    if (ret)
+        return ret;
+    drv->idle_clocks &= RBCPR_STEP_QUOT_IDLE_CLK_MASK;
+
+    ret = of_property_read_u32(of_node, "qcom,cpr-gcnt-us", &drv->gcnt_us);
+    if (ret)
+        return ret;
+    ret = of_property_read_u32(of_node, "qcom,vdd-apc-step-up-limit",
+            &drv->vdd_apc_step_up_limit);
+    if (ret)
+        return ret;
+    ret = of_property_read_u32(of_node, "qcom,vdd-apc-step-down-limit",
+            &drv->vdd_apc_step_down_limit);
+    if (ret)
+        return ret;
+
+    /*
+     * Optional property: -EINVAL means it is absent and the zeroed
+     * default (drv is kzalloc'd) is kept; any other error is fatal.
+     */
+    ret = of_property_read_u32(of_node, "qcom,cpr-clamp-timer-interval",
+            &drv->clamp_timer_interval);
+    if (ret && ret != -EINVAL)
+        return ret;
+
+    drv->clamp_timer_interval = min_t(unsigned int,
+            drv->clamp_timer_interval,
+            RBIF_TIMER_ADJ_CLAMP_INT_MASK);
+
+    pr_debug("up threshold = %u, down threshold = %u\n",
+            drv->up_threshold, drv->down_threshold);
+
+    return 0;
+}
+
+/*
+ * cpr_init_and_enable_corner() - point drv->corner at the corner whose
+ * frequency matches the current CPU clock rate, then enable CPR.
+ * Returns -EINVAL when the current rate maps to no known corner.
+ */
+static int cpr_init_and_enable_corner(struct cpr_drv *drv)
+{
+    unsigned long cur_rate = clk_get_rate(drv->cpu_clk);
+    struct corner *c = drv->corners;
+    struct corner *last = &drv->corners[drv->num_corners - 1];
+
+    while (c <= last && c->freq != cur_rate)
+        c++;
+    drv->corner = c;
+
+    if (c > last)
+        return -EINVAL;
+
+    return cpr_enable(drv);
+}
+
+/*
+ * MSM8916 corner table: each entry maps one virtual corner to its fuse
+ * corner and CPU frequency in Hz; indexed by corner number.
+ */
+static struct corner_data msm8916_corner_data[] = {
+ /* [corner] -> { fuse corner, freq } */
+ { 0, 200000000 },
+ { 0, 400000000 },
+ { 1, 533330000 },
+ { 1, 800000000 },
+ { 2, 998400000 },
+ { 2, 1094400000 },
+ { 2, 1152000000 },
+ { 2, 1209600000 },
+ { 2, 1363200000 },
+};
+
+/*
+ * CPR description for MSM8916: three fuse corners, the eFuse locations of
+ * their CPR fuses, and three speed-bin/PVS-version dependent frequency
+ * plans over msm8916_corner_data.
+ *
+ * NOTE(review): the { a, b, c } triples below are qfprom_offset values —
+ * presumably { offset, bit-width, shift }; confirm field order against
+ * struct qfprom_offset before editing.
+ */
+static const struct cpr_desc msm8916_desc = {
+ .num_fuse_corners = 3,
+ .vdd_mx_vmin_method = VDD_MX_VMIN_APC_CORNER_MAP,
+ .min_diff_quot = CPR_FUSE_MIN_QUOT_DIFF,
+ .step_quot = (int []){ 26, 26, 26, 26, 26, 26, 26, 26 },
+ .cpr_fuses = {
+ .init_voltage_step = 10000,
+ .fuse_corner_data = (struct fuse_corner_data[]){
+ /* ref_uV max_uV min_uV max_q q_off q_scl v_scl mx */
+ { 1050000, 1050000, 1050000, 0, 0, 1, 0, 3 },
+ { 1150000, 1150000, 1050000, 0, 0, 1, 0, 4 },
+ { 1350000, 1350000, 1162500, 650, 0, 1, 0, 6 },
+ },
+ /* One entry per fuse corner (lowest to highest). */
+ .cpr_fuse = (struct cpr_fuse[]){
+ {
+ .ring_osc = { 222, 3, 6},
+ .init_voltage = { 220, 6, 2 },
+ .quotient = { 221, 12, 2 },
+ },
+ {
+ .ring_osc = { 222, 3, 6},
+ .init_voltage = { 218, 6, 2 },
+ .quotient = { 219, 12, 0 },
+ },
+ {
+ .ring_osc = { 222, 3, 6},
+ .init_voltage = { 216, 6, 0 },
+ .quotient = { 216, 12, 6 },
+ },
+ },
+ .disable = &(struct qfprom_offset){ 223, 1, 1 },
+ },
+ .speed_bin = { 12, 3, 2 },
+ .pvs_version = { 6, 2, 7 },
+ .corner_data = msm8916_corner_data,
+ .num_corners = ARRAY_SIZE(msm8916_corner_data),
+ .num_freq_plans = 3,
+ /* Plans are NULL-terminated corner lists selected by bin/PVS. */
+ .freq_plans = (struct freq_plan[]){
+ {
+ .speed_bin = 0,
+ .pvs_version = 0,
+ .plan = (const struct corner_data* []){
+ msm8916_corner_data + 0,
+ msm8916_corner_data + 1,
+ msm8916_corner_data + 2,
+ msm8916_corner_data + 3,
+ msm8916_corner_data + 4,
+ msm8916_corner_data + 5,
+ msm8916_corner_data + 6,
+ msm8916_corner_data + 7,
+ NULL
+ },
+ },
+ {
+ .speed_bin = 0,
+ .pvs_version = 1,
+ .plan = (const struct corner_data* []){
+ msm8916_corner_data + 0,
+ msm8916_corner_data + 1,
+ msm8916_corner_data + 2,
+ msm8916_corner_data + 3,
+ msm8916_corner_data + 4,
+ msm8916_corner_data + 5,
+ msm8916_corner_data + 6,
+ msm8916_corner_data + 7,
+ NULL
+ },
+ },
+ {
+ .speed_bin = 2,
+ .pvs_version = 0,
+ .plan = (const struct corner_data* []){
+ msm8916_corner_data + 0,
+ msm8916_corner_data + 1,
+ msm8916_corner_data + 2,
+ msm8916_corner_data + 3,
+ msm8916_corner_data + 4,
+ msm8916_corner_data + 5,
+ msm8916_corner_data + 6,
+ msm8916_corner_data + 7,
+ msm8916_corner_data + 8,
+ NULL
+ },
+ },
+ },
+};
+
+/*
+ * ACC settings for MSM8916: one register write per fuse corner
+ * (num_regs_per_fuse = 1), with an alternate table used when the
+ * override fuse reads override_value.
+ */
+static const struct acc_desc msm8916_acc_desc = {
+ .settings = (struct reg_sequence[]){
+ { 0xf000, 0 },
+ { 0xf000, 0x100 },
+ { 0xf000, 0x101 }
+ },
+ .override_settings = (struct reg_sequence[]){
+ { 0xf000, 0 },
+ { 0xf000, 0x100 },
+ { 0xf000, 0x100 }
+ },
+ .num_regs_per_fuse = 1,
+ .override = { 6, 1, 4 },
+ .override_value = 1,
+};
+
+/* Per-SoC CPR descriptor, keyed off the eeprom (qfprom) node. */
+static const struct of_device_id cpr_descs[] = {
+ { .compatible = "qcom,qfprom-msm8916", .data = &msm8916_desc },
+ { }
+};
+
+/* Per-SoC ACC descriptor, keyed off the acc-syscon (TCSR) node. */
+static const struct of_device_id acc_descs[] = {
+ { .compatible = "qcom,tcsr-msm8916", .data = &msm8916_acc_desc },
+ { }
+};
+
+/*
+ * cpr_probe() - probe the CPR controller.
+ *
+ * Resolves the per-SoC fuse (eeprom) and ACC (acc-syscon) descriptors,
+ * reads the speed-bin/PVS/revision fuses, builds the fuse-corner and
+ * corner tables, registers OPPs, configures (but does not yet run) the
+ * CPR hardware, wires up the regulator and cpufreq notifiers, and
+ * finally spawns the cpufreq-dt platform device.
+ *
+ * Fixes: both of_parse_phandle() references are now released with
+ * of_node_put() (the original leaked them, including on error paths),
+ * and platform_get_irq() failures propagate the real errno (preserving
+ * -EPROBE_DEFER) instead of a hard-coded -EINVAL.
+ */
+static int cpr_probe(struct platform_device *pdev)
+{
+    struct resource *res;
+    struct device *dev = &pdev->dev;
+    struct cpr_drv *drv;
+    const struct cpr_fuse *cpr_fuses;
+    const struct corner_adjustment *adj;
+    const struct corner_data **plan;
+    size_t len;
+    int irq, ret;
+    const struct cpr_desc *desc;
+    const struct acc_desc *acc_desc;
+    const struct of_device_id *match;
+    struct device_node *np;
+    void __iomem *qfprom;
+    u32 cpr_rev = FUSE_REVISION_UNKNOWN;
+    u32 speed_bin = SPEED_BIN_NONE;
+    u32 pvs_version = 0;
+    struct platform_device_info devinfo = { .name = "cpufreq-dt", };
+
+    np = of_parse_phandle(dev->of_node, "eeprom", 0);
+    if (!np)
+        return -ENODEV;
+
+    match = of_match_node(cpr_descs, np);
+    of_node_put(np); /* match->data is static; drop the reference now */
+    if (!match)
+        return -EINVAL;
+    desc = match->data;
+
+    /* TODO: Get from eeprom API */
+    qfprom = devm_ioremap(dev, 0x58000, 0x7000);
+    if (!qfprom)
+        return -ENOMEM;
+
+    /* Single allocation: drv, then fuse_corners[], then corners[]. */
+    len = sizeof(*drv) +
+        sizeof(*drv->fuse_corners) * desc->num_fuse_corners +
+        sizeof(*drv->corners) * desc->num_corners;
+
+    drv = devm_kzalloc(dev, len, GFP_KERNEL);
+    if (!drv)
+        return -ENOMEM;
+
+    np = of_parse_phandle(dev->of_node, "acc-syscon", 0);
+    if (!np)
+        return -ENODEV;
+
+    match = of_match_node(acc_descs, np);
+    if (!match) {
+        of_node_put(np);
+        return -EINVAL;
+    }
+
+    acc_desc = match->data;
+    drv->tcsr = syscon_node_to_regmap(np);
+    of_node_put(np); /* the regmap holds what it needs */
+    if (IS_ERR(drv->tcsr))
+        return PTR_ERR(drv->tcsr);
+
+    drv->num_fuse_corners = desc->num_fuse_corners;
+    drv->num_corners = desc->num_corners;
+    drv->fuse_corners = (struct fuse_corner *)(drv + 1);
+    drv->corners = (struct corner *)(drv->fuse_corners +
+            drv->num_fuse_corners);
+    mutex_init(&drv->lock);
+
+    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+    drv->base = devm_ioremap_resource(&pdev->dev, res);
+    if (IS_ERR(drv->base))
+        return PTR_ERR(drv->base);
+
+    irq = platform_get_irq(pdev, 0);
+    if (irq < 0)
+        return irq; /* propagate errno, incl. -EPROBE_DEFER */
+
+    drv->vdd_mx = devm_regulator_get(dev, "vdd-mx");
+    if (IS_ERR(drv->vdd_mx))
+        return PTR_ERR(drv->vdd_mx);
+
+    drv->vdd_mx_vmin_method = desc->vdd_mx_vmin_method;
+    drv->vdd_mx_vmax = desc->vdd_mx_vmax;
+
+    /* Fuses that select per-chip tables; all optional. */
+    if (desc->fuse_revision.width)
+        cpr_rev = cpr_read_efuse(qfprom, &desc->fuse_revision);
+    if (desc->speed_bin.width)
+        speed_bin = cpr_read_efuse(qfprom, &desc->speed_bin);
+    if (desc->pvs_version.width)
+        pvs_version = cpr_read_efuse(qfprom, &desc->pvs_version);
+
+    plan = find_freq_plan(desc, speed_bin, pvs_version);
+    if (!plan)
+        return -EINVAL;
+
+    cpr_fuses = cpr_get_fuses(desc, qfprom);
+    cpr_populate_ring_osc_idx(cpr_fuses, drv, qfprom);
+
+    adj = cpr_find_adjustment(speed_bin, pvs_version, cpr_rev, desc, drv);
+
+    cpr_fuse_corner_init(drv, desc, qfprom, cpr_fuses, speed_bin, adj,
+            acc_desc);
+    cpr_corner_init(drv, desc, cpr_fuses, speed_bin, pvs_version, qfprom,
+            adj, plan);
+
+    ret = cpr_populate_opps(dev->of_node, drv, plan);
+    if (ret)
+        return ret;
+
+    drv->loop_disabled = cpr_is_close_loop_disabled(drv, desc, qfprom,
+            cpr_fuses, adj);
+    pr_info("CPR closed loop is %sabled\n",
+            drv->loop_disabled ? "dis" : "en");
+
+    ret = cpr_init_parameters(pdev, drv);
+    if (ret)
+        return ret;
+
+    /* Configure CPR HW but keep it disabled */
+    ret = cpr_config(drv);
+    if (ret)
+        return ret;
+
+    /* Enable ACC if required */
+    if (acc_desc->enable_mask)
+        regmap_update_bits(drv->tcsr, acc_desc->enable_reg,
+                acc_desc->enable_mask,
+                acc_desc->enable_mask);
+
+    ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+            cpr_irq_handler, IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+            "cpr", drv);
+    if (ret)
+        return ret;
+
+    ret = cpr_init_and_enable_corner(drv);
+    if (ret)
+        return ret;
+
+    drv->reg_nb.notifier_call = cpr_regulator_notifier;
+    ret = regulator_register_notifier(drv->vdd_apc, &drv->reg_nb);
+    if (ret)
+        return ret;
+
+    drv->cpufreq_nb.notifier_call = cpr_cpufreq_notifier;
+    ret = cpufreq_register_notifier(&drv->cpufreq_nb,
+            CPUFREQ_TRANSITION_NOTIFIER);
+    if (ret) {
+        regulator_unregister_notifier(drv->vdd_apc, &drv->reg_nb);
+        return ret;
+    }
+
+    /*
+     * Ensure that enable state accurately reflects the case in which CPR
+     * is permanently disabled.
+     */
+    //cpr_vreg->enable &= !cpr_vreg->loop_disabled;
+
+    platform_set_drvdata(pdev, drv);
+
+    return PTR_ERR_OR_ZERO(platform_device_register_full(&devinfo));
+}
+
+/* Quiesce the CPR loop and mask its interrupts on driver removal. */
+static int cpr_remove(struct platform_device *pdev)
+{
+    struct cpr_drv *drv = platform_get_drvdata(pdev);
+
+    if (!cpr_is_allowed(drv))
+        return 0;
+
+    cpr_ctl_disable(drv);
+    cpr_irq_set(drv, 0);
+
+    return 0;
+}
+
+/* OF match for the CPR controller node itself. */
+static const struct of_device_id cpr_match_table[] = {
+ { .compatible = "qcom,cpr" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cpr_match_table);
+
+/* Platform driver glue; cpr_pm_ops is defined earlier in this file. */
+static struct platform_driver cpr_driver = {
+ .probe = cpr_probe,
+ .remove = cpr_remove,
+ .driver = {
+ .name = "qcom-cpr",
+ .of_match_table = cpr_match_table,
+ .pm = &cpr_pm_ops,
+ },
+};
+module_platform_driver(cpr_driver);
+
+MODULE_DESCRIPTION("Core Power Reduction (CPR) driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-cpr");
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 56a17ec5b5efe..f4874e64e4ec1 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -20,6 +20,9 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/soc/qcom/smd-rpm.h>
+#include <linux/regulator/qcom_smd-regulator.h>
+
+#include "internal.h"
struct qcom_rpm_reg {
struct device *dev;
@@ -44,6 +47,11 @@ struct rpm_regulator_req {
#define RPM_KEY_SWEN 0x6e657773 /* "swen" */
#define RPM_KEY_UV 0x00007675 /* "uv" */
#define RPM_KEY_MA 0x0000616d /* "ma" */
+#define RPM_KEY_FLOOR 0x00636676 /* "vfc" */
+#define RPM_KEY_CORNER 0x6e726f63 /* "corn" */
+
+#define RPM_MIN_FLOOR_CORNER 0
+#define RPM_MAX_FLOOR_CORNER 6
static int rpm_reg_write_active(struct qcom_rpm_reg *vreg,
struct rpm_regulator_req *req,
@@ -56,6 +64,51 @@ static int rpm_reg_write_active(struct qcom_rpm_reg *vreg,
req, size);
}
+/*
+ * qcom_rpm_set_floor() - request a minimum ("floor") voltage corner from
+ * the RPM for the given regulator.
+ *
+ * @floor must lie within [RPM_MIN_FLOOR_CORNER, RPM_MAX_FLOOR_CORNER];
+ * out-of-range values return -EINVAL.  Returns 0 on success or a
+ * negative errno from the RPM write.
+ */
+int qcom_rpm_set_floor(struct regulator *regulator, int floor)
+{
+    struct regulator_dev *rdev = regulator->rdev;
+    struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+    struct rpm_regulator_req req;
+    int ret;
+
+    /* Reject out-of-range requests before touching the RPM. */
+    if (floor < RPM_MIN_FLOOR_CORNER || floor > RPM_MAX_FLOOR_CORNER)
+        return -EINVAL;
+
+    req.key = RPM_KEY_FLOOR;
+    req.nbytes = sizeof(u32);
+    req.value = floor;
+
+    ret = rpm_reg_write_active(vreg, &req, sizeof(req));
+    if (ret)
+        dev_err(rdev_get_dev(rdev), "Failed to set floor %d\n", floor);
+
+    return ret;
+}
+EXPORT_SYMBOL(qcom_rpm_set_floor);
+
+/*
+ * qcom_rpm_set_corner() - request a specific voltage corner from the RPM
+ * for the given regulator.
+ *
+ * @corner must lie within [RPM_MIN_FLOOR_CORNER, RPM_MAX_FLOOR_CORNER];
+ * out-of-range values return -EINVAL without contacting the RPM.
+ * Returns 0 on success or a negative errno from the RPM write.
+ *
+ * Fixes: validate @corner before building the request, and log at debug
+ * level — corner changes happen on every cpufreq transition, so the
+ * original pr_info() flooded the kernel log.
+ */
+int qcom_rpm_set_corner(struct regulator *regulator, int corner)
+{
+    struct regulator_dev *rdev = regulator->rdev;
+    struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+    struct rpm_regulator_req req;
+    int ret;
+
+    if (corner < RPM_MIN_FLOOR_CORNER || corner > RPM_MAX_FLOOR_CORNER)
+        return -EINVAL;
+
+    req.key = RPM_KEY_CORNER;
+    req.nbytes = sizeof(u32);
+    req.value = corner;
+
+    pr_debug("Set corner to %d\n", corner);
+    ret = rpm_reg_write_active(vreg, &req, sizeof(req));
+    if (ret)
+        dev_err(rdev_get_dev(rdev), "Failed to set corner %d\n", corner);
+
+    return ret;
+}
+EXPORT_SYMBOL(qcom_rpm_set_corner);
+
static int rpm_reg_enable(struct regulator_dev *rdev)
{
struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
@@ -291,6 +344,55 @@ static const struct regulator_desc pm8916_buck_hvo_smps = {
.ops = &rpm_smps_ldo_ops,
};
+/* PM8994 high-frequency SMPS: two linear voltage ranges. */
+static const struct regulator_desc pm8994_hfsmps = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE( 375000, 0, 95, 12500),
+ REGULATOR_LINEAR_RANGE(1550000, 96, 158, 25000),
+ },
+ .n_linear_ranges = 2,
+ .n_voltages = 159,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+/* PM8994 fast-transient SMPS: finer 5/10 mV steps. */
+static const struct regulator_desc pm8994_ftsmps = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(350000, 0, 199, 5000),
+ REGULATOR_LINEAR_RANGE(700000, 200, 349, 10000),
+ },
+ .n_linear_ranges = 2,
+ .n_voltages = 350,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+/* PM8994 N-channel LDO: single 12.5 mV-step range. */
+static const struct regulator_desc pm8994_nldo = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(750000, 0, 63, 12500),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 64,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+/* PM8994 P-channel LDO: two linear ranges with 25/50 mV steps. */
+static const struct regulator_desc pm8994_pldo = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE( 750000, 0, 30, 25000),
+ REGULATOR_LINEAR_RANGE(1500000, 31, 99, 50000),
+ },
+ .n_linear_ranges = 2,
+ .n_voltages = 100,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+/* PM8994 load switch: on/off only, no voltage control. */
+static const struct regulator_desc pm8994_switch = {
+ .ops = &rpm_switch_ops,
+};
+
+/* PM8994 low-noise LDO: fixed 1.74 V output. */
+static const struct regulator_desc pm8994_lnldo = {
+ .fixed_uV = 1740000,
+ .n_voltages = 1,
+ .ops = &rpm_smps_ldo_ops,
+};
+
struct rpm_regulator_data {
const char *name;
u32 type;
@@ -429,10 +531,61 @@ static const struct rpm_regulator_data rpm_pma8084_regulators[] = {
{}
};
+/*
+ * PM8994 regulators exposed over SMD RPM: name, RPM resource type and id,
+ * voltage descriptor, and parent supply name (shared rails repeat the
+ * same supply string).
+ */
+static const struct rpm_regulator_data rpm_pm8994_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8994_ftsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8994_ftsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pm8994_hfsmps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPA, 4, &pm8994_hfsmps, "vdd_s4" },
+ { "s5", QCOM_SMD_RPM_SMPA, 5, &pm8994_hfsmps, "vdd_s5" },
+ { "s6", QCOM_SMD_RPM_SMPA, 6, &pm8994_ftsmps, "vdd_s6" },
+ { "s7", QCOM_SMD_RPM_SMPA, 7, &pm8994_hfsmps, "vdd_s7" },
+ { "s8", QCOM_SMD_RPM_SMPA, 8, &pm8994_ftsmps, "vdd_s8" },
+ { "s9", QCOM_SMD_RPM_SMPA, 9, &pm8994_ftsmps, "vdd_s9" },
+ { "s10", QCOM_SMD_RPM_SMPA, 10, &pm8994_ftsmps, "vdd_s10" },
+ { "s12", QCOM_SMD_RPM_SMPA, 12, &pm8994_ftsmps, "vdd_s12" },
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pm8994_nldo, "vdd_l1" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pm8994_nldo, "vdd_l2_l26_l28" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pm8994_nldo, "vdd_l3_l11" },
+ { "l4", QCOM_SMD_RPM_LDOA, 4, &pm8994_nldo, "vdd_l4_l27_l31" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8994_lnldo, "vdd_l5_l7" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8994_pldo, "vdd_l6_l12_l32" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8994_lnldo, "vdd_l5_l7" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pm8994_pldo, "vdd_l8_l16_l30" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pm8994_pldo, "vdd_l9_l10_l18_l22" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pm8994_pldo, "vdd_l9_l10_l18_l22" },
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8994_nldo, "vdd_l3_l11" },
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8994_pldo, "vdd_l6_l12_l32" },
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8994_pldo, "vdd_l13_l19_l23_l24" },
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8994_pldo, "vdd_l14_l15" },
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8994_pldo, "vdd_l14_l15" },
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pm8994_pldo, "vdd_l8_l16_l30" },
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8994_pldo, "vdd_l17_l29" },
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pm8994_pldo, "vdd_l9_l10_l18_l22" },
+ { "l19", QCOM_SMD_RPM_LDOA, 19, &pm8994_pldo, "vdd_l13_l19_l23_l24" },
+ { "l20", QCOM_SMD_RPM_LDOA, 20, &pm8994_pldo, "vdd_l20_l21" },
+ { "l21", QCOM_SMD_RPM_LDOA, 21, &pm8994_pldo, "vdd_l20_l21" },
+ { "l22", QCOM_SMD_RPM_LDOA, 22, &pm8994_pldo, "vdd_l9_l10_l18_l22" },
+ { "l23", QCOM_SMD_RPM_LDOA, 23, &pm8994_pldo, "vdd_l13_l19_l23_l24" },
+ { "l24", QCOM_SMD_RPM_LDOA, 24, &pm8994_pldo, "vdd_l13_l19_l23_l24" },
+ { "l25", QCOM_SMD_RPM_LDOA, 25, &pm8994_pldo, "vdd_l25" },
+ { "l26", QCOM_SMD_RPM_LDOA, 26, &pm8994_nldo, "vdd_l2_l26_l28" },
+ { "l27", QCOM_SMD_RPM_LDOA, 27, &pm8994_nldo, "vdd_l4_l27_l31" },
+ { "l28", QCOM_SMD_RPM_LDOA, 28, &pm8994_nldo, "vdd_l2_l26_l28" },
+ { "l29", QCOM_SMD_RPM_LDOA, 29, &pm8994_pldo, "vdd_l17_l29" },
+ { "l30", QCOM_SMD_RPM_LDOA, 30, &pm8994_pldo, "vdd_l8_l16_l30" },
+ { "l31", QCOM_SMD_RPM_LDOA, 31, &pm8994_nldo, "vdd_l4_l27_l31" },
+ { "l32", QCOM_SMD_RPM_LDOA, 32, &pm8994_pldo, "vdd_l6_l12_l32" },
+ { "lvs1", QCOM_SMD_RPM_VSA, 1, &pm8994_switch },
+ { "lvs2", QCOM_SMD_RPM_VSA, 2, &pm8994_switch },
+
+ {}
+};
+
static const struct of_device_id rpm_of_match[] = {
{ .compatible = "qcom,rpm-pm8841-regulators", .data = &rpm_pm8841_regulators },
{ .compatible = "qcom,rpm-pm8916-regulators", .data = &rpm_pm8916_regulators },
{ .compatible = "qcom,rpm-pm8941-regulators", .data = &rpm_pm8941_regulators },
+ { .compatible = "qcom,rpm-pm8994-regulators", .data = &rpm_pm8994_regulators },
{ .compatible = "qcom,rpm-pma8084-regulators", .data = &rpm_pma8084_regulators },
{}
};
diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c
index 88a5dc88badc7..3a2d98dbe7d2d 100644
--- a/drivers/regulator/qcom_spmi-regulator.c
+++ b/drivers/regulator/qcom_spmi-regulator.c
@@ -246,6 +246,7 @@ enum spmi_common_control_register_index {
/* Minimum voltage stepper delay for each step. */
#define SPMI_FTSMPS_STEP_DELAY 8
+#define SPMI_DEFAULT_STEP_DELAY 20
/*
* The ratio SPMI_FTSMPS_STEP_MARGIN_NUM/SPMI_FTSMPS_STEP_MARGIN_DEN is used to
@@ -254,13 +255,6 @@ enum spmi_common_control_register_index {
#define SPMI_FTSMPS_STEP_MARGIN_NUM 4
#define SPMI_FTSMPS_STEP_MARGIN_DEN 5
-/*
- * This voltage in uV is returned by get_voltage functions when there is no way
- * to determine the current voltage level. It is needed because the regulator
- * framework treats a 0 uV voltage as an error.
- */
-#define VOLTAGE_UNKNOWN 1
-
/* VSET value to decide the range of ULT SMPS */
#define ULT_SMPS_RANGE_SPLIT 0x60
@@ -539,12 +533,12 @@ static int spmi_regulator_common_disable(struct regulator_dev *rdev)
}
static int spmi_regulator_select_voltage(struct spmi_regulator *vreg,
- int min_uV, int max_uV, u8 *range_sel, u8 *voltage_sel,
- unsigned *selector)
+ int min_uV, int max_uV)
{
const struct spmi_voltage_range *range;
int uV = min_uV;
int lim_min_uV, lim_max_uV, i, range_id, range_max_uV;
+ int selector, voltage_sel;
/* Check if request voltage is outside of physically settable range. */
lim_min_uV = vreg->set_points->range[0].set_point_min_uV;
@@ -570,14 +564,13 @@ static int spmi_regulator_select_voltage(struct spmi_regulator *vreg,
range_id = i;
range = &vreg->set_points->range[range_id];
- *range_sel = range->range_sel;
/*
* Force uV to be an allowed set point by applying a ceiling function to
* the uV value.
*/
- *voltage_sel = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
- uV = *voltage_sel * range->step_uV + range->min_uV;
+ voltage_sel = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
+ uV = voltage_sel * range->step_uV + range->min_uV;
if (uV > max_uV) {
dev_err(vreg->dev,
@@ -587,12 +580,48 @@ static int spmi_regulator_select_voltage(struct spmi_regulator *vreg,
return -EINVAL;
}
- *selector = 0;
+ selector = 0;
for (i = 0; i < range_id; i++)
- *selector += vreg->set_points->range[i].n_voltages;
- *selector += (uV - range->set_point_min_uV) / range->step_uV;
+ selector += vreg->set_points->range[i].n_voltages;
+ selector += (uV - range->set_point_min_uV) / range->step_uV;
- return 0;
+ return selector;
+}
+
+static int spmi_sw_selector_to_hw(struct spmi_regulator *vreg,
+ unsigned selector, u8 *range_sel,
+ u8 *voltage_sel)
+{
+ const struct spmi_voltage_range *range, *end;
+
+ range = vreg->set_points->range;
+ end = range + vreg->set_points->count;
+
+ for (; range < end; range++) {
+ if (selector < range->n_voltages) {
+ *voltage_sel = selector;
+ *range_sel = range->range_sel;
+ return 0;
+ }
+
+ selector -= range->n_voltages;
+ }
+
+ return -EINVAL;
+}
+
+static int spmi_hw_selector_to_sw(struct spmi_regulator *vreg, u8 hw_sel,
+ const struct spmi_voltage_range *range)
+{
+ int sw_sel = hw_sel;
+ const struct spmi_voltage_range *r = vreg->set_points->range;
+
+ while (r != range) {
+ sw_sel += r->n_voltages;
+ r++;
+ }
+
+ return sw_sel;
}
static const struct spmi_voltage_range *
@@ -614,12 +643,11 @@ spmi_regulator_find_range(struct spmi_regulator *vreg)
}
static int spmi_regulator_select_voltage_same_range(struct spmi_regulator *vreg,
- int min_uV, int max_uV, u8 *range_sel, u8 *voltage_sel,
- unsigned *selector)
+ int min_uV, int max_uV)
{
const struct spmi_voltage_range *range;
int uV = min_uV;
- int i;
+ int i, selector;
range = spmi_regulator_find_range(vreg);
if (!range)
@@ -637,8 +665,8 @@ static int spmi_regulator_select_voltage_same_range(struct spmi_regulator *vreg,
* Force uV to be an allowed set point by applying a ceiling function to
* the uV value.
*/
- *voltage_sel = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
- uV = *voltage_sel * range->step_uV + range->min_uV;
+ uV = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);
+ uV = uV * range->step_uV + range->min_uV;
if (uV > max_uV) {
/*
@@ -648,43 +676,49 @@ static int spmi_regulator_select_voltage_same_range(struct spmi_regulator *vreg,
goto different_range;
}
- *selector = 0;
+ selector = 0;
for (i = 0; i < vreg->set_points->count; i++) {
if (uV >= vreg->set_points->range[i].set_point_min_uV
&& uV <= vreg->set_points->range[i].set_point_max_uV) {
- *selector +=
+ selector +=
(uV - vreg->set_points->range[i].set_point_min_uV)
/ vreg->set_points->range[i].step_uV;
break;
}
- *selector += vreg->set_points->range[i].n_voltages;
+ selector += vreg->set_points->range[i].n_voltages;
}
- if (*selector >= vreg->set_points->n_voltages)
+ if (selector >= vreg->set_points->n_voltages)
goto different_range;
- return 0;
+ return selector;
different_range:
- return spmi_regulator_select_voltage(vreg, min_uV, max_uV,
- range_sel, voltage_sel, selector);
+ return spmi_regulator_select_voltage(vreg, min_uV, max_uV);
}
-static int spmi_regulator_common_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV, unsigned *selector)
+static int spmi_regulator_common_map_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
- int ret;
- u8 buf[2];
- u8 range_sel, voltage_sel;
/*
* Favor staying in the current voltage range if possible. This avoids
* voltage spikes that occur when changing the voltage range.
*/
- ret = spmi_regulator_select_voltage_same_range(vreg, min_uV, max_uV,
- &range_sel, &voltage_sel, selector);
+ return spmi_regulator_select_voltage_same_range(vreg, min_uV, max_uV);
+}
+
+static int
+spmi_regulator_common_set_voltage(struct regulator_dev *rdev, unsigned selector)
+{
+ struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
+ int ret;
+ u8 buf[2];
+ u8 range_sel, voltage_sel;
+
+ ret = spmi_sw_selector_to_hw(vreg, selector, &range_sel, &voltage_sel);
if (ret)
return ret;
@@ -719,24 +753,24 @@ static int spmi_regulator_common_get_voltage(struct regulator_dev *rdev)
range = spmi_regulator_find_range(vreg);
if (!range)
- return VOLTAGE_UNKNOWN;
+ return -EINVAL;
- return range->step_uV * voltage_sel + range->min_uV;
+ return spmi_hw_selector_to_sw(vreg, voltage_sel, range);
}
-static int spmi_regulator_single_range_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV, unsigned *selector)
+static int spmi_regulator_single_map_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
- int ret;
- u8 range_sel, sel;
- ret = spmi_regulator_select_voltage(vreg, min_uV, max_uV, &range_sel,
- &sel, selector);
- if (ret) {
- dev_err(vreg->dev, "could not set voltage, ret=%d\n", ret);
- return ret;
- }
+ return spmi_regulator_select_voltage(vreg, min_uV, max_uV);
+}
+
+static int spmi_regulator_single_range_set_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
+ u8 sel = selector;
/*
* Certain types of regulators do not have a range select register so
@@ -748,27 +782,24 @@ static int spmi_regulator_single_range_set_voltage(struct regulator_dev *rdev,
static int spmi_regulator_single_range_get_voltage(struct regulator_dev *rdev)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
- const struct spmi_voltage_range *range = vreg->set_points->range;
- u8 voltage_sel;
+ u8 selector;
+ int ret;
- spmi_vreg_read(vreg, SPMI_COMMON_REG_VOLTAGE_SET, &voltage_sel, 1);
+ ret = spmi_vreg_read(vreg, SPMI_COMMON_REG_VOLTAGE_SET, &selector, 1);
+ if (ret)
+ return ret;
- return range->step_uV * voltage_sel + range->min_uV;
+ return selector;
}
static int spmi_regulator_ult_lo_smps_set_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV, unsigned *selector)
+ unsigned selector)
{
struct spmi_regulator *vreg = rdev_get_drvdata(rdev);
int ret;
u8 range_sel, voltage_sel;
- /*
- * Favor staying in the current voltage range if possible. This avoids
- * voltage spikes that occur when changing the voltage range.
- */
- ret = spmi_regulator_select_voltage_same_range(vreg, min_uV, max_uV,
- &range_sel, &voltage_sel, selector);
+ ret = spmi_sw_selector_to_hw(vreg, selector, &range_sel, &voltage_sel);
if (ret)
return ret;
@@ -783,7 +814,7 @@ static int spmi_regulator_ult_lo_smps_set_voltage(struct regulator_dev *rdev,
voltage_sel |= ULT_SMPS_RANGE_SPLIT;
return spmi_vreg_update_bits(vreg, SPMI_COMMON_REG_VOLTAGE_SET,
- voltage_sel, 0xff);
+ voltage_sel, 0xff);
}
static int spmi_regulator_ult_lo_smps_get_voltage(struct regulator_dev *rdev)
@@ -796,12 +827,12 @@ static int spmi_regulator_ult_lo_smps_get_voltage(struct regulator_dev *rdev)
range = spmi_regulator_find_range(vreg);
if (!range)
- return VOLTAGE_UNKNOWN;
+ return -EINVAL;
if (range->range_sel == 1)
voltage_sel &= ~ULT_SMPS_RANGE_SPLIT;
- return range->step_uV * voltage_sel + range->min_uV;
+ return spmi_hw_selector_to_sw(vreg, voltage_sel, range);
}
static int spmi_regulator_common_list_voltage(struct regulator_dev *rdev,
@@ -1007,8 +1038,10 @@ static struct regulator_ops spmi_smps_ops = {
.enable = spmi_regulator_common_enable,
.disable = spmi_regulator_common_disable,
.is_enabled = spmi_regulator_common_is_enabled,
- .set_voltage = spmi_regulator_common_set_voltage,
- .get_voltage = spmi_regulator_common_get_voltage,
+ .set_voltage_sel = spmi_regulator_common_set_voltage,
+ .set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
+ .get_voltage_sel = spmi_regulator_common_get_voltage,
+ .map_voltage = spmi_regulator_common_map_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_mode = spmi_regulator_common_set_mode,
.get_mode = spmi_regulator_common_get_mode,
@@ -1020,8 +1053,9 @@ static struct regulator_ops spmi_ldo_ops = {
.enable = spmi_regulator_common_enable,
.disable = spmi_regulator_common_disable,
.is_enabled = spmi_regulator_common_is_enabled,
- .set_voltage = spmi_regulator_common_set_voltage,
- .get_voltage = spmi_regulator_common_get_voltage,
+ .set_voltage_sel = spmi_regulator_common_set_voltage,
+ .get_voltage_sel = spmi_regulator_common_get_voltage,
+ .map_voltage = spmi_regulator_common_map_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_mode = spmi_regulator_common_set_mode,
.get_mode = spmi_regulator_common_get_mode,
@@ -1036,8 +1070,9 @@ static struct regulator_ops spmi_ln_ldo_ops = {
.enable = spmi_regulator_common_enable,
.disable = spmi_regulator_common_disable,
.is_enabled = spmi_regulator_common_is_enabled,
- .set_voltage = spmi_regulator_common_set_voltage,
- .get_voltage = spmi_regulator_common_get_voltage,
+ .set_voltage_sel = spmi_regulator_common_set_voltage,
+ .get_voltage_sel = spmi_regulator_common_get_voltage,
+ .map_voltage = spmi_regulator_common_map_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_bypass = spmi_regulator_common_set_bypass,
.get_bypass = spmi_regulator_common_get_bypass,
@@ -1056,8 +1091,9 @@ static struct regulator_ops spmi_boost_ops = {
.enable = spmi_regulator_common_enable,
.disable = spmi_regulator_common_disable,
.is_enabled = spmi_regulator_common_is_enabled,
- .set_voltage = spmi_regulator_single_range_set_voltage,
- .get_voltage = spmi_regulator_single_range_get_voltage,
+ .set_voltage_sel = spmi_regulator_single_range_set_voltage,
+ .get_voltage_sel = spmi_regulator_single_range_get_voltage,
+ .map_voltage = spmi_regulator_single_map_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_input_current_limit = spmi_regulator_set_ilim,
};
@@ -1066,9 +1102,10 @@ static struct regulator_ops spmi_ftsmps_ops = {
.enable = spmi_regulator_common_enable,
.disable = spmi_regulator_common_disable,
.is_enabled = spmi_regulator_common_is_enabled,
- .set_voltage = spmi_regulator_common_set_voltage,
+ .set_voltage_sel = spmi_regulator_common_set_voltage,
.set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
- .get_voltage = spmi_regulator_common_get_voltage,
+ .get_voltage_sel = spmi_regulator_common_get_voltage,
+ .map_voltage = spmi_regulator_common_map_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_mode = spmi_regulator_common_set_mode,
.get_mode = spmi_regulator_common_get_mode,
@@ -1080,8 +1117,9 @@ static struct regulator_ops spmi_ult_lo_smps_ops = {
.enable = spmi_regulator_common_enable,
.disable = spmi_regulator_common_disable,
.is_enabled = spmi_regulator_common_is_enabled,
- .set_voltage = spmi_regulator_ult_lo_smps_set_voltage,
- .get_voltage = spmi_regulator_ult_lo_smps_get_voltage,
+ .set_voltage_sel = spmi_regulator_ult_lo_smps_set_voltage,
+ .set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
+ .get_voltage_sel = spmi_regulator_ult_lo_smps_get_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_mode = spmi_regulator_common_set_mode,
.get_mode = spmi_regulator_common_get_mode,
@@ -1093,8 +1131,10 @@ static struct regulator_ops spmi_ult_ho_smps_ops = {
.enable = spmi_regulator_common_enable,
.disable = spmi_regulator_common_disable,
.is_enabled = spmi_regulator_common_is_enabled,
- .set_voltage = spmi_regulator_single_range_set_voltage,
- .get_voltage = spmi_regulator_single_range_get_voltage,
+ .set_voltage_sel = spmi_regulator_single_range_set_voltage,
+ .set_voltage_time_sel = spmi_regulator_set_voltage_time_sel,
+ .get_voltage_sel = spmi_regulator_single_range_get_voltage,
+ .map_voltage = spmi_regulator_single_map_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_mode = spmi_regulator_common_set_mode,
.get_mode = spmi_regulator_common_get_mode,
@@ -1106,8 +1146,9 @@ static struct regulator_ops spmi_ult_ldo_ops = {
.enable = spmi_regulator_common_enable,
.disable = spmi_regulator_common_disable,
.is_enabled = spmi_regulator_common_is_enabled,
- .set_voltage = spmi_regulator_single_range_set_voltage,
- .get_voltage = spmi_regulator_single_range_get_voltage,
+ .set_voltage_sel = spmi_regulator_single_range_set_voltage,
+ .get_voltage_sel = spmi_regulator_single_range_get_voltage,
+ .map_voltage = spmi_regulator_single_map_voltage,
.list_voltage = spmi_regulator_common_list_voltage,
.set_mode = spmi_regulator_common_set_mode,
.get_mode = spmi_regulator_common_get_mode,
@@ -1201,7 +1242,7 @@ static int spmi_regulator_match(struct spmi_regulator *vreg, u16 force_type)
ret = spmi_vreg_read(vreg, SPMI_COMMON_REG_DIG_MAJOR_REV, version,
ARRAY_SIZE(version));
if (ret) {
- dev_err(vreg->dev, "could not read version registers\n");
+ dev_dbg(vreg->dev, "could not read version registers\n");
return ret;
}
dig_major_rev = version[SPMI_COMMON_REG_DIG_MAJOR_REV
@@ -1245,11 +1286,11 @@ found:
return 0;
}
-static int spmi_regulator_ftsmps_init_slew_rate(struct spmi_regulator *vreg)
+static int spmi_regulator_init_slew_rate(struct spmi_regulator *vreg)
{
int ret;
u8 reg = 0;
- int step, delay, slew_rate;
+ int step, delay, slew_rate, step_delay;
const struct spmi_voltage_range *range;
ret = spmi_vreg_read(vreg, SPMI_COMMON_REG_STEP_CTRL, &reg, 1);
@@ -1262,6 +1303,15 @@ static int spmi_regulator_ftsmps_init_slew_rate(struct spmi_regulator *vreg)
if (!range)
return -EINVAL;
+ switch (vreg->logical_type) {
+ case SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS:
+ step_delay = SPMI_FTSMPS_STEP_DELAY;
+ break;
+ default:
+ step_delay = SPMI_DEFAULT_STEP_DELAY;
+ break;
+ }
+
step = reg & SPMI_FTSMPS_STEP_CTRL_STEP_MASK;
step >>= SPMI_FTSMPS_STEP_CTRL_STEP_SHIFT;
@@ -1270,7 +1320,7 @@ static int spmi_regulator_ftsmps_init_slew_rate(struct spmi_regulator *vreg)
/* slew_rate has units of uV/us */
slew_rate = SPMI_FTSMPS_CLOCK_RATE * range->step_uV * (1 << step);
- slew_rate /= 1000 * (SPMI_FTSMPS_STEP_DELAY << delay);
+ slew_rate /= 1000 * (step_delay << delay);
slew_rate *= SPMI_FTSMPS_STEP_MARGIN_NUM;
slew_rate /= SPMI_FTSMPS_STEP_MARGIN_DEN;
@@ -1411,10 +1461,16 @@ static int spmi_regulator_of_parse(struct device_node *node,
return ret;
}
- if (vreg->logical_type == SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS) {
- ret = spmi_regulator_ftsmps_init_slew_rate(vreg);
+ switch (vreg->logical_type) {
+ case SPMI_REGULATOR_LOGICAL_TYPE_FTSMPS:
+ case SPMI_REGULATOR_LOGICAL_TYPE_ULT_LO_SMPS:
+ case SPMI_REGULATOR_LOGICAL_TYPE_ULT_HO_SMPS:
+ case SPMI_REGULATOR_LOGICAL_TYPE_SMPS:
+ ret = spmi_regulator_init_slew_rate(vreg);
if (ret)
return ret;
+ default:
+ break;
}
if (vreg->logical_type != SPMI_REGULATOR_LOGICAL_TYPE_VS)
@@ -1510,14 +1566,73 @@ static const struct spmi_regulator_data pm8916_regulators[] = {
{ }
};
+static const struct spmi_regulator_data pm8994_regulators[] = {
+ { "s1", 0x1400, "vdd_s1", },
+ { "s2", 0x1700, "vdd_s2", },
+ { "s3", 0x1a00, "vdd_s3", },
+ { "s4", 0x1d00, "vdd_s4", },
+ { "s5", 0x2000, "vdd_s5", },
+ { "s6", 0x2300, "vdd_s6", },
+ { "s7", 0x2600, "vdd_s7", },
+ { "s8", 0x2900, "vdd_s8", },
+ { "s9", 0x2c00, "vdd_s9", },
+ { "s10", 0x2f00, "vdd_s10", },
+ { "s11", 0x3200, "vdd_s11", },
+ { "s12", 0x3500, "vdd_s12", },
+ { "l1", 0x4000, "vdd_l1", },
+ { "l2", 0x4100, "vdd_l2_l26_l28", },
+ { "l3", 0x4200, "vdd_l3_l11", },
+ { "l4", 0x4300, "vdd_l4_l27_l31", },
+ { "l5", 0x4400, "vdd_l5_l7", },
+ { "l6", 0x4500, "vdd_l6_l12_l32", },
+ { "l7", 0x4600, "vdd_l5_l7", },
+ { "l8", 0x4700, "vdd_l8_l16_l30", },
+ { "l9", 0x4800, "vdd_l9_l10_l18_l22", },
+ { "l10", 0x4900, "vdd_l9_l10_l18_l22", },
+ { "l11", 0x4a00, "vdd_l3_l11", },
+ { "l12", 0x4b00, "vdd_l6_l12_l32", },
+ { "l13", 0x4c00, "vdd_l13_l19_l23_l24", },
+ { "l14", 0x4d00, "vdd_l14_l15", },
+ { "l15", 0x4e00, "vdd_l14_l15", },
+ { "l16", 0x4f00, "vdd_l8_l16_l30", },
+ { "l17", 0x5000, "vdd_l17_l29", },
+ { "l18", 0x5100, "vdd_l9_l10_l18_l22", },
+ { "l19", 0x5200, "vdd_l13_l19_l23_l24", },
+ { "l20", 0x5300, "vdd_l20_l21", },
+ { "l21", 0x5400, "vdd_l20_l21", },
+ { "l22", 0x5500, "vdd_l9_l10_l18_l22", },
+ { "l23", 0x5600, "vdd_l13_l19_l23_l24", },
+ { "l24", 0x5700, "vdd_l13_l19_l23_l24", },
+ { "l25", 0x5800, "vdd_l25", },
+ { "l26", 0x5900, "vdd_l2_l26_l28", },
+ { "l27", 0x5a00, "vdd_l4_l27_l31", },
+ { "l28", 0x5b00, "vdd_l2_l26_l28", },
+ { "l29", 0x5c00, "vdd_l17_l29", },
+ { "l30", 0x5d00, "vdd_l8_l16_l30", },
+ { "l31", 0x5e00, "vdd_l4_l27_l31", },
+ { "l32", 0x5f00, "vdd_l6_l12_l32", },
+ { "lvs1", 0x8000, "vdd_lvs_1_2", },
+ { "lvs2", 0x8100, "vdd_lvs_1_2", },
+ { }
+};
+
static const struct of_device_id qcom_spmi_regulator_match[] = {
{ .compatible = "qcom,pm8841-regulators", .data = &pm8841_regulators },
{ .compatible = "qcom,pm8916-regulators", .data = &pm8916_regulators },
{ .compatible = "qcom,pm8941-regulators", .data = &pm8941_regulators },
+ { .compatible = "qcom,pm8994-regulators", .data = &pm8994_regulators },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_spmi_regulator_match);
+extern int _regulator_get_voltage(struct regulator_dev *rdev);
+char name[] = "s11";
+struct platform_device_info devinfo = {
+ .name = "reg-virt-consumer",
+ .id = 0, /* if registering more than a single regulator, increment the ID */
+ .data = name,
+ .size_data = sizeof(name),
+};
static int qcom_spmi_regulator_probe(struct platform_device *pdev)
{
const struct spmi_regulator_data *reg;
@@ -1573,7 +1688,7 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
ret = spmi_regulator_match(vreg, reg->force_type);
if (ret)
- goto err;
+ continue;
config.dev = dev;
config.driver_data = vreg;
@@ -1588,6 +1703,7 @@ static int qcom_spmi_regulator_probe(struct platform_device *pdev)
list_add(&vreg->node, vreg_list);
}
+ platform_device_register_full(&devinfo);
return 0;
err:
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 72e97d7a52091..1ca52d1164830 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -77,6 +77,15 @@ config DA8XX_REMOTEPROC
It's safe to say n here if you're not interested in multimedia
offloading.
+config QCOM_Q6V5_PIL
+	tristate "Qualcomm Hexagon V5 Peripheral Image Loader"
+	depends on OF && ARCH_QCOM
+	select REMOTEPROC
+	select QCOM_MDT_LOADER
+	help
+	  Say y here to support the Qualcomm Peripheral Image Loader for the
+	  Hexagon V5 based remote processors.
+
config ST_REMOTEPROC
tristate "ST remoteproc support"
depends on ARCH_STI
@@ -86,4 +95,27 @@ config ST_REMOTEPROC
processor framework.
This can be either built-in or a loadable module.
+config QCOM_MDT_LOADER
+ tristate
+
+config QCOM_VENUS_PIL
+	tristate "Qualcomm Venus Peripheral Image Loader"
+ help
+ Peripheral Image Loader for the Venus block.
+
+
+config QCOM_WCNSS_PIL
+ tristate "Qualcomm WCNSS Peripheral Image Loader"
+ help
+	  Peripheral Image Loader for the WCNSS block.
+
+config QCOM_VENUS_PIL
+	tristate "Qualcomm Venus Peripheral Image Loader"
+ depends on OF && ARCH_QCOM
+ select REMOTEPROC
+ select QCOM_MDT_LOADER
+ select QCOM_SCM
+ help
+ Peripheral Image Loader for the Venus block.
+
endmenu
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 279cb2edc8809..7bff258d9bcfc 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -12,3 +12,7 @@ obj-$(CONFIG_STE_MODEM_RPROC) += ste_modem_rproc.o
obj-$(CONFIG_WKUP_M3_RPROC) += wkup_m3_rproc.o
obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o
obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o
+obj-$(CONFIG_QCOM_MDT_LOADER) += qcom_mdt_loader.o
+obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss.o qcom_wcnss_iris.o
+obj-$(CONFIG_QCOM_VENUS_PIL) += qcom_venus.o
+obj-$(CONFIG_QCOM_Q6V5_PIL) += qcom_q6v5_pil.o
diff --git a/drivers/remoteproc/qcom_mdt_loader.c b/drivers/remoteproc/qcom_mdt_loader.c
new file mode 100644
index 0000000000000..8036164b0dfaf
--- /dev/null
+++ b/drivers/remoteproc/qcom_mdt_loader.c
@@ -0,0 +1,169 @@
+/*
+ * Qualcomm Peripheral Image Loader
+ *
+ * Copyright (C) 2016 Linaro Ltd
+ * Copyright (C) 2015 Sony Mobile Communications Inc
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/elf.h>
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/qcom_scm.h>
+#include <linux/remoteproc.h>
+#include <linux/slab.h>
+
+#include "remoteproc_internal.h"
+#include "qcom_mdt_loader.h"
+
+/**
+ * qcom_mdt_find_rsc_table() - provide dummy resource table for remoteproc
+ * @rproc: remoteproc handle
+ * @fw: firmware header
+ * @tablesz: outgoing size of the table
+ *
+ * Returns a dummy table.
+ */
+struct resource_table *qcom_mdt_find_rsc_table(struct rproc *rproc,
+ const struct firmware *fw,
+ int *tablesz)
+{
+ static struct resource_table table = { .ver = 1, };
+
+ *tablesz = sizeof(table);
+ return &table;
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_find_rsc_table);
+
+int qcom_mdt_parse(const struct firmware *fw, phys_addr_t *fw_addr, size_t *fw_size, bool *fw_relocate)
+{
+ const struct elf32_phdr *phdrs;
+ const struct elf32_phdr *phdr;
+ const struct elf32_hdr *ehdr;
+ phys_addr_t min_addr = (phys_addr_t)ULLONG_MAX;
+ phys_addr_t max_addr = 0;
+ bool relocate = false;
+ int i;
+
+ ehdr = (struct elf32_hdr *)fw->data;
+ phdrs = (struct elf32_phdr *)(ehdr + 1);
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
+ continue;
+
+ if (!phdr->p_memsz)
+ continue;
+
+ if (phdr->p_flags & QCOM_MDT_RELOCATABLE)
+ relocate = true;
+
+ if (phdr->p_paddr < min_addr)
+ min_addr = phdr->p_paddr;
+
+ if (phdr->p_paddr + phdr->p_memsz > max_addr)
+ max_addr = round_up(phdr->p_paddr + phdr->p_memsz, SZ_4K);
+ }
+
+ *fw_addr = min_addr;
+ *fw_size = max_addr - min_addr;
+ *fw_relocate = relocate;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_parse);
+
+/**
+ * qcom_mdt_load() - load the firmware which header is defined in fw
+ * @rproc: rproc handle
+ * @fw: firmware object for the mdt header
+ * @firmware: firmware name, used to construct the segment file names
+ * @mem_phys: physical address of reserved memory region for the firmware
+ * @mem_region: pointer to a mapping of the reserved memory region
+ * @mem_size: size of the reserved memory region
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+int qcom_mdt_load(struct rproc *rproc,
+ const struct firmware *fw,
+ const char *firmware,
+ phys_addr_t mem_phys,
+ void *mem_region,
+ size_t mem_size)
+{
+ const struct elf32_phdr *phdrs;
+ const struct elf32_phdr *phdr;
+ const struct elf32_hdr *ehdr;
+ unsigned fw_name_len;
+ char *fw_name;
+ void *ptr;
+ int ret;
+ int i;
+
+ ehdr = (struct elf32_hdr *)fw->data;
+ phdrs = (struct elf32_phdr *)(ehdr + 1);
+
+ fw_name_len = strlen(firmware);
+ if (fw_name_len <= 4)
+ return -EINVAL;
+
+ fw_name = kstrdup(firmware, GFP_KERNEL);
+ if (!fw_name)
+ return -ENOMEM;
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &phdrs[i];
+
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
+ continue;
+
+ if (!phdr->p_memsz)
+ continue;
+
+ ptr = mem_region + phdr->p_paddr - mem_phys;
+ if (ptr < mem_region || ptr + phdr->p_memsz > mem_region + mem_size) {
+ dev_err(&rproc->dev, "segment outside memory range\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ if (phdr->p_filesz) {
+ sprintf(fw_name + fw_name_len - 3, "b%02d", i);
+ ret = request_firmware(&fw, fw_name, &rproc->dev);
+ if (ret) {
+ dev_err(&rproc->dev, "failed to load %s\n", fw_name);
+ break;
+ }
+
+ memcpy(ptr, fw->data, fw->size);
+
+ release_firmware(fw);
+ }
+
+ if (phdr->p_memsz > phdr->p_filesz)
+ memset(ptr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz);
+ }
+
+ kfree(fw_name);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_mdt_load);
diff --git a/drivers/remoteproc/qcom_mdt_loader.h b/drivers/remoteproc/qcom_mdt_loader.h
new file mode 100644
index 0000000000000..86d5478018198
--- /dev/null
+++ b/drivers/remoteproc/qcom_mdt_loader.h
@@ -0,0 +1,13 @@
+#ifndef __QCOM_MDT_LOADER_H__
+#define __QCOM_MDT_LOADER_H__
+
+#define QCOM_MDT_TYPE_MASK (7 << 24)
+#define QCOM_MDT_TYPE_HASH (2 << 24)
+#define QCOM_MDT_RELOCATABLE BIT(27)
+
+struct resource_table * qcom_mdt_find_rsc_table(struct rproc *rproc, const struct firmware *fw, int *tablesz);
+int qcom_mdt_load(struct rproc *rproc, const struct firmware *fw, const char *fw_name, phys_addr_t mem_phys, void *mem_region, size_t mem_size);
+
+int qcom_mdt_parse(const struct firmware *fw, phys_addr_t *fw_addr, size_t *fw_size, bool *fw_relocate);
+
+#endif
diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c
new file mode 100644
index 0000000000000..bc828ea235f36
--- /dev/null
+++ b/drivers/remoteproc/qcom_q6v5_pil.c
@@ -0,0 +1,772 @@
+/*
+ * Qualcomm Peripheral Image Loader
+ *
+ * Copyright (C) 2016 Linaro Ltd.
+ * Copyright (C) 2014 Sony Mobile Communications AB
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/remoteproc.h>
+#include <linux/interrupt.h>
+#include <linux/memblock.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/elf.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+#include <linux/reset.h>
+
+#include "remoteproc_internal.h"
+#include "qcom_mdt_loader.h"
+
+#include <linux/qcom_scm.h>
+
+#define MBA_FIRMWARE_NAME "mba.b00"
+#define MPSS_FIRMWARE_NAME "modem.mdt"
+
+#define MPSS_CRASH_REASON_SMEM 421
+
+#define VDD_MSS_UV_MIN 1000000
+#define VDD_MSS_UV_MAX 1150000
+#define VDD_MSS_UA 100000
+
+/* AXI Halting Registers */
+#define MSS_Q6_HALT_BASE 0x180
+#define MSS_MODEM_HALT_BASE 0x200
+#define MSS_NC_HALT_BASE 0x280
+
+/* RMB Status Register Values */
+#define RMB_PBL_SUCCESS 0x1
+
+#define RMB_MBA_XPU_UNLOCKED 0x1
+#define RMB_MBA_XPU_UNLOCKED_SCRIBBLED 0x2
+#define RMB_MBA_META_DATA_AUTH_SUCCESS 0x3
+#define RMB_MBA_AUTH_COMPLETE 0x4
+
+/* PBL/MBA interface registers */
+#define RMB_MBA_IMAGE_REG 0x00
+#define RMB_PBL_STATUS_REG 0x04
+#define RMB_MBA_COMMAND_REG 0x08
+#define RMB_MBA_STATUS_REG 0x0C
+#define RMB_PMI_META_DATA_REG 0x10
+#define RMB_PMI_CODE_START_REG 0x14
+#define RMB_PMI_CODE_LENGTH_REG 0x18
+
+#define RMB_CMD_META_DATA_READY 0x1
+#define RMB_CMD_LOAD_READY 0x2
+
+/* QDSP6SS Register Offsets */
+#define QDSP6SS_RESET_REG 0x014
+#define QDSP6SS_GFMUX_CTL_REG 0x020
+#define QDSP6SS_PWR_CTL_REG 0x030
+
+/* AXI Halt Register Offsets */
+#define AXI_HALTREQ_REG 0x0
+#define AXI_HALTACK_REG 0x4
+#define AXI_IDLE_REG 0x8
+
+#define HALT_ACK_TIMEOUT_MS 100
+
+/* QDSP6SS_RESET */
+#define Q6SS_STOP_CORE BIT(0)
+#define Q6SS_CORE_ARES BIT(1)
+#define Q6SS_BUS_ARES_ENABLE BIT(2)
+
+/* QDSP6SS_GFMUX_CTL */
+#define Q6SS_CLK_ENABLE BIT(1)
+
+/* QDSP6SS_PWR_CTL */
+#define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0)
+#define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1)
+#define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2)
+#define Q6SS_L2TAG_SLP_NRET_N BIT(16)
+#define Q6SS_ETB_SLP_NRET_N BIT(17)
+#define Q6SS_L2DATA_STBY_N BIT(18)
+#define Q6SS_SLP_RET_N BIT(19)
+#define Q6SS_CLAMP_IO BIT(20)
+#define QDSS_BHS_ON BIT(21)
+#define QDSS_LDO_BYP BIT(22)
+
+struct q6v5 {
+ struct device *dev;
+ struct rproc *rproc;
+
+ void __iomem *reg_base;
+ void __iomem *halt_base;
+ void __iomem *rmb_base;
+
+ struct reset_control *mss_restart;
+
+ struct qcom_smem_state *state;
+ unsigned stop_bit;
+
+ struct regulator *vdd;
+
+ struct clk *ahb_clk;
+ struct clk *axi_clk;
+ struct clk *rom_clk;
+
+ struct completion start_done;
+
+ phys_addr_t mba_phys;
+ void *mba_region;
+ size_t mba_size;
+
+ phys_addr_t mpss_phys;
+ void *mpss_region;
+ size_t mpss_size;
+};
+
+static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
+{
+ struct q6v5 *qproc = rproc->priv;
+
+ memcpy(qproc->mba_region, fw->data, fw->size);
+
+ return 0;
+}
+
+static const struct rproc_fw_ops q6v5_fw_ops = {
+ .find_rsc_table = qcom_mdt_find_rsc_table,
+ .load = q6v5_load,
+};
+
+static int q6v5_rmb_pbl_wait(struct q6v5 *qproc, int ms)
+{
+ unsigned long timeout;
+ s32 val;
+
+ timeout = jiffies + msecs_to_jiffies(ms);
+ for (;;) {
+ val = readl(qproc->rmb_base + RMB_PBL_STATUS_REG);
+ if (val)
+ break;
+
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ msleep(1);
+ }
+
+ return val;
+}
+
+static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
+{
+
+ unsigned long timeout;
+ s32 val;
+
+ timeout = jiffies + msecs_to_jiffies(ms);
+ for (;;) {
+ val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
+ if (val < 0)
+ break;
+
+ if (!status && val)
+ break;
+ else if (status && val == status)
+ break;
+
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ msleep(1);
+ }
+
+ return val;
+}
+
+static void q6v5proc_reset(struct q6v5 *qproc)
+{
+ u32 val;
+
+ /* Assert resets, stop core */
+ val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
+ val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE);
+ writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
+
+ /* Enable power block headswitch, and wait for it to stabilize */
+ val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= QDSS_BHS_ON | QDSS_LDO_BYP;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ mb();
+ udelay(1);
+
+ /*
+ * Turn on memories. L2 banks should be done individually
+ * to minimize inrush current.
+ */
+ val = readl(qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
+ Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= Q6SS_L2DATA_SLP_NRET_N_2;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= Q6SS_L2DATA_SLP_NRET_N_1;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+ val |= Q6SS_L2DATA_SLP_NRET_N_0;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+
+ /* Remove IO clamp */
+ val &= ~Q6SS_CLAMP_IO;
+ writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
+
+ /* Bring core out of reset */
+ val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
+ val &= ~Q6SS_CORE_ARES;
+ writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
+
+ /* Turn on core clock */
+ val = readl(qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
+ val |= Q6SS_CLK_ENABLE;
+ writel(val, qproc->reg_base + QDSP6SS_GFMUX_CTL_REG);
+
+ /* Start core execution */
+ val = readl(qproc->reg_base + QDSP6SS_RESET_REG);
+ val &= ~Q6SS_STOP_CORE;
+ writel(val, qproc->reg_base + QDSP6SS_RESET_REG);
+}
+
+static void q6v5proc_halt_axi_port(struct q6v5 *qproc, void __iomem *halt)
+{
+ unsigned long timeout;
+ u32 val;
+
+ /* Check if we're already idle */
+ if (readl(halt + AXI_IDLE_REG))
+ return;
+
+ /* Assert halt request */
+ writel(1, halt + AXI_HALTREQ_REG);
+
+ /* Wait for halt */
+ timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
+ for (;;) {
+ val = readl(halt + AXI_HALTACK_REG);
+ if (val || time_after(jiffies, timeout))
+ break;
+
+ msleep(1);
+ }
+
+ if (!readl(halt + AXI_IDLE_REG))
+ dev_err(qproc->dev, "port %pa failed halt\n", &halt);
+
+ /* Clear halt request (port will remain halted until reset) */
+ writel(0, halt + AXI_HALTREQ_REG);
+}
+
+/*
+ * Stage the mdt (ELF header + hashes) in the mpss region and ask the MBA
+ * to authenticate it.  Returns 0 on success or a negative errno.
+ */
+static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
+{
+	int ret;
+
+	/* Don't overrun the scratch region with an oversized mdt blob */
+	if (fw->size > qproc->mpss_size) {
+		dev_err(qproc->dev, "mdt does not fit in mpss region\n");
+		return -EINVAL;
+	}
+
+	/* Use mpss memory as scratch buffer for the mdt validation */
+	memcpy(qproc->mpss_region, fw->data, fw->size);
+
+	/* Point the MBA at the metadata and request authentication */
+	writel(qproc->mpss_phys, qproc->rmb_base + RMB_PMI_META_DATA_REG);
+	writel(RMB_CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
+
+	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_META_DATA_AUTH_SUCCESS, 1000);
+	if (ret == -ETIMEDOUT)
+		dev_err(qproc->dev, "MBA header authentication timed out\n");
+	else if (ret < 0)
+		dev_err(qproc->dev, "MBA returned error %d for MDT header\n", ret);
+
+	return ret < 0 ? ret : 0;
+}
+
+/*
+ * Walk the PT_LOAD segments of the mpss image and feed the growing code
+ * length to the RMB so the MBA validates the region as it is loaded.
+ *
+ * Fixes vs. previous revision: the MBA status was read into a u32, making
+ * the "val < 0" error check always false; and phdr was incremented in the
+ * for-header before ever being assigned (incrementing an indeterminate
+ * pointer, and redundant since phdr is reassigned each iteration).
+ */
+static int q6v5_mpss_validate(struct q6v5 *qproc, const struct firmware *fw)
+{
+	const struct elf32_phdr *phdrs;
+	const struct elf32_phdr *phdr;
+	struct elf32_hdr *ehdr;
+	size_t size;
+	int val;
+	int i;
+
+	ehdr = (struct elf32_hdr *)fw->data;
+	phdrs = (struct elf32_phdr *)(ehdr + 1);
+	for (i = 0; i < ehdr->e_phnum; i++) {
+		phdr = &phdrs[i];
+
+		if (phdr->p_type != PT_LOAD)
+			continue;
+
+		/* Hash segments are metadata, not loadable code */
+		if ((phdr->p_flags & QCOM_MDT_TYPE_MASK) == QCOM_MDT_TYPE_HASH)
+			continue;
+
+		if (!phdr->p_memsz)
+			continue;
+
+		size = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
+		if (!size) {
+			/* First segment: announce the code start address */
+			writel(phdr->p_paddr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
+			writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
+		}
+
+		size += phdr->p_memsz;
+		writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
+	}
+
+	/* Negative status values from the MBA are error codes */
+	val = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
+	return val < 0 ? val : 0;
+}
+
+/*
+ * Fetch the mpss firmware and run it through the MBA authentication
+ * sequence: init the RMB validator, authenticate the mdt header, load
+ * the segments, then wait for full-image authentication to complete.
+ */
+static int q6v5_mpss_load(struct q6v5 *qproc)
+{
+	const struct firmware *fw;
+	int ret;
+
+	ret = request_firmware(&fw, MPSS_FIRMWARE_NAME, qproc->dev);
+	if (ret < 0) {
+		dev_err(qproc->dev, "unable to load " MPSS_FIRMWARE_NAME "\n");
+		return ret;
+	}
+
+	/* Initialize the RMB validator */
+	writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
+
+	ret = q6v5_mpss_init_image(qproc, fw);
+	if (ret)
+		goto release_firmware;
+
+	/* Copy the segments into the mpss region for the MBA to check */
+	ret = qcom_mdt_load(qproc->rproc, fw, MPSS_FIRMWARE_NAME, qproc->mpss_phys, qproc->mpss_region, qproc->mpss_size);
+	if (ret)
+		goto release_firmware;
+
+	ret = q6v5_mpss_validate(qproc, fw);
+	if (ret)
+		goto release_firmware;
+
+	ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
+	if (ret == -ETIMEDOUT)
+		dev_err(qproc->dev, "MBA authentication timed out\n");
+	else if (ret < 0)
+		dev_err(qproc->dev, "MBA returned error %d\n", ret);
+
+release_firmware:
+	release_firmware(fw);
+
+	/* Positive RMB status values are not errors to our caller */
+	return ret < 0 ? ret : 0;
+}
+
+/*
+ * Power up the modem subsystem, boot the MBA and load/authenticate the
+ * mpss firmware through it.
+ *
+ * On success the enabled resources are intentionally left on (see the
+ * "release the handover resources" note).  On error the previous code
+ * leaked rom_clk: the halt_axi_ports path fell through to disabling only
+ * the axi and ahb clocks.  rom_clk is now disabled there as well.
+ */
+static int q6v5_start(struct rproc *rproc)
+{
+	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
+	int ret;
+
+	ret = regulator_enable(qproc->vdd);
+	if (ret) {
+		dev_err(qproc->dev, "failed to enable mss vdd\n");
+		return ret;
+	}
+
+	ret = reset_control_deassert(qproc->mss_restart);
+	if (ret) {
+		dev_err(qproc->dev, "failed to deassert mss restart\n");
+		goto disable_vdd;
+	}
+
+	ret = clk_prepare_enable(qproc->ahb_clk);
+	if (ret)
+		goto assert_reset;
+
+	ret = clk_prepare_enable(qproc->axi_clk);
+	if (ret)
+		goto disable_ahb_clk;
+
+	ret = clk_prepare_enable(qproc->rom_clk);
+	if (ret)
+		goto disable_axi_clk;
+
+	/* Tell the PBL where the MBA image was loaded */
+	writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
+
+	q6v5proc_reset(qproc);
+
+	ret = q6v5_rmb_pbl_wait(qproc, 1000);
+	if (ret == -ETIMEDOUT) {
+		dev_err(qproc->dev, "PBL boot timed out\n");
+		goto halt_axi_ports;
+	} else if (ret != RMB_PBL_SUCCESS) {
+		dev_err(qproc->dev, "PBL returned unexpected status %d\n", ret);
+		ret = -EINVAL;
+		goto halt_axi_ports;
+	}
+
+	ret = q6v5_rmb_mba_wait(qproc, 0, 5000);
+	if (ret == -ETIMEDOUT) {
+		dev_err(qproc->dev, "MBA boot timed out\n");
+		goto halt_axi_ports;
+	} else if (ret != RMB_MBA_XPU_UNLOCKED && ret != RMB_MBA_XPU_UNLOCKED_SCRIBBLED) {
+		dev_err(qproc->dev, "MBA returned unexpected status %d\n", ret);
+		ret = -EINVAL;
+		goto halt_axi_ports;
+	}
+
+	dev_info(qproc->dev, "MBA booted, loading mpss\n");
+
+	ret = q6v5_mpss_load(qproc);
+	if (ret)
+		goto halt_axi_ports;
+
+	/* The handover interrupt completes start_done when boot finishes */
+	ret = wait_for_completion_timeout(&qproc->start_done,
+					  msecs_to_jiffies(5000));
+	if (ret == 0) {
+		dev_err(qproc->dev, "start timed out\n");
+		ret = -ETIMEDOUT;
+		goto halt_axi_ports;
+	}
+
+	/* All done, release the handover resources */
+
+	return 0;
+
+halt_axi_ports:
+	q6v5proc_halt_axi_port(qproc, qproc->halt_base + MSS_Q6_HALT_BASE);
+	q6v5proc_halt_axi_port(qproc, qproc->halt_base + MSS_MODEM_HALT_BASE);
+	q6v5proc_halt_axi_port(qproc, qproc->halt_base + MSS_NC_HALT_BASE);
+	/* rom_clk was the last clock enabled; undo it before falling through */
+	clk_disable_unprepare(qproc->rom_clk);
+disable_axi_clk:
+	clk_disable_unprepare(qproc->axi_clk);
+disable_ahb_clk:
+	clk_disable_unprepare(qproc->ahb_clk);
+assert_reset:
+	reset_control_assert(qproc->mss_restart);
+disable_vdd:
+	regulator_disable(qproc->vdd);
+
+	return ret;
+}
+
+/*
+ * Halt the bus ports and power the modem subsystem back down.
+ *
+ * Also disables rom_clk, which q6v5_start() enabled but the previous
+ * revision never released on the stop path.
+ */
+static int q6v5_stop(struct rproc *rproc)
+{
+	struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
+
+	q6v5proc_halt_axi_port(qproc, qproc->halt_base + MSS_Q6_HALT_BASE);
+	q6v5proc_halt_axi_port(qproc, qproc->halt_base + MSS_MODEM_HALT_BASE);
+	q6v5proc_halt_axi_port(qproc, qproc->halt_base + MSS_NC_HALT_BASE);
+
+	reset_control_assert(qproc->mss_restart);
+	clk_disable_unprepare(qproc->rom_clk);
+	clk_disable_unprepare(qproc->axi_clk);
+	clk_disable_unprepare(qproc->ahb_clk);
+	regulator_disable(qproc->vdd);
+
+	return 0;
+}
+
+/* Remoteproc lifecycle hooks for the modem subsystem */
+static const struct rproc_ops q6v5_ops = {
+	.start = q6v5_start,
+	.stop = q6v5_stop,
+};
+
+/*
+ * Watchdog-bite handler: log the crash reason the modem published in
+ * SMEM (if any) and report the crash to the remoteproc core.
+ */
+static irqreturn_t q6v5_wdog_interrupt(int irq, void *dev)
+{
+	struct q6v5 *qproc = dev;
+	size_t len;
+	char *msg;
+
+	msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len);
+	if (!IS_ERR(msg) && len > 0 && msg[0])
+		dev_err(qproc->dev, "watchdog received: %s\n", msg);
+	else
+		dev_err(qproc->dev, "watchdog without message\n");
+
+	rproc_report_crash(qproc->rproc, RPROC_WATCHDOG);
+
+	/* Consume the message so a stale reason isn't reported twice */
+	if (!IS_ERR(msg))
+		msg[0] = '\0';
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Fatal-error handler: log the crash reason the modem published in SMEM
+ * (if any) and report the crash to the remoteproc core.
+ */
+static irqreturn_t q6v5_fatal_interrupt(int irq, void *dev)
+{
+	struct q6v5 *qproc = dev;
+	char *msg;
+	size_t len;
+
+	msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, MPSS_CRASH_REASON_SMEM, &len);
+	if (IS_ERR(msg) || len == 0 || !msg[0])
+		dev_err(qproc->dev, "fatal error without message\n");
+	else
+		dev_err(qproc->dev, "fatal error received: %s\n", msg);
+
+	rproc_report_crash(qproc->rproc, RPROC_FATAL_ERROR);
+
+	/* Consume the message so a stale reason isn't reported twice */
+	if (!IS_ERR(msg))
+		msg[0] = '\0';
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Handover interrupt from the modem: boot has progressed far enough for
+ * q6v5_start() to stop waiting (completes start_done).
+ */
+static irqreturn_t q6v5_handover_interrupt(int irq, void *dev)
+{
+	struct q6v5 *qproc = dev;
+
+	complete(&qproc->start_done);
+	return IRQ_HANDLED;
+}
+
+/* Stop acknowledge from the modem; nothing to do beyond acking the IRQ. */
+static irqreturn_t q6v5_stop_ack_interrupt(int irq, void *dev)
+{
+	return IRQ_HANDLED;
+}
+
+/*
+ * Map the three register regions named in the DT: the QDSP6 control
+ * registers, the bus-halt registers and the RMB mailbox.
+ */
+static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
+{
+	struct resource *res;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6_base");
+	qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(qproc->reg_base)) {
+		dev_err(qproc->dev, "failed to get qdsp6_base\n");
+		return PTR_ERR(qproc->reg_base);
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "halt_base");
+	qproc->halt_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(qproc->halt_base)) {
+		dev_err(qproc->dev, "failed to get halt_base\n");
+		return PTR_ERR(qproc->halt_base);
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb_base");
+	qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(qproc->rmb_base)) {
+		dev_err(qproc->dev, "failed to get rmb_base\n");
+		return PTR_ERR(qproc->rmb_base);
+	}
+
+	return 0;
+}
+
+/* Look up the iface (AHB), bus (AXI) and mem (ROM) clocks from the DT. */
+static int q6v5_init_clocks(struct q6v5 *qproc)
+{
+	struct device *dev = qproc->dev;
+
+	qproc->ahb_clk = devm_clk_get(dev, "iface");
+	if (IS_ERR(qproc->ahb_clk)) {
+		dev_err(dev, "failed to get iface clock\n");
+		return PTR_ERR(qproc->ahb_clk);
+	}
+
+	qproc->axi_clk = devm_clk_get(dev, "bus");
+	if (IS_ERR(qproc->axi_clk)) {
+		dev_err(dev, "failed to get bus clock\n");
+		return PTR_ERR(qproc->axi_clk);
+	}
+
+	qproc->rom_clk = devm_clk_get(dev, "mem");
+	if (IS_ERR(qproc->rom_clk)) {
+		dev_err(dev, "failed to get mem clock\n");
+		return PTR_ERR(qproc->rom_clk);
+	}
+
+	return 0;
+}
+
+/*
+ * Acquire and configure the mss vdd supply.  The previous revision
+ * ignored the regulator_set_voltage() result, so a supply stuck outside
+ * the allowed window went unnoticed until boot failed.
+ */
+static int q6v5_init_regulators(struct q6v5 *qproc)
+{
+	int ret;
+
+	qproc->vdd = devm_regulator_get(qproc->dev, "vdd");
+	if (IS_ERR(qproc->vdd)) {
+		dev_err(qproc->dev, "failed to get vdd supply\n");
+		return PTR_ERR(qproc->vdd);
+	}
+
+	ret = regulator_set_voltage(qproc->vdd, VDD_MSS_UV_MIN, VDD_MSS_UV_MAX);
+	if (ret) {
+		dev_err(qproc->dev, "failed to set vdd voltage\n");
+		return ret;
+	}
+
+	/* Load hint only; failure here is not fatal */
+	regulator_set_load(qproc->vdd, VDD_MSS_UA);
+
+	return 0;
+}
+
+/* Acquire the (sole, unnamed) mss restart reset line. */
+static int q6v5_init_reset(struct q6v5 *qproc)
+{
+	struct reset_control *rst;
+
+	rst = devm_reset_control_get(qproc->dev, NULL);
+	if (IS_ERR(rst)) {
+		dev_err(qproc->dev, "failed to acquire mss restart\n");
+		return PTR_ERR(rst);
+	}
+
+	qproc->mss_restart = rst;
+
+	return 0;
+}
+
+/*
+ * Look up the named IRQ and install thread_fn as its threaded handler.
+ * Returns 0 on success (the handler return of devm_request_threaded_irq),
+ * or a negative errno; callers only test for < 0.
+ */
+static int q6v5_request_irq(struct q6v5 *qproc,
+			     struct platform_device *pdev,
+			     const char *name,
+			     irq_handler_t thread_fn)
+{
+	int ret;
+
+	ret = platform_get_irq_byname(pdev, name);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "no %s IRQ defined\n", name);
+		return ret;
+	}
+
+	/* ret holds the IRQ number here; reused for the request result below */
+	ret = devm_request_threaded_irq(&pdev->dev, ret,
+					NULL, thread_fn,
+					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+					"q6v5", qproc);
+	if (ret)
+		dev_err(&pdev->dev, "request %s IRQ failed\n", name);
+	return ret;
+}
+
+/*
+ * Resolve and map the two reserved-memory regions (index 0: mba,
+ * index 1: mpss).
+ *
+ * Fixes vs. previous revision: the phandle result was neither checked
+ * for NULL (a missing property would have been dereferenced) nor
+ * released with of_node_put(), leaking a node reference each probe.
+ */
+static int q6v5_alloc_memory_region(struct q6v5 *qproc)
+{
+	struct device_node *node;
+	struct resource r;
+	int ret;
+
+	node = of_parse_phandle(qproc->dev->of_node, "memory-region", 0);
+	if (!node) {
+		dev_err(qproc->dev, "unable to resolve mba region\n");
+		return -EINVAL;
+	}
+
+	ret = of_address_to_resource(node, 0, &r);
+	of_node_put(node);
+	if (ret) {
+		dev_err(qproc->dev, "unable to resolve mba region\n");
+		return ret;
+	}
+
+	qproc->mba_phys = r.start;
+	qproc->mba_size = resource_size(&r);
+	qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
+	if (!qproc->mba_region) {
+		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
+			&r.start, qproc->mba_size);
+		return -EBUSY;
+	}
+
+	node = of_parse_phandle(qproc->dev->of_node, "memory-region", 1);
+	if (!node) {
+		dev_err(qproc->dev, "unable to resolve mpss region\n");
+		return -EINVAL;
+	}
+
+	ret = of_address_to_resource(node, 0, &r);
+	of_node_put(node);
+	if (ret) {
+		dev_err(qproc->dev, "unable to resolve mpss region\n");
+		return ret;
+	}
+
+	qproc->mpss_phys = r.start;
+	qproc->mpss_size = resource_size(&r);
+	qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size);
+	if (!qproc->mpss_region) {
+		dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
+			&r.start, qproc->mpss_size);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+/*
+ * Bind the platform device: allocate the rproc, map resources, acquire
+ * clocks/regulator/reset, wire up the modem IRQs and register with the
+ * remoteproc core.
+ *
+ * Fix vs. previous revision: when qcom_smem_state_get() failed, ret was
+ * left at 0, so probe reported success after freeing the rproc.
+ */
+static int q6v5_probe(struct platform_device *pdev)
+{
+	struct q6v5 *qproc;
+	struct rproc *rproc;
+	int ret;
+
+	rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
+			    MBA_FIRMWARE_NAME, sizeof(*qproc));
+	if (!rproc) {
+		dev_err(&pdev->dev, "failed to allocate rproc\n");
+		return -ENOMEM;
+	}
+
+	rproc->fw_ops = &q6v5_fw_ops;
+
+	qproc = (struct q6v5 *)rproc->priv;
+	qproc->dev = &pdev->dev;
+	qproc->rproc = rproc;
+	platform_set_drvdata(pdev, qproc);
+
+	init_completion(&qproc->start_done);
+
+	ret = q6v5_init_mem(qproc, pdev);
+	if (ret)
+		goto free_rproc;
+
+	ret = q6v5_alloc_memory_region(qproc);
+	if (ret)
+		goto free_rproc;
+
+	ret = q6v5_init_clocks(qproc);
+	if (ret)
+		goto free_rproc;
+
+	ret = q6v5_init_regulators(qproc);
+	if (ret)
+		goto free_rproc;
+
+	ret = q6v5_init_reset(qproc);
+	if (ret)
+		goto free_rproc;
+
+	ret = q6v5_request_irq(qproc, pdev, "wdog", q6v5_wdog_interrupt);
+	if (ret < 0)
+		goto free_rproc;
+
+	ret = q6v5_request_irq(qproc, pdev, "fatal", q6v5_fatal_interrupt);
+	if (ret < 0)
+		goto free_rproc;
+
+	ret = q6v5_request_irq(qproc, pdev, "handover", q6v5_handover_interrupt);
+	if (ret < 0)
+		goto free_rproc;
+
+	ret = q6v5_request_irq(qproc, pdev, "stop-ack", q6v5_stop_ack_interrupt);
+	if (ret < 0)
+		goto free_rproc;
+
+	qproc->state = qcom_smem_state_get(&pdev->dev, "stop", &qproc->stop_bit);
+	if (IS_ERR(qproc->state)) {
+		ret = PTR_ERR(qproc->state);
+		goto free_rproc;
+	}
+
+	ret = rproc_add(rproc);
+	if (ret)
+		goto free_rproc;
+
+	return 0;
+
+free_rproc:
+	rproc_put(rproc);
+
+	return ret;
+}
+
+/* Unbind: deregister from the remoteproc core and drop our reference. */
+static int q6v5_remove(struct platform_device *pdev)
+{
+	struct q6v5 *qproc = platform_get_drvdata(pdev);
+	struct rproc *rproc = qproc->rproc;
+
+	rproc_del(rproc);
+	rproc_put(rproc);
+
+	return 0;
+}
+
+static const struct of_device_id q6v5_of_match[] = {
+	{ .compatible = "qcom,q6v5-pil", },
+	{ },
+};
+/* Needed for module autoloading via the OF modalias */
+MODULE_DEVICE_TABLE(of, q6v5_of_match);
+
+static struct platform_driver q6v5_driver = {
+	.probe = q6v5_probe,
+	.remove = q6v5_remove,
+	.driver = {
+		.name = "qcom-q6v5-pil",
+		.of_match_table = q6v5_of_match,
+	},
+};
+
+module_platform_driver(q6v5_driver);
+
+MODULE_DESCRIPTION("Qualcomm Hexagon v5 Peripheral Image Loader");
+/* Without MODULE_LICENSE the module taints the kernel when loaded */
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/qcom_venus.c b/drivers/remoteproc/qcom_venus.c
new file mode 100644
index 0000000000000..ef847a3769873
--- /dev/null
+++ b/drivers/remoteproc/qcom_venus.c
@@ -0,0 +1,205 @@
+/*
+ * Qualcomm Venus Peripheral Image Loader
+ *
+ * Copyright (C) 2016 Linaro Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/qcom_scm.h>
+#include <linux/remoteproc.h>
+
+#include "qcom_mdt_loader.h"
+#include "remoteproc_internal.h"
+
+#define VENUS_CRASH_REASON_SMEM 425
+#define VENUS_FIRMWARE_NAME "venus.mdt"
+#define VENUS_PAS_ID 9
+
+/* Driver state for one Venus remote processor instance */
+struct qcom_venus {
+	struct device *dev;
+	struct rproc *rproc;
+
+	/* Reserved-memory window the firmware is loaded into */
+	phys_addr_t mem_phys;
+	void *mem_region;	/* WC kernel mapping of mem_phys */
+	size_t mem_size;
+};
+
+/*
+ * rproc_fw_ops.load: validate the mdt metadata with TrustZone, set up
+ * the relocation window if required, then copy the segments into the
+ * reserved memory region.
+ */
+static int venus_load(struct rproc *rproc, const struct firmware *fw)
+{
+	struct qcom_venus *venus = (struct qcom_venus *)rproc->priv;
+	phys_addr_t fw_addr;
+	size_t fw_size;
+	bool relocate;
+	int ret;
+
+	ret = qcom_scm_pas_init_image(VENUS_PAS_ID, fw->data, fw->size);
+	if (ret) {
+		dev_err(&rproc->dev, "invalid firmware metadata\n");
+		return -EINVAL;
+	}
+
+	ret = qcom_mdt_parse(fw, &fw_addr, &fw_size, &relocate);
+	if (ret) {
+		dev_err(&rproc->dev, "failed to parse mdt header\n");
+		return ret;
+	}
+
+	if (relocate) {
+		/* Let TrustZone associate our region with the image */
+		ret = qcom_scm_pas_mem_setup(VENUS_PAS_ID, venus->mem_phys, fw_size);
+		if (ret) {
+			dev_err(&rproc->dev, "unable to setup memory for image\n");
+			return -EINVAL;
+		}
+	}
+
+	return qcom_mdt_load(rproc, fw, rproc->firmware, venus->mem_phys,
+			     venus->mem_region, venus->mem_size);
+}
+
+/* Firmware handling: mdt parsing/loading instead of the ELF default */
+static const struct rproc_fw_ops venus_fw_ops = {
+	.find_rsc_table = qcom_mdt_find_rsc_table,
+	.load = venus_load,
+};
+
+/* Authenticate the loaded image via TrustZone and release Venus from reset. */
+static int venus_start(struct rproc *rproc)
+{
+	struct qcom_venus *venus = (struct qcom_venus *)rproc->priv;
+	int ret = qcom_scm_pas_auth_and_reset(VENUS_PAS_ID);
+
+	if (ret)
+		dev_err(venus->dev,
+			"failed to authenticate image and release reset\n");
+
+	return ret;
+}
+
+/* Ask TrustZone to shut the Venus subsystem down. */
+static int venus_stop(struct rproc *rproc)
+{
+	struct qcom_venus *venus = (struct qcom_venus *)rproc->priv;
+	int ret = qcom_scm_pas_shutdown(VENUS_PAS_ID);
+
+	if (ret)
+		dev_err(venus->dev, "failed to shutdown: %d\n", ret);
+
+	return ret;
+}
+
+/* Remoteproc lifecycle hooks for Venus */
+static const struct rproc_ops venus_ops = {
+	.start = venus_start,
+	.stop = venus_stop,
+};
+
+/*
+ * Resolve and map the reserved-memory region the firmware runs from.
+ *
+ * Fix vs. previous revision: the node reference from of_parse_phandle()
+ * was never released (of_node_put), leaking a refcount per probe.
+ */
+static int venus_alloc_memory_region(struct qcom_venus *venus)
+{
+	struct device_node *node;
+	struct resource r;
+	int ret;
+
+	node = of_parse_phandle(venus->dev->of_node, "memory-region", 0);
+	if (!node) {
+		dev_err(venus->dev, "no memory-region specified\n");
+		return -EINVAL;
+	}
+
+	ret = of_address_to_resource(node, 0, &r);
+	of_node_put(node);
+	if (ret)
+		return ret;
+
+	venus->mem_phys = r.start;
+	venus->mem_size = resource_size(&r);
+	venus->mem_region = devm_ioremap_wc(venus->dev, venus->mem_phys, venus->mem_size);
+	if (!venus->mem_region) {
+		dev_err(venus->dev, "unable to map memory region: %pa+%zx\n",
+			&r.start, venus->mem_size);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+/*
+ * Bind the platform device: verify TrustZone PAS support, allocate the
+ * rproc, map the firmware region and register with the remoteproc core.
+ */
+static int venus_probe(struct platform_device *pdev)
+{
+	struct qcom_venus *venus;
+	struct rproc *rproc;
+	int ret;
+
+	/* SCM comes up late; retry the probe until it is available */
+	if (!qcom_scm_is_available())
+		return -EPROBE_DEFER;
+
+	if (!qcom_scm_pas_supported(VENUS_PAS_ID)) {
+		dev_err(&pdev->dev, "PAS is not available for venus\n");
+		return -ENXIO;
+	}
+
+	rproc = rproc_alloc(&pdev->dev, pdev->name, &venus_ops,
+			    VENUS_FIRMWARE_NAME, sizeof(*venus));
+	if (!rproc) {
+		dev_err(&pdev->dev, "unable to allocate remoteproc\n");
+		return -ENOMEM;
+	}
+
+	rproc->fw_ops = &venus_fw_ops;
+
+	venus = (struct qcom_venus *)rproc->priv;
+	venus->dev = &pdev->dev;
+	venus->rproc = rproc;
+	platform_set_drvdata(pdev, venus);
+
+	ret = venus_alloc_memory_region(venus);
+	if (ret)
+		goto free_rproc;
+
+	ret = rproc_add(rproc);
+	if (ret)
+		goto free_rproc;
+
+	return 0;
+
+free_rproc:
+	rproc_put(rproc);
+
+	return ret;
+}
+
+/* Unbind: deregister from the remoteproc core and drop our reference. */
+static int venus_remove(struct platform_device *pdev)
+{
+	struct qcom_venus *venus = platform_get_drvdata(pdev);
+	struct rproc *rproc = venus->rproc;
+
+	rproc_del(rproc);
+	rproc_put(rproc);
+
+	return 0;
+}
+
+static const struct of_device_id venus_of_match[] = {
+	{ .compatible = "qcom,venus-pil" },
+	{ },
+};
+/* Needed for module autoloading via the OF modalias */
+MODULE_DEVICE_TABLE(of, venus_of_match);
+
+static struct platform_driver venus_driver = {
+	.probe = venus_probe,
+	.remove = venus_remove,
+	.driver = {
+		.name = "qcom-venus-pil",
+		.of_match_table = venus_of_match,
+	},
+};
+
+module_platform_driver(venus_driver);
+
+MODULE_DESCRIPTION("Qualcomm Venus Peripheral Image Loader");
+/* Without MODULE_LICENSE the module taints the kernel when loaded */
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
new file mode 100644
index 0000000000000..5021cf5ff4b0f
--- /dev/null
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -0,0 +1,604 @@
+/*
+ * Qualcomm Peripheral Image Loader
+ *
+ * Copyright (C) 2016 Linaro Ltd
+ * Copyright (C) 2014 Sony Mobile Communications AB
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/qcom_scm.h>
+#include <linux/regulator/consumer.h>
+#include <linux/remoteproc.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+
+#include "qcom_mdt_loader.h"
+#include "remoteproc_internal.h"
+#include "qcom_wcnss.h"
+
+#define WCNSS_CRASH_REASON_SMEM 422
+#define WCNSS_FIRMWARE_NAME "wcnss.mdt"
+#define WCNSS_PAS_ID 6
+
+#define WCNSS_SPARE_NVBIN_DLND BIT(25)
+
+#define WCNSS_PMU_IRIS_XO_CFG BIT(3)
+#define WCNSS_PMU_IRIS_XO_EN BIT(4)
+#define WCNSS_PMU_GC_BUS_MUX_SEL_TOP BIT(5)
+#define WCNSS_PMU_IRIS_XO_CFG_STS BIT(6) /* 1: in progress, 0: done */
+
+#define WCNSS_PMU_IRIS_RESET BIT(7)
+#define WCNSS_PMU_IRIS_RESET_STS BIT(8) /* 1: in progress, 0: done */
+#define WCNSS_PMU_IRIS_XO_READ BIT(9)
+#define WCNSS_PMU_IRIS_XO_READ_STS BIT(10)
+
+#define WCNSS_PMU_XO_MODE_MASK GENMASK(2, 1)
+#define WCNSS_PMU_XO_MODE_19p2 0
+#define WCNSS_PMU_XO_MODE_48 3
+
+static const struct rproc_ops wcnss_ops;
+
+/* Per-SoC-variant configuration, selected through the OF match data */
+struct wcnss_data {
+	size_t pmu_offset;	/* PMU config register offset within "pmu" mmio */
+	size_t spare_offset;	/* spare register offset within "pmu" mmio */
+
+	const struct wcnss_vreg_info *vregs;
+	size_t num_vregs;
+};
+
+/* Driver state for one WCNSS remote processor instance */
+struct qcom_wcnss {
+	struct device *dev;
+	struct rproc *rproc;
+
+	void __iomem *pmu_cfg;	/* PMU configuration register */
+	void __iomem *spare_out;	/* spare/capability register */
+
+	/* Provided by the iris child device via qcom_wcnss_assign_iris() */
+	bool use_48mhz_xo;
+
+	int wdog_irq;
+	int fatal_irq;
+	int ready_irq;		/* optional; 0 when absent */
+	int handover_irq;	/* optional; 0 when absent */
+	int stop_ack_irq;	/* optional; 0 when absent */
+
+	/* SMEM state entry used for the graceful-stop handshake */
+	struct qcom_smem_state *state;
+	unsigned stop_bit;
+
+	/* Protects iris/use_48mhz_xo against concurrent assign/start */
+	struct mutex iris_lock;
+	struct qcom_iris *iris;
+
+	struct regulator_bulk_data *vregs;
+	size_t num_vregs;
+
+	struct completion start_done;
+	struct completion stop_done;
+
+	/* Reserved-memory window the firmware is loaded into */
+	phys_addr_t mem_phys;
+	void *mem_region;	/* WC kernel mapping of mem_phys */
+	size_t mem_size;
+};
+
+/* Riva (MSM8960-class) regulator set and register offsets */
+static const struct wcnss_data riva_data = {
+	.pmu_offset = 0x28,
+	.spare_offset = 0xb4,
+
+	.vregs = (struct wcnss_vreg_info[]) {
+		{ "vddmx", 1050000, 1150000, 0 },
+		{ "vddcx", 1050000, 1150000, 0 },
+		{ "vddpx", 1800000, 1800000, 0 },
+	},
+	.num_vregs = 3,
+};
+
+/* Pronto v1 regulator set and register offsets */
+static const struct wcnss_data pronto_v1_data = {
+	.pmu_offset = 0x1004,
+	.spare_offset = 0x1088,
+
+	.vregs = (struct wcnss_vreg_info[]) {
+		{ "vddmx", 950000, 1150000, 0 },
+		/* vddcx has no fixed window; run it at super turbo */
+		{ "vddcx", .super_turbo = true},
+		{ "vddpx", 1800000, 1800000, 0 },
+	},
+	.num_vregs = 3,
+};
+
+/* Pronto v2 regulator set and register offsets */
+static const struct wcnss_data pronto_v2_data = {
+	.pmu_offset = 0x1004,
+	.spare_offset = 0x1088,
+
+	.vregs = (struct wcnss_vreg_info[]) {
+		{ "vddmx", 1287500, 1287500, 0 },
+		{ "vddcx", .super_turbo = true },
+		{ "vddpx", 1800000, 1800000, 0 },
+	},
+	.num_vregs = 3,
+};
+
+/*
+ * Called by the iris child driver to register (iris != NULL) or
+ * deregister (iris == NULL) itself with the WCNSS core; iris_lock
+ * serializes this against wcnss_start().
+ */
+void qcom_wcnss_assign_iris(struct qcom_wcnss *wcnss,
+			    struct qcom_iris *iris,
+			    bool use_48mhz_xo)
+{
+	mutex_lock(&wcnss->iris_lock);
+
+	wcnss->iris = iris;
+	wcnss->use_48mhz_xo = use_48mhz_xo;
+
+	mutex_unlock(&wcnss->iris_lock);
+}
+
+/*
+ * rproc_fw_ops.load: validate the mdt metadata with TrustZone, set up
+ * the relocation window if required, then copy the segments into the
+ * reserved memory region.
+ */
+static int wcnss_load(struct rproc *rproc, const struct firmware *fw)
+{
+	struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
+	phys_addr_t fw_addr;
+	size_t fw_size;
+	bool relocate;
+	int ret;
+
+	ret = qcom_scm_pas_init_image(WCNSS_PAS_ID, fw->data, fw->size);
+	if (ret) {
+		dev_err(&rproc->dev, "invalid firmware metadata\n");
+		return -EINVAL;
+	}
+
+	ret = qcom_mdt_parse(fw, &fw_addr, &fw_size, &relocate);
+	if (ret) {
+		dev_err(&rproc->dev, "failed to parse mdt header\n");
+		return ret;
+	}
+
+	if (relocate) {
+		/* Let TrustZone associate our region with the image */
+		ret = qcom_scm_pas_mem_setup(WCNSS_PAS_ID, wcnss->mem_phys, fw_size);
+		if (ret) {
+			dev_err(&rproc->dev, "unable to setup memory for image\n");
+			return -EINVAL;
+		}
+	}
+
+	return qcom_mdt_load(rproc, fw, rproc->firmware, wcnss->mem_phys,
+			     wcnss->mem_region, wcnss->mem_size);
+}
+
+/* Firmware handling: mdt parsing/loading instead of the ELF default */
+static const struct rproc_fw_ops wcnss_fw_ops = {
+	.find_rsc_table = qcom_mdt_find_rsc_table,
+	.load = wcnss_load,
+};
+
+/* Set the NV-download capability bit in the spare register. */
+static void wcnss_indicate_nv_download(struct qcom_wcnss *wcnss)
+{
+	/* Read-modify-write, preserving the other spare bits */
+	writel(readl(wcnss->spare_out) | WCNSS_SPARE_NVBIN_DLND,
+	       wcnss->spare_out);
+}
+
+/*
+ * Run the IRIS reset and XO configuration sequence through the PMU
+ * config register.  The register-write ordering here follows the
+ * downstream sequence and must not be reordered.
+ *
+ * NOTE(review): both status-polling loops below are unbounded; a stuck
+ * PMU would hang this CPU.  Consider adding a timeout.
+ */
+static void wcnss_configure_iris(struct qcom_wcnss *wcnss)
+{
+	u32 val;
+
+	/* Clear PMU cfg register */
+	writel(0, wcnss->pmu_cfg);
+
+	val = WCNSS_PMU_GC_BUS_MUX_SEL_TOP | WCNSS_PMU_IRIS_XO_EN;
+	writel(val, wcnss->pmu_cfg);
+
+	/* Clear XO_MODE */
+	val &= ~WCNSS_PMU_XO_MODE_MASK;
+	/* XO_MODE field sits at bits 2:1, hence the shift by 1 */
+	if (wcnss->use_48mhz_xo)
+		val |= WCNSS_PMU_XO_MODE_48 << 1;
+	else
+		val |= WCNSS_PMU_XO_MODE_19p2 << 1;
+	writel(val, wcnss->pmu_cfg);
+
+	/* Reset IRIS */
+	val |= WCNSS_PMU_IRIS_RESET;
+	writel(val, wcnss->pmu_cfg);
+
+	/* Wait for PMU.iris_reg_reset_sts */
+	while (readl(wcnss->pmu_cfg) & WCNSS_PMU_IRIS_RESET_STS)
+		cpu_relax();
+
+	/* Clear IRIS reset */
+	val &= ~WCNSS_PMU_IRIS_RESET;
+	writel(val, wcnss->pmu_cfg);
+
+	/* Start IRIS XO configuration */
+	val |= WCNSS_PMU_IRIS_XO_CFG;
+	writel(val, wcnss->pmu_cfg);
+
+	/* Wait for XO configuration to finish */
+	while (readl(wcnss->pmu_cfg) & WCNSS_PMU_IRIS_XO_CFG_STS)
+		cpu_relax();
+
+	/* Stop IRIS XO configuration */
+	val &= ~WCNSS_PMU_GC_BUS_MUX_SEL_TOP;
+	val &= ~WCNSS_PMU_IRIS_XO_CFG;
+	writel(val, wcnss->pmu_cfg);
+
+	/* Add some delay for XO to settle */
+	msleep(20);
+}
+
+/*
+ * Power up IRIS and the WCNSS supplies, configure the XO, then ask
+ * TrustZone to authenticate and release the core.
+ *
+ * The supplies and IRIS are disabled again even on success: the firmware
+ * takes over its own resources by the time the ready interrupt fires
+ * (see the comment in wcnss_handover_interrupt()).
+ */
+static int wcnss_start(struct rproc *rproc)
+{
+	struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
+	int ret;
+
+	/* iris_lock keeps the iris pointer stable for the whole boot */
+	mutex_lock(&wcnss->iris_lock);
+	if (!wcnss->iris) {
+		dev_err(wcnss->dev, "no iris registered\n");
+		ret = -EINVAL;
+		goto release_iris_lock;
+	}
+
+	ret = regulator_bulk_enable(wcnss->num_vregs, wcnss->vregs);
+	if (ret)
+		goto release_iris_lock;
+
+	ret = qcom_iris_enable(wcnss->iris);
+	if (ret)
+		goto disable_regulators;
+
+	wcnss_indicate_nv_download(wcnss);
+	wcnss_configure_iris(wcnss);
+
+	ret = qcom_scm_pas_auth_and_reset(WCNSS_PAS_ID);
+	if (ret) {
+		dev_err(wcnss->dev,
+			"failed to authenticate image and release reset\n");
+		goto disable_iris;
+	}
+
+	ret = wait_for_completion_timeout(&wcnss->start_done,
+					  msecs_to_jiffies(5000));
+	if (wcnss->ready_irq > 0 && ret == 0) {
+		/* We have a ready_irq, but it didn't fire in time. */
+		dev_err(wcnss->dev, "start timed out\n");
+		qcom_scm_pas_shutdown(WCNSS_PAS_ID);
+		ret = -ETIMEDOUT;
+		goto disable_iris;
+	}
+
+	ret = 0;
+
+disable_iris:
+	qcom_iris_disable(wcnss->iris);
+disable_regulators:
+	regulator_bulk_disable(wcnss->num_vregs, wcnss->vregs);
+release_iris_lock:
+	mutex_unlock(&wcnss->iris_lock);
+
+	return ret;
+}
+
+/*
+ * Request a graceful shutdown through the SMEM "stop" state bit (when
+ * the stop-ack handshake is available), then shut the subsystem down
+ * via TrustZone.
+ */
+static int wcnss_stop(struct rproc *rproc)
+{
+	struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
+	int ret;
+
+	if (wcnss->state) {
+		qcom_smem_state_update_bits(wcnss->state,
+					    BIT(wcnss->stop_bit),
+					    BIT(wcnss->stop_bit));
+
+		/* stop_done is completed by the stop-ack interrupt */
+		ret = wait_for_completion_timeout(&wcnss->stop_done,
+						  msecs_to_jiffies(5000));
+		if (ret == 0)
+			dev_err(wcnss->dev, "timed out on wait\n");
+
+		qcom_smem_state_update_bits(wcnss->state,
+					    BIT(wcnss->stop_bit),
+					    0);
+	}
+
+	ret = qcom_scm_pas_shutdown(WCNSS_PAS_ID);
+	if (ret)
+		dev_err(wcnss->dev, "failed to shutdown: %d\n", ret);
+
+	return ret;
+}
+
+/* Remoteproc lifecycle hooks for WCNSS (forward-declared above) */
+static const struct rproc_ops wcnss_ops = {
+	.start = wcnss_start,
+	.stop = wcnss_stop,
+};
+
+/* WCNSS watchdog bite: hand the crash to the remoteproc core. */
+static irqreturn_t wcnss_wdog_interrupt(int irq, void *dev)
+{
+	struct qcom_wcnss *wcnss = dev;
+
+	rproc_report_crash(wcnss->rproc, RPROC_WATCHDOG);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Fatal-error handler: log the crash reason WCNSS published in SMEM
+ * (if any) and report the crash to the remoteproc core.
+ */
+static irqreturn_t wcnss_fatal_interrupt(int irq, void *dev)
+{
+	struct qcom_wcnss *wcnss = dev;
+	size_t len;
+	char *msg;
+
+	msg = qcom_smem_get(QCOM_SMEM_HOST_ANY, WCNSS_CRASH_REASON_SMEM, &len);
+	if (!IS_ERR(msg) && len > 0 && msg[0])
+		dev_err(wcnss->dev, "fatal error received: %s\n", msg);
+
+	rproc_report_crash(wcnss->rproc, RPROC_FATAL_ERROR);
+
+	/* Consume the message so a stale reason isn't reported twice */
+	if (!IS_ERR(msg))
+		msg[0] = '\0';
+
+	return IRQ_HANDLED;
+}
+
+/* Firmware-ready notification: lets wcnss_start() stop waiting. */
+static irqreturn_t wcnss_ready_interrupt(int irq, void *dev)
+{
+	struct qcom_wcnss *wcnss = dev;
+
+	complete(&wcnss->start_done);
+
+	return IRQ_HANDLED;
+}
+
+/* Handover notification; intentionally a no-op for now — see below. */
+static irqreturn_t wcnss_handover_interrupt(int irq, void *dev)
+{
+	/*
+	 * XXX: At this point we're supposed to release the resources that we
+	 * have been holding on behalf of the WCNSS. Unfortunately this
+	 * interrupt comes way before the other side seems to be done.
+	 *
+	 * So we're currently relying on the ready interrupt firing later then
+	 * this and we just disable the resources at the end of wcnss_start().
+	 */
+
+	return IRQ_HANDLED;
+}
+
+/* Graceful-stop acknowledge: lets wcnss_stop() stop waiting. */
+static irqreturn_t wcnss_stop_ack_interrupt(int irq, void *dev)
+{
+	struct qcom_wcnss *wcnss = dev;
+
+	complete(&wcnss->stop_done);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Build a regulator_bulk array from the per-variant supply table,
+ * acquire the supplies and apply their voltage window and load hints.
+ */
+static int wcnss_init_regulators(struct qcom_wcnss *wcnss,
+				 const struct wcnss_vreg_info *info,
+				 int num_vregs)
+{
+	struct regulator_bulk_data *bulk;
+	int ret;
+	int i;
+
+	bulk = devm_kcalloc(wcnss->dev,
+			    num_vregs, sizeof(struct regulator_bulk_data),
+			    GFP_KERNEL);
+	if (!bulk)
+		return -ENOMEM;
+
+	for (i = 0; i < num_vregs; i++)
+		bulk[i].supply = info[i].name;
+
+	ret = devm_regulator_bulk_get(wcnss->dev, num_vregs, bulk);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < num_vregs; i++) {
+		/* max_voltage == 0 means "leave the supply as configured" */
+		if (info[i].max_voltage)
+			regulator_set_voltage(bulk[i].consumer,
+					      info[i].min_voltage,
+					      info[i].max_voltage);
+
+		if (info[i].load_uA)
+			regulator_set_load(bulk[i].consumer, info[i].load_uA);
+	}
+
+	wcnss->vregs = bulk;
+	wcnss->num_vregs = num_vregs;
+
+	return 0;
+}
+
+/*
+ * Look up and request the named IRQ.
+ *
+ * Returns the Linux IRQ number on success, 0 when an optional IRQ is
+ * absent, or a negative errno.
+ *
+ * Fix vs. previous revision: the request result (0 on success) was
+ * returned instead of the IRQ number, so every wcnss->*_irq field was
+ * stored as 0 and checks like "if (wcnss->stop_ack_irq)" and
+ * "ready_irq > 0" could never be true — the stop handshake and the
+ * start-timeout path were effectively dead.
+ */
+static int wcnss_request_irq(struct qcom_wcnss *wcnss,
+			     struct platform_device *pdev,
+			     const char *name,
+			     bool optional,
+			     irq_handler_t thread_fn)
+{
+	int irq;
+	int ret;
+
+	irq = platform_get_irq_byname(pdev, name);
+	if (irq < 0 && optional) {
+		dev_dbg(&pdev->dev, "no %s IRQ defined, ignoring\n", name);
+		return 0;
+	} else if (irq < 0) {
+		dev_err(&pdev->dev, "no %s IRQ defined\n", name);
+		return irq;
+	}
+
+	ret = devm_request_threaded_irq(&pdev->dev, irq,
+					NULL, thread_fn,
+					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+					"wcnss", wcnss);
+	if (ret) {
+		dev_err(&pdev->dev, "request %s IRQ failed\n", name);
+		return ret;
+	}
+
+	return irq;
+}
+
+/*
+ * Resolve and map the reserved-memory region the firmware runs from.
+ *
+ * Fix vs. previous revision: the node reference from of_parse_phandle()
+ * was never released (of_node_put), leaking a refcount per probe.
+ */
+static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
+{
+	struct device_node *node;
+	struct resource r;
+	int ret;
+
+	node = of_parse_phandle(wcnss->dev->of_node, "memory-region", 0);
+	if (!node) {
+		dev_err(wcnss->dev, "no memory-region specified\n");
+		return -EINVAL;
+	}
+
+	ret = of_address_to_resource(node, 0, &r);
+	of_node_put(node);
+	if (ret)
+		return ret;
+
+	wcnss->mem_phys = r.start;
+	wcnss->mem_size = resource_size(&r);
+	wcnss->mem_region = devm_ioremap_wc(wcnss->dev, wcnss->mem_phys, wcnss->mem_size);
+	if (!wcnss->mem_region) {
+		dev_err(wcnss->dev, "unable to map memory region: %pa+%zx\n",
+			&r.start, wcnss->mem_size);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+/*
+ * Bind the platform device: verify TrustZone PAS support, map the PMU
+ * block, set up supplies and IRQs, register with remoteproc and then
+ * populate the iris child device.
+ *
+ * Fix vs. previous revision: devm_ioremap_resource() returns an ERR_PTR
+ * and never NULL, so the old "if (!mmio)" check could not catch a
+ * mapping failure (and carried a stray ';' after its closing brace).
+ */
+static int wcnss_probe(struct platform_device *pdev)
+{
+	const struct wcnss_data *data;
+	struct qcom_wcnss *wcnss;
+	struct resource *res;
+	struct rproc *rproc;
+	void __iomem *mmio;
+	int ret;
+
+	data = of_device_get_match_data(&pdev->dev);
+
+	/* SCM comes up late; retry the probe until it is available */
+	if (!qcom_scm_is_available())
+		return -EPROBE_DEFER;
+
+	if (!qcom_scm_pas_supported(WCNSS_PAS_ID)) {
+		dev_err(&pdev->dev, "PAS is not available for WCNSS\n");
+		return -ENXIO;
+	}
+
+	rproc = rproc_alloc(&pdev->dev, pdev->name, &wcnss_ops,
+			    WCNSS_FIRMWARE_NAME, sizeof(*wcnss));
+	if (!rproc) {
+		dev_err(&pdev->dev, "unable to allocate remoteproc\n");
+		return -ENOMEM;
+	}
+
+	rproc->fw_ops = &wcnss_fw_ops;
+
+	wcnss = (struct qcom_wcnss *)rproc->priv;
+	wcnss->dev = &pdev->dev;
+	wcnss->rproc = rproc;
+	platform_set_drvdata(pdev, wcnss);
+
+	init_completion(&wcnss->start_done);
+	init_completion(&wcnss->stop_done);
+
+	mutex_init(&wcnss->iris_lock);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pmu");
+	mmio = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(mmio)) {
+		ret = PTR_ERR(mmio);
+		goto free_rproc;
+	}
+
+	ret = wcnss_alloc_memory_region(wcnss);
+	if (ret)
+		goto free_rproc;
+
+	wcnss->pmu_cfg = mmio + data->pmu_offset;
+	wcnss->spare_out = mmio + data->spare_offset;
+
+	ret = wcnss_init_regulators(wcnss, data->vregs, data->num_vregs);
+	if (ret)
+		goto free_rproc;
+
+	ret = wcnss_request_irq(wcnss, pdev, "wdog", false, wcnss_wdog_interrupt);
+	if (ret < 0)
+		goto free_rproc;
+	wcnss->wdog_irq = ret;
+
+	ret = wcnss_request_irq(wcnss, pdev, "fatal", false, wcnss_fatal_interrupt);
+	if (ret < 0)
+		goto free_rproc;
+	wcnss->fatal_irq = ret;
+
+	ret = wcnss_request_irq(wcnss, pdev, "ready", true, wcnss_ready_interrupt);
+	if (ret < 0)
+		goto free_rproc;
+	wcnss->ready_irq = ret;
+
+	ret = wcnss_request_irq(wcnss, pdev, "handover", true, wcnss_handover_interrupt);
+	if (ret < 0)
+		goto free_rproc;
+	wcnss->handover_irq = ret;
+
+	ret = wcnss_request_irq(wcnss, pdev, "stop-ack", true, wcnss_stop_ack_interrupt);
+	if (ret < 0)
+		goto free_rproc;
+	wcnss->stop_ack_irq = ret;
+
+	/* The stop handshake is only usable when the ack IRQ exists */
+	if (wcnss->stop_ack_irq) {
+		wcnss->state = qcom_smem_state_get(&pdev->dev, "stop",
+						   &wcnss->stop_bit);
+		if (IS_ERR(wcnss->state)) {
+			ret = PTR_ERR(wcnss->state);
+			goto free_rproc;
+		}
+	}
+
+	ret = rproc_add(rproc);
+	if (ret)
+		goto free_rproc;
+
+	/* Probe the iris child described in the DT */
+	return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+
+free_rproc:
+	rproc_put(rproc);
+
+	return ret;
+}
+
+/* Unbind: remove the iris child, release the SMEM state and the rproc. */
+static int wcnss_remove(struct platform_device *pdev)
+{
+	struct qcom_wcnss *wcnss = platform_get_drvdata(pdev);
+
+	of_platform_depopulate(&pdev->dev);
+
+	qcom_smem_state_put(wcnss->state);
+	rproc_del(wcnss->rproc);
+	rproc_put(wcnss->rproc);
+
+	return 0;
+}
+
+static const struct of_device_id wcnss_of_match[] = {
+	/* use the .data designator explicitly rather than positional init */
+	{ .compatible = "qcom,riva-pil", .data = &riva_data },
+	{ .compatible = "qcom,pronto-v1-pil", .data = &pronto_v1_data },
+	{ .compatible = "qcom,pronto-v2-pil", .data = &pronto_v2_data },
+	{ },
+};
+/* Needed for module autoloading via the OF modalias */
+MODULE_DEVICE_TABLE(of, wcnss_of_match);
+
+static struct platform_driver wcnss_driver = {
+	.probe = wcnss_probe,
+	.remove = wcnss_remove,
+	.driver = {
+		.name = "qcom-wcnss-pil",
+		.of_match_table = wcnss_of_match,
+	},
+};
+
+module_platform_driver(wcnss_driver);
+
+MODULE_DESCRIPTION("Qualcomm Peripheral Image Loader for Wireless Subsystem");
+/* Without MODULE_LICENSE the module taints the kernel when loaded */
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/qcom_wcnss.h b/drivers/remoteproc/qcom_wcnss.h
new file mode 100644
index 0000000000000..9dc4a9fe41e19
--- /dev/null
+++ b/drivers/remoteproc/qcom_wcnss.h
@@ -0,0 +1,22 @@
+#ifndef __QCOM_WNCSS_H__
+#define __QCOM_WNCSS_H__
+
+struct qcom_iris;
+struct qcom_wcnss;
+
+struct wcnss_vreg_info {
+ const char * const name;
+ int min_voltage;
+ int max_voltage;
+
+ int load_uA;
+
+ bool super_turbo;
+};
+
+int qcom_iris_enable(struct qcom_iris *iris);
+void qcom_iris_disable(struct qcom_iris *iris);
+
+void qcom_wcnss_assign_iris(struct qcom_wcnss *wcnss, struct qcom_iris *iris, bool use_48mhz_xo);
+
+#endif
diff --git a/drivers/remoteproc/qcom_wcnss_iris.c b/drivers/remoteproc/qcom_wcnss_iris.c
new file mode 100644
index 0000000000000..62a37583c0f84
--- /dev/null
+++ b/drivers/remoteproc/qcom_wcnss_iris.c
@@ -0,0 +1,185 @@
+/*
+ * Qualcomm Peripheral Image Loader
+ *
+ * Copyright (C) 2016 Linaro Ltd
+ * Copyright (C) 2014 Sony Mobile Communications AB
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/qcom_scm.h>
+#include <linux/regulator/consumer.h>
+
+#include "qcom_wcnss.h"
+
+struct qcom_iris {
+ struct device *dev;
+
+ struct clk *xo_clk;
+
+ struct regulator_bulk_data *vregs;
+ size_t num_vregs;
+};
+
+struct iris_data {
+ const struct wcnss_vreg_info *vregs;
+ size_t num_vregs;
+
+ bool use_48mhz_xo;
+};
+
+static const struct iris_data wcn3620_data = {
+ .vregs = (struct wcnss_vreg_info[]) {
+ { "vddxo", 1800000, 1800000, 10000 },
+ { "vddrfa", 1300000, 1300000, 100000 },
+ { "vddpa", 3300000, 3300000, 515000 },
+ { "vdddig", 1800000, 1800000, 10000 },
+ },
+ .num_vregs = 4,
+ .use_48mhz_xo = false,
+};
+
+static const struct iris_data wcn3660_data = {
+ .vregs = (struct wcnss_vreg_info[]) {
+ { "vddxo", 1800000, 1800000, 10000 },
+ { "vddrfa", 1300000, 1300000, 100000 },
+ { "vddpa", 2900000, 3000000, 515000 },
+ { "vdddig", 1200000, 1225000, 10000 },
+ },
+ .num_vregs = 4,
+ .use_48mhz_xo = true,
+};
+
+static const struct iris_data wcn3680_data = {
+ .vregs = (struct wcnss_vreg_info[]) {
+ { "vddxo", 1800000, 1800000, 10000 },
+ { "vddrfa", 1300000, 1300000, 100000 },
+ { "vddpa", 3300000, 3300000, 515000 },
+ { "vdddig", 1800000, 1800000, 10000 },
+ },
+ .num_vregs = 4,
+ .use_48mhz_xo = true,
+};
+
+int qcom_iris_enable(struct qcom_iris *iris)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(iris->num_vregs, iris->vregs);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(iris->xo_clk);
+ if (ret) {
+ dev_err(iris->dev, "failed to enable xo clk\n");
+ goto disable_regulators;
+ }
+
+ return 0;
+
+disable_regulators:
+ regulator_bulk_disable(iris->num_vregs, iris->vregs);
+
+ return ret;
+}
+
+void qcom_iris_disable(struct qcom_iris *iris)
+{
+ clk_disable_unprepare(iris->xo_clk);
+ regulator_bulk_disable(iris->num_vregs, iris->vregs);
+}
+
+static int qcom_iris_probe(struct platform_device *pdev)
+{
+ const struct iris_data *data;
+ struct qcom_wcnss *wcnss;
+ struct qcom_iris *iris;
+ int ret;
+ int i;
+
+ iris = devm_kzalloc(&pdev->dev, sizeof(struct qcom_iris), GFP_KERNEL);
+ if (!iris)
+ return -ENOMEM;
+
+ data = of_device_get_match_data(&pdev->dev);
+ wcnss = dev_get_drvdata(pdev->dev.parent);
+
+ iris->xo_clk = devm_clk_get(&pdev->dev, "xo");
+ if (IS_ERR(iris->xo_clk)) {
+ if (PTR_ERR(iris->xo_clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to acquire xo clk\n");
+ return PTR_ERR(iris->xo_clk);
+ }
+
+ iris->num_vregs = data->num_vregs;
+ iris->vregs = devm_kcalloc(&pdev->dev,
+ iris->num_vregs,
+ sizeof(struct regulator_bulk_data),
+ GFP_KERNEL);
+ if (!iris->vregs)
+ return -ENOMEM;
+
+ for (i = 0; i < iris->num_vregs; i++)
+ iris->vregs[i].supply = data->vregs[i].name;
+
+ ret = devm_regulator_bulk_get(&pdev->dev, iris->num_vregs, iris->vregs);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get regulators\n");
+ return ret;
+ }
+
+ for (i = 0; i < iris->num_vregs; i++) {
+ if (data->vregs[i].max_voltage)
+ regulator_set_voltage(iris->vregs[i].consumer,
+ data->vregs[i].min_voltage,
+ data->vregs[i].max_voltage);
+
+ if (data->vregs[i].load_uA)
+ regulator_set_load(iris->vregs[i].consumer,
+ data->vregs[i].load_uA);
+ }
+
+ qcom_wcnss_assign_iris(wcnss, iris, data->use_48mhz_xo);
+
+ return 0;
+}
+
+static int qcom_iris_remove(struct platform_device *pdev)
+{
+ struct qcom_wcnss *wcnss = dev_get_drvdata(pdev->dev.parent);
+
+ qcom_wcnss_assign_iris(wcnss, NULL, false);
+
+ return 0;
+}
+
+static const struct of_device_id iris_of_match[] = {
+ { .compatible = "qcom,wcn3620", .data = &wcn3620_data },
+ { .compatible = "qcom,wcn3660", .data = &wcn3660_data },
+ { .compatible = "qcom,wcn3680", .data = &wcn3680_data },
+ {}
+};
+
+static struct platform_driver wcnss_driver = {
+ .probe = qcom_iris_probe,
+ .remove = qcom_iris_remove,
+ .driver = {
+ .name = "qcom-iris",
+ .of_match_table = iris_of_match,
+ },
+};
+
+module_platform_driver(wcnss_driver);
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 3d7d58a109d8e..c41f9652931bf 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -57,6 +57,8 @@ static DEFINE_IDA(rproc_dev_index);
static const char * const rproc_crash_names[] = {
[RPROC_MMUFAULT] = "mmufault",
+ [RPROC_WATCHDOG] = "watchdog",
+ [RPROC_FATAL_ERROR] = "fatal error",
};
/* translate rproc_crash_type to string */
@@ -789,6 +791,8 @@ static void rproc_resource_cleanup(struct rproc *rproc)
}
}
+static int __rproc_fw_config_virtio(struct rproc *rproc, const struct firmware *fw);
+
/*
* take a firmware and boot a remote processor with it.
*/
@@ -799,13 +803,16 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
struct resource_table *table, *loaded_table;
int ret, tablesz;
- if (!rproc->table_ptr)
- return -ENOMEM;
-
ret = rproc_fw_sanity_check(rproc, fw);
if (ret)
return ret;
+ if (!rproc->table_ptr) {
+ ret = __rproc_fw_config_virtio(rproc, fw);
+ if (ret)
+ return ret;
+ }
+
dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);
/*
@@ -856,12 +863,8 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
* copy this information to device memory.
*/
loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
- if (!loaded_table) {
- ret = -EINVAL;
- goto clean_up;
- }
-
- memcpy(loaded_table, rproc->cached_table, tablesz);
+ if (loaded_table)
+ memcpy(loaded_table, rproc->cached_table, tablesz);
/* power up the remote processor */
ret = rproc->ops->start(rproc);
@@ -897,19 +900,15 @@ clean_up:
* to unregister the device. one other option is just to use kref here,
* that might be cleaner).
*/
-static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
+static int __rproc_fw_config_virtio(struct rproc *rproc, const struct firmware *fw)
{
- struct rproc *rproc = context;
struct resource_table *table;
int ret, tablesz;
- if (rproc_fw_sanity_check(rproc, fw) < 0)
- goto out;
-
/* look for the resource table */
table = rproc_find_rsc_table(rproc, fw, &tablesz);
if (!table)
- goto out;
+ return -EINVAL;
rproc->table_csum = crc32(0, table, tablesz);
@@ -921,7 +920,7 @@ static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
*/
rproc->cached_table = kmemdup(table, tablesz, GFP_KERNEL);
if (!rproc->cached_table)
- goto out;
+ return -ENOMEM;
rproc->table_ptr = rproc->cached_table;
@@ -930,12 +929,21 @@ static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
ret = rproc_handle_resources(rproc, tablesz,
rproc_count_vrings_handler);
if (ret)
- goto out;
+ return ret;
/* look for virtio devices and register them */
ret = rproc_handle_resources(rproc, tablesz, rproc_vdev_handler);
-out:
+ return ret;
+}
+
+static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
+{
+ struct rproc *rproc = context;
+
+ if (rproc_fw_sanity_check(rproc, fw) >= 0)
+ __rproc_fw_config_virtio(rproc, fw);
+
release_firmware(fw);
/* allow rproc_del() contexts, if any, to proceed */
complete_all(&rproc->firmware_loading_complete);
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 3aedf73f11313..fe9d50842355a 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -709,7 +709,8 @@ static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
return 0;
}
-#ifdef CONFIG_MSM_BUS_SCALING
+#if 0
+//#ifdef CONFIG_MSM_BUS_SCALING
static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
const char *speed_mode)
{
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 4bb65669f0529..b78b8652bcac6 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -550,7 +550,8 @@ struct ufs_hba {
/* Returns true if clocks can be gated. Otherwise false */
static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
{
- return hba->caps & UFSHCD_CAP_CLK_GATING;
+ return false;
+ //return hba->caps & UFSHCD_CAP_CLK_GATING;
}
static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
{
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 461b387d03cce..fdf16698de020 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -49,6 +49,15 @@ config QCOM_SMD_RPM
frequencies controlled by the RPM on these devices.
 	  Say M here if you want to include support for the Qualcomm RPM as a
 	  module. This will build a module called "qcom-smd-rpm".
+
+config MSM_GLINK
+	bool "Generic Link (G-Link)"
+	help
+	  G-Link is a generic link transport that replaces SMD. It is used
+	  within a System-on-Chip (SoC) for communication between both internal
+	  processors and external peripherals. The actual physical transport
+	  is handled by transport plug-ins that can be individually enabled and
+	  configured separately.
config QCOM_SMEM_STATE
@@ -76,3 +85,43 @@ config QCOM_WCNSS_CTRL
help
Client driver for the WCNSS_CTRL SMD channel, used to download nv
firmware to a newly booted WCNSS chip.
+
+config MSM_BUS_SCALING
+ bool "Bus scaling driver"
+ default n
+ help
+ This option enables bus scaling on MSM devices. Bus scaling
+ allows devices to request the clocks be set to rates sufficient
+ for the active devices needs without keeping the clocks at max
+ frequency when a slower speed is sufficient.
+
+config MSM_BUSPM_DEV
+ tristate "MSM Bus Performance Monitor Kernel Module"
+ depends on MSM_BUS_SCALING
+ default n
+ help
+ This kernel module is used to mmap() hardware registers for the
+ performance monitors, counters, etc. The module can also be used to
+ allocate physical memory which is used by bus performance hardware to
+ dump performance data
+
+config BUS_TOPOLOGY_ADHOC
+ bool "ad-hoc bus scaling topology"
+ default n
+ help
+ This option enables a driver that can handle adhoc bus topologies.
+ Adhoc bus topology driver allows one to many connections and maintains
+ directionality of connections by explicitly listing device connections
+ thus avoiding illegal routes.
+
+
+config MSM_GLINK_SMEM_NATIVE_XPRT
+ depends on QCOM_SMEM
+ depends on MSM_GLINK
+ bool "Generic Link (G-Link) SMEM Native Transport"
+ help
+ G-Link SMEM Native Transport is a G-Link Transport plug-in. It allows
+ G-Link communication to remote entities through a shared memory
+ physical transport. The nature of shared memory limits this G-Link
+ transport to only connecting with entities internal to the
+ System-on-Chip.
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index fdd664edf0bdf..4fabef2426d36 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1,3 +1,4 @@
+obj-$(CONFIG_ARM64) += cpu_ops.o
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_PM) += spm.o
obj-$(CONFIG_QCOM_SMD) += smd.o
@@ -7,3 +8,8 @@ obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
obj-$(CONFIG_QCOM_SMP2P) += smp2p.o
obj-$(CONFIG_QCOM_SMSM) += smsm.o
obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
+
+obj-$(CONFIG_MSM_BUS_SCALING) += msm_bus/
+obj-$(CONFIG_BUS_TOPOLOGY_ADHOC) += msm_bus/
+obj-$(CONFIG_MSM_GLINK) += glink.o glink_debugfs.o
+obj-$(CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT) += glink_smem_native_xprt.o
diff --git a/drivers/soc/qcom/cpu_ops.c b/drivers/soc/qcom/cpu_ops.c
new file mode 100644
index 0000000000000..d831cb071d3df
--- /dev/null
+++ b/drivers/soc/qcom/cpu_ops.c
@@ -0,0 +1,343 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* MSM ARMv8 CPU Operations
+ * Based on arch/arm64/kernel/smp_spin_table.c
+ */
+
+#include <linux/bitops.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/smp.h>
+#include <linux/qcom_scm.h>
+
+#include <asm/barrier.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu_ops.h>
+#include <asm/cputype.h>
+#include <asm/smp_plat.h>
+
+static DEFINE_RAW_SPINLOCK(boot_lock);
+
+DEFINE_PER_CPU(int, cold_boot_done);
+
+#if 0
+static int cold_boot_flags[] = {
+ 0,
+ QCOM_SCM_FLAG_COLDBOOT_CPU1,
+ QCOM_SCM_FLAG_COLDBOOT_CPU2,
+ QCOM_SCM_FLAG_COLDBOOT_CPU3,
+};
+#endif
+
+/* CPU power domain register offsets */
+#define CPU_PWR_CTL 0x4
+#define CPU_PWR_GATE_CTL 0x14
+#define LDO_BHS_PWR_CTL 0x28
+
+/* L2 power domain register offsets */
+#define L2_PWR_CTL_OVERRIDE 0xc
+#define L2_PWR_CTL 0x14
+#define L2_PWR_STATUS 0x18
+#define L2_CORE_CBCR 0x58
+#define L1_RST_DIS 0x284
+
+#define L2_SPM_STS 0xc
+#define L2_VREG_CTL 0x1c
+
+#define SCM_IO_READ 1
+#define SCM_IO_WRITE 2
+
+/*
+ * struct msm_l2ccc_of_info: represents of data for l2 cache clock controller.
+ * @compat: compat string for l2 cache clock controller
+ * @l2_pon: l2 cache power on routine
+ */
+struct msm_l2ccc_of_info {
+ const char *compat;
+ int (*l2_power_on) (struct device_node *dn, u32 l2_mask, int cpu);
+ u32 l2_power_on_mask;
+};
+
+
+static int power_on_l2_msm8916(struct device_node *l2ccc_node, u32 pon_mask,
+ int cpu)
+{
+ u32 pon_status;
+ void __iomem *l2_base;
+
+ l2_base = of_iomap(l2ccc_node, 0);
+ if (!l2_base)
+ return -ENOMEM;
+
+ /* Skip power-on sequence if l2 cache is already powered up*/
+ pon_status = (__raw_readl(l2_base + L2_PWR_STATUS) & pon_mask)
+ == pon_mask;
+ if (pon_status) {
+ iounmap(l2_base);
+ return 0;
+ }
+
+ /* Close L2/SCU Logic GDHS and power up the cache */
+ writel_relaxed(0x10D700, l2_base + L2_PWR_CTL);
+
+ /* Assert PRESETDBGn */
+ writel_relaxed(0x400000, l2_base + L2_PWR_CTL_OVERRIDE);
+ mb();
+ udelay(2);
+
+ /* De-assert L2/SCU memory Clamp */
+ writel_relaxed(0x101700, l2_base + L2_PWR_CTL);
+
+ /* Wakeup L2/SCU RAMs by deasserting sleep signals */
+ writel_relaxed(0x101703, l2_base + L2_PWR_CTL);
+ mb();
+ udelay(2);
+
+ /* Enable clocks via SW_CLK_EN */
+ writel_relaxed(0x01, l2_base + L2_CORE_CBCR);
+
+ /* De-assert L2/SCU logic clamp */
+ writel_relaxed(0x101603, l2_base + L2_PWR_CTL);
+ mb();
+ udelay(2);
+
+ /* De-assert PRESETDBGn */
+ writel_relaxed(0x0, l2_base + L2_PWR_CTL_OVERRIDE);
+
+ /* De-assert L2/SCU Logic reset */
+ writel_relaxed(0x100203, l2_base + L2_PWR_CTL);
+ mb();
+ udelay(54);
+
+ /* Turn on the PMIC_APC */
+ writel_relaxed(0x10100203, l2_base + L2_PWR_CTL);
+
+ /* Set H/W clock control for the cpu CBC block */
+ writel_relaxed(0x03, l2_base + L2_CORE_CBCR);
+ mb();
+ iounmap(l2_base);
+
+ return 0;
+}
+
+static const struct msm_l2ccc_of_info l2ccc_info[] = {
+ {
+ .compat = "qcom,8916-l2ccc",
+ .l2_power_on = power_on_l2_msm8916,
+ .l2_power_on_mask = BIT(9),
+ },
+};
+
+static int power_on_l2_cache(struct device_node *l2ccc_node, int cpu)
+{
+ int ret, i;
+ const char *compat;
+
+ ret = of_property_read_string(l2ccc_node, "compatible", &compat);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(l2ccc_info); i++) {
+ const struct msm_l2ccc_of_info *ptr = &l2ccc_info[i];
+
+ if (!of_compat_cmp(ptr->compat, compat, strlen(compat)))
+ return ptr->l2_power_on(l2ccc_node,
+ ptr->l2_power_on_mask, cpu);
+ }
+ pr_err("Compat string not found for L2CCC node\n");
+ return -EIO;
+}
+
+static int msm_unclamp_secondary_arm_cpu(unsigned int cpu)
+{
+
+ int ret = 0;
+ struct device_node *cpu_node, *acc_node, *l2_node, *l2ccc_node;
+ void __iomem *reg;
+
+ cpu_node = of_get_cpu_node(cpu, NULL);
+ if (!cpu_node)
+ return -ENODEV;
+
+ acc_node = of_parse_phandle(cpu_node, "qcom,acc", 0);
+ if (!acc_node) {
+ ret = -ENODEV;
+ goto out_acc;
+ }
+
+ l2_node = of_parse_phandle(cpu_node, "next-level-cache", 0);
+ if (!l2_node) {
+ ret = -ENODEV;
+ goto out_l2;
+ }
+
+ l2ccc_node = of_parse_phandle(l2_node, "power-domain", 0);
+ if (!l2ccc_node) {
+ ret = -ENODEV;
+ goto out_l2;
+ }
+
+ /* Ensure L2-cache of the CPU is powered on before
+ * unclamping cpu power rails.
+ */
+ ret = power_on_l2_cache(l2ccc_node, cpu);
+ if (ret) {
+ pr_err("L2 cache power up failed for CPU%d\n", cpu);
+ goto out_l2ccc;
+ }
+
+ reg = of_iomap(acc_node, 0);
+ if (!reg) {
+ ret = -ENOMEM;
+ goto out_acc_reg;
+ }
+
+ /* Assert Reset on cpu-n */
+ writel_relaxed(0x00000033, reg + CPU_PWR_CTL);
+ mb();
+
+ /* Program skew to 16 XO clock cycles */
+ writel_relaxed(0x10000001, reg + CPU_PWR_GATE_CTL);
+ mb();
+ udelay(2);
+
+ /* De-assert coremem clamp */
+ writel_relaxed(0x00000031, reg + CPU_PWR_CTL);
+ mb();
+
+ /* Close coremem array gdhs */
+ writel_relaxed(0x00000039, reg + CPU_PWR_CTL);
+ mb();
+ udelay(2);
+
+ /* De-assert cpu-n clamp */
+ writel_relaxed(0x00020038, reg + CPU_PWR_CTL);
+ mb();
+ udelay(2);
+
+ /* De-assert cpu-n reset */
+ writel_relaxed(0x00020008, reg + CPU_PWR_CTL);
+ mb();
+
+ /* Assert PWRDUP signal on core-n */
+ writel_relaxed(0x00020088, reg + CPU_PWR_CTL);
+ mb();
+
+ /* Secondary CPU-N is now alive */
+ iounmap(reg);
+out_acc_reg:
+ of_node_put(l2ccc_node);
+out_l2ccc:
+ of_node_put(l2_node);
+out_l2:
+ of_node_put(acc_node);
+out_acc:
+ of_node_put(cpu_node);
+
+ return ret;
+}
+
+static void write_pen_release(u64 val)
+{
+ void *start = (void *)&secondary_holding_pen_release;
+ unsigned long size = sizeof(secondary_holding_pen_release);
+
+ secondary_holding_pen_release = val;
+ smp_wmb();
+ __flush_dcache_area(start, size);
+}
+
+static int secondary_pen_release(unsigned int cpu)
+{
+ unsigned long timeout;
+
+ /*
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+ raw_spin_lock(&boot_lock);
+ write_pen_release(cpu_logical_map(cpu));
+
+ timeout = jiffies + (1 * HZ);
+ while (time_before(jiffies, timeout)) {
+ if (secondary_holding_pen_release == INVALID_HWID)
+ break;
+ udelay(10);
+ }
+ raw_spin_unlock(&boot_lock);
+
+ return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
+}
+
+static int __init msm_cpu_init(struct device_node *dn, unsigned int cpu)
+{
+ /* Mark CPU0 cold boot flag as done */
+ if (!cpu && !per_cpu(cold_boot_done, cpu))
+ per_cpu(cold_boot_done, cpu) = true;
+
+ return 0;
+}
+
+static int __init msm_cpu_prepare(unsigned int cpu)
+{
+ const cpumask_t *mask = cpumask_of(cpu);
+
+ if (qcom_scm_set_cold_boot_addr(secondary_holding_pen, mask)) {
+ pr_warn("CPU%d:Failed to set boot address\n", cpu);
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
+static int msm_cpu_boot(unsigned int cpu)
+{
+ int ret = 0;
+
+ if (per_cpu(cold_boot_done, cpu) == false) {
+ ret = msm_unclamp_secondary_arm_cpu(cpu);
+ if (ret)
+ return ret;
+ per_cpu(cold_boot_done, cpu) = true;
+ }
+ return secondary_pen_release(cpu);
+}
+
+void msm_cpu_postboot(void)
+{
+ /*
+ * Let the primary processor know we're out of the pen.
+ */
+ write_pen_release(INVALID_HWID);
+
+ /*
+ * Synchronise with the boot thread.
+ */
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
+}
+
+static const struct cpu_operations msm_cortex_a_ops = {
+ .name = "qcom,arm-cortex-acc",
+ .cpu_init = msm_cpu_init,
+ .cpu_prepare = msm_cpu_prepare,
+ .cpu_boot = msm_cpu_boot,
+ .cpu_postboot = msm_cpu_postboot,
+};
+CPU_METHOD_OF_DECLARE(msm_cortex_a_ops, &msm_cortex_a_ops);
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
new file mode 100644
index 0000000000000..ed92d8680accf
--- /dev/null
+++ b/drivers/soc/qcom/glink.c
@@ -0,0 +1,5797 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <asm/arch_timer.h>
+#include <linux/err.h>
+#include <linux/ipc_logging.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/rwsem.h>
+#include <linux/pm_qos.h>
+#include <soc/qcom/glink.h>
+#include <soc/qcom/tracer_pkt.h>
+#include "glink_core_if.h"
+#include "glink_private.h"
+#include "glink_xprt_if.h"
+
+/* Number of internal IPC Logging log pages */
+#define NUM_LOG_PAGES 10
+#define GLINK_PM_QOS_HOLDOFF_MS 10
+#define GLINK_QOS_DEF_NUM_TOKENS 10
+#define GLINK_QOS_DEF_NUM_PRIORITY 1
+#define GLINK_QOS_DEF_MTU 2048
+
+/**
+ * struct glink_qos_priority_bin - Packet Scheduler's priority bucket
+ * @max_rate_kBps: Maximum rate supported by the priority bucket.
+ * @power_state: Transport power state for this priority bin.
+ * @tx_ready: List of channels ready for tx in the priority bucket.
+ * @active_ch_cnt: Active channels of this priority.
+ */
+struct glink_qos_priority_bin {
+ unsigned long max_rate_kBps;
+ uint32_t power_state;
+ struct list_head tx_ready;
+ uint32_t active_ch_cnt;
+};
+
+/**
+ * struct glink_core_xprt_ctx - transport representation structure
+ * @xprt_state_lhb0: controls read/write access to transport state
+ * @list_node: used to chain this transport in a global
+ * transport list
+ * @name: name of this transport
+ * @edge: what this transport connects to
+ * @id: the id to use for channel migration
+ * @versions: array of transport versions this implementation
+ * supports
+ * @versions_entries: number of entries in @versions
+ * @local_version_idx: local version index into @versions this
+ * transport is currently running
+ * @remote_version_idx: remote version index into @versions this
+ * transport is currently running
+ * @l_features: Features negotiated by the local side
+ * @capabilities: Capabilities of underlying transport
+ * @ops: transport defined implementation of common
+ * operations
+ * @local_state: value from local_channel_state_e representing
+ * the local state of this transport
+ * @remote_neg_completed: is the version negotiation with the remote end
+ * completed
+ * @xprt_ctx_lock_lhb1: lock to protect @next_lcid and @channels
+ * @next_lcid: logical channel identifier to assign to the next
+ * created channel
+ * @max_cid: maximum number of channel identifiers supported
+ * @max_iid: maximum number of intent identifiers supported
+ * @tx_work: work item to process @tx_ready
+ * @tx_wq: workqueue to run @tx_work
+ * @channels: list of all existing channels on this transport
+ * @mtu: MTU supported by this transport.
+ * @token_count: Number of tokens to be assigned per assignment.
+ * @curr_qos_rate_kBps: Aggregate of currently supported QoS requests.
+ * @threshold_rate_kBps: Maximum Rate allocated for QoS traffic.
+ * @num_priority: Number of priority buckets in the transport.
+ * @tx_ready_lock_lhb2: lock to protect @tx_ready
+ * @active_high_prio: Highest priority of active channels.
+ * @prio_bin: Pointer to priority buckets.
+ * @pm_qos_req: power management QoS request for TX path
+ * @qos_req_active: a vote is active with the PM QoS system
+ * @tx_path_activity: transmit activity has occurred
+ * @pm_qos_work: removes PM QoS vote due to inactivity
+ * @xprt_dbgfs_lock_lhb3: debugfs channel structure lock
+ * @log_ctx: IPC logging context for this transport.
+ */
+struct glink_core_xprt_ctx {
+ struct rwref_lock xprt_state_lhb0;
+ struct list_head list_node;
+ char name[GLINK_NAME_SIZE];
+ char edge[GLINK_NAME_SIZE];
+ uint16_t id;
+ const struct glink_core_version *versions;
+ size_t versions_entries;
+ uint32_t local_version_idx;
+ uint32_t remote_version_idx;
+ uint32_t l_features;
+ uint32_t capabilities;
+ struct glink_transport_if *ops;
+ enum transport_state_e local_state;
+ bool remote_neg_completed;
+
+ spinlock_t xprt_ctx_lock_lhb1;
+ struct list_head channels;
+ uint32_t next_lcid;
+ struct list_head free_lcid_list;
+
+ uint32_t max_cid;
+ uint32_t max_iid;
+ struct work_struct tx_work;
+ struct workqueue_struct *tx_wq;
+
+ size_t mtu;
+ uint32_t token_count;
+ unsigned long curr_qos_rate_kBps;
+ unsigned long threshold_rate_kBps;
+ uint32_t num_priority;
+ spinlock_t tx_ready_lock_lhb2;
+ uint32_t active_high_prio;
+ struct glink_qos_priority_bin *prio_bin;
+
+ struct pm_qos_request pm_qos_req;
+ bool qos_req_active;
+ bool tx_path_activity;
+ struct delayed_work pm_qos_work;
+
+ struct mutex xprt_dbgfs_lock_lhb3;
+ void *log_ctx;
+};
+
+/**
+ * struct channel_ctx - Channel Context
+ * @ch_state_lhc0: controls read/write access to channel state
+ * @port_list_node: channel list node used by transport "channels" list
+ * @tx_ready_list_node: channels that have data ready to transmit
+ * @name: name of the channel
+ *
+ * @user_priv: user opaque data type passed into glink_open()
+ * @notify_rx: RX notification function
+ * @notify_tx_done: TX-done notification function (remote side is done)
+ * @notify_state: Channel state (connected / disconnected) notifications
+ * @notify_rx_intent_req: Request from remote side for an intent
+ * @notify_rxv: RX notification function (for io buffer chain)
+ * @notify_rx_sigs: RX signal change notification
+ * @notify_rx_abort: Channel close RX Intent aborted
+ * @notify_tx_abort: Channel close TX aborted
+ * @notify_rx_tracer_pkt: Receive notification for tracer packet
+ * @notify_remote_rx_intent: Receive notification for remote-queued RX intent
+ *
+ * @transport_ptr: Transport this channel uses
+ * @lcid: Local channel ID
+ * @rcid: Remote channel ID
+ * @local_open_state: Local channel state
+ * @remote_opened: Remote channel state (opened or closed)
+ * @int_req_ack: Remote side intent request ACK state
+ * @int_req_ack_complete: Intent tracking completion - received remote ACK
+ * @int_req_complete: Intent tracking completion - received intent
+ * @rx_intent_req_timeout_jiffies: Timeout for requesting an RX intent, in
+ * jiffies; if set to 0, timeout is infinite
+ *
+ * @local_rx_intent_lst_lock_lhc1: RX intent list lock
+ * @local_rx_intent_list: Active RX Intents queued by client
+ * @local_rx_intent_ntfy_list: Client notified, waiting for rx_done()
+ * @local_rx_intent_free_list: Available intent container structure
+ *
+ * @rmt_rx_intent_lst_lock_lhc2: Remote RX intent list lock
+ * @rmt_rx_intent_list: Remote RX intent list
+ *
+ * @max_used_liid: Maximum Local Intent ID used
+ * @dummy_riid: Dummy remote intent ID
+ *
+ * @tx_lists_lock_lhc3: TX list lock
+ * @tx_active: Ready to transmit
+ *
+ * @tx_pending_rmt_done_lock_lhc4: Remote-done list lock
+ * @tx_pending_remote_done: Transmitted, waiting for remote done
+ * @lsigs: Local signals
+ * @rsigs: Remote signals
+ * @pending_delete: waiting for channel to be deleted
+ * @no_migrate: The local client does not want to
+ * migrate transports
+ * @local_xprt_req: The transport the local side requested
+ * @local_xprt_resp: The response to @local_xprt_req
+ * @remote_xprt_req: The transport the remote side requested
+ * @remote_xprt_resp: The response to @remote_xprt_req
+ * @curr_priority: Channel's current priority.
+ * @initial_priority: Channel's initial priority.
+ * @token_count: Tokens for consumption by packet.
+ * @txd_len: Transmitted data size in the current
+ * token assignment cycle.
+ * @token_start_time: Time at which tokens are assigned.
+ * @req_rate_kBps: Current QoS request by the channel.
+ * @tx_intent_cnt: Intent count to transmit soon in future.
+ * @tx_cnt: Packets to be picked by tx scheduler.
+ */
+struct channel_ctx {
+ struct rwref_lock ch_state_lhc0;
+ struct list_head port_list_node;
+ struct list_head tx_ready_list_node;
+ char name[GLINK_NAME_SIZE];
+
+ /* user info */
+ void *user_priv;
+ int (*notify_rx)(void *handle, const void *data, size_t size);
+ void (*notify_tx_done)(void *handle, const void *priv,
+ const void *pkt_priv, const void *ptr);
+ void (*notify_state)(void *handle, const void *priv, unsigned event);
+ bool (*notify_rx_intent_req)(void *handle, const void *priv,
+ size_t req_size);
+ void (*notify_rxv)(void *handle, const void *priv, const void *pkt_priv,
+ void *iovec, size_t size,
+ void * (*vbuf_provider)(void *iovec, size_t offset,
+ size_t *size),
+ void * (*pbuf_provider)(void *iovec, size_t offset,
+ size_t *size));
+ void (*notify_rx_sigs)(void *handle, const void *priv,
+ uint32_t old_sigs, uint32_t new_sigs);
+ void (*notify_rx_abort)(void *handle, const void *priv,
+ const void *pkt_priv);
+ void (*notify_tx_abort)(void *handle, const void *priv,
+ const void *pkt_priv);
+ void (*notify_rx_tracer_pkt)(void *handle, const void *priv,
+ const void *pkt_priv, const void *ptr, size_t size);
+ void (*notify_remote_rx_intent)(void *handle, const void *priv,
+ size_t size);
+
+ /* internal port state */
+ struct glink_core_xprt_ctx *transport_ptr;
+ uint32_t lcid;
+ uint32_t rcid;
+ enum local_channel_state_e local_open_state;
+ bool remote_opened;
+ bool int_req_ack;
+ struct completion int_req_ack_complete;
+ struct completion int_req_complete;
+ unsigned long rx_intent_req_timeout_jiffies;
+
+ spinlock_t local_rx_intent_lst_lock_lhc1;
+ struct list_head local_rx_intent_list;
+ struct list_head local_rx_intent_ntfy_list;
+ struct list_head local_rx_intent_free_list;
+
+ spinlock_t rmt_rx_intent_lst_lock_lhc2;
+ struct list_head rmt_rx_intent_list;
+
+ uint32_t max_used_liid;
+ uint32_t dummy_riid;
+
+ spinlock_t tx_lists_lock_lhc3;
+ struct list_head tx_active;
+
+ spinlock_t tx_pending_rmt_done_lock_lhc4;
+ struct list_head tx_pending_remote_done;
+
+ uint32_t lsigs;
+ uint32_t rsigs;
+ bool pending_delete;
+
+ bool no_migrate;
+ uint16_t local_xprt_req;
+ uint16_t local_xprt_resp;
+ uint16_t remote_xprt_req;
+ uint16_t remote_xprt_resp;
+
+ uint32_t curr_priority;
+ uint32_t initial_priority;
+ uint32_t token_count;
+ size_t txd_len;
+ unsigned long token_start_time;
+ unsigned long req_rate_kBps;
+ uint32_t tx_intent_cnt;
+ uint32_t tx_cnt;
+ void *drvdata;
+};
+
+/* Singleton core-interface implementation handed to transports. */
+static struct glink_core_if core_impl;
+/* Opaque logging context handle. NOTE(review): presumably an IPC logging
+ * context -- confirm against the init code that allocates it. */
+static void *log_ctx;
+/* Debug verbosity, runtime-tunable via module parameter "debug_mask". */
+static unsigned glink_debug_mask = QCOM_GLINK_INFO;
+module_param_named(debug_mask, glink_debug_mask,
+ uint, S_IRUGO | S_IWUSR | S_IWGRP);
+
+/* PM QoS enable flag, runtime-tunable via module parameter "pm_qos_enable". */
+static unsigned glink_pm_qos;
+module_param_named(pm_qos_enable, glink_pm_qos,
+ uint, S_IRUGO | S_IWUSR | S_IWGRP);
+
+
+/* All registered transports; guarded by transport_list_lock_lha0. */
+static LIST_HEAD(transport_list);
+
+/*
+ * Used while notifying the clients about link state events. Since the clients
+ * need to store the callback information temporarily and since all the
+ * existing accesses to transport list are in non-IRQ context, defining the
+ * transport_list_lock as a mutex.
+ */
+static DEFINE_MUTEX(transport_list_lock_lha0);
+
+/**
+ * struct link_state_notifier_info - registered link-state callback entry
+ * @list:      Node in link_state_notifier_list.
+ * @transport: Transport name of interest to the client.
+ * @edge:      Edge (remote subsystem) of interest to the client.
+ * @glink_link_state_notif_cb: Client callback for link state changes.
+ * @priv:      Opaque client data returned through the callback.
+ */
+struct link_state_notifier_info {
+ struct list_head list;
+ char transport[GLINK_NAME_SIZE];
+ char edge[GLINK_NAME_SIZE];
+ void (*glink_link_state_notif_cb)(
+ struct glink_link_state_cb_info *cb_info, void *priv);
+ void *priv;
+};
+/* Registered link-state notifiers; guarded by the mutex that follows. */
+static LIST_HEAD(link_state_notifier_list);
+static DEFINE_MUTEX(link_state_notifier_lock_lha1);
+
+static struct glink_core_xprt_ctx *find_open_transport(const char *edge,
+ const char *name,
+ bool initial_xprt,
+ uint16_t *best_id);
+
+static bool xprt_is_fully_opened(struct glink_core_xprt_ctx *xprt);
+
+static struct channel_ctx *xprt_lcid_to_ch_ctx_get(
+ struct glink_core_xprt_ctx *xprt_ctx,
+ uint32_t lcid);
+
+static struct channel_ctx *xprt_rcid_to_ch_ctx_get(
+ struct glink_core_xprt_ctx *xprt_ctx,
+ uint32_t rcid);
+
+static void xprt_schedule_tx(struct glink_core_xprt_ctx *xprt_ptr,
+ struct channel_ctx *ch_ptr,
+ struct glink_core_tx_pkt *tx_info);
+
+static int xprt_single_threaded_tx(struct glink_core_xprt_ctx *xprt_ptr,
+ struct channel_ctx *ch_ptr,
+ struct glink_core_tx_pkt *tx_info);
+
+static void tx_work_func(struct work_struct *work);
+
+static struct channel_ctx *ch_name_to_ch_ctx_create(
+ struct glink_core_xprt_ctx *xprt_ctx,
+ const char *name);
+
+static void ch_push_remote_rx_intent(struct channel_ctx *ctx, size_t size,
+ uint32_t riid);
+
+static int ch_pop_remote_rx_intent(struct channel_ctx *ctx, size_t size,
+ uint32_t *riid_ptr, size_t *intent_size);
+
+static struct glink_core_rx_intent *ch_push_local_rx_intent(
+ struct channel_ctx *ctx, const void *pkt_priv, size_t size);
+
+static void ch_remove_local_rx_intent(struct channel_ctx *ctx, uint32_t liid);
+
+static struct glink_core_rx_intent *ch_get_local_rx_intent(
+ struct channel_ctx *ctx, uint32_t liid);
+
+static void ch_set_local_rx_intent_notified(struct channel_ctx *ctx,
+ struct glink_core_rx_intent *intent_ptr);
+
+static struct glink_core_rx_intent *ch_get_local_rx_intent_notified(
+ struct channel_ctx *ctx, const void *ptr);
+
+static void ch_remove_local_rx_intent_notified(struct channel_ctx *ctx,
+ struct glink_core_rx_intent *liid_ptr, bool reuse);
+
+static struct glink_core_rx_intent *ch_get_free_local_rx_intent(
+ struct channel_ctx *ctx);
+
+static void ch_purge_intent_lists(struct channel_ctx *ctx);
+
+static void ch_add_rcid(struct glink_core_xprt_ctx *xprt_ctx,
+ struct channel_ctx *ctx,
+ uint32_t rcid);
+
+static bool ch_is_fully_opened(struct channel_ctx *ctx);
+static bool ch_is_fully_closed(struct channel_ctx *ctx);
+
+struct glink_core_tx_pkt *ch_get_tx_pending_remote_done(struct channel_ctx *ctx,
+ uint32_t riid);
+
+static void ch_remove_tx_pending_remote_done(struct channel_ctx *ctx,
+ struct glink_core_tx_pkt *tx_pkt);
+
+static void glink_core_rx_cmd_rx_intent_req_ack(struct glink_transport_if
+ *if_ptr, uint32_t rcid, bool granted);
+
+static bool glink_core_remote_close_common(struct channel_ctx *ctx);
+
+static void check_link_notifier_and_notify(struct glink_core_xprt_ctx *xprt_ptr,
+ enum glink_link_state link_state);
+
+static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr);
+static void glink_pm_qos_vote(struct glink_core_xprt_ctx *xprt_ptr);
+static void glink_pm_qos_unvote(struct glink_core_xprt_ctx *xprt_ptr);
+static void glink_pm_qos_cancel_worker(struct work_struct *work);
+static bool ch_update_local_state(struct channel_ctx *ctx,
+ enum local_channel_state_e lstate);
+static bool ch_update_rmt_state(struct channel_ctx *ctx, bool rstate);
+static void glink_core_deinit_xprt_qos_cfg(
+ struct glink_core_xprt_ctx *xprt_ptr);
+
+/*
+ * Map a channel priority to the transport power state configured for that
+ * priority bucket.
+ */
+#define glink_prio_to_power_state(xprt_ctx, priority) \
+ ((xprt_ctx)->prio_bin[priority].power_state)
+
+/*
+ * A channel counts as actively transmitting when it has queued packets
+ * (tx_cnt) or a declared intention to transmit soon (tx_intent_cnt).
+ */
+#define GLINK_GET_CH_TX_STATE(ctx) \
+ ((ctx)->tx_intent_cnt || (ctx)->tx_cnt)
+
+/**
+ * glink_ssr() - Clean up locally for SSR by simulating remote close
+ * @subsystem: The name of the subsystem being restarted
+ *
+ * Call into the transport using the ssr(if_ptr) function to allow it to
+ * clean up any necessary structures, then simulate a remote close from
+ * subsystem for all channels on that edge.
+ *
+ * Return: Standard error codes.
+ */
+int glink_ssr(const char *subsystem)
+{
+ int ret = 0;
+ bool transport_found = false;
+ struct glink_core_xprt_ctx *xprt_ctx = NULL;
+ struct channel_ctx *ch_ctx, *temp_ch_ctx;
+ uint32_t i;
+ unsigned long flags;
+
+ mutex_lock(&transport_list_lock_lha0);
+ list_for_each_entry(xprt_ctx, &transport_list, list_node) {
+ if (!strcmp(subsystem, xprt_ctx->edge) &&
+ xprt_is_fully_opened(xprt_ctx)) {
+ GLINK_INFO_XPRT(xprt_ctx, "%s: SSR\n", __func__);
+ /*
+ * Drop every channel from all priority tx_ready
+ * lists under tx_ready_lock so the scheduler cannot
+ * pick them up while the edge restarts.
+ */
+ spin_lock_irqsave(&xprt_ctx->tx_ready_lock_lhb2,
+ flags);
+ for (i = 0; i < xprt_ctx->num_priority; i++)
+ list_for_each_entry_safe(ch_ctx, temp_ch_ctx,
+ &xprt_ctx->prio_bin[i].tx_ready,
+ tx_ready_list_node)
+ list_del_init(
+ &ch_ctx->tx_ready_list_node);
+ spin_unlock_irqrestore(&xprt_ctx->tx_ready_lock_lhb2,
+ flags);
+
+ /* Transport-specific SSR cleanup; called with the
+ * transport list mutex still held.
+ */
+ xprt_ctx->ops->ssr(xprt_ctx->ops);
+ transport_found = true;
+ }
+ }
+ mutex_unlock(&transport_list_lock_lha0);
+
+ /* -ENODEV: no fully-opened transport matched this edge name. */
+ if (!transport_found)
+ ret = -ENODEV;
+
+ return ret;
+}
+EXPORT_SYMBOL(glink_ssr);
+
+/**
+ * glink_core_ch_close_ack_common() - handles the common operations during
+ * close ack.
+ * @ctx: Pointer to channel instance.
+ *
+ * Return: True if the channel is fully closed after the state change,
+ * false otherwise.
+ */
+static bool glink_core_ch_close_ack_common(struct channel_ctx *ctx)
+{
+ bool is_fully_closed;
+
+ if (ctx == NULL)
+ return false;
+ is_fully_closed = ch_update_local_state(ctx, GLINK_CHANNEL_CLOSED);
+ GLINK_INFO_PERF_CH(ctx,
+ "%s: local:GLINK_CHANNEL_CLOSING->GLINK_CHANNEL_CLOSED\n",
+ __func__);
+
+ if (ctx->notify_state) {
+ ctx->notify_state(ctx, ctx->user_priv,
+ GLINK_LOCAL_DISCONNECTED);
+ /*
+ * NOTE(review): intent lists are purged only when the client
+ * registered notify_state -- confirm a channel without the
+ * callback cannot reach close-ack with intents still queued.
+ */
+ ch_purge_intent_lists(ctx);
+ GLINK_INFO_PERF_CH(ctx,
+ "%s: notify state: GLINK_LOCAL_DISCONNECTED\n",
+ __func__);
+ }
+
+ return is_fully_closed;
+}
+
+/**
+ * glink_core_remote_close_common() - Handles the common operations during
+ * a remote close.
+ * @ctx: Pointer to channel instance.
+ *
+ * Return: True if the channel is fully closed after the state change,
+ * false otherwise.
+ */
+static bool glink_core_remote_close_common(struct channel_ctx *ctx)
+{
+ bool is_fully_closed;
+
+ if (ctx == NULL)
+ return false;
+ is_fully_closed = ch_update_rmt_state(ctx, false);
+ /* Remote side is gone; forget its channel id. */
+ ctx->rcid = 0;
+
+ if (ctx->local_open_state != GLINK_CHANNEL_CLOSED &&
+ ctx->local_open_state != GLINK_CHANNEL_CLOSING) {
+ if (ctx->notify_state)
+ ctx->notify_state(ctx, ctx->user_priv,
+ GLINK_REMOTE_DISCONNECTED);
+ GLINK_INFO_CH(ctx,
+ "%s: %s: GLINK_REMOTE_DISCONNECTED\n",
+ __func__, "notify state");
+ }
+
+ if (ctx->local_open_state == GLINK_CHANNEL_CLOSED)
+ GLINK_INFO_CH(ctx,
+ "%s: %s, %s\n", __func__,
+ "Did not send GLINK_REMOTE_DISCONNECTED",
+ "local state is already CLOSED");
+
+ /* Fail any outstanding rx-intent request and wake both classes of
+ * waiters; the request can never be granted after a remote close.
+ */
+ ctx->int_req_ack = false;
+ complete_all(&ctx->int_req_ack_complete);
+ complete_all(&ctx->int_req_complete);
+ ch_purge_intent_lists(ctx);
+
+ return is_fully_closed;
+}
+
+/**
+ * glink_qos_calc_rate_kBps() - Calculate the transmit rate in kBps
+ * @pkt_size: Worst case packet size per transmission.
+ * @interval_us: Packet transmit interval in us.
+ *
+ * This function is used to calculate the rate of transmission rate of
+ * a channel in kBps.
+ *
+ * Return: Transmission rate in kBps.
+ */
+static unsigned long glink_qos_calc_rate_kBps(size_t pkt_size,
+ unsigned long interval_us)
+{
+ unsigned long rate_kBps, rem;
+
+ /* rate = (pkt_size * USEC_PER_SEC) / (interval_us * 1024) */
+ rate_kBps = pkt_size * USEC_PER_SEC;
+ /* do_div() divides in place; the remainder (rem) is discarded.
+ * NOTE(review): `rem` is otherwise unused, and do_div() expects a
+ * 64-bit dividend -- on a 32-bit build `pkt_size * USEC_PER_SEC`
+ * can overflow unsigned long; confirm callers bound pkt_size.
+ */
+ rem = do_div(rate_kBps, (interval_us * 1024));
+ return rate_kBps;
+}
+
+/**
+ * glink_qos_check_feasibility() - Feasibility test on a QoS Request
+ * @xprt_ctx: Transport in which the QoS request is made.
+ * @req_rate_kBps: QoS Request.
+ *
+ * This function is used to perform the schedulability test on a QoS request
+ * over a specific transport.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_check_feasibility(struct glink_core_xprt_ctx *xprt_ctx,
+ unsigned long req_rate_kBps)
+{
+ unsigned long new_rate_kBps;
+
+ /* Transport with only the default priority bucket cannot do QoS. */
+ if (xprt_ctx->num_priority == GLINK_QOS_DEF_NUM_PRIORITY)
+ return -EOPNOTSUPP;
+
+ /* Admit the request only if the aggregate committed rate stays at or
+ * below the transport's configured threshold.
+ */
+ new_rate_kBps = xprt_ctx->curr_qos_rate_kBps + req_rate_kBps;
+ if (new_rate_kBps > xprt_ctx->threshold_rate_kBps) {
+ GLINK_ERR_XPRT(xprt_ctx,
+ "New_rate(%lu + %lu) > threshold_rate(%lu)\n",
+ xprt_ctx->curr_qos_rate_kBps, req_rate_kBps,
+ xprt_ctx->threshold_rate_kBps);
+ return -EBUSY;
+ }
+ return 0;
+}
+
+/**
+ * glink_qos_update_ch_prio() - Update the channel priority
+ * @ctx: Channel context whose priority is updated.
+ * @new_priority: New priority of the channel.
+ *
+ * This function is called to update the channel priority during QoS request,
+ * QoS Cancel or Priority evaluation by packet scheduler. This function must
+ * be called with transport's tx_ready_lock_lhb2 lock and channel's
+ * tx_lists_lock_lhc3 locked.
+ */
+static void glink_qos_update_ch_prio(struct channel_ctx *ctx,
+ uint32_t new_priority)
+{
+ uint32_t old_priority;
+
+ if (unlikely(!ctx))
+ return;
+
+ old_priority = ctx->curr_priority;
+ /* Only migrate the tx_ready node (and the per-bin active counts)
+ * when the channel is currently queued for transmit.
+ */
+ if (!list_empty(&ctx->tx_ready_list_node)) {
+ ctx->transport_ptr->prio_bin[old_priority].active_ch_cnt--;
+ list_move(&ctx->tx_ready_list_node,
+ &ctx->transport_ptr->prio_bin[new_priority].tx_ready);
+ ctx->transport_ptr->prio_bin[new_priority].active_ch_cnt++;
+ }
+ ctx->curr_priority = new_priority;
+}
+
+/**
+ * glink_qos_assign_priority() - Assign priority to a channel
+ * @ctx: Channel for which the priority has to be assigned.
+ * @req_rate_kBps: QoS request by the channel.
+ *
+ * This function is used to assign a priority to the channel depending on its
+ * QoS Request.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_assign_priority(struct channel_ctx *ctx,
+ unsigned long req_rate_kBps)
+{
+ int ret;
+ uint32_t i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb2, flags);
+ /* Only one outstanding QoS request per channel is allowed. */
+ if (ctx->req_rate_kBps) {
+ spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb2,
+ flags);
+ GLINK_ERR_CH(ctx, "%s: QoS Request already exists\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = glink_qos_check_feasibility(ctx->transport_ptr, req_rate_kBps);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb2,
+ flags);
+ return ret;
+ }
+
+ spin_lock(&ctx->tx_lists_lock_lhc3);
+ /*
+ * Walk down from the highest bin to the lowest-index bin whose
+ * predecessor cannot satisfy the requested rate.
+ * NOTE(review): assumes prio_bin[] max rates ascend with index --
+ * confirm this ordering is guaranteed at QoS configuration time.
+ */
+ i = ctx->transport_ptr->num_priority - 1;
+ while (i > 0 &&
+ ctx->transport_ptr->prio_bin[i-1].max_rate_kBps >= req_rate_kBps)
+ i--;
+
+ ctx->initial_priority = i;
+ glink_qos_update_ch_prio(ctx, i);
+ ctx->req_rate_kBps = req_rate_kBps;
+ /* Non-default priority: commit the rate and seed the token bucket. */
+ if (i > 0) {
+ ctx->transport_ptr->curr_qos_rate_kBps += req_rate_kBps;
+ ctx->token_count = ctx->transport_ptr->token_count;
+ ctx->txd_len = 0;
+ ctx->token_start_time = arch_counter_get_cntpct();
+ }
+ spin_unlock(&ctx->tx_lists_lock_lhc3);
+ spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb2, flags);
+ return 0;
+}
+
+/**
+ * glink_qos_reset_priority() - Reset the channel priority
+ * @ctx: Channel for which the priority is reset.
+ *
+ * This function is used to reset the channel priority when the QoS request
+ * is cancelled by the channel.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_reset_priority(struct channel_ctx *ctx)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb2, flags);
+ spin_lock(&ctx->tx_lists_lock_lhc3);
+ /* Only non-default priorities committed bandwidth; release it and
+ * return the channel to priority 0.
+ */
+ if (ctx->initial_priority > 0) {
+ ctx->initial_priority = 0;
+ glink_qos_update_ch_prio(ctx, 0);
+ ctx->transport_ptr->curr_qos_rate_kBps -= ctx->req_rate_kBps;
+ ctx->txd_len = 0;
+ ctx->req_rate_kBps = 0;
+ }
+ spin_unlock(&ctx->tx_lists_lock_lhc3);
+ spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb2, flags);
+ return 0;
+}
+
+/**
+ * glink_qos_ch_vote_xprt() - Vote the transport that channel is active
+ * @ctx: Channel context which is active.
+ *
+ * This function is called to vote for the transport either when the channel
+ * is transmitting or when it shows an intention to transmit sooner. This
+ * function must be called with transport's tx_ready_lock_lhb2 lock and
+ * channel's tx_lists_lock_lhc3 locked.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_ch_vote_xprt(struct channel_ctx *ctx)
+{
+ uint32_t prio;
+
+ if (unlikely(!ctx || !ctx->transport_ptr))
+ return -EINVAL;
+
+ prio = ctx->curr_priority;
+ ctx->transport_ptr->prio_bin[prio].active_ch_cnt++;
+
+ /* Raise the transport power vote only when this channel is the first
+ * active one in a bucket that exceeds the current active priority.
+ */
+ if (ctx->transport_ptr->prio_bin[prio].active_ch_cnt == 1 &&
+ ctx->transport_ptr->active_high_prio < prio) {
+ /*
+ * One active channel in this priority and this is the
+ * highest active priority bucket
+ */
+ ctx->transport_ptr->active_high_prio = prio;
+ return ctx->transport_ptr->ops->power_vote(
+ ctx->transport_ptr->ops,
+ glink_prio_to_power_state(ctx->transport_ptr,
+ prio));
+ }
+ return 0;
+}
+
+/**
+ * glink_qos_ch_unvote_xprt() - Unvote the transport when channel is inactive
+ * @ctx: Channel context which is inactive.
+ *
+ * This function is called to unvote for the transport either when all the
+ * packets queued by the channel are transmitted by the scheduler. This
+ * function must be called with transport's tx_ready_lock_lhb2 lock and
+ * channel's tx_lists_lock_lhc3 locked.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_ch_unvote_xprt(struct channel_ctx *ctx)
+{
+ uint32_t prio;
+
+ if (unlikely(!ctx || !ctx->transport_ptr))
+ return -EINVAL;
+
+ prio = ctx->curr_priority;
+ ctx->transport_ptr->prio_bin[prio].active_ch_cnt--;
+
+ /* Nothing to do unless this was the last active channel in the
+ * highest active priority bucket.
+ */
+ if (ctx->transport_ptr->prio_bin[prio].active_ch_cnt ||
+ ctx->transport_ptr->active_high_prio > prio)
+ return 0;
+
+ /*
+ * No active channel in this priority and this is the
+ * highest active priority bucket
+ */
+ while (prio > 0) {
+ prio--;
+ if (!ctx->transport_ptr->prio_bin[prio].active_ch_cnt)
+ continue;
+
+ /* Re-vote at the next-highest bucket that still has
+ * active channels.
+ */
+ ctx->transport_ptr->active_high_prio = prio;
+ return ctx->transport_ptr->ops->power_vote(
+ ctx->transport_ptr->ops,
+ glink_prio_to_power_state(ctx->transport_ptr,
+ prio));
+ }
+ /* No active channel left at any priority: drop the power vote. */
+ return ctx->transport_ptr->ops->power_unvote(ctx->transport_ptr->ops);
+}
+
+/**
+ * glink_qos_add_ch_tx_intent() - Add the channel's intention to transmit soon
+ * @ctx: Channel context which is going to be active.
+ *
+ * This function is called to update the channel state when it is intending to
+ * transmit sooner. This function must be called with transport's
+ * tx_ready_lock_lhb2 lock and channel's tx_lists_lock_lhc3 locked.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_add_ch_tx_intent(struct channel_ctx *ctx)
+{
+ bool active_tx;
+
+ if (unlikely(!ctx))
+ return -EINVAL;
+
+ /* Sample the tx state before bumping the intent count so the vote is
+ * cast only on the idle -> active transition.
+ */
+ active_tx = GLINK_GET_CH_TX_STATE(ctx);
+ ctx->tx_intent_cnt++;
+ if (!active_tx)
+ glink_qos_ch_vote_xprt(ctx);
+ return 0;
+}
+
+/**
+ * glink_qos_do_ch_tx() - Update the channel's state that it is transmitting
+ * @ctx: Channel context which is transmitting.
+ *
+ * This function is called to update the channel state when it is queueing a
+ * packet to transmit. This function must be called with transport's
+ * tx_ready_lock_lhb2 lock and channel's tx_lists_lock_lhc3 locked.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_do_ch_tx(struct channel_ctx *ctx)
+{
+ bool active_tx;
+
+ if (unlikely(!ctx))
+ return -EINVAL;
+
+ /* A queued packet consumes one earlier "intent to transmit", if any;
+ * the vote is cast only on the idle -> active transition.
+ */
+ active_tx = GLINK_GET_CH_TX_STATE(ctx);
+ ctx->tx_cnt++;
+ if (ctx->tx_intent_cnt)
+ ctx->tx_intent_cnt--;
+ if (!active_tx)
+ glink_qos_ch_vote_xprt(ctx);
+ return 0;
+}
+
+/**
+ * glink_qos_done_ch_tx() - Update the channel's state when transmission is done
+ * @ctx: Channel context for which all packets are transmitted.
+ *
+ * This function is called to update the channel state when all packets in its
+ * transmit queue are successfully transmitted. This function must be called
+ * with transport's tx_ready_lock_lhb2 lock and channel's tx_lists_lock_lhc3
+ * locked.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_done_ch_tx(struct channel_ctx *ctx)
+{
+ bool active_tx;
+
+ if (unlikely(!ctx))
+ return -EINVAL;
+
+ /* Calling this with no packets ever queued indicates a scheduler
+ * accounting bug; warn but recover by zeroing the count.
+ */
+ WARN_ON(ctx->tx_cnt == 0);
+ ctx->tx_cnt = 0;
+ /* Unvote only if no tx intent keeps the channel active. */
+ active_tx = GLINK_GET_CH_TX_STATE(ctx);
+ if (!active_tx)
+ glink_qos_ch_unvote_xprt(ctx);
+ return 0;
+}
+
+/**
+ * tx_linear_vbuf_provider() - Virtual Buffer Provider for linear buffers
+ * @iovec: Pointer to the beginning of the linear buffer.
+ * @offset: Offset into the buffer whose address is needed.
+ * @size: Pointer to hold the length of the contiguous buffer space.
+ *
+ * This function is used when a linear buffer is transmitted.
+ *
+ * Return: Address of the buffer which is at offset "offset" from the beginning
+ * of the buffer.
+ */
+static void *tx_linear_vbuf_provider(void *iovec, size_t offset, size_t *size)
+{
+ /* For the linear case the "iovec" is the tx packet itself. */
+ struct glink_core_tx_pkt *tx_info = (struct glink_core_tx_pkt *)iovec;
+
+ if (unlikely(!iovec || !size))
+ return NULL;
+
+ if (offset >= tx_info->size)
+ return NULL;
+
+ /* Guard against pointer arithmetic wrap-around. */
+ if (unlikely(OVERFLOW_ADD_UNSIGNED(void *, tx_info->data, offset)))
+ return NULL;
+
+ /* The buffer is contiguous: everything past the offset is usable. */
+ *size = tx_info->size - offset;
+
+ return (void *)tx_info->data + offset;
+}
+
+/**
+ * linearize_vector() - Linearize the vector buffer
+ * @iovec: Pointer to the vector buffer.
+ * @size: Size of data in the vector buffer.
+ * vbuf_provider: Virtual address-space Buffer Provider for the vector.
+ * pbuf_provider: Physical address-space Buffer Provider for the vector.
+ *
+ * This function is used to linearize the vector buffer provided by the
+ * transport when the client has registered to receive only the vector
+ * buffer.
+ *
+ * Return: address of the linear buffer on success, NULL on failure.
+ *
+ * NOTE(review): allocation failure returns ERR_PTR(-ENOMEM) while copy
+ * failures return NULL -- callers must handle both conventions; consider
+ * unifying. At least one provider must be non-NULL (the virtual provider
+ * takes precedence).
+ */
+static void *linearize_vector(void *iovec, size_t size,
+ void * (*vbuf_provider)(void *iovec, size_t offset, size_t *buf_size),
+ void * (*pbuf_provider)(void *iovec, size_t offset, size_t *buf_size))
+{
+ void *bounce_buf;
+ void *pdata;
+ void *vdata;
+ size_t data_size;
+ size_t offset = 0;
+
+ bounce_buf = kmalloc(size, GFP_KERNEL);
+ if (!bounce_buf)
+ return ERR_PTR(-ENOMEM);
+
+ /* Copy each contiguous fragment into the bounce buffer until the
+ * whole vector has been consumed or a provider runs dry.
+ */
+ do {
+ if (vbuf_provider) {
+ vdata = vbuf_provider(iovec, offset, &data_size);
+ } else {
+ pdata = pbuf_provider(iovec, offset, &data_size);
+ vdata = phys_to_virt((unsigned long)pdata);
+ }
+
+ if (!vdata)
+ break;
+
+ if (OVERFLOW_ADD_UNSIGNED(size_t, data_size, offset)) {
+ GLINK_ERR("%s: overflow data_size %zu + offset %zu\n",
+ __func__, data_size, offset);
+ goto err;
+ }
+
+ memcpy(bounce_buf + offset, vdata, data_size);
+ offset += data_size;
+ } while (offset < size);
+
+ /* A short copy means the providers yielded less than advertised. */
+ if (offset != size) {
+ GLINK_ERR("%s: Error size_copied %zu != total_size %zu\n",
+ __func__, offset, size);
+ goto err;
+ }
+ return bounce_buf;
+
+err:
+ kfree(bounce_buf);
+ return NULL;
+}
+
+/**
+ * xprt_lcid_to_ch_ctx_get() - lookup a channel by local id
+ * @xprt_ctx: Transport to search for a matching channel.
+ * @lcid: Local channel identifier corresponding to the desired channel.
+ *
+ * If the channel is found, the reference count is incremented to ensure the
+ * lifetime of the channel context. The caller must call rwref_put() when done.
+ *
+ * Return: The channel corresponding to @lcid or NULL if a matching channel
+ * is not found.
+ */
+static struct channel_ctx *xprt_lcid_to_ch_ctx_get(
+ struct glink_core_xprt_ctx *xprt_ctx,
+ uint32_t lcid)
+{
+ struct channel_ctx *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+ list_for_each_entry(entry, &xprt_ctx->channels, port_list_node)
+ if (entry->lcid == lcid) {
+ /* Take the reference while still holding the lock so
+ * the entry cannot be freed between lookup and get.
+ */
+ rwref_get(&entry->ch_state_lhc0);
+ spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1,
+ flags);
+ return entry;
+ }
+ spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+
+ return NULL;
+}
+
+/**
+ * xprt_rcid_to_ch_ctx_get() - lookup a channel by remote id
+ * @xprt_ctx: Transport to search for a matching channel.
+ * @rcid: Remote channel identifier corresponding to the desired channel.
+ *
+ * If the channel is found, the reference count is incremented to ensure the
+ * lifetime of the channel context. The caller must call rwref_put() when done.
+ *
+ * Return: The channel corresponding to @rcid or NULL if a matching channel
+ * is not found.
+ */
+static struct channel_ctx *xprt_rcid_to_ch_ctx_get(
+ struct glink_core_xprt_ctx *xprt_ctx,
+ uint32_t rcid)
+{
+ struct channel_ctx *entry;
+ unsigned long flags;
+
+ spin_lock_irqsave(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+ list_for_each_entry(entry, &xprt_ctx->channels, port_list_node)
+ if (entry->rcid == rcid) {
+ /* Reference taken under the lock; mirrors the lcid
+ * variant above.
+ */
+ rwref_get(&entry->ch_state_lhc0);
+ spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1,
+ flags);
+ return entry;
+ }
+ spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+
+ return NULL;
+}
+
+/**
+ * ch_check_duplicate_riid() - Checks for duplicate riid
+ * @ctx: Local channel context
+ * @riid: Remote intent ID
+ *
+ * This functions check the riid is present in the remote_rx_list or not
+ *
+ * Return: true if @riid is already queued on rmt_rx_intent_list,
+ * false otherwise.
+ */
+bool ch_check_duplicate_riid(struct channel_ctx *ctx, int riid)
+{
+ struct glink_core_rx_intent *intent;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+ list_for_each_entry(intent, &ctx->rmt_rx_intent_list, list) {
+ if (riid == intent->id) {
+ spin_unlock_irqrestore(
+ &ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+ return true;
+ }
+ }
+ spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+ return false;
+}
+
+/**
+ * ch_pop_remote_rx_intent() - Finds a matching RX intent
+ * @ctx: Local channel context
+ * @size: Size of Intent
+ * @riid_ptr: Pointer to return value of remote intent ID
+ *
+ * This functions searches for an RX intent that is >= to the requested size.
+ *
+ * Return: 0 on success, -EINVAL on bad arguments, -EAGAIN when no queued
+ * intent is large enough.
+ */
+int ch_pop_remote_rx_intent(struct channel_ctx *ctx, size_t size,
+ uint32_t *riid_ptr, size_t *intent_size)
+{
+ struct glink_core_rx_intent *intent;
+ struct glink_core_rx_intent *intent_tmp;
+ unsigned long flags;
+
+ if (GLINK_MAX_PKT_SIZE < size) {
+ GLINK_ERR_CH(ctx, "%s: R[]:%zu Invalid size.\n", __func__,
+ size);
+ return -EINVAL;
+ }
+
+ if (riid_ptr == NULL)
+ return -EINVAL;
+
+ *riid_ptr = 0;
+ spin_lock_irqsave(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+ /* Intentless transports need no real remote intent; hand out a
+ * monotonically increasing dummy id instead.
+ */
+ if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS) {
+ *riid_ptr = ++ctx->dummy_riid;
+ spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2,
+ flags);
+ return 0;
+ }
+ /* First-fit: take the first queued intent large enough and consume
+ * it (the entry is removed and freed after its id/size are copied).
+ */
+ list_for_each_entry_safe(intent, intent_tmp, &ctx->rmt_rx_intent_list,
+ list) {
+ if (intent->intent_size >= size) {
+ list_del(&intent->list);
+ GLINK_DBG_CH(ctx,
+ "%s: R[%u]:%zu Removed remote intent\n",
+ __func__,
+ intent->id,
+ intent->intent_size);
+ *riid_ptr = intent->id;
+ *intent_size = intent->intent_size;
+ kfree(intent);
+ spin_unlock_irqrestore(
+ &ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+ return 0;
+ }
+ }
+ spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+ return -EAGAIN;
+}
+
+/**
+ * ch_push_remote_rx_intent() - Registers a remote RX intent
+ * @ctx: Local channel context
+ * @size: Size of Intent
+ * @riid: Remote intent ID
+ *
+ * This functions adds a remote RX intent to the remote RX intent list.
+ */
+void ch_push_remote_rx_intent(struct channel_ctx *ctx, size_t size,
+ uint32_t riid)
+{
+ struct glink_core_rx_intent *intent;
+ unsigned long flags;
+ gfp_t gfp_flag;
+
+ if (GLINK_MAX_PKT_SIZE < size) {
+ GLINK_ERR_CH(ctx, "%s: R[%u]:%zu Invalid size.\n", __func__,
+ riid, size);
+ return;
+ }
+
+ /* Silently dropping a duplicate riid would corrupt bookkeeping;
+ * log and bail out instead.
+ */
+ if (ch_check_duplicate_riid(ctx, riid)) {
+ GLINK_ERR_CH(ctx, "%s: R[%d]:%zu Duplicate RIID found\n",
+ __func__, riid, size);
+ return;
+ }
+
+ /* Auto-queue transports push intents from atomic context, so the
+ * allocation must not sleep in that case.
+ */
+ gfp_flag = (ctx->transport_ptr->capabilities & GCAP_AUTO_QUEUE_RX_INT) ?
+ GFP_ATOMIC : GFP_KERNEL;
+ intent = kzalloc(sizeof(struct glink_core_rx_intent), gfp_flag);
+ if (!intent) {
+ GLINK_ERR_CH(ctx,
+ "%s: R[%u]:%zu Memory allocation for intent failed\n",
+ __func__, riid, size);
+ return;
+ }
+ intent->id = riid;
+ intent->intent_size = size;
+
+ spin_lock_irqsave(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+ list_add_tail(&intent->list, &ctx->rmt_rx_intent_list);
+
+ complete_all(&ctx->int_req_complete);
+ /*
+ * NOTE(review): the client's notify_remote_rx_intent callback runs
+ * with rmt_rx_intent_lst_lock_lhc2 held -- callbacks must not sleep
+ * or re-enter intent APIs that take this lock.
+ */
+ if (ctx->notify_remote_rx_intent)
+ ctx->notify_remote_rx_intent(ctx, ctx->user_priv, size);
+ spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+
+ GLINK_DBG_CH(ctx, "%s: R[%u]:%zu Pushed remote intent\n", __func__,
+ intent->id,
+ intent->intent_size);
+}
+
+/**
+ * ch_push_local_rx_intent() - Create an rx_intent
+ * @ctx: Local channel context
+ * @pkt_priv: Opaque private pointer provided by client to be returned later
+ * @size: Size of intent
+ *
+ * This functions creates a local intent and adds it to the local
+ * intent list.
+ *
+ * Return: Pointer to the queued intent, or NULL on size/ID-exhaustion/
+ * allocation failure.
+ */
+struct glink_core_rx_intent *ch_push_local_rx_intent(struct channel_ctx *ctx,
+ const void *pkt_priv, size_t size)
+{
+ struct glink_core_rx_intent *intent;
+ unsigned long flags;
+ int ret;
+
+ if (GLINK_MAX_PKT_SIZE < size) {
+ GLINK_ERR_CH(ctx,
+ "%s: L[]:%zu Invalid size\n", __func__, size);
+ return NULL;
+ }
+
+ /* Recycle a free intent if one exists; otherwise mint a new one with
+ * the next unused local intent id (bounded by the transport's max).
+ */
+ intent = ch_get_free_local_rx_intent(ctx);
+ if (!intent) {
+ if (ctx->max_used_liid >= ctx->transport_ptr->max_iid) {
+ GLINK_ERR_CH(ctx,
+ "%s: All intents are in USE max_iid[%d]",
+ __func__, ctx->transport_ptr->max_iid);
+ return NULL;
+ }
+
+ intent = kzalloc(sizeof(struct glink_core_rx_intent),
+ GFP_KERNEL);
+ if (!intent) {
+ GLINK_ERR_CH(ctx,
+ "%s: Memory Allocation for local rx_intent failed",
+ __func__);
+ return NULL;
+ }
+ intent->id = ++ctx->max_used_liid;
+ }
+
+ /* transport is responsible for allocating/reserving for the intent */
+ ret = ctx->transport_ptr->ops->allocate_rx_intent(
+ ctx->transport_ptr->ops, size, intent);
+ if (ret < 0) {
+ /* intent data allocation failure: park the (id-carrying)
+ * intent on the free list for later reuse.
+ */
+ GLINK_ERR_CH(ctx, "%s: unable to allocate intent sz[%zu] %d",
+ __func__, size, ret);
+ spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ list_add_tail(&intent->list,
+ &ctx->local_rx_intent_free_list);
+ spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1,
+ flags);
+ return NULL;
+ }
+
+ intent->pkt_priv = pkt_priv;
+ intent->intent_size = size;
+ intent->write_offset = 0;
+ intent->pkt_size = 0;
+ intent->bounce_buf = NULL;
+
+ spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ list_add_tail(&intent->list, &ctx->local_rx_intent_list);
+ spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ GLINK_DBG_CH(ctx, "%s: L[%u]:%zu Pushed intent\n", __func__,
+ intent->id,
+ intent->intent_size);
+ return intent;
+}
+
+/**
+ * ch_remove_local_rx_intent() - Find and remove RX Intent from list
+ * @ctx: Local channel context
+ * @liid: Local channel Intent ID
+ *
+ * This functions parses the local intent list for a specific channel
+ * and checks for the intent using the intent ID. If found, the intent
+ * is deleted from the list.
+ */
+void ch_remove_local_rx_intent(struct channel_ctx *ctx, uint32_t liid)
+{
+ struct glink_core_rx_intent *intent, *tmp_intent;
+ unsigned long flags;
+
+ if (ctx->transport_ptr->max_iid < liid) {
+ GLINK_ERR_CH(ctx, "%s: L[%u] Invalid ID.\n", __func__,
+ liid);
+ return;
+ }
+
+ spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ list_for_each_entry_safe(intent, tmp_intent, &ctx->local_rx_intent_list,
+ list) {
+ if (liid == intent->id) {
+ /* The intent struct (and its id) are recycled via
+ * the free list rather than freed.
+ */
+ list_del(&intent->list);
+ list_add_tail(&intent->list,
+ &ctx->local_rx_intent_free_list);
+ spin_unlock_irqrestore(
+ &ctx->local_rx_intent_lst_lock_lhc1,
+ flags);
+ GLINK_DBG_CH(ctx,
+ "%s: L[%u]:%zu moved intent to Free/unused list\n",
+ __func__,
+ intent->id,
+ intent->intent_size);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ GLINK_ERR_CH(ctx, "%s: L[%u] Intent not found.\n", __func__,
+ liid);
+}
+
+/**
+ * ch_get_dummy_rx_intent() - Get a dummy rx_intent
+ * @ctx: Local channel context
+ * @liid: Local channel Intent ID
+ *
+ * This functions parses the local intent list for a specific channel and
+ * returns either a matching intent or allocates a dummy one if no matching
+ * intents can be found.
+ *
+ * Return: Pointer to the intent if intent is found else NULL
+ *
+ * NOTE(review): @liid is not used in the lookup -- the first queued intent
+ * is returned regardless. Intended for intentless transports (see
+ * ch_get_local_rx_intent); confirm that is the only caller.
+ */
+struct glink_core_rx_intent *ch_get_dummy_rx_intent(struct channel_ctx *ctx,
+ uint32_t liid)
+{
+ struct glink_core_rx_intent *intent;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ if (!list_empty(&ctx->local_rx_intent_list)) {
+ intent = list_first_entry(&ctx->local_rx_intent_list,
+ struct glink_core_rx_intent, list);
+ spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1,
+ flags);
+ return intent;
+ }
+ spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+
+ /* No queued intent: recycle a free one or allocate a fresh dummy.
+ * GFP_ATOMIC because this path can run in atomic (rx) context --
+ * NOTE(review): confirm against the callers.
+ */
+ intent = ch_get_free_local_rx_intent(ctx);
+ if (!intent) {
+ intent = kzalloc(sizeof(struct glink_core_rx_intent),
+ GFP_ATOMIC);
+ if (!intent) {
+ GLINK_ERR_CH(ctx,
+ "%s: Memory Allocation for local rx_intent failed",
+ __func__);
+ return NULL;
+ }
+ intent->id = ++ctx->max_used_liid;
+ }
+ intent->intent_size = 0;
+ intent->write_offset = 0;
+ intent->pkt_size = 0;
+ intent->bounce_buf = NULL;
+ intent->pkt_priv = NULL;
+
+ spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ list_add_tail(&intent->list, &ctx->local_rx_intent_list);
+ spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ GLINK_DBG_CH(ctx, "%s: L[%u]:%zu Pushed intent\n", __func__,
+ intent->id,
+ intent->intent_size);
+ return intent;
+}
+
+/**
+ * ch_get_local_rx_intent() - Search for an rx_intent
+ * @ctx: Local channel context
+ * @liid: Local channel Intent ID
+ *
+ * This functions parses the local intent list for a specific channel
+ * and checks for the intent using the intent ID. If found, pointer to
+ * the intent is returned.
+ *
+ * Return: Pointer to the intent if intent is found else NULL
+ */
+struct glink_core_rx_intent *ch_get_local_rx_intent(struct channel_ctx *ctx,
+ uint32_t liid)
+{
+ struct glink_core_rx_intent *intent;
+ unsigned long flags;
+
+ if (ctx->transport_ptr->max_iid < liid) {
+ GLINK_ERR_CH(ctx, "%s: L[%u] Invalid ID.\n", __func__,
+ liid);
+ return NULL;
+ }
+
+ /* Intentless transports track no per-id intents; use a dummy. */
+ if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS)
+ return ch_get_dummy_rx_intent(ctx, liid);
+
+ spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ list_for_each_entry(intent, &ctx->local_rx_intent_list, list) {
+ if (liid == intent->id) {
+ spin_unlock_irqrestore(
+ &ctx->local_rx_intent_lst_lock_lhc1, flags);
+ return intent;
+ }
+ }
+ spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ GLINK_ERR_CH(ctx, "%s: L[%u] Intent not found.\n", __func__,
+ liid);
+ return NULL;
+}
+
+/**
+ * ch_set_local_rx_intent_notified() - Add a rx intent to local intent
+ * notified list
+ * @ctx: Local channel context
+ * @intent_ptr: Pointer to the local intent
+ *
+ * This functions parses the local intent list for a specific channel
+ * and checks for the intent. If found, the function deletes the intent
+ * from local_rx_intent list and adds it to local_rx_intent_notified list.
+ */
+void ch_set_local_rx_intent_notified(struct channel_ctx *ctx,
+ struct glink_core_rx_intent *intent_ptr)
+{
+ struct glink_core_rx_intent *tmp_intent, *intent;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ /* Match by pointer identity, not by intent id. */
+ list_for_each_entry_safe(intent, tmp_intent, &ctx->local_rx_intent_list,
+ list) {
+ if (intent == intent_ptr) {
+ list_del(&intent->list);
+ list_add_tail(&intent->list,
+ &ctx->local_rx_intent_ntfy_list);
+ GLINK_DBG_CH(ctx,
+ "%s: L[%u]:%zu Moved intent %s",
+ __func__,
+ intent_ptr->id,
+ intent_ptr->intent_size,
+ "from local to notify list\n");
+ spin_unlock_irqrestore(
+ &ctx->local_rx_intent_lst_lock_lhc1,
+ flags);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ GLINK_ERR_CH(ctx, "%s: L[%u] Intent not found.\n", __func__,
+ intent_ptr->id);
+}
+
+/**
+ * ch_get_local_rx_intent_notified() - Find rx intent in local notified list
+ * @ctx: Local channel context
+ * @ptr: Pointer to the rx intent data (matched against the intent's data,
+ * iovec, or bounce buffer pointer)
+ *
+ * This functions parses the local intent notify list for a specific channel
+ * and checks for the intent.
+ *
+ * Return: Pointer to the intent if intent is found else NULL.
+ */
+struct glink_core_rx_intent *ch_get_local_rx_intent_notified(
+ struct channel_ctx *ctx, const void *ptr)
+{
+ struct glink_core_rx_intent *ptr_intent;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ /* @ptr may be any of the buffers handed to the client in rx notify */
+ list_for_each_entry(ptr_intent, &ctx->local_rx_intent_ntfy_list,
+ list) {
+ if (ptr_intent->data == ptr || ptr_intent->iovec == ptr ||
+ ptr_intent->bounce_buf == ptr) {
+ spin_unlock_irqrestore(
+ &ctx->local_rx_intent_lst_lock_lhc1,
+ flags);
+ return ptr_intent;
+ }
+ }
+ spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ GLINK_ERR_CH(ctx, "%s: Local intent not found\n", __func__);
+ return NULL;
+}
+
+/**
+ * ch_remove_local_rx_intent_notified() - Remove a rx intent in local intent
+ * notified list
+ * @ctx: Local channel context
+ * @liid_ptr: Pointer to the rx intent
+ * @reuse: If true, return the intent to local_rx_intent_list for reuse;
+ * otherwise move it to local_rx_intent_free_list
+ *
+ * This functions parses the local intent notify list for a specific channel
+ * and checks for the intent. If found, the function deletes the intent
+ * from local_rx_intent_notified list and adds it to local_rx_intent_free list.
+ * The intent's bounce buffer is freed and its packet state is reset in
+ * either case.
+ */
+void ch_remove_local_rx_intent_notified(struct channel_ctx *ctx,
+ struct glink_core_rx_intent *liid_ptr, bool reuse)
+{
+ struct glink_core_rx_intent *ptr_intent, *tmp_intent;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ list_for_each_entry_safe(ptr_intent, tmp_intent,
+ &ctx->local_rx_intent_ntfy_list, list) {
+ if (ptr_intent == liid_ptr) {
+ list_del(&ptr_intent->list);
+ GLINK_DBG_CH(ctx,
+ "%s: L[%u]:%zu Removed intent from notify list\n",
+ __func__,
+ ptr_intent->id,
+ ptr_intent->intent_size);
+ kfree(ptr_intent->bounce_buf);
+ ptr_intent->bounce_buf = NULL;
+ ptr_intent->write_offset = 0;
+ ptr_intent->pkt_size = 0;
+ if (reuse)
+ list_add_tail(&ptr_intent->list,
+ &ctx->local_rx_intent_list);
+ else
+ list_add_tail(&ptr_intent->list,
+ &ctx->local_rx_intent_free_list);
+ spin_unlock_irqrestore(
+ &ctx->local_rx_intent_lst_lock_lhc1,
+ flags);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ GLINK_ERR_CH(ctx, "%s: L[%u] Intent not found.\n", __func__,
+ liid_ptr->id);
+}
+
+/**
+ * ch_get_free_local_rx_intent() - Return a rx intent in local intent
+ * free list
+ * @ctx: Local channel context
+ *
+ * This functions parses the local_rx_intent_free list for a specific channel
+ * and checks for the free unused intent. If found, the function returns
+ * the free intent pointer else NULL pointer. The returned intent is removed
+ * from the free list; ownership passes to the caller.
+ */
+struct glink_core_rx_intent *ch_get_free_local_rx_intent(
+ struct channel_ctx *ctx)
+{
+ struct glink_core_rx_intent *ptr_intent = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ if (!list_empty(&ctx->local_rx_intent_free_list)) {
+ ptr_intent = list_first_entry(&ctx->local_rx_intent_free_list,
+ struct glink_core_rx_intent,
+ list);
+ list_del(&ptr_intent->list);
+ }
+ spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ return ptr_intent;
+}
+
+/**
+ * ch_purge_intent_lists() - Remove all intents for a channel
+ *
+ * @ctx: Local channel context
+ *
+ * This functions parses the local intent lists for a specific channel and
+ * removes and frees all intents. Active tx packets are aborted (client is
+ * notified) and their references dropped; local and remote rx intents are
+ * freed. Intents still out with the client (notified list) are left for
+ * glink_rx_done() to clean up.
+ */
+void ch_purge_intent_lists(struct channel_ctx *ctx)
+{
+ struct glink_core_rx_intent *ptr_intent, *tmp_intent;
+ struct glink_core_tx_pkt *tx_info, *tx_info_temp;
+ unsigned long flags;
+
+ /* abort every in-flight tx packet and release its reference */
+ spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
+ list_for_each_entry_safe(tx_info, tx_info_temp, &ctx->tx_active,
+ list_node) {
+ ctx->notify_tx_abort(ctx, ctx->user_priv,
+ tx_info->pkt_priv);
+ rwref_put(&tx_info->pkt_ref);
+ }
+ spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
+
+ spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+ list_for_each_entry_safe(ptr_intent, tmp_intent,
+ &ctx->local_rx_intent_list, list) {
+ ctx->notify_rx_abort(ctx, ctx->user_priv,
+ ptr_intent->pkt_priv);
+ list_del(&ptr_intent->list);
+ kfree(ptr_intent);
+ }
+
+ if (!list_empty(&ctx->local_rx_intent_ntfy_list))
+ /*
+ * The client is still processing an rx_notify() call and has
+ * not yet called glink_rx_done() to return the pointer to us.
+ * glink_rx_done() will do the appropriate cleanup when this
+ * call occurs, but log a message here just for internal state
+ * tracking.
+ */
+ GLINK_INFO_CH(ctx, "%s: waiting on glink_rx_done()\n",
+ __func__);
+
+ list_for_each_entry_safe(ptr_intent, tmp_intent,
+ &ctx->local_rx_intent_free_list, list) {
+ list_del(&ptr_intent->list);
+ kfree(ptr_intent);
+ }
+ ctx->max_used_liid = 0;
+ spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+
+ /* finally drop every intent the remote side advertised to us */
+ spin_lock_irqsave(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+ list_for_each_entry_safe(ptr_intent, tmp_intent,
+ &ctx->rmt_rx_intent_list, list) {
+ list_del(&ptr_intent->list);
+ kfree(ptr_intent);
+ }
+ spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+}
+
+/**
+ * ch_get_tx_pending_remote_done() - Lookup for a packet that is waiting for
+ * the remote-done notification.
+ * @ctx: Pointer to the channel context
+ * @riid: riid of transmit packet
+ *
+ * This function looks up a packet in the tx_pending_remote_done list that
+ * matches @riid. The packet is not removed from the list.
+ *
+ * NOTE(review): an earlier comment claimed tx_lists_lock_lhc3 must be held
+ * by the caller; the function itself takes tx_pending_rmt_done_lock_lhc4.
+ * Confirm the caller-side locking requirement.
+ *
+ * Return: Pointer to the tx packet, or NULL if not found or if the matching
+ * packet has not finished transmitting.
+ */
+struct glink_core_tx_pkt *ch_get_tx_pending_remote_done(
+ struct channel_ctx *ctx, uint32_t riid)
+{
+ struct glink_core_tx_pkt *tx_pkt;
+ unsigned long flags;
+
+ if (!ctx) {
+ GLINK_ERR("%s: Invalid context pointer", __func__);
+ return NULL;
+ }
+
+ spin_lock_irqsave(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+ list_for_each_entry(tx_pkt, &ctx->tx_pending_remote_done, list_done) {
+ if (tx_pkt->riid == riid) {
+ /* remote-done for a packet we have not fully sent */
+ if (tx_pkt->size_remaining) {
+ GLINK_ERR_CH(ctx, "%s: R[%u] TX not complete",
+ __func__, riid);
+ tx_pkt = NULL;
+ }
+ spin_unlock_irqrestore(
+ &ctx->tx_pending_rmt_done_lock_lhc4, flags);
+ return tx_pkt;
+ }
+ }
+ spin_unlock_irqrestore(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+
+ GLINK_ERR_CH(ctx, "%s: R[%u] Tx packet for intent not found.\n",
+ __func__, riid);
+ return NULL;
+}
+
+/**
+ * ch_remove_tx_pending_remote_done() - Removes a packet transmit context for a
+ * packet that is waiting for the remote-done notification
+ * @ctx: Pointer to the channel context
+ * @tx_pkt: Pointer to the transmit packet
+ *
+ * This function parses through tx_pending_remote_done and removes a
+ * packet that matches with the tx_pkt. The packet's reference taken for
+ * the pending-done list is dropped on removal.
+ */
+void ch_remove_tx_pending_remote_done(struct channel_ctx *ctx,
+ struct glink_core_tx_pkt *tx_pkt)
+{
+ struct glink_core_tx_pkt *local_tx_pkt, *tmp_tx_pkt;
+ unsigned long flags;
+
+ if (!ctx || !tx_pkt) {
+ GLINK_ERR("%s: Invalid input", __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+ list_for_each_entry_safe(local_tx_pkt, tmp_tx_pkt,
+ &ctx->tx_pending_remote_done, list_done) {
+ if (tx_pkt == local_tx_pkt) {
+ list_del_init(&tx_pkt->list_done);
+ GLINK_DBG_CH(ctx,
+ "%s: R[%u] Removed Tx packet for intent\n",
+ __func__,
+ tx_pkt->riid);
+ rwref_put(&tx_pkt->pkt_ref);
+ spin_unlock_irqrestore(
+ &ctx->tx_pending_rmt_done_lock_lhc4, flags);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+
+ GLINK_ERR_CH(ctx, "%s: R[%u] Tx packet for intent not found", __func__,
+ tx_pkt->riid);
+}
+
+/**
+ * glink_add_free_lcid_list() - adds the lcid of a to be deleted channel to
+ * available lcid list
+ * @ctx: Pointer to channel context.
+ *
+ * On allocation failure the lcid is simply lost (logged); the transport
+ * will fall back to next_lcid allocation.
+ */
+static void glink_add_free_lcid_list(struct channel_ctx *ctx)
+{
+ struct channel_lcid *free_lcid;
+ unsigned long flags;
+
+ free_lcid = kzalloc(sizeof(*free_lcid), GFP_KERNEL);
+ if (!free_lcid) {
+ GLINK_ERR(
+ "%s: allocation failed on xprt:edge [%s:%s] for lcid [%d]\n",
+ __func__, ctx->transport_ptr->name,
+ ctx->transport_ptr->edge, ctx->lcid);
+ return;
+ }
+ free_lcid->lcid = ctx->lcid;
+ spin_lock_irqsave(&ctx->transport_ptr->xprt_ctx_lock_lhb1, flags);
+ list_add_tail(&free_lcid->list_node,
+ &ctx->transport_ptr->free_lcid_list);
+ spin_unlock_irqrestore(&ctx->transport_ptr->xprt_ctx_lock_lhb1,
+ flags);
+}
+
+/**
+ * glink_ch_ctx_release - Free the channel context
+ * @ch_st_lock: handle to the rwref_lock associated with the channel
+ *
+ * This should only be called when the reference count associated with the
+ * channel goes to zero.
+ */
+static void glink_ch_ctx_release(struct rwref_lock *ch_st_lock)
+{
+ struct channel_ctx *ctx = container_of(ch_st_lock, struct channel_ctx,
+ ch_state_lhc0);
+ ctx->transport_ptr = NULL;
+ kfree(ctx);
+ GLINK_INFO("%s: freed the channel ctx in pid [%d]\n", __func__,
+ current->pid);
+ ctx = NULL;
+}
+
+/**
+ * ch_name_to_ch_ctx_create() - lookup a channel by name, create the channel if
+ * it is not found.
+ * @xprt_ctx: Transport to search for a matching channel.
+ * @name: Name of the desired channel.
+ *
+ * A new context is allocated up-front; if an existing, non-deleted channel
+ * with @name is found the new allocation is freed and the existing context
+ * is returned instead.
+ *
+ * Return: The channel corresponding to @name, NULL if a matching channel was
+ * not found AND a new channel could not be created.
+ */
+static struct channel_ctx *ch_name_to_ch_ctx_create(
+ struct glink_core_xprt_ctx *xprt_ctx,
+ const char *name)
+{
+ struct channel_ctx *entry;
+ struct channel_ctx *ctx;
+ struct channel_ctx *temp;
+ unsigned long flags;
+ struct channel_lcid *flcid;
+
+ ctx = kzalloc(sizeof(struct channel_ctx), GFP_KERNEL);
+ if (!ctx) {
+ /*
+ * NOTE(review): the format arguments appear swapped — the
+ * explanatory string fills the first %s and __func__ the
+ * second, so the log reads backwards. Left untouched here.
+ */
+ GLINK_ERR_XPRT(xprt_ctx, "%s: Failed to allocated ctx, %s",
+ "checking if there is one existing\n",
+ __func__);
+ goto check_ctx;
+ }
+
+ ctx->local_open_state = GLINK_CHANNEL_CLOSED;
+ strlcpy(ctx->name, name, GLINK_NAME_SIZE);
+ rwref_lock_init(&ctx->ch_state_lhc0, glink_ch_ctx_release);
+ INIT_LIST_HEAD(&ctx->tx_ready_list_node);
+ init_completion(&ctx->int_req_ack_complete);
+ init_completion(&ctx->int_req_complete);
+ INIT_LIST_HEAD(&ctx->local_rx_intent_list);
+ INIT_LIST_HEAD(&ctx->local_rx_intent_ntfy_list);
+ INIT_LIST_HEAD(&ctx->local_rx_intent_free_list);
+ spin_lock_init(&ctx->local_rx_intent_lst_lock_lhc1);
+ INIT_LIST_HEAD(&ctx->rmt_rx_intent_list);
+ spin_lock_init(&ctx->rmt_rx_intent_lst_lock_lhc2);
+ INIT_LIST_HEAD(&ctx->tx_active);
+ spin_lock_init(&ctx->tx_pending_rmt_done_lock_lhc4);
+ INIT_LIST_HEAD(&ctx->tx_pending_remote_done);
+ spin_lock_init(&ctx->tx_lists_lock_lhc3);
+
+check_ctx:
+ rwref_write_get(&xprt_ctx->xprt_state_lhb0);
+ /* transport closed underneath us: abandon (kfree(NULL) is a no-op) */
+ if (xprt_ctx->local_state != GLINK_XPRT_OPENED) {
+ kfree(ctx);
+ rwref_write_put(&xprt_ctx->xprt_state_lhb0);
+ return NULL;
+ }
+ spin_lock_irqsave(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+ list_for_each_entry_safe(entry, temp, &xprt_ctx->channels,
+ port_list_node)
+ if (!strcmp(entry->name, name) && !entry->pending_delete) {
+ spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1,
+ flags);
+ kfree(ctx);
+ rwref_write_put(&xprt_ctx->xprt_state_lhb0);
+ return entry;
+ }
+
+ if (ctx) {
+ /* assign an lcid: prefer a recycled id, else the next fresh one */
+ if (list_empty(&xprt_ctx->free_lcid_list)) {
+ if (xprt_ctx->next_lcid > xprt_ctx->max_cid) {
+ /* no more channels available */
+ GLINK_ERR_XPRT(xprt_ctx,
+ "%s: unable to exceed %u channels\n",
+ __func__, xprt_ctx->max_cid);
+ spin_unlock_irqrestore(
+ &xprt_ctx->xprt_ctx_lock_lhb1,
+ flags);
+ kfree(ctx);
+ rwref_write_put(&xprt_ctx->xprt_state_lhb0);
+ return NULL;
+ } else {
+ ctx->lcid = xprt_ctx->next_lcid++;
+ }
+ } else {
+ flcid = list_first_entry(&xprt_ctx->free_lcid_list,
+ struct channel_lcid, list_node);
+ ctx->lcid = flcid->lcid;
+ list_del(&flcid->list_node);
+ kfree(flcid);
+ }
+
+ list_add_tail(&ctx->port_list_node, &xprt_ctx->channels);
+
+ GLINK_INFO_PERF_CH_XPRT(ctx, xprt_ctx,
+ "%s: local:GLINK_CHANNEL_CLOSED\n",
+ __func__);
+ }
+ spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+ rwref_write_put(&xprt_ctx->xprt_state_lhb0);
+ mutex_lock(&xprt_ctx->xprt_dbgfs_lock_lhb3);
+ if (ctx != NULL)
+ glink_debugfs_add_channel(ctx, xprt_ctx);
+ mutex_unlock(&xprt_ctx->xprt_dbgfs_lock_lhb3);
+ return ctx;
+}
+
+/**
+ * ch_add_rcid() - add a remote channel identifier to an existing channel
+ * @xprt_ctx: Transport the channel resides on (currently unused).
+ * @ctx: Channel receiving the identifier.
+ * @rcid: The remote channel identifier.
+ */
+static void ch_add_rcid(struct glink_core_xprt_ctx *xprt_ctx,
+ struct channel_ctx *ctx,
+ uint32_t rcid)
+{
+ ctx->rcid = rcid;
+}
+
+/**
+ * ch_update_local_state() - Update the local channel state
+ * @ctx: Pointer to channel context.
+ * @lstate: Local channel state.
+ *
+ * The fully-closed check is done under the channel state write lock so the
+ * result is consistent with the state just written.
+ *
+ * Return: True if the channel is fully closed as a result of this update,
+ * false otherwise.
+ */
+static bool ch_update_local_state(struct channel_ctx *ctx,
+ enum local_channel_state_e lstate)
+{
+ bool is_fully_closed;
+
+ rwref_write_get(&ctx->ch_state_lhc0);
+ ctx->local_open_state = lstate;
+ is_fully_closed = ch_is_fully_closed(ctx);
+ rwref_write_put(&ctx->ch_state_lhc0);
+
+ return is_fully_closed;
+}
+
+/**
+ * ch_update_rmt_state() - Update the remote channel state
+ * @ctx: Pointer to channel context.
+ * @rstate: Remote Channel state.
+ *
+ * Return: True if the channel is fully closed as result of this update,
+ * false otherwise.
+ */
+static bool ch_update_rmt_state(struct channel_ctx *ctx, bool rstate)
+{
+ bool is_fully_closed;
+
+ rwref_write_get(&ctx->ch_state_lhc0);
+ ctx->remote_opened = rstate;
+ is_fully_closed = ch_is_fully_closed(ctx);
+ rwref_write_put(&ctx->ch_state_lhc0);
+
+ return is_fully_closed;
+}
+
+/*
+ * ch_is_fully_opened() - Verify if a channel is open
+ * @ctx: Pointer to channel context
+ *
+ * Return: True if open on both local and remote sides, else false
+ */
+static bool ch_is_fully_opened(struct channel_ctx *ctx)
+{
+ if (ctx->remote_opened && ctx->local_open_state == GLINK_CHANNEL_OPENED)
+ return true;
+
+ return false;
+}
+
+/*
+ * ch_is_fully_closed() - Verify if a channel is closed on both sides
+ * @ctx: Pointer to channel context
+ *
+ * Return: True if closed on both local and remote sides, else false
+ */
+static bool ch_is_fully_closed(struct channel_ctx *ctx)
+{
+ if (!ctx->remote_opened &&
+ ctx->local_open_state == GLINK_CHANNEL_CLOSED)
+ return true;
+
+ return false;
+}
+
+/**
+ * find_open_transport() - find a specific open transport
+ * @edge: Edge the transport is on.
+ * @name: Name of the transport (or NULL if no preference)
+ * @initial_xprt: The specified transport is the start for migration
+ * @best_id: The best transport found for this connection
+ *
+ * Find an open transport corresponding to the specified @name and @edge. @edge
+ * is expected to be valid. @name is expected to be NULL (unspecified) or
+ * valid. If @name is not specified, then the best transport found on the
+ * specified edge will be returned.
+ *
+ * Return: Transport with the specified name on the specified edge, if open.
+ * NULL if the transport exists, but is not fully open. ENODEV if no such
+ * transport exists.
+ */
+static struct glink_core_xprt_ctx *find_open_transport(const char *edge,
+ const char *name,
+ bool initial_xprt,
+ uint16_t *best_id)
+{
+ struct glink_core_xprt_ctx *xprt;
+ struct glink_core_xprt_ctx *best_xprt = NULL;
+ struct glink_core_xprt_ctx *ret;
+ bool first = true;
+
+ ret = (struct glink_core_xprt_ctx *)ERR_PTR(-ENODEV);
+ *best_id = USHRT_MAX;
+
+ mutex_lock(&transport_list_lock_lha0);
+ list_for_each_entry(xprt, &transport_list, list_node) {
+ if (strcmp(edge, xprt->edge))
+ continue;
+ /* edge matched at least once: ENODEV no longer applies */
+ if (first) {
+ first = false;
+ ret = NULL;
+ }
+ if (!xprt_is_fully_opened(xprt))
+ continue;
+
+ /* lowest id wins as the "best" transport on this edge */
+ if (xprt->id < *best_id) {
+ *best_id = xprt->id;
+ best_xprt = xprt;
+ }
+
+ /*
+ * Braces are required in this instance because the else will
+ * attach to the wrong if otherwise.
+ */
+ if (name) {
+ if (!strcmp(name, xprt->name))
+ ret = xprt;
+ } else {
+ ret = best_xprt;
+ }
+ }
+
+ mutex_unlock(&transport_list_lock_lha0);
+
+ if (IS_ERR_OR_NULL(ret))
+ return ret;
+ if (!initial_xprt)
+ *best_id = ret->id;
+
+ return ret;
+}
+
+/**
+ * xprt_is_fully_opened() - check the open status of a transport
+ * @xprt: Transport being checked.
+ *
+ * Fully opened means the local state machine reached GLINK_XPRT_OPENED and
+ * version/feature negotiation with the remote side has completed.
+ *
+ * Return: True if the transport is fully opened, false otherwise.
+ */
+static bool xprt_is_fully_opened(struct glink_core_xprt_ctx *xprt)
+{
+ if (xprt->remote_neg_completed &&
+ xprt->local_state == GLINK_XPRT_OPENED)
+ return true;
+
+ return false;
+}
+
+/**
+ * glink_dummy_notify_rx_intent_req() - Dummy RX Request
+ *
+ * @handle: Channel handle (ignored)
+ * @priv: Private data pointer (ignored)
+ * @req_size: Requested size (ignored)
+ *
+ * Dummy RX intent request if client does not implement the optional callback
+ * function.
+ *
+ * Return: False
+ */
+static bool glink_dummy_notify_rx_intent_req(void *handle, const void *priv,
+ size_t req_size)
+{
+ return false;
+}
+
+/**
+ * glink_dummy_notify_rx_sigs() - Dummy signal callback
+ *
+ * @handle: Channel handle (ignored)
+ * @priv: Private data pointer (ignored)
+ * @old_sigs: Previous signal state (ignored)
+ * @new_sigs: New signal state (ignored)
+ *
+ * Dummy signal callback if client does not implement the optional callback
+ * function.
+ */
+static void glink_dummy_notify_rx_sigs(void *handle, const void *priv,
+ uint32_t old_sigs, uint32_t new_sigs)
+{
+ /* intentionally left blank */
+}
+
+/**
+ * glink_dummy_notify_rx_abort() - Dummy rx abort callback
+ *
+ * @handle: Channel handle (ignored)
+ * @priv: Private data pointer (ignored)
+ * @pkt_priv: Private intent data pointer (ignored)
+ *
+ * Dummy rx abort callback if client does not implement the optional callback
+ * function.
+ */
+static void glink_dummy_notify_rx_abort(void *handle, const void *priv,
+ const void *pkt_priv)
+{
+ /* intentionally left blank */
+}
+
+/**
+ * glink_dummy_notify_tx_abort() - Dummy tx abort callback
+ *
+ * @handle: Channel handle (ignored)
+ * @priv: Private data pointer (ignored)
+ * @pkt_priv: Private intent data pointer (ignored)
+ *
+ * Dummy tx abort callback if client does not implement the optional callback
+ * function.
+ */
+static void glink_dummy_notify_tx_abort(void *handle, const void *priv,
+ const void *pkt_priv)
+{
+ /* intentionally left blank */
+}
+
+/**
+ * dummy_poll() - a dummy poll() for transports that don't define one
+ * @if_ptr: The transport interface handle for this transport.
+ * @lcid: The channel to poll.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static int dummy_poll(struct glink_transport_if *if_ptr, uint32_t lcid)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_reuse_rx_intent() - a dummy reuse_rx_intent() for transports that
+ * don't define one
+ * @if_ptr: The transport interface handle for this transport.
+ * @intent: The intent to reuse.
+ *
+ * Return: Success.
+ */
+static int dummy_reuse_rx_intent(struct glink_transport_if *if_ptr,
+ struct glink_core_rx_intent *intent)
+{
+ return 0;
+}
+
+/**
+ * dummy_mask_rx_irq() - a dummy mask_rx_irq() for transports that don't define
+ * one
+ * @if_ptr: The transport interface handle for this transport.
+ * @lcid: The local channel id for this channel.
+ * @mask: True to mask the irq, false to unmask.
+ * @pstruct: Platform defined structure with data necessary for masking.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static int dummy_mask_rx_irq(struct glink_transport_if *if_ptr, uint32_t lcid,
+ bool mask, void *pstruct)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_wait_link_down() - a dummy wait_link_down() for transports that don't
+ * define one
+ * @if_ptr: The transport interface handle for this transport.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static int dummy_wait_link_down(struct glink_transport_if *if_ptr)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_allocate_rx_intent() - a dummy RX intent allocation function that does
+ * not allocate anything
+ * @if_ptr: The transport the intent is associated with.
+ * @size: Size of intent.
+ * @intent: Pointer to the intent structure.
+ *
+ * Return: Success.
+ */
+static int dummy_allocate_rx_intent(struct glink_transport_if *if_ptr,
+ size_t size, struct glink_core_rx_intent *intent)
+{
+ return 0;
+}
+
+/**
+ * dummy_tx_cmd_tracer_pkt() - a dummy tracer packet tx cmd for transports
+ * that don't define one
+ * @if_ptr: The transport interface handle for this transport.
+ * @lcid: The channel in which the tracer packet is transmitted.
+ * @pctx: Context of the packet to be transmitted.
+ *
+ * Marks the packet fully "sent" so the core does not retry it.
+ *
+ * Return: 0.
+ */
+static int dummy_tx_cmd_tracer_pkt(struct glink_transport_if *if_ptr,
+ uint32_t lcid, struct glink_core_tx_pkt *pctx)
+{
+ pctx->size_remaining = 0;
+ return 0;
+}
+
+/**
+ * dummy_deallocate_rx_intent() - a dummy rx intent deallocation function that
+ * does not deallocate anything
+ * @if_ptr: The transport the intent is associated with.
+ * @intent: Pointer to the intent structure.
+ *
+ * Return: Success.
+ */
+static int dummy_deallocate_rx_intent(struct glink_transport_if *if_ptr,
+ struct glink_core_rx_intent *intent)
+{
+ return 0;
+}
+
+/**
+ * dummy_tx_cmd_local_rx_intent() - dummy local rx intent request
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @size: The intent size to encode.
+ * @liid: The local intent id to encode.
+ *
+ * Return: Success.
+ */
+static int dummy_tx_cmd_local_rx_intent(struct glink_transport_if *if_ptr,
+ uint32_t lcid, size_t size, uint32_t liid)
+{
+ return 0;
+}
+
+/**
+ * dummy_tx_cmd_local_rx_done() - dummy rx done command
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @liid: The local intent id to encode.
+ * @reuse: Reuse the consumed intent.
+ */
+static void dummy_tx_cmd_local_rx_done(struct glink_transport_if *if_ptr,
+ uint32_t lcid, uint32_t liid, bool reuse)
+{
+ /* intentionally left blank */
+}
+
+/**
+ * dummy_tx() - dummy tx() that does not send anything
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @pctx: The data to encode.
+ *
+ * Return: Number of bytes written i.e. zero.
+ */
+static int dummy_tx(struct glink_transport_if *if_ptr, uint32_t lcid,
+ struct glink_core_tx_pkt *pctx)
+{
+ return 0;
+}
+
+/**
+ * dummy_tx_cmd_rx_intent_req() - dummy rx intent request function
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @size: The requested intent size to encode.
+ *
+ * Return: Success.
+ */
+static int dummy_tx_cmd_rx_intent_req(struct glink_transport_if *if_ptr,
+ uint32_t lcid, size_t size)
+{
+ return 0;
+}
+
+/**
+ * dummy_tx_cmd_remote_rx_intent_req_ack() - dummy rx intent request ack
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @granted: The request response to encode.
+ *
+ * Return: Success.
+ */
+static int dummy_tx_cmd_remote_rx_intent_req_ack(
+ struct glink_transport_if *if_ptr,
+ uint32_t lcid, bool granted)
+{
+ return 0;
+}
+
+/**
+ * dummy_tx_cmd_set_sigs() - dummy signals ack transmit function
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @sigs: The signals to encode.
+ *
+ * Return: Success.
+ */
+static int dummy_tx_cmd_set_sigs(struct glink_transport_if *if_ptr,
+ uint32_t lcid, uint32_t sigs)
+{
+ return 0;
+}
+
+/**
+ * dummy_tx_cmd_ch_close() - dummy channel close transmit function
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ *
+ * Return: Success.
+ */
+static int dummy_tx_cmd_ch_close(struct glink_transport_if *if_ptr,
+ uint32_t lcid)
+{
+ return 0;
+}
+
+/**
+ * dummy_tx_cmd_ch_remote_close_ack() - dummy channel close ack sending function
+ * @if_ptr: The transport to transmit on.
+ * @rcid: The remote channel id to encode.
+ */
+static void dummy_tx_cmd_ch_remote_close_ack(struct glink_transport_if *if_ptr,
+ uint32_t rcid)
+{
+ /* intentionally left blank */
+}
+
+/**
+ * dummy_get_power_vote_ramp_time() - Dummy Power vote ramp time
+ * @if_ptr: The transport to transmit on.
+ * @state: The power state being requested from the transport.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static unsigned long dummy_get_power_vote_ramp_time(
+ struct glink_transport_if *if_ptr, uint32_t state)
+{
+ return (unsigned long)-EOPNOTSUPP;
+}
+
+/**
+ * dummy_power_vote() - Dummy Power vote operation
+ * @if_ptr: The transport to transmit on.
+ * @state: The power state being requested from the transport.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static int dummy_power_vote(struct glink_transport_if *if_ptr,
+ uint32_t state)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_power_unvote() - Dummy Power unvote operation
+ * @if_ptr: The transport to transmit on.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static int dummy_power_unvote(struct glink_transport_if *if_ptr)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
+ * notif_if_up_all_xprts() - Check and notify existing transport state if up
+ * @notif_info: Data structure containing transport information to be notified.
+ *
+ * This function is called when the client registers a notifier to know about
+ * the state of a transport. This function matches the existing transports with
+ * the transport in the "notif_info" parameter. When a matching transport is
+ * found, the callback function in the "notif_info" parameter is called with
+ * the state of the matching transport.
+ *
+ * If an edge or transport is not defined, then all edges and/or transports
+ * will be matched and will receive up notifications.
+ */
+static void notif_if_up_all_xprts(
+ struct link_state_notifier_info *notif_info)
+{
+ struct glink_core_xprt_ctx *xprt_ptr;
+ struct glink_link_state_cb_info cb_info;
+
+ cb_info.link_state = GLINK_LINK_STATE_UP;
+ mutex_lock(&transport_list_lock_lha0);
+ list_for_each_entry(xprt_ptr, &transport_list, list_node) {
+ /* empty edge/transport strings act as wildcards */
+ if (strlen(notif_info->edge) &&
+ strcmp(notif_info->edge, xprt_ptr->edge))
+ continue;
+
+ if (strlen(notif_info->transport) &&
+ strcmp(notif_info->transport, xprt_ptr->name))
+ continue;
+
+ if (!xprt_is_fully_opened(xprt_ptr))
+ continue;
+
+ cb_info.transport = xprt_ptr->name;
+ cb_info.edge = xprt_ptr->edge;
+ notif_info->glink_link_state_notif_cb(&cb_info,
+ notif_info->priv);
+ }
+ mutex_unlock(&transport_list_lock_lha0);
+}
+
+/**
+ * check_link_notifier_and_notify() - Check and notify clients about link state
+ * @xprt_ptr: Transport whose state to be notified.
+ * @link_state: State of the transport to be notified.
+ *
+ * This function is called when the state of the transport changes. This
+ * function matches the transport with the clients that have registered to
+ * be notified about the state changes. When a matching client notifier is
+ * found, the callback function in the client notifier is called with the
+ * new state of the transport.
+ */
+static void check_link_notifier_and_notify(struct glink_core_xprt_ctx *xprt_ptr,
+ enum glink_link_state link_state)
+{
+ struct link_state_notifier_info *notif_info;
+ struct glink_link_state_cb_info cb_info;
+
+ cb_info.link_state = link_state;
+ mutex_lock(&link_state_notifier_lock_lha1);
+ list_for_each_entry(notif_info, &link_state_notifier_list, list) {
+ /* empty edge/transport strings act as wildcards */
+ if (strlen(notif_info->edge) &&
+ strcmp(notif_info->edge, xprt_ptr->edge))
+ continue;
+
+ if (strlen(notif_info->transport) &&
+ strcmp(notif_info->transport, xprt_ptr->name))
+ continue;
+
+ cb_info.transport = xprt_ptr->name;
+ cb_info.edge = xprt_ptr->edge;
+ notif_info->glink_link_state_notif_cb(&cb_info,
+ notif_info->priv);
+ }
+ mutex_unlock(&link_state_notifier_lock_lha1);
+}
+
+/**
+ * glink_open() - Open a GLINK channel.
+ *
+ * @cfg: Open configuration structure (the structure is copied before
+ * glink_open returns). All unused fields should be zero-filled.
+ *
+ * This should not be called from link state callback context by clients.
+ * It is recommended that client should invoke this function from their own
+ * thread.
+ *
+ * Return: Pointer to channel on success, PTR_ERR() with standard Linux
+ * error code on failure.
+ */
+void *glink_open(const struct glink_open_config *cfg)
+{
+ struct channel_ctx *ctx = NULL;
+ struct glink_core_xprt_ctx *transport_ptr;
+ size_t len;
+ int ret;
+ uint16_t best_id;
+
+ if (!cfg->edge || !cfg->name) {
+ GLINK_ERR("%s: !cfg->edge || !cfg->name\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ len = strlen(cfg->edge);
+ if (len == 0 || len >= GLINK_NAME_SIZE) {
+ GLINK_ERR("%s: [EDGE] len == 0 || len >= GLINK_NAME_SIZE\n",
+ __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ len = strlen(cfg->name);
+ if (len == 0 || len >= GLINK_NAME_SIZE) {
+ GLINK_ERR("%s: [NAME] len == 0 || len >= GLINK_NAME_SIZE\n",
+ __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (cfg->transport) {
+ len = strlen(cfg->transport);
+ if (len == 0 || len >= GLINK_NAME_SIZE) {
+ GLINK_ERR("%s: [TRANSPORT] len == 0 || %s\n",
+ __func__,
+ "len >= GLINK_NAME_SIZE");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* confirm required notification parameters */
+ if (!(cfg->notify_rx || cfg->notify_rxv) || !cfg->notify_tx_done
+ || !cfg->notify_state
+ || ((cfg->options & GLINK_OPT_RX_INTENT_NOTIF)
+ && !cfg->notify_remote_rx_intent)) {
+ GLINK_ERR("%s: Incorrect notification parameters\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* find transport */
+ transport_ptr = find_open_transport(cfg->edge, cfg->transport,
+ cfg->options & GLINK_OPT_INITIAL_XPORT,
+ &best_id);
+ if (IS_ERR_OR_NULL(transport_ptr)) {
+ GLINK_ERR("%s:%s %s: Error %d - unable to find transport\n",
+ cfg->transport, cfg->edge, __func__,
+ (unsigned)PTR_ERR(transport_ptr));
+ return ERR_PTR(-ENODEV);
+ }
+
+ /*
+ * look for an existing port structure which can occur in
+ * reopen and remote-open-first cases
+ */
+ ctx = ch_name_to_ch_ctx_create(transport_ptr, cfg->name);
+ if (ctx == NULL) {
+ GLINK_ERR("%s:%s %s: Error - unable to allocate new channel\n",
+ cfg->transport, cfg->edge, __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* port already exists */
+ if (ctx->local_open_state != GLINK_CHANNEL_CLOSED) {
+ /* not ready to be re-opened */
+ GLINK_INFO_CH_XPRT(ctx, transport_ptr,
+ "%s: Channel not ready to be re-opened. State: %u\n",
+ __func__, ctx->local_open_state);
+ return ERR_PTR(-EBUSY);
+ }
+
+ /* initialize port structure */
+ ctx->user_priv = cfg->priv;
+ ctx->rx_intent_req_timeout_jiffies =
+ msecs_to_jiffies(cfg->rx_intent_req_timeout_ms);
+ ctx->notify_rx = cfg->notify_rx;
+ ctx->notify_tx_done = cfg->notify_tx_done;
+ ctx->notify_state = cfg->notify_state;
+ ctx->notify_rx_intent_req = cfg->notify_rx_intent_req;
+ ctx->notify_rxv = cfg->notify_rxv;
+ ctx->notify_rx_sigs = cfg->notify_rx_sigs;
+ ctx->notify_rx_abort = cfg->notify_rx_abort;
+ ctx->notify_tx_abort = cfg->notify_tx_abort;
+ ctx->notify_rx_tracer_pkt = cfg->notify_rx_tracer_pkt;
+ ctx->notify_remote_rx_intent = cfg->notify_remote_rx_intent;
+
+ /* substitute dummy callbacks for optional ones the client omitted */
+ if (!ctx->notify_rx_intent_req)
+ ctx->notify_rx_intent_req = glink_dummy_notify_rx_intent_req;
+ if (!ctx->notify_rx_sigs)
+ ctx->notify_rx_sigs = glink_dummy_notify_rx_sigs;
+ if (!ctx->notify_rx_abort)
+ ctx->notify_rx_abort = glink_dummy_notify_rx_abort;
+ if (!ctx->notify_tx_abort)
+ ctx->notify_tx_abort = glink_dummy_notify_tx_abort;
+
+ /* a zero timeout means wait forever */
+ if (!ctx->rx_intent_req_timeout_jiffies)
+ ctx->rx_intent_req_timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
+
+ ctx->local_xprt_req = best_id;
+ ctx->no_migrate = cfg->transport &&
+ !(cfg->options & GLINK_OPT_INITIAL_XPORT);
+ ctx->transport_ptr = transport_ptr;
+ ctx->local_open_state = GLINK_CHANNEL_OPENING;
+ GLINK_INFO_PERF_CH(ctx,
+ "%s: local:GLINK_CHANNEL_CLOSED->GLINK_CHANNEL_OPENING\n",
+ __func__);
+
+ /* start local-open sequence */
+ ret = ctx->transport_ptr->ops->tx_cmd_ch_open(ctx->transport_ptr->ops,
+ ctx->lcid, cfg->name, best_id);
+ if (ret) {
+ /* failure to send open command (transport failure) */
+ ctx->local_open_state = GLINK_CHANNEL_CLOSED;
+ GLINK_ERR_CH(ctx, "%s: Unable to send open command %d\n",
+ __func__, ret);
+ return ERR_PTR(ret);
+ }
+
+ GLINK_INFO_CH(ctx, "%s: Created channel, sent OPEN command. ctx %p\n",
+ __func__, ctx);
+
+ return ctx;
+}
+EXPORT_SYMBOL(glink_open);
+
+/**
+ * glink_get_channel_id_for_handle() - Get logical channel ID
+ *
+ * @handle: handle of channel
+ *
+ * Used internally by G-Link debugfs to map an opaque channel handle back to
+ * its logical channel identifier.
+ *
+ * Return: Logical Channel ID or standard Linux error code
+ */
+int glink_get_channel_id_for_handle(void *handle)
+{
+	struct channel_ctx *ctx = handle;
+
+	if (!ctx)
+		return -EINVAL;
+
+	return ctx->lcid;
+}
+EXPORT_SYMBOL(glink_get_channel_id_for_handle);
+
+/**
+ * glink_get_channel_name_for_handle() - return channel name
+ *
+ * @handle: handle of channel
+ *
+ * Used internally by G-Link debugfs to map an opaque channel handle back to
+ * its channel name.
+ *
+ * Return: Channel name or NULL
+ */
+char *glink_get_channel_name_for_handle(void *handle)
+{
+	struct channel_ctx *ctx = handle;
+
+	if (!ctx)
+		return NULL;
+
+	return ctx->name;
+}
+EXPORT_SYMBOL(glink_get_channel_name_for_handle);
+
+/**
+ * glink_delete_ch_from_list() - delete the channel from the list
+ * @ctx:	Pointer to channel context.
+ * @add_flcid:	Boolean value to decide whether the lcid should be added or not.
+ *
+ * This function deletes the channel from the list along with the debugfs
+ * information associated with it. It also adds the channel lcid to the free
+ * lcid list except if the channel is deleted in case of ssr/unregister case.
+ * It can only called when channel is fully closed.
+ */
+static void glink_delete_ch_from_list(struct channel_ctx *ctx, bool add_flcid)
+{
+	unsigned long flags;
+	/* unlink the channel from its transport's channel list */
+	spin_lock_irqsave(&ctx->transport_ptr->xprt_ctx_lock_lhb1,
+				flags);
+	if (!list_empty(&ctx->port_list_node))
+		list_del_init(&ctx->port_list_node);
+	spin_unlock_irqrestore(
+			&ctx->transport_ptr->xprt_ctx_lock_lhb1,
+			flags);
+	/* recycle the lcid unless this is an ssr/unregister teardown */
+	if (add_flcid)
+		glink_add_free_lcid_list(ctx);
+	mutex_lock(&ctx->transport_ptr->xprt_dbgfs_lock_lhb3);
+	glink_debugfs_remove_channel(ctx, ctx->transport_ptr);
+	mutex_unlock(&ctx->transport_ptr->xprt_dbgfs_lock_lhb3);
+	/* drop the channel-list reference on the channel state */
+	rwref_put(&ctx->ch_state_lhc0);
+}
+
+/**
+ * glink_close() - Close a previously opened channel.
+ *
+ * @handle: handle to close
+ *
+ * Once the closing process has been completed, the GLINK_LOCAL_DISCONNECTED
+ * state event will be sent and the channel can be reopened.
+ *
+ * Return: 0 on success; -EINVAL for invalid handle, -EBUSY is close is
+ * already in progress, standard Linux Error code otherwise.
+ */
+int glink_close(void *handle)
+{
+	struct glink_core_xprt_ctx *xprt_ctx = NULL;
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret = 0;
+	unsigned long flags;
+
+	if (!ctx)
+		return -EINVAL;
+
+	GLINK_INFO_CH(ctx, "%s: Closing channel, ctx: %p\n", __func__, ctx);
+	/* already fully closed: nothing to do */
+	if (ctx->local_open_state == GLINK_CHANNEL_CLOSED)
+		return 0;
+
+	if (ctx->local_open_state == GLINK_CHANNEL_CLOSING) {
+		/* close already pending */
+		return -EBUSY;
+	}
+
+	/* Set the channel state before removing it from xprt's list(s) */
+	GLINK_INFO_PERF_CH(ctx,
+		"%s: local:%u->GLINK_CHANNEL_CLOSING\n",
+		__func__, ctx->local_open_state);
+	ctx->local_open_state = GLINK_CHANNEL_CLOSING;
+
+	/* wake any waiters blocked on intent-request completions */
+	ctx->pending_delete = true;
+	ctx->int_req_ack = false;
+	complete_all(&ctx->int_req_ack_complete);
+	complete_all(&ctx->int_req_complete);
+
+	/* stop any further transmits on this channel */
+	spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb2, flags);
+	if (!list_empty(&ctx->tx_ready_list_node))
+		list_del_init(&ctx->tx_ready_list_node);
+	spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb2, flags);
+
+	if (ctx->transport_ptr->local_state != GLINK_XPRT_DOWN) {
+		/* normal path: ask the remote side to close the channel */
+		glink_qos_reset_priority(ctx);
+		ret = ctx->transport_ptr->ops->tx_cmd_ch_close(
+				ctx->transport_ptr->ops,
+				ctx->lcid);
+	} else if (!strcmp(ctx->transport_ptr->name, "dummy")) {
+		/*
+		 * This check will avoid any race condition when clients call
+		 * glink_close before the dummy xprt swapping happens in link
+		 * down scenario.
+		 */
+		ret = 0;
+		xprt_ctx = ctx->transport_ptr;
+		rwref_write_get(&xprt_ctx->xprt_state_lhb0);
+		/* locally ack the close since the remote side is gone */
+		glink_core_ch_close_ack_common(ctx);
+		if (ch_is_fully_closed(ctx)) {
+			glink_delete_ch_from_list(ctx, false);
+			rwref_put(&xprt_ctx->xprt_state_lhb0);
+			if (list_empty(&xprt_ctx->channels))
+				/* For the xprt reference */
+				rwref_put(&xprt_ctx->xprt_state_lhb0);
+		} else {
+			GLINK_ERR_CH(ctx,
+			"channel Not closed yet local state [%d] remote_state [%d]\n",
+				ctx->local_open_state, ctx->remote_opened);
+		}
+		rwref_write_put(&xprt_ctx->xprt_state_lhb0);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(glink_close);
+
+/**
+ * glink_tx_pkt_release() - Release a packet's transmit information
+ * @tx_pkt_ref:	Packet information which needs to be released.
+ *
+ * This function is called when all the references to a packet information
+ * is dropped. It unlinks the packet from both the pending and done lists
+ * before freeing it.
+ */
+static void glink_tx_pkt_release(struct rwref_lock *tx_pkt_ref)
+{
+	struct glink_core_tx_pkt *tx_info = container_of(tx_pkt_ref,
+						struct glink_core_tx_pkt,
+						pkt_ref);
+	/* drop from the tx-done list if still queued there */
+	if (!list_empty(&tx_info->list_done))
+		list_del_init(&tx_info->list_done);
+	/* drop from the pending-tx list if still queued there */
+	if (!list_empty(&tx_info->list_node))
+		list_del_init(&tx_info->list_node);
+	kfree(tx_info);
+}
+
+/**
+ * glink_tx_common() - Common TX implementation
+ *
+ * @handle:	handle returned by glink_open()
+ * @pkt_priv:	opaque data value that will be returned to client with
+ *		notify_tx_done notification
+ * @data:	pointer to the data (NULL for vector transfers)
+ * @iovec:	pointer to the vector (NULL for linear transfers)
+ * @size:	size of data
+ * @vbuf_provider: Virtual Address-space Buffer Provider for the tx buffer.
+ * @pbuf_provider: Physical Address-space Buffer Provider for the tx buffer.
+ * @tx_flags:	Flags to indicate transmit options
+ *
+ * Return: -EINVAL for invalid handle; -EBUSY if channel isn't ready for
+ *         transmit operation (not fully opened); -EAGAIN if remote side
+ *         has not provided a receive intent that is big enough.
+ */
+static int glink_tx_common(void *handle, void *pkt_priv,
+	void *data, void *iovec, size_t size,
+	void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size),
+	void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size),
+	uint32_t tx_flags)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	uint32_t riid;
+	int ret = 0;
+	struct glink_core_tx_pkt *tx_info;
+	size_t intent_size;
+	/* atomic contexts may not sleep waiting for a remote intent */
+	bool is_atomic =
+		tx_flags & (GLINK_TX_SINGLE_THREADED | GLINK_TX_ATOMIC);
+	unsigned long flags;
+
+	if (!size)
+		return -EINVAL;
+
+	if (!ctx)
+		return -EINVAL;
+
+	/* hold a channel-state reference across the whole operation */
+	rwref_get(&ctx->ch_state_lhc0);
+	if (!(vbuf_provider || pbuf_provider)) {
+		rwref_put(&ctx->ch_state_lhc0);
+		return -EINVAL;
+	}
+
+	if (!ch_is_fully_opened(ctx)) {
+		rwref_put(&ctx->ch_state_lhc0);
+		return -EBUSY;
+	}
+
+	if (size > GLINK_MAX_PKT_SIZE) {
+		rwref_put(&ctx->ch_state_lhc0);
+		return -EINVAL;
+	}
+
+	if (unlikely(tx_flags & GLINK_TX_TRACER_PKT)) {
+		/* tracer packets need explicit transport support */
+		if (!(ctx->transport_ptr->capabilities & GCAP_TRACER_PKT)) {
+			rwref_put(&ctx->ch_state_lhc0);
+			return -EOPNOTSUPP;
+		}
+		tracer_pkt_log_event(data, GLINK_CORE_TX);
+	}
+
+	/* find matching rx intent (first-fit algorithm for now) */
+	if (ch_pop_remote_rx_intent(ctx, size, &riid, &intent_size)) {
+		if (!(tx_flags & GLINK_TX_REQ_INTENT)) {
+			/* no rx intent available */
+			GLINK_ERR_CH(ctx,
+				"%s: R[%u]:%zu Intent not present for lcid\n",
+				__func__, riid, size);
+			rwref_put(&ctx->ch_state_lhc0);
+			return -EAGAIN;
+		}
+		if (is_atomic && !(ctx->transport_ptr->capabilities &
+					  GCAP_AUTO_QUEUE_RX_INT)) {
+			GLINK_ERR_CH(ctx,
+				"%s: Cannot request intent in atomic context\n",
+				__func__);
+			rwref_put(&ctx->ch_state_lhc0);
+			return -EINVAL;
+		}
+
+		/* request intent of correct size */
+		reinit_completion(&ctx->int_req_ack_complete);
+		ret = ctx->transport_ptr->ops->tx_cmd_rx_intent_req(
+				ctx->transport_ptr->ops, ctx->lcid, size);
+		if (ret) {
+			GLINK_ERR_CH(ctx, "%s: Request intent failed %d\n",
+					__func__, ret);
+			rwref_put(&ctx->ch_state_lhc0);
+			return ret;
+		}
+
+		/* retry until a large-enough intent arrives or we must bail */
+		while (ch_pop_remote_rx_intent(ctx, size, &riid,
+								&intent_size)) {
+			if (is_atomic) {
+				GLINK_ERR_CH(ctx,
+				    "%s Intent of size %zu not ready\n",
+				    __func__, size);
+				rwref_put(&ctx->ch_state_lhc0);
+				return -EAGAIN;
+			}
+
+			if (ctx->transport_ptr->local_state == GLINK_XPRT_DOWN
+			    || !ch_is_fully_opened(ctx)) {
+				GLINK_ERR_CH(ctx,
+					"%s: Channel closed while waiting for intent\n",
+					__func__);
+				rwref_put(&ctx->ch_state_lhc0);
+				return -EBUSY;
+			}
+
+			/* wait for the remote intent req ack */
+			if (!wait_for_completion_timeout(
+					&ctx->int_req_ack_complete,
+					ctx->rx_intent_req_timeout_jiffies)) {
+				GLINK_ERR_CH(ctx,
+					"%s: Intent request ack with size: %zu not granted for lcid\n",
+					__func__, size);
+				rwref_put(&ctx->ch_state_lhc0);
+				return -ETIMEDOUT;
+			}
+
+			if (!ctx->int_req_ack) {
+				GLINK_ERR_CH(ctx,
+				    "%s: Intent Request with size: %zu %s",
+				    __func__, size,
+				    "not granted for lcid\n");
+				rwref_put(&ctx->ch_state_lhc0);
+				return -EAGAIN;
+			}
+
+			/* wait for the rx_intent from remote side */
+			if (!wait_for_completion_timeout(
+					&ctx->int_req_complete,
+					ctx->rx_intent_req_timeout_jiffies)) {
+				GLINK_ERR_CH(ctx,
+					"%s: Intent request with size: %zu not granted for lcid\n",
+					__func__, size);
+				rwref_put(&ctx->ch_state_lhc0);
+				return -ETIMEDOUT;
+			}
+
+			reinit_completion(&ctx->int_req_complete);
+		}
+	}
+
+	if (!is_atomic) {
+		/* vote for power while a transmit is in flight */
+		spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb2,
+				  flags);
+		glink_pm_qos_vote(ctx->transport_ptr);
+		spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb2,
+					flags);
+	}
+
+	GLINK_INFO_PERF_CH(ctx, "%s: R[%u]:%zu data[%p], size[%zu]. TID %u\n",
+			__func__, riid, intent_size,
+			data ? data : iovec, size, current->pid);
+	tx_info = kzalloc(sizeof(struct glink_core_tx_pkt),
+				is_atomic ? GFP_ATOMIC : GFP_KERNEL);
+	if (!tx_info) {
+		GLINK_ERR_CH(ctx, "%s: No memory for allocation\n", __func__);
+		/* return the popped intent so it is not leaked */
+		ch_push_remote_rx_intent(ctx, intent_size, riid);
+		rwref_put(&ctx->ch_state_lhc0);
+		return -ENOMEM;
+	}
+	rwref_lock_init(&tx_info->pkt_ref, glink_tx_pkt_release);
+	INIT_LIST_HEAD(&tx_info->list_done);
+	INIT_LIST_HEAD(&tx_info->list_node);
+	tx_info->pkt_priv = pkt_priv;
+	tx_info->data = data;
+	tx_info->riid = riid;
+	tx_info->rcid = ctx->rcid;
+	tx_info->size = size;
+	tx_info->size_remaining = size;
+	tx_info->tracer_pkt = tx_flags & GLINK_TX_TRACER_PKT ? true : false;
+	tx_info->iovec = iovec ? iovec : (void *)tx_info;
+	tx_info->vprovider = vbuf_provider;
+	tx_info->pprovider = pbuf_provider;
+	tx_info->intent_size = intent_size;
+
+	/* schedule packet for transmit */
+	if ((tx_flags & GLINK_TX_SINGLE_THREADED) &&
+	    (ctx->transport_ptr->capabilities & GCAP_INTENTLESS))
+		ret = xprt_single_threaded_tx(ctx->transport_ptr,
+					       ctx, tx_info);
+	else
+		xprt_schedule_tx(ctx->transport_ptr, ctx, tx_info);
+
+	rwref_put(&ctx->ch_state_lhc0);
+	return ret;
+}
+
+/**
+ * glink_tx() - Transmit packet.
+ *
+ * @handle:	handle returned by glink_open()
+ * @pkt_priv:	opaque data value that will be returned to client with
+ *		notify_tx_done notification
+ * @data:	pointer to the data
+ * @size:	size of data
+ * @tx_flags:	Flags to specify transmit specific options
+ *
+ * Thin wrapper over glink_tx_common() for linear buffers; the linear
+ * virtual-buffer provider is supplied and no vector is used.
+ *
+ * Return: -EINVAL for invalid handle; -EBUSY if channel isn't ready for
+ *         transmit operation (not fully opened); -EAGAIN if remote side
+ *         has not provided a receive intent that is big enough.
+ */
+int glink_tx(void *handle, void *pkt_priv, void *data, size_t size,
+	uint32_t tx_flags)
+{
+	int ret;
+
+	ret = glink_tx_common(handle, pkt_priv, data, NULL, size,
+			      tx_linear_vbuf_provider, NULL, tx_flags);
+	return ret;
+}
+EXPORT_SYMBOL(glink_tx);
+
+/**
+ * glink_queue_rx_intent() - Register an intent to receive data.
+ *
+ * @handle:	handle returned by glink_open()
+ * @pkt_priv:	opaque data type that is returned when a packet is received
+ * @size:	maximum size of data to receive
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_queue_rx_intent(void *handle, const void *pkt_priv, size_t size)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	struct glink_core_rx_intent *intent_ptr;
+	int ret = 0;
+
+	if (!ctx)
+		return -EINVAL;
+
+	if (!ch_is_fully_opened(ctx)) {
+		/* Can only queue rx intents if channel is fully opened */
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		return -EBUSY;
+	}
+
+	/* allocate and queue the intent locally */
+	intent_ptr = ch_push_local_rx_intent(ctx, pkt_priv, size);
+	if (!intent_ptr) {
+		GLINK_ERR_CH(ctx,
+			"%s: Intent pointer allocation failed size[%zu]\n",
+			__func__, size);
+		return -ENOMEM;
+	}
+	GLINK_DBG_CH(ctx, "%s: L[%u]:%zu\n", __func__, intent_ptr->id,
+			intent_ptr->intent_size);
+
+	/* intentless transports need no remote advertisement */
+	if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS)
+		return ret;
+
+	/* notify remote side of rx intent */
+	ret = ctx->transport_ptr->ops->tx_cmd_local_rx_intent(
+		ctx->transport_ptr->ops, ctx->lcid, size, intent_ptr->id);
+	if (ret)
+		/* unable to transmit, dequeue intent */
+		ch_remove_local_rx_intent(ctx, intent_ptr->id);
+
+	return ret;
+}
+EXPORT_SYMBOL(glink_queue_rx_intent);
+
+/**
+ * glink_rx_intent_exists() - Check if an intent exists.
+ *
+ * @handle:	handle returned by glink_open()
+ * @size:	size of an intent to check or 0 for any intent
+ *
+ * Walks the channel's local rx-intent list under its spinlock looking for
+ * the first intent at least @size bytes large.
+ *
+ * Return: TRUE if an intent exists with greater than or equal to the size
+ * else FALSE
+ */
+bool glink_rx_intent_exists(void *handle, size_t size)
+{
+	struct channel_ctx *ctx = handle;
+	struct glink_core_rx_intent *intent;
+	bool found = false;
+	unsigned long flags;
+
+	if (!ctx || !ch_is_fully_opened(ctx))
+		return false;
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_for_each_entry(intent, &ctx->local_rx_intent_list, list) {
+		if (intent->intent_size >= size) {
+			found = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+
+	return found;
+}
+EXPORT_SYMBOL(glink_rx_intent_exists);
+
+/**
+ * glink_rx_done() - Return receive buffer to remote side.
+ *
+ * @handle:	handle returned by glink_open()
+ * @ptr:	data pointer provided in the notify_rx() call
+ * @reuse:	if true, receive intent is re-used
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_rx_done(void *handle, const void *ptr, bool reuse)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	struct glink_core_rx_intent *liid_ptr;
+	uint32_t id;
+	int ret = 0;
+
+	/* reject NULL handles up front, consistent with the other APIs */
+	if (!ctx)
+		return -EINVAL;
+
+	liid_ptr = ch_get_local_rx_intent_notified(ctx, ptr);
+
+	if (IS_ERR_OR_NULL(liid_ptr)) {
+		/* invalid pointer */
+		GLINK_ERR_CH(ctx, "%s: Invalid pointer %p\n", __func__, ptr);
+		return -EINVAL;
+	}
+
+	GLINK_INFO_PERF_CH(ctx, "%s: L[%u]: data[%p]. TID %u\n",
+			__func__, liid_ptr->id, ptr, current->pid);
+	id = liid_ptr->id;
+	if (reuse) {
+		/* try to recycle the intent; fall back to freeing it */
+		ret = ctx->transport_ptr->ops->reuse_rx_intent(
+				ctx->transport_ptr->ops, liid_ptr);
+		if (ret) {
+			GLINK_ERR_CH(ctx, "%s: Intent reuse err %d for %p\n",
+					__func__, ret, ptr);
+			ret = -ENOBUFS;
+			reuse = false;
+			ctx->transport_ptr->ops->deallocate_rx_intent(
+					ctx->transport_ptr->ops, liid_ptr);
+		}
+	} else {
+		ctx->transport_ptr->ops->deallocate_rx_intent(
+					ctx->transport_ptr->ops, liid_ptr);
+	}
+	ch_remove_local_rx_intent_notified(ctx, liid_ptr, reuse);
+	/* send rx done */
+	ctx->transport_ptr->ops->tx_cmd_local_rx_done(ctx->transport_ptr->ops,
+			ctx->lcid, id, reuse);
+
+	return ret;
+}
+EXPORT_SYMBOL(glink_rx_done);
+
+/**
+ * glink_txv() - Transmit a packet in vector form.
+ *
+ * @handle:	handle returned by glink_open()
+ * @pkt_priv:	opaque data value that will be returned to client with
+ *		notify_tx_done notification
+ * @iovec:	pointer to the vector (must remain valid until notify_tx_done
+ *		notification)
+ * @size:	size of data/vector
+ * @vbuf_provider: Client provided helper function to iterate the vector
+ *		in virtual address space
+ * @pbuf_provider: Client provided helper function to iterate the vector
+ *		in physical address space
+ * @tx_flags:	Flags to specify transmit specific options
+ *
+ * Return: -EINVAL for invalid handle; -EBUSY if channel isn't ready for
+ *         transmit operation (not fully opened); -EAGAIN if remote side has
+ *         not provided a receive intent that is big enough.
+ */
+int glink_txv(void *handle, void *pkt_priv,
+	void *iovec, size_t size,
+	void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size),
+	void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size),
+	uint32_t tx_flags)
+{
+	return glink_tx_common(handle, pkt_priv, NULL, iovec, size,
+			vbuf_provider, pbuf_provider, tx_flags);
+}
+EXPORT_SYMBOL(glink_txv);
+
+/**
+ * glink_sigs_set() - Set the local signals for the GLINK channel
+ *
+ * @handle:	handle returned by glink_open()
+ * @sigs:	modified signal value
+ *
+ * Records the new local signal value and sends a SIGNAL SET command to the
+ * remote side over the channel's transport.
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_sigs_set(void *handle, uint32_t sigs)
+{
+	struct channel_ctx *ctx = handle;
+	int ret;
+
+	if (!ctx)
+		return -EINVAL;
+
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		return -EBUSY;
+	}
+
+	ctx->lsigs = sigs;
+	ret = ctx->transport_ptr->ops->tx_cmd_set_sigs(ctx->transport_ptr->ops,
+			ctx->lcid, ctx->lsigs);
+	GLINK_INFO_CH(ctx, "%s: Sent SIGNAL SET command\n", __func__);
+	return ret;
+}
+EXPORT_SYMBOL(glink_sigs_set);
+
+/**
+ * glink_sigs_local_get() - Get the local signals for the GLINK channel
+ *
+ * @handle:	handle returned by glink_open()
+ * @sigs:	Pointer to hold the signals
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_sigs_local_get(void *handle, uint32_t *sigs)
+{
+	struct channel_ctx *ctx = handle;
+
+	if (!ctx || !sigs)
+		return -EINVAL;
+
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		return -EBUSY;
+	}
+
+	*sigs = ctx->lsigs;
+	return 0;
+}
+EXPORT_SYMBOL(glink_sigs_local_get);
+
+/**
+ * glink_sigs_remote_get() - Get the Remote signals for the GLINK channel
+ *
+ * @handle:	handle returned by glink_open()
+ * @sigs:	Pointer to hold the signals
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_sigs_remote_get(void *handle, uint32_t *sigs)
+{
+	struct channel_ctx *ctx = handle;
+
+	if (!ctx || !sigs)
+		return -EINVAL;
+
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		return -EBUSY;
+	}
+
+	*sigs = ctx->rsigs;
+	return 0;
+}
+EXPORT_SYMBOL(glink_sigs_remote_get);
+
+/**
+ * glink_register_link_state_cb() - Register for link state notification
+ * @link_info:	Data structure containing the link identification and callback.
+ * @priv:	Private information to be passed with the callback.
+ *
+ * This function is used to register a notifier to receive the updates about a
+ * link's/transport's state. This notifier needs to be registered first before
+ * an attempt to open a channel.
+ *
+ * Return: a reference to the notifier handle.
+ */
+void *glink_register_link_state_cb(struct glink_link_info *link_info,
+				   void *priv)
+{
+	struct link_state_notifier_info *notif_info;
+
+	if (!link_info || !link_info->glink_link_state_notif_cb)
+		return ERR_PTR(-EINVAL);
+
+	notif_info = kzalloc(sizeof(*notif_info), GFP_KERNEL);
+	if (!notif_info) {
+		GLINK_ERR("%s: Error allocating link state notifier info\n",
+			  __func__);
+		return ERR_PTR(-ENOMEM);
+	}
+	/* empty transport/edge act as wildcards and are left zeroed */
+	if (link_info->transport)
+		strlcpy(notif_info->transport, link_info->transport,
+			GLINK_NAME_SIZE);
+
+	if (link_info->edge)
+		strlcpy(notif_info->edge, link_info->edge, GLINK_NAME_SIZE);
+	notif_info->priv = priv;
+	notif_info->glink_link_state_notif_cb =
+				link_info->glink_link_state_notif_cb;
+
+	mutex_lock(&link_state_notifier_lock_lha1);
+	list_add_tail(&notif_info->list, &link_state_notifier_list);
+	mutex_unlock(&link_state_notifier_lock_lha1);
+
+	/* immediately report transports that are already up */
+	notif_if_up_all_xprts(notif_info);
+	return notif_info;
+}
+EXPORT_SYMBOL(glink_register_link_state_cb);
+
+/**
+ * glink_unregister_link_state_cb() - Unregister the link state notification
+ * @notif_handle:	Handle to be unregistered.
+ *
+ * This function is used to unregister a notifier to stop receiving the updates
+ * about a link's/transport's state. The handle is found in the global
+ * notifier list under the list mutex and freed once unlinked.
+ */
+void glink_unregister_link_state_cb(void *notif_handle)
+{
+	struct link_state_notifier_info *notif_info, *tmp;
+
+	if (IS_ERR_OR_NULL(notif_handle))
+		return;
+
+	mutex_lock(&link_state_notifier_lock_lha1);
+	list_for_each_entry_safe(notif_info, tmp,
+				 &link_state_notifier_list, list) {
+		if (notif_info != notif_handle)
+			continue;
+		list_del(&notif_info->list);
+		mutex_unlock(&link_state_notifier_lock_lha1);
+		kfree(notif_info);
+		return;
+	}
+	mutex_unlock(&link_state_notifier_lock_lha1);
+}
+EXPORT_SYMBOL(glink_unregister_link_state_cb);
+
+/**
+ * glink_qos_latency() - Register the latency QoS requirement
+ * @handle:	Channel handle in which the latency is required.
+ * @latency_us:	Latency requirement in units of micro-seconds.
+ * @pkt_size:	Worst case packet size for which the latency is required.
+ *
+ * This function is used to register the latency requirement for a channel
+ * and ensures that the latency requirement for this channel is met without
+ * impacting the existing latency requirements of other channels.
+ *
+ * Return: 0 if QoS request is achievable, standard Linux error codes on error
+ */
+int glink_qos_latency(void *handle, unsigned long latency_us, size_t pkt_size)
+{
+	struct channel_ctx *ctx = handle;
+	unsigned long rate_kBps;
+	int ret;
+
+	if (!ctx || !latency_us || !pkt_size)
+		return -EINVAL;
+
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		return -EBUSY;
+	}
+
+	/* translate latency + packet size into a required throughput */
+	rate_kBps = glink_qos_calc_rate_kBps(pkt_size, latency_us);
+	ret = glink_qos_assign_priority(ctx, rate_kBps);
+	if (ret < 0)
+		GLINK_ERR_CH(ctx, "%s: QoS %lu:%zu cannot be met\n",
+			     __func__, latency_us, pkt_size);
+
+	return ret;
+}
+EXPORT_SYMBOL(glink_qos_latency);
+
+/**
+ * glink_qos_cancel() - Cancel or unregister the QoS request
+ * @handle:	Channel handle for which the QoS request is cancelled.
+ *
+ * This function is used to cancel/unregister the QoS requests for a channel.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_qos_cancel(void *handle)
+{
+	struct channel_ctx *ctx = handle;
+
+	if (!ctx)
+		return -EINVAL;
+
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		return -EBUSY;
+	}
+
+	return glink_qos_reset_priority(ctx);
+}
+EXPORT_SYMBOL(glink_qos_cancel);
+
+/**
+ * glink_qos_start() - Start of the transmission requiring QoS
+ * @handle:	Channel handle in which the transmit activity is performed.
+ *
+ * This function is called by the clients to indicate G-Link regarding the
+ * start of the transmission which requires a certain QoS. The clients
+ * must account for the QoS ramp time to ensure meeting the QoS.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_qos_start(void *handle)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
+	unsigned long flags;
+
+	if (!ctx)
+		return -EINVAL;
+
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		return -EBUSY;
+	}
+
+	/*
+	 * Lock order: transport tx_ready lock first, then the channel's
+	 * tx lists lock (lhb2 before lhc3) -- must match the tx path.
+	 */
+	spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb2, flags);
+	spin_lock(&ctx->tx_lists_lock_lhc3);
+	ret = glink_qos_add_ch_tx_intent(ctx);
+	spin_unlock(&ctx->tx_lists_lock_lhc3);
+	spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb2, flags);
+	return ret;
+}
+EXPORT_SYMBOL(glink_qos_start);
+
+/**
+ * glink_qos_get_ramp_time() - Get the QoS ramp time
+ * @handle:	Channel handle for which the QoS ramp time is required.
+ * @pkt_size:	Worst case packet size.
+ *
+ * This function is called by the clients to obtain the ramp time required
+ * to meet the QoS requirements.
+ *
+ * Return: QoS ramp time is returned in units of micro-seconds on success,
+ *	   standard Linux error codes cast to unsigned long on error.
+ */
+unsigned long glink_qos_get_ramp_time(void *handle, size_t pkt_size)
+{
+	struct channel_ctx *ctx = handle;
+	struct glink_core_xprt_ctx *xprt;
+
+	if (!ctx)
+		return (unsigned long)-EINVAL;
+
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		return (unsigned long)-EBUSY;
+	}
+
+	xprt = ctx->transport_ptr;
+	return xprt->ops->get_power_vote_ramp_time(xprt->ops,
+			glink_prio_to_power_state(xprt,
+						  ctx->initial_priority));
+}
+EXPORT_SYMBOL(glink_qos_get_ramp_time);
+
+/**
+ * glink_rpm_rx_poll() - Poll and receive any available events
+ * @handle:	Channel handle in which this operation is performed.
+ *
+ * This function is used to poll and receive events and packets while the
+ * receive interrupt from RPM is disabled.
+ *
+ * Note that even if a return value > 0 is returned indicating that some events
+ * were processed, clients should only use the notification functions passed
+ * into glink_open() to determine if an entire packet has been received since
+ * some events may be internal details that are not visible to clients.
+ *
+ * Return: 0 for no packets available; > 0 for events available; standard
+ * Linux error codes on failure.
+ */
+int glink_rpm_rx_poll(void *handle)
+{
+	struct channel_ctx *ctx = handle;
+	struct glink_core_xprt_ctx *xprt;
+
+	if (!ctx)
+		return -EINVAL;
+
+	if (!ch_is_fully_opened(ctx))
+		return -EBUSY;
+
+	/* polling is only meaningful on intentless (RPM-style) transports */
+	xprt = ctx->transport_ptr;
+	if (!xprt || !(xprt->capabilities & GCAP_INTENTLESS))
+		return -EOPNOTSUPP;
+
+	return xprt->ops->poll(xprt->ops, ctx->lcid);
+}
+EXPORT_SYMBOL(glink_rpm_rx_poll);
+
+/**
+ * glink_rpm_mask_rx_interrupt() - Mask or unmask the RPM receive interrupt
+ * @handle:	Channel handle in which this operation is performed.
+ * @mask:	Flag to mask or unmask the interrupt.
+ * @pstruct:	Pointer to any platform specific data.
+ *
+ * This function is used to mask or unmask the receive interrupt from RPM.
+ * "mask" set to true indicates masking the interrupt and when set to false
+ * indicates unmasking the interrupt.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int glink_rpm_mask_rx_interrupt(void *handle, bool mask, void *pstruct)
+{
+	struct channel_ctx *ctx = handle;
+	struct glink_core_xprt_ctx *xprt;
+
+	if (!ctx)
+		return -EINVAL;
+
+	if (!ch_is_fully_opened(ctx))
+		return -EBUSY;
+
+	/* interrupt masking is only supported on intentless transports */
+	xprt = ctx->transport_ptr;
+	if (!xprt || !(xprt->capabilities & GCAP_INTENTLESS))
+		return -EOPNOTSUPP;
+
+	return xprt->ops->mask_rx_irq(xprt->ops, ctx->lcid, mask, pstruct);
+}
+EXPORT_SYMBOL(glink_rpm_mask_rx_interrupt);
+
+/**
+ * glink_wait_link_down() - Get status of link
+ * @handle:	Channel handle in which this operation is performed
+ *
+ * This function will query the transport for its status, to allow clients to
+ * proceed in cleanup operations.
+ */
+int glink_wait_link_down(void *handle)
+{
+	struct channel_ctx *ctx = handle;
+	struct glink_core_xprt_ctx *xprt;
+
+	if (!ctx)
+		return -EINVAL;
+
+	xprt = ctx->transport_ptr;
+	if (!xprt)
+		return -EOPNOTSUPP;
+
+	return xprt->ops->wait_link_down(xprt->ops);
+}
+EXPORT_SYMBOL(glink_wait_link_down);
+
+/**
+ * glink_xprt_ctx_release - Free the transport context
+ * @xprt_st_lock:	handle to the rwref_lock associated with the transport
+ *
+ * This should only be called when the reference count associated with the
+ * transport goes to zero. Removes the transport's debugfs tree, destroys
+ * its tx workqueue, releases the QoS configuration and frees the context.
+ */
+void glink_xprt_ctx_release(struct rwref_lock *xprt_st_lock)
+{
+	struct glink_dbgfs xprt_rm_dbgfs;
+	struct glink_core_xprt_ctx *xprt_ctx = container_of(xprt_st_lock,
+				struct glink_core_xprt_ctx, xprt_state_lhb0);
+	GLINK_INFO("%s: freeing transport [%s->%s]context\n", __func__,
+			xprt_ctx->name,
+			xprt_ctx->edge);
+	xprt_rm_dbgfs.curr_name = xprt_ctx->name;
+	xprt_rm_dbgfs.par_name = "xprt";
+	glink_debugfs_remove_recur(&xprt_rm_dbgfs);
+	GLINK_INFO("%s: xprt debugfs removed\n", __func__);
+	destroy_workqueue(xprt_ctx->tx_wq);
+	glink_core_deinit_xprt_qos_cfg(xprt_ctx);
+	kfree(xprt_ctx);
+}
+
+/**
+ * glink_dummy_xprt_ctx_release - free the dummy transport context
+ * @xprt_st_lock:	Handle to the rwref_lock associated with the transport.
+ *
+ * The release function is called when all the channels on this dummy
+ * transport are closed and the reference count goes to zero.
+ */
+static void glink_dummy_xprt_ctx_release(struct rwref_lock *xprt_st_lock)
+{
+	struct glink_core_xprt_ctx *xprt_ctx;
+
+	xprt_ctx = container_of(xprt_st_lock, struct glink_core_xprt_ctx,
+				xprt_state_lhb0);
+	GLINK_INFO("%s: freeing transport [%s->%s]context\n", __func__,
+			xprt_ctx->name, xprt_ctx->edge);
+	kfree(xprt_ctx);
+}
+
+/**
+ * glink_xprt_name_to_id() - convert transport name to id
+ * @name:	Name of the transport.
+ * @id:	Assigned id.
+ *
+ * Looks the transport name up in a static name-to-id table. Note that
+ * "mailbox" deliberately maps to the same ID as "smem".
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+int glink_xprt_name_to_id(const char *name, uint16_t *id)
+{
+	static const struct {
+		const char *name;
+		uint16_t id;
+	} xprt_tbl[] = {
+		{ "smem", SMEM_XPRT_ID },
+		{ "mailbox", SMEM_XPRT_ID },
+		{ "smd_trans", SMD_TRANS_XPRT_ID },
+		{ "lloop", LLOOP_XPRT_ID },
+		{ "mock", MOCK_XPRT_ID },
+		{ "mock_low", MOCK_XPRT_LOW_ID },
+		{ "mock_high", MOCK_XPRT_HIGH_ID },
+	};
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(xprt_tbl); i++) {
+		if (!strcmp(name, xprt_tbl[i].name)) {
+			*id = xprt_tbl[i].id;
+			return 0;
+		}
+	}
+	return -ENODEV;
+}
+EXPORT_SYMBOL(glink_xprt_name_to_id);
+
+/**
+ * of_get_glink_core_qos_cfg() - Parse the qos related dt entries
+ * @phandle:	The handle to the qos related node in DT.
+ * @cfg:	The transport configuration to be filled.
+ *
+ * Reads "qcom,mtu-size", "qcom,tput-stats-cycle" and the flat
+ * "qcom,flow-info" array (pairs of <mtu_tx_time_us power_state>) from the
+ * device tree node and fills @cfg. On any failure @cfg is reset to a
+ * zeroed/NULL state. On success the node reference is dropped.
+ *
+ * Return: 0 on Success, standard Linux error otherwise.
+ */
+int of_get_glink_core_qos_cfg(struct device_node *phandle,
+				struct glink_core_transport_cfg *cfg)
+{
+	int rc, i;
+	char *key;
+	uint32_t num_flows;
+	uint32_t *arr32;
+
+	if (!phandle) {
+		GLINK_ERR("%s: phandle is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	key = "qcom,mtu-size";
+	rc = of_property_read_u32(phandle, key, (uint32_t *)&cfg->mtu);
+	if (rc) {
+		GLINK_ERR("%s: missing key %s\n", __func__, key);
+		return -ENODEV;
+	}
+
+	key = "qcom,tput-stats-cycle";
+	rc = of_property_read_u32(phandle, key, &cfg->token_count);
+	if (rc) {
+		GLINK_ERR("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto error;
+	}
+
+	key = "qcom,flow-info";
+	if (!of_find_property(phandle, key, &num_flows)) {
+		GLINK_ERR("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto error;
+	}
+
+	/* property length is in bytes; entries come in u32 pairs */
+	num_flows /= sizeof(uint32_t);
+	if (num_flows % 2) {
+		GLINK_ERR("%s: Invalid flow info length\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	num_flows /= 2;
+	cfg->num_flows = num_flows;
+
+	cfg->flow_info = kmalloc_array(num_flows, sizeof(*(cfg->flow_info)),
+					GFP_KERNEL);
+	if (!cfg->flow_info) {
+		GLINK_ERR("%s: Memory allocation for flow info failed\n",
+				__func__);
+		rc = -ENOMEM;
+		goto error;
+	}
+	arr32 = kmalloc_array(num_flows * 2, sizeof(uint32_t), GFP_KERNEL);
+	if (!arr32) {
+		GLINK_ERR("%s: Memory allocation for temporary array failed\n",
+				__func__);
+		rc = -ENOMEM;
+		goto temp_mem_alloc_fail;
+	}
+
+	/* a failed read would leave arr32 uninitialized -- must check */
+	rc = of_property_read_u32_array(phandle, key, arr32, num_flows * 2);
+	if (rc) {
+		GLINK_ERR("%s: failed to read key %s\n", __func__, key);
+		goto flow_info_read_fail;
+	}
+
+	for (i = 0; i < num_flows; i++) {
+		cfg->flow_info[i].mtu_tx_time_us = arr32[2 * i];
+		cfg->flow_info[i].power_state = arr32[2 * i + 1];
+	}
+
+	kfree(arr32);
+	of_node_put(phandle);
+	return 0;
+
+flow_info_read_fail:
+	kfree(arr32);
+temp_mem_alloc_fail:
+	kfree(cfg->flow_info);
+error:
+	cfg->mtu = 0;
+	cfg->token_count = 0;
+	cfg->num_flows = 0;
+	cfg->flow_info = NULL;
+	return rc;
+}
+EXPORT_SYMBOL(of_get_glink_core_qos_cfg);
+
+/**
+ * glink_core_init_xprt_qos_cfg() - Initialize a transport's QoS configuration
+ * @xprt_ptr:	Transport to be initialized with QoS configuration.
+ * @cfg:	Data structure containing QoS configuration.
+ *
+ * This function is used during the transport registration to initialize it
+ * with QoS configuration. Each configured flow becomes a priority bin; when
+ * no flows are configured, defaults are used for MTU, priority count and
+ * token count.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_core_init_xprt_qos_cfg(struct glink_core_xprt_ctx *xprt_ptr,
+					 struct glink_core_transport_cfg *cfg)
+{
+	int i;
+
+	xprt_ptr->mtu = cfg->mtu ? cfg->mtu : GLINK_QOS_DEF_MTU;
+	xprt_ptr->num_priority = cfg->num_flows ? cfg->num_flows :
+					GLINK_QOS_DEF_NUM_PRIORITY;
+	xprt_ptr->token_count = cfg->token_count ? cfg->token_count :
+					GLINK_QOS_DEF_NUM_TOKENS;
+
+	/* kcalloc zero-initializes and checks the n*size multiplication */
+	xprt_ptr->prio_bin = kcalloc(xprt_ptr->num_priority,
+				sizeof(struct glink_qos_priority_bin),
+				GFP_KERNEL);
+	if (!xprt_ptr->prio_bin) {
+		GLINK_ERR("%s: unable to allocate priority bins\n", __func__);
+		return -ENOMEM;
+	}
+	/*
+	 * NOTE(review): the loop assumes cfg->flow_info is valid whenever
+	 * num_priority > 1 (i.e. cfg->num_flows was non-zero) -- confirm
+	 * against callers.
+	 */
+	for (i = 1; i < xprt_ptr->num_priority; i++) {
+		xprt_ptr->prio_bin[i].max_rate_kBps =
+			glink_qos_calc_rate_kBps(xprt_ptr->mtu,
+				cfg->flow_info[i].mtu_tx_time_us);
+		xprt_ptr->prio_bin[i].power_state =
+				cfg->flow_info[i].power_state;
+		INIT_LIST_HEAD(&xprt_ptr->prio_bin[i].tx_ready);
+	}
+	/* bin 0 is the unbounded/default priority */
+	xprt_ptr->prio_bin[0].max_rate_kBps = 0;
+	if (cfg->flow_info)
+		xprt_ptr->prio_bin[0].power_state =
+						cfg->flow_info[0].power_state;
+	INIT_LIST_HEAD(&xprt_ptr->prio_bin[0].tx_ready);
+	xprt_ptr->threshold_rate_kBps =
+		xprt_ptr->prio_bin[xprt_ptr->num_priority - 1].max_rate_kBps;
+
+	return 0;
+}
+
+/**
+ * glink_core_deinit_xprt_qos_cfg() - Reset a transport's QoS configuration
+ * @xprt_ptr: Transport to be deinitialized.
+ *
+ * Reverses glink_core_init_xprt_qos_cfg() at transport unregistration:
+ * clears every cached QoS parameter and releases the priority bins.
+ */
+static void glink_core_deinit_xprt_qos_cfg(struct glink_core_xprt_ctx *xprt_ptr)
+{
+ xprt_ptr->mtu = 0;
+ xprt_ptr->num_priority = 0;
+ xprt_ptr->token_count = 0;
+ xprt_ptr->threshold_rate_kBps = 0;
+ kfree(xprt_ptr->prio_bin);
+ xprt_ptr->prio_bin = NULL;
+}
+
+/**
+ * glink_core_register_transport() - register a new transport
+ * @if_ptr: The interface to the transport.
+ * @cfg: Description and configuration of the transport.
+ *
+ * Validates @cfg, allocates and populates the transport context, supplies
+ * no-op defaults for any optional interface ops the transport did not
+ * provide, initializes QoS state and the tx workqueue, and finally
+ * publishes the transport on transport_list.
+ *
+ * Return: 0 on success, EINVAL for invalid input.
+ */
+int glink_core_register_transport(struct glink_transport_if *if_ptr,
+ struct glink_core_transport_cfg *cfg)
+{
+ struct glink_core_xprt_ctx *xprt_ptr;
+ size_t len;
+ uint16_t id;
+ int ret;
+ char log_name[GLINK_NAME_SIZE*2+2] = {0};
+
+ if (!if_ptr || !cfg || !cfg->name || !cfg->edge)
+ return -EINVAL;
+
+ /* name and edge must be non-empty and fit within GLINK_NAME_SIZE */
+ len = strlen(cfg->name);
+ if (len == 0 || len >= GLINK_NAME_SIZE)
+ return -EINVAL;
+
+ len = strlen(cfg->edge);
+ if (len == 0 || len >= GLINK_NAME_SIZE)
+ return -EINVAL;
+
+ if (cfg->versions_entries < 1)
+ return -EINVAL;
+
+ ret = glink_xprt_name_to_id(cfg->name, &id);
+ if (ret)
+ return ret;
+
+ xprt_ptr = kzalloc(sizeof(struct glink_core_xprt_ctx), GFP_KERNEL);
+ if (xprt_ptr == NULL)
+ return -ENOMEM;
+
+ xprt_ptr->id = id;
+ rwref_lock_init(&xprt_ptr->xprt_state_lhb0,
+ glink_xprt_ctx_release);
+ strlcpy(xprt_ptr->name, cfg->name, GLINK_NAME_SIZE);
+ strlcpy(xprt_ptr->edge, cfg->edge, GLINK_NAME_SIZE);
+ xprt_ptr->versions = cfg->versions;
+ xprt_ptr->versions_entries = cfg->versions_entries;
+ /* negotiation starts from the newest (last) version entry */
+ xprt_ptr->local_version_idx = cfg->versions_entries - 1;
+ xprt_ptr->remote_version_idx = cfg->versions_entries - 1;
+ xprt_ptr->l_features =
+ cfg->versions[cfg->versions_entries - 1].features;
+ /* plug in no-op defaults for optional transport ops */
+ if (!if_ptr->poll)
+ if_ptr->poll = dummy_poll;
+ if (!if_ptr->mask_rx_irq)
+ if_ptr->mask_rx_irq = dummy_mask_rx_irq;
+ if (!if_ptr->reuse_rx_intent)
+ if_ptr->reuse_rx_intent = dummy_reuse_rx_intent;
+ if (!if_ptr->wait_link_down)
+ if_ptr->wait_link_down = dummy_wait_link_down;
+ if (!if_ptr->tx_cmd_tracer_pkt)
+ if_ptr->tx_cmd_tracer_pkt = dummy_tx_cmd_tracer_pkt;
+ if (!if_ptr->get_power_vote_ramp_time)
+ if_ptr->get_power_vote_ramp_time =
+ dummy_get_power_vote_ramp_time;
+ if (!if_ptr->power_vote)
+ if_ptr->power_vote = dummy_power_vote;
+ if (!if_ptr->power_unvote)
+ if_ptr->power_unvote = dummy_power_unvote;
+ xprt_ptr->capabilities = 0;
+ xprt_ptr->ops = if_ptr;
+ spin_lock_init(&xprt_ptr->xprt_ctx_lock_lhb1);
+ xprt_ptr->next_lcid = 1; /* 0 reserved for default unconfigured */
+ INIT_LIST_HEAD(&xprt_ptr->free_lcid_list);
+ xprt_ptr->max_cid = cfg->max_cid;
+ xprt_ptr->max_iid = cfg->max_iid;
+ xprt_ptr->local_state = GLINK_XPRT_DOWN;
+ xprt_ptr->remote_neg_completed = false;
+ INIT_LIST_HEAD(&xprt_ptr->channels);
+ ret = glink_core_init_xprt_qos_cfg(xprt_ptr, cfg);
+ if (ret < 0) {
+ kfree(xprt_ptr);
+ return ret;
+ }
+ spin_lock_init(&xprt_ptr->tx_ready_lock_lhb2);
+ mutex_init(&xprt_ptr->xprt_dbgfs_lock_lhb3);
+ INIT_WORK(&xprt_ptr->tx_work, tx_work_func);
+ /*
+ * create_singlethread_workqueue() returns NULL on failure (never an
+ * ERR_PTR); IS_ERR_OR_NULL() covers that case.
+ */
+ xprt_ptr->tx_wq = create_singlethread_workqueue("glink_tx");
+ if (IS_ERR_OR_NULL(xprt_ptr->tx_wq)) {
+ GLINK_ERR("%s: unable to allocate workqueue\n", __func__);
+ glink_core_deinit_xprt_qos_cfg(xprt_ptr);
+ kfree(xprt_ptr);
+ return -ENOMEM;
+ }
+ INIT_DELAYED_WORK(&xprt_ptr->pm_qos_work, glink_pm_qos_cancel_worker);
+ pm_qos_add_request(&xprt_ptr->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+
+ if_ptr->glink_core_priv = xprt_ptr;
+ if_ptr->glink_core_if_ptr = &core_impl;
+
+ mutex_lock(&transport_list_lock_lha0);
+ list_add_tail(&xprt_ptr->list_node, &transport_list);
+ mutex_unlock(&transport_list_lock_lha0);
+ glink_debugfs_add_xprt(xprt_ptr);
+ snprintf(log_name, sizeof(log_name), "%s_%s",
+ xprt_ptr->edge, xprt_ptr->name);
+ /* a missing per-transport IPC log is non-fatal; only log the error */
+ xprt_ptr->log_ctx = ipc_log_context_create(NUM_LOG_PAGES, log_name, 0);
+ if (!xprt_ptr->log_ctx)
+ GLINK_ERR("%s: unable to create log context for [%s:%s]\n",
+ __func__, xprt_ptr->edge, xprt_ptr->name);
+
+ return 0;
+}
+
+/**
+ * glink_core_unregister_transport() - unregister a transport
+ *
+ * @if_ptr: The interface to the transport.
+ *
+ * The transport must already be in GLINK_XPRT_DOWN (link_down must have
+ * been delivered first); otherwise the request is logged and ignored.
+ * On success the transport is removed from transport_list and the
+ * registration reference is dropped, which releases the context through
+ * the rwref release callback once all other references are gone.
+ */
+void glink_core_unregister_transport(struct glink_transport_if *if_ptr)
+{
+ struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+
+ GLINK_DBG_XPRT(xprt_ptr, "%s: destroying transport\n", __func__);
+ if (xprt_ptr->local_state != GLINK_XPRT_DOWN) {
+ GLINK_ERR_XPRT(xprt_ptr,
+ "%s: link_down should have been called before this\n",
+ __func__);
+ return;
+ }
+
+ mutex_lock(&transport_list_lock_lha0);
+ list_del(&xprt_ptr->list_node);
+ mutex_unlock(&transport_list_lock_lha0);
+ /* drain any queued PM QoS work before removing the request */
+ flush_delayed_work(&xprt_ptr->pm_qos_work);
+ pm_qos_remove_request(&xprt_ptr->pm_qos_req);
+ ipc_log_context_destroy(xprt_ptr->log_ctx);
+ xprt_ptr->log_ctx = NULL;
+ /* drop the reference taken at registration time */
+ rwref_put(&xprt_ptr->xprt_state_lhb0);
+}
+
+/**
+ * glink_core_link_up() - transport link-up notification
+ *
+ * @if_ptr: pointer to transport interface
+ *
+ * Begins local version negotiation by offering the newest version and
+ * feature set this transport supports.
+ */
+static void glink_core_link_up(struct glink_transport_if *if_ptr)
+{
+ struct glink_core_xprt_ctx *xprt = if_ptr->glink_core_priv;
+ uint32_t idx = xprt->versions_entries - 1;
+
+ /* start local negotiation from the highest supported version */
+ xprt->local_state = GLINK_XPRT_NEGOTIATING;
+ xprt->local_version_idx = idx;
+ xprt->l_features = xprt->versions[idx].features;
+ if_ptr->tx_cmd_version(if_ptr, xprt->versions[idx].version,
+ xprt->versions[idx].features);
+}
+
+/**
+ * glink_core_link_down() - transport link-down notification
+ *
+ * @if_ptr: pointer to transport interface
+ *
+ * Resets the transport's negotiation state to its initial values under
+ * the state write lock, then flushes pending tx work and cleans up all
+ * channels before notifying link-state observers.
+ */
+static void glink_core_link_down(struct glink_transport_if *if_ptr)
+{
+ struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+
+ rwref_write_get(&xprt_ptr->xprt_state_lhb0);
+ xprt_ptr->next_lcid = 1;
+ xprt_ptr->local_state = GLINK_XPRT_DOWN;
+ /* a future link-up renegotiates from the newest supported version */
+ xprt_ptr->local_version_idx = xprt_ptr->versions_entries - 1;
+ xprt_ptr->remote_version_idx = xprt_ptr->versions_entries - 1;
+ xprt_ptr->l_features =
+ xprt_ptr->versions[xprt_ptr->local_version_idx].features;
+ xprt_ptr->remote_neg_completed = false;
+ rwref_write_put(&xprt_ptr->xprt_state_lhb0);
+ GLINK_DBG_XPRT(xprt_ptr,
+ "%s: Flushing work from tx_wq. Thread: %u\n", __func__,
+ current->pid);
+ /* must flush outside the write lock; tx work may take references */
+ flush_workqueue(xprt_ptr->tx_wq);
+ glink_core_channel_cleanup(xprt_ptr);
+ check_link_notifier_and_notify(xprt_ptr, GLINK_LINK_STATE_DOWN);
+}
+
+/**
+ * glink_create_dummy_xprt_ctx() - create a dummy transport that replaces all
+ * the transport interface functions with a dummy
+ * @orig_xprt_ctx: Pointer to the original transport context.
+ *
+ * The dummy transport is used only when it is swapped with the actual transport
+ * pointer in ssr/unregister case.  It inherits only the edge name from
+ * @orig_xprt_ctx.
+ *
+ * Return: Pointer to dummy transport context, or an ERR_PTR on allocation
+ * failure.
+ */
+static struct glink_core_xprt_ctx *glink_create_dummy_xprt_ctx(
+ struct glink_core_xprt_ctx *orig_xprt_ctx)
+{
+
+ struct glink_core_xprt_ctx *xprt_ptr;
+ struct glink_transport_if *if_ptr;
+
+ xprt_ptr = kzalloc(sizeof(*xprt_ptr), GFP_KERNEL);
+ if (!xprt_ptr)
+ return ERR_PTR(-ENOMEM);
+ /*
+ * Zero-allocate the interface so every op that is not explicitly
+ * assigned below is NULL rather than uninitialized memory; the
+ * previous kmalloc() left the unassigned callbacks as garbage
+ * pointers, so any stray invocation was undefined behavior.
+ */
+ if_ptr = kzalloc(sizeof(*if_ptr), GFP_KERNEL);
+ if (!if_ptr) {
+ kfree(xprt_ptr);
+ return ERR_PTR(-ENOMEM);
+ }
+ rwref_lock_init(&xprt_ptr->xprt_state_lhb0,
+ glink_dummy_xprt_ctx_release);
+
+ strlcpy(xprt_ptr->name, "dummy", GLINK_NAME_SIZE);
+ strlcpy(xprt_ptr->edge, orig_xprt_ctx->edge, GLINK_NAME_SIZE);
+ /* only the ops needed while tearing channels down are populated */
+ if_ptr->poll = dummy_poll;
+ if_ptr->mask_rx_irq = dummy_mask_rx_irq;
+ if_ptr->reuse_rx_intent = dummy_reuse_rx_intent;
+ if_ptr->wait_link_down = dummy_wait_link_down;
+ if_ptr->allocate_rx_intent = dummy_allocate_rx_intent;
+ if_ptr->deallocate_rx_intent = dummy_deallocate_rx_intent;
+ if_ptr->tx_cmd_local_rx_intent = dummy_tx_cmd_local_rx_intent;
+ if_ptr->tx_cmd_local_rx_done = dummy_tx_cmd_local_rx_done;
+ if_ptr->tx = dummy_tx;
+ if_ptr->tx_cmd_rx_intent_req = dummy_tx_cmd_rx_intent_req;
+ if_ptr->tx_cmd_remote_rx_intent_req_ack =
+ dummy_tx_cmd_remote_rx_intent_req_ack;
+ if_ptr->tx_cmd_set_sigs = dummy_tx_cmd_set_sigs;
+ if_ptr->tx_cmd_ch_close = dummy_tx_cmd_ch_close;
+ if_ptr->tx_cmd_ch_remote_close_ack = dummy_tx_cmd_ch_remote_close_ack;
+
+ xprt_ptr->ops = if_ptr;
+ xprt_ptr->log_ctx = log_ctx;
+ spin_lock_init(&xprt_ptr->xprt_ctx_lock_lhb1);
+ INIT_LIST_HEAD(&xprt_ptr->free_lcid_list);
+ xprt_ptr->local_state = GLINK_XPRT_DOWN;
+ xprt_ptr->remote_neg_completed = false;
+ INIT_LIST_HEAD(&xprt_ptr->channels);
+ spin_lock_init(&xprt_ptr->tx_ready_lock_lhb2);
+ mutex_init(&xprt_ptr->xprt_dbgfs_lock_lhb3);
+ return xprt_ptr;
+}
+
+/**
+ * glink_core_channel_cleanup() - cleanup all channels for the transport
+ *
+ * @xprt_ptr: pointer to transport context
+ *
+ * This function should be called either from link_down or ssr.  Channels
+ * that are locally OPENED/OPENING are moved onto a freshly created dummy
+ * transport and remote-closed there; channels already CLOSED/CLOSING are
+ * closed and deleted in place.
+ */
+static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr)
+{
+ unsigned long flags, d_flags;
+ struct channel_ctx *ctx, *tmp_ctx;
+ struct channel_lcid *temp_lcid, *temp_lcid1;
+ struct glink_core_xprt_ctx *dummy_xprt_ctx;
+
+ dummy_xprt_ctx = glink_create_dummy_xprt_ctx(xprt_ptr);
+ if (IS_ERR_OR_NULL(dummy_xprt_ctx)) {
+ GLINK_ERR("%s: Dummy Transport creation failed\n", __func__);
+ return;
+ }
+
+ /* hold the dummy transport alive across both passes below */
+ rwref_get(&dummy_xprt_ctx->xprt_state_lhb0);
+ spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
+ list_for_each_entry_safe(ctx, tmp_ctx, &xprt_ptr->channels,
+ port_list_node) {
+ rwref_get(&ctx->ch_state_lhc0);
+ if (ctx->local_open_state == GLINK_CHANNEL_OPENED ||
+ ctx->local_open_state == GLINK_CHANNEL_OPENING) {
+ /* park open channels on the dummy transport */
+ rwref_get(&dummy_xprt_ctx->xprt_state_lhb0);
+ spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
+ d_flags);
+ list_move_tail(&ctx->port_list_node,
+ &dummy_xprt_ctx->channels);
+ spin_unlock_irqrestore(
+ &dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+ ctx->transport_ptr = dummy_xprt_ctx;
+ } else {
+ /* local state is in either CLOSED or CLOSING */
+ spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1,
+ flags);
+ glink_core_remote_close_common(ctx);
+ if (ctx->local_open_state == GLINK_CHANNEL_CLOSING)
+ glink_core_ch_close_ack_common(ctx);
+ /* Channel should be fully closed now. Delete here */
+ if (ch_is_fully_closed(ctx))
+ glink_delete_ch_from_list(ctx, false);
+ spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
+ }
+ rwref_put(&ctx->ch_state_lhc0);
+ }
+ list_for_each_entry_safe(temp_lcid, temp_lcid1,
+ &xprt_ptr->free_lcid_list, list_node) {
+ list_del(&temp_lcid->list_node);
+ /*
+ * NOTE(review): this frees the address of the embedded
+ * list_node rather than the channel_lcid allocation itself;
+ * it is only correct if list_node is the first member of
+ * struct channel_lcid — confirm the layout, or use
+ * kfree(temp_lcid) which is unambiguous.
+ */
+ kfree(&temp_lcid->list_node);
+ }
+ spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
+
+ /* second pass: remote-close everything parked on the dummy */
+ spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+ list_for_each_entry_safe(ctx, tmp_ctx, &dummy_xprt_ctx->channels,
+ port_list_node) {
+ rwref_get(&ctx->ch_state_lhc0);
+ spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
+ d_flags);
+ glink_core_remote_close_common(ctx);
+ spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
+ d_flags);
+ rwref_put(&ctx->ch_state_lhc0);
+ }
+ spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+ rwref_put(&dummy_xprt_ctx->xprt_state_lhb0);
+}
+/**
+ * glink_core_rx_cmd_version() - receive version/features from remote system
+ *
+ * @if_ptr: pointer to transport interface
+ * @r_version: remote version
+ * @r_features: remote features
+ *
+ * This function is called in response to a remote-initiated version/feature
+ * negotiation sequence.  It walks the local version table downward until it
+ * finds an entry no newer than @r_version, negotiates features if needed,
+ * and always replies with a version ack.
+ */
+static void glink_core_rx_cmd_version(struct glink_transport_if *if_ptr,
+ uint32_t r_version, uint32_t r_features)
+{
+ struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+ const struct glink_core_version *versions = xprt_ptr->versions;
+ bool neg_complete = false;
+ uint32_t l_version;
+
+ if (xprt_is_fully_opened(xprt_ptr)) {
+ GLINK_ERR_XPRT(xprt_ptr,
+ "%s: Negotiation already complete\n", __func__);
+ return;
+ }
+
+ l_version = versions[xprt_ptr->remote_version_idx].version;
+
+ GLINK_INFO_XPRT(xprt_ptr,
+ "%s: [local]%x:%08x [remote]%x:%08x\n", __func__,
+ l_version, xprt_ptr->l_features, r_version, r_features);
+
+ if (l_version > r_version) {
+ /* Find matching version */
+ while (true) {
+ uint32_t rver_idx;
+
+ if (xprt_ptr->remote_version_idx == 0) {
+ /* version negotiation failed */
+ GLINK_ERR_XPRT(xprt_ptr,
+ "%s: Transport negotiation failed\n",
+ __func__);
+ l_version = 0;
+ xprt_ptr->l_features = 0;
+ break;
+ }
+ --xprt_ptr->remote_version_idx;
+ rver_idx = xprt_ptr->remote_version_idx;
+
+ if (versions[rver_idx].version <= r_version) {
+ /* found a potential match */
+ l_version = versions[rver_idx].version;
+ xprt_ptr->l_features =
+ versions[rver_idx].features;
+ break;
+ }
+ }
+ }
+
+ if (l_version == r_version) {
+ GLINK_INFO_XPRT(xprt_ptr,
+ "%s: Remote and local version are matched %x:%08x\n",
+ __func__, r_version, r_features);
+ if (xprt_ptr->l_features != r_features) {
+ /* same version but different features: intersect them */
+ uint32_t rver_idx = xprt_ptr->remote_version_idx;
+
+ xprt_ptr->l_features = versions[rver_idx]
+ .negotiate_features(if_ptr,
+ &xprt_ptr->versions[rver_idx],
+ r_features);
+ GLINK_INFO_XPRT(xprt_ptr,
+ "%s: negotiate features %x:%08x\n",
+ __func__, l_version, xprt_ptr->l_features);
+ }
+ neg_complete = true;
+ }
+ /* the ack is sent even on failure (l_version == 0) */
+ if_ptr->tx_cmd_version_ack(if_ptr, l_version, xprt_ptr->l_features);
+
+ if (neg_complete) {
+ GLINK_INFO_XPRT(xprt_ptr,
+ "%s: Remote negotiation complete %x:%08x\n", __func__,
+ l_version, xprt_ptr->l_features);
+
+ if (xprt_ptr->local_state == GLINK_XPRT_OPENED) {
+ xprt_ptr->capabilities = if_ptr->set_version(if_ptr,
+ l_version,
+ xprt_ptr->l_features);
+ }
+ if_ptr->glink_core_priv->remote_neg_completed = true;
+ if (xprt_is_fully_opened(xprt_ptr))
+ check_link_notifier_and_notify(xprt_ptr,
+ GLINK_LINK_STATE_UP);
+ }
+}
+
+/**
+ * glink_core_rx_cmd_version_ack() - receive negotiation ack from remote system
+ *
+ * @if_ptr: pointer to transport interface
+ * @r_version: remote version response
+ * @r_features: remote features response
+ *
+ * This function is called in response to a local-initiated version/feature
+ * negotiation sequence and is the counter-offer from the remote side based
+ * upon the initial version and feature set requested.  If the counter-offer
+ * does not complete negotiation, a new (lower) version offer is transmitted.
+ */
+static void glink_core_rx_cmd_version_ack(struct glink_transport_if *if_ptr,
+ uint32_t r_version, uint32_t r_features)
+{
+ struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+ const struct glink_core_version *versions = xprt_ptr->versions;
+ uint32_t l_version;
+ bool neg_complete = false;
+
+ if (xprt_is_fully_opened(xprt_ptr)) {
+ GLINK_ERR_XPRT(xprt_ptr,
+ "%s: Negotiation already complete\n", __func__);
+ return;
+ }
+
+ l_version = versions[xprt_ptr->local_version_idx].version;
+
+ GLINK_INFO_XPRT(xprt_ptr,
+ "%s: [local]%x:%08x [remote]%x:%08x\n", __func__,
+ l_version, xprt_ptr->l_features, r_version, r_features);
+
+ if (l_version > r_version) {
+ /* find matching version */
+ while (true) {
+ uint32_t lver_idx = xprt_ptr->local_version_idx;
+
+ if (xprt_ptr->local_version_idx == 0) {
+ /* version negotiation failed */
+ xprt_ptr->local_state = GLINK_XPRT_FAILED;
+ GLINK_ERR_XPRT(xprt_ptr,
+ "%s: Transport negotiation failed\n",
+ __func__);
+ l_version = 0;
+ xprt_ptr->l_features = 0;
+ break;
+ }
+ --xprt_ptr->local_version_idx;
+ lver_idx = xprt_ptr->local_version_idx;
+
+ if (versions[lver_idx].version <= r_version) {
+ /* found a potential match */
+ l_version = versions[lver_idx].version;
+ xprt_ptr->l_features =
+ versions[lver_idx].features;
+ break;
+ }
+ }
+ } else if (l_version == r_version) {
+ if (xprt_ptr->l_features != r_features) {
+ /* version matches, negotiate features */
+ uint32_t lver_idx = xprt_ptr->local_version_idx;
+
+ xprt_ptr->l_features = versions[lver_idx]
+ .negotiate_features(if_ptr,
+ &versions[lver_idx],
+ r_features);
+ GLINK_INFO_XPRT(xprt_ptr,
+ "%s: negotiation features %x:%08x\n",
+ __func__, l_version, xprt_ptr->l_features);
+ } else {
+ neg_complete = true;
+ }
+ } else {
+ /*
+ * r_version > l_version
+ *
+ * Remote responded with a version greater than what we
+ * requested which is invalid and is treated as failure of the
+ * negotiation algorithm.
+ */
+ GLINK_ERR_XPRT(xprt_ptr,
+ "%s: [local]%x:%08x [remote]%x:%08x neg failure\n",
+ __func__, l_version, xprt_ptr->l_features, r_version,
+ r_features);
+ xprt_ptr->local_state = GLINK_XPRT_FAILED;
+ l_version = 0;
+ xprt_ptr->l_features = 0;
+ }
+
+ if (neg_complete) {
+ /* negotiation complete */
+ GLINK_INFO_XPRT(xprt_ptr,
+ "%s: Local negotiation complete %x:%08x\n",
+ __func__, l_version, xprt_ptr->l_features);
+
+ if (xprt_ptr->remote_neg_completed) {
+ xprt_ptr->capabilities = if_ptr->set_version(if_ptr,
+ l_version,
+ xprt_ptr->l_features);
+ }
+
+ xprt_ptr->local_state = GLINK_XPRT_OPENED;
+ if (xprt_is_fully_opened(xprt_ptr))
+ check_link_notifier_and_notify(xprt_ptr,
+ GLINK_LINK_STATE_UP);
+ } else {
+ /* not settled yet: re-offer the (possibly lowered) version */
+ if_ptr->tx_cmd_version(if_ptr, l_version, xprt_ptr->l_features);
+ }
+}
+
+/**
+ * find_l_ctx_get() - find a local channel context based on a remote one
+ * @r_ctx: The remote channel to use as a lookup key.
+ *
+ * If the channel is found, the reference count is incremented to ensure the
+ * lifetime of the channel context. The caller must call rwref_put() when done.
+ *
+ * Return: The corresponding local ctx or NULL is not found.
+ */
+static struct channel_ctx *find_l_ctx_get(struct channel_ctx *r_ctx)
+{
+ struct glink_core_xprt_ctx *xprt;
+ struct channel_ctx *cur;
+ unsigned long flags;
+ struct channel_ctx *found = NULL;
+
+ mutex_lock(&transport_list_lock_lha0);
+ list_for_each_entry(xprt, &transport_list, list_node) {
+ /* only transports sharing the remote channel's edge qualify */
+ if (strcmp(r_ctx->transport_ptr->edge, xprt->edge))
+ continue;
+ rwref_write_get(&xprt->xprt_state_lhb0);
+ if (xprt->local_state != GLINK_XPRT_OPENED) {
+ rwref_write_put(&xprt->xprt_state_lhb0);
+ continue;
+ }
+ spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
+ list_for_each_entry(cur, &xprt->channels, port_list_node) {
+ if (!strcmp(cur->name, r_ctx->name) &&
+ cur->local_xprt_req && cur->local_xprt_resp) {
+ found = cur;
+ rwref_get(&found->ch_state_lhc0);
+ }
+ }
+ spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1, flags);
+ rwref_write_put(&xprt->xprt_state_lhb0);
+ }
+ mutex_unlock(&transport_list_lock_lha0);
+
+ return found;
+}
+
+/**
+ * find_r_ctx_get() - find a remote channel context based on a local one
+ * @l_ctx: The local channel to use as a lookup key.
+ *
+ * If the channel is found, the reference count is incremented to ensure the
+ * lifetime of the channel context. The caller must call rwref_put() when done.
+ *
+ * Return: The corresponding remote ctx or NULL is not found.
+ */
+static struct channel_ctx *find_r_ctx_get(struct channel_ctx *l_ctx)
+{
+ struct glink_core_xprt_ctx *xprt;
+ struct channel_ctx *cur;
+ unsigned long flags;
+ struct channel_ctx *found = NULL;
+
+ mutex_lock(&transport_list_lock_lha0);
+ list_for_each_entry(xprt, &transport_list, list_node) {
+ /* only transports sharing the local channel's edge qualify */
+ if (strcmp(l_ctx->transport_ptr->edge, xprt->edge))
+ continue;
+ rwref_write_get(&xprt->xprt_state_lhb0);
+ if (xprt->local_state != GLINK_XPRT_OPENED) {
+ rwref_write_put(&xprt->xprt_state_lhb0);
+ continue;
+ }
+ spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
+ list_for_each_entry(cur, &xprt->channels, port_list_node) {
+ if (!strcmp(cur->name, l_ctx->name) &&
+ cur->remote_xprt_req &&
+ cur->remote_xprt_resp) {
+ found = cur;
+ rwref_get(&found->ch_state_lhc0);
+ }
+ }
+ spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1, flags);
+ rwref_write_put(&xprt->xprt_state_lhb0);
+ }
+ mutex_unlock(&transport_list_lock_lha0);
+
+ return found;
+}
+
+/**
+ * will_migrate() - will a channel migrate to a different transport
+ * @l_ctx: The local channel to migrate.
+ * @r_ctx: The remote channel to migrate.
+ *
+ * One of the channel contexts can be NULL if not known, but at least one ctx
+ * must be provided.  Mirrors the decision logic at the top of ch_migrate()
+ * without performing the migration.
+ *
+ * Return: Bool indicating if migration will occur.
+ */
+static bool will_migrate(struct channel_ctx *l_ctx, struct channel_ctx *r_ctx)
+{
+ uint16_t new_xprt;
+ bool migrate = false;
+
+ /* take a reference on each ctx, looking the missing one up */
+ if (!r_ctx)
+ r_ctx = find_r_ctx_get(l_ctx);
+ else
+ rwref_get(&r_ctx->ch_state_lhc0);
+ if (!r_ctx)
+ return migrate;
+
+ if (!l_ctx)
+ l_ctx = find_l_ctx_get(r_ctx);
+ else
+ rwref_get(&l_ctx->ch_state_lhc0);
+ if (!l_ctx)
+ goto exit;
+
+ /* already on the transport both sides agreed on */
+ if (l_ctx->local_xprt_req == r_ctx->remote_xprt_req &&
+ l_ctx->local_xprt_req == l_ctx->transport_ptr->id)
+ goto exit;
+ if (l_ctx->no_migrate)
+ goto exit;
+
+ /* clamp the local request to the remote's current transport id */
+ if (l_ctx->local_xprt_req > r_ctx->transport_ptr->id)
+ l_ctx->local_xprt_req = r_ctx->transport_ptr->id;
+
+ /* higher id wins (lower priority transport) */
+ new_xprt = max(l_ctx->local_xprt_req, r_ctx->remote_xprt_req);
+
+ if (new_xprt == l_ctx->transport_ptr->id)
+ goto exit;
+
+ migrate = true;
+exit:
+ /* release the references taken above */
+ if (l_ctx)
+ rwref_put(&l_ctx->ch_state_lhc0);
+ if (r_ctx)
+ rwref_put(&r_ctx->ch_state_lhc0);
+
+ return migrate;
+}
+
+/**
+ * ch_migrate() - migrate a channel to a different transport
+ * @l_ctx: The local channel to migrate.
+ * @r_ctx: The remote channel to migrate.
+ *
+ * One of the channel contexts can be NULL if not known, but at least one ctx
+ * must be provided.  A clone of the channel is left behind on the old
+ * transport in CLOSING state to absorb the close ack, while the live ctx is
+ * re-homed onto the new transport and re-opened there.
+ *
+ * Return: Bool indicating if migration occurred.
+ */
+static bool ch_migrate(struct channel_ctx *l_ctx, struct channel_ctx *r_ctx)
+{
+ uint16_t new_xprt;
+ struct glink_core_xprt_ctx *xprt;
+ unsigned long flags;
+ struct channel_lcid *flcid;
+ uint16_t best_xprt = USHRT_MAX;
+ struct channel_ctx *ctx_clone;
+ bool migrated = false;
+
+ /* take a reference on each ctx, looking the missing one up */
+ if (!r_ctx)
+ r_ctx = find_r_ctx_get(l_ctx);
+ else
+ rwref_get(&r_ctx->ch_state_lhc0);
+ if (!r_ctx)
+ return migrated;
+
+ if (!l_ctx)
+ l_ctx = find_l_ctx_get(r_ctx);
+ else
+ rwref_get(&l_ctx->ch_state_lhc0);
+ if (!l_ctx) {
+ rwref_put(&r_ctx->ch_state_lhc0);
+ return migrated;
+ }
+
+ /* same decision logic as will_migrate() */
+ if (l_ctx->local_xprt_req == r_ctx->remote_xprt_req &&
+ l_ctx->local_xprt_req == l_ctx->transport_ptr->id)
+ goto exit;
+ if (l_ctx->no_migrate)
+ goto exit;
+
+ if (l_ctx->local_xprt_req > r_ctx->transport_ptr->id)
+ l_ctx->local_xprt_req = r_ctx->transport_ptr->id;
+
+ new_xprt = max(l_ctx->local_xprt_req, r_ctx->remote_xprt_req);
+
+ if (new_xprt == l_ctx->transport_ptr->id)
+ goto exit;
+
+ ctx_clone = kmalloc(sizeof(*ctx_clone), GFP_KERNEL);
+ if (!ctx_clone)
+ goto exit;
+
+ /*
+ * NOTE(review): if no transport on this edge has id == new_xprt,
+ * this loop runs to completion and leaves xprt pointing at the
+ * list-head container, which is then used below — presumably the
+ * negotiation guarantees a match exists; confirm that invariant.
+ */
+ mutex_lock(&transport_list_lock_lha0);
+ list_for_each_entry(xprt, &transport_list, list_node)
+ if (!strcmp(l_ctx->transport_ptr->edge, xprt->edge))
+ if (xprt->id == new_xprt)
+ break;
+ mutex_unlock(&transport_list_lock_lha0);
+
+ spin_lock_irqsave(&l_ctx->transport_ptr->xprt_ctx_lock_lhb1, flags);
+ list_del_init(&l_ctx->port_list_node);
+ spin_unlock_irqrestore(&l_ctx->transport_ptr->xprt_ctx_lock_lhb1,
+ flags);
+
+ /*
+ * Park a CLOSING clone on the old transport so it can absorb the
+ * close ack for the old lcid; all list heads, locks and completions
+ * are re-initialized so the clone shares no mutable state with l_ctx.
+ */
+ memcpy(ctx_clone, l_ctx, sizeof(*ctx_clone));
+ ctx_clone->local_xprt_req = 0;
+ ctx_clone->local_xprt_resp = 0;
+ ctx_clone->remote_xprt_req = 0;
+ ctx_clone->remote_xprt_resp = 0;
+ ctx_clone->notify_state = NULL;
+ ctx_clone->local_open_state = GLINK_CHANNEL_CLOSING;
+ rwref_lock_init(&ctx_clone->ch_state_lhc0, glink_ch_ctx_release);
+ init_completion(&ctx_clone->int_req_ack_complete);
+ init_completion(&ctx_clone->int_req_complete);
+ spin_lock_init(&ctx_clone->local_rx_intent_lst_lock_lhc1);
+ spin_lock_init(&ctx_clone->rmt_rx_intent_lst_lock_lhc2);
+ INIT_LIST_HEAD(&ctx_clone->tx_ready_list_node);
+ INIT_LIST_HEAD(&ctx_clone->local_rx_intent_list);
+ INIT_LIST_HEAD(&ctx_clone->local_rx_intent_ntfy_list);
+ INIT_LIST_HEAD(&ctx_clone->local_rx_intent_free_list);
+ INIT_LIST_HEAD(&ctx_clone->rmt_rx_intent_list);
+ INIT_LIST_HEAD(&ctx_clone->tx_active);
+ spin_lock_init(&ctx_clone->tx_pending_rmt_done_lock_lhc4);
+ INIT_LIST_HEAD(&ctx_clone->tx_pending_remote_done);
+ spin_lock_init(&ctx_clone->tx_lists_lock_lhc3);
+ spin_lock_irqsave(&l_ctx->transport_ptr->xprt_ctx_lock_lhb1, flags);
+ list_add_tail(&ctx_clone->port_list_node,
+ &l_ctx->transport_ptr->channels);
+ spin_unlock_irqrestore(&l_ctx->transport_ptr->xprt_ctx_lock_lhb1,
+ flags);
+
+ /* close the channel on the old transport (ack goes to the clone) */
+ l_ctx->transport_ptr->ops->tx_cmd_ch_close(l_ctx->transport_ptr->ops,
+ l_ctx->lcid);
+
+ l_ctx->transport_ptr = xprt;
+ l_ctx->local_xprt_req = 0;
+ l_ctx->local_xprt_resp = 0;
+ if (new_xprt != r_ctx->transport_ptr->id) {
+ /* remote is elsewhere: reset both sides' negotiation state */
+ r_ctx->local_xprt_req = 0;
+ r_ctx->local_xprt_resp = 0;
+ r_ctx->remote_xprt_req = 0;
+ r_ctx->remote_xprt_resp = 0;
+
+ l_ctx->remote_xprt_req = 0;
+ l_ctx->remote_xprt_resp = 0;
+ l_ctx->remote_opened = false;
+
+ /* allocate an lcid on the new transport (reuse freed ones) */
+ rwref_write_get(&xprt->xprt_state_lhb0);
+ spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
+ if (list_empty(&xprt->free_lcid_list)) {
+ l_ctx->lcid = xprt->next_lcid++;
+ } else {
+ flcid = list_first_entry(&xprt->free_lcid_list,
+ struct channel_lcid, list_node);
+ l_ctx->lcid = flcid->lcid;
+ list_del(&flcid->list_node);
+ kfree(flcid);
+ }
+ list_add_tail(&l_ctx->port_list_node, &xprt->channels);
+ spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1, flags);
+ rwref_write_put(&xprt->xprt_state_lhb0);
+ } else {
+ /* remote side already lives on the target: absorb r_ctx */
+ l_ctx->lcid = r_ctx->lcid;
+ l_ctx->rcid = r_ctx->rcid;
+ l_ctx->remote_opened = r_ctx->remote_opened;
+ l_ctx->remote_xprt_req = r_ctx->remote_xprt_req;
+ l_ctx->remote_xprt_resp = r_ctx->remote_xprt_resp;
+ glink_delete_ch_from_list(r_ctx, false);
+
+ spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
+ list_add_tail(&l_ctx->port_list_node, &xprt->channels);
+ spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1, flags);
+ }
+
+
+ /* re-open on the new transport, requesting the best (lowest) id */
+ mutex_lock(&transport_list_lock_lha0);
+ list_for_each_entry(xprt, &transport_list, list_node)
+ if (!strcmp(l_ctx->transport_ptr->edge, xprt->edge))
+ if (xprt->id < best_xprt)
+ best_xprt = xprt->id;
+ mutex_unlock(&transport_list_lock_lha0);
+ l_ctx->local_open_state = GLINK_CHANNEL_OPENING;
+ l_ctx->local_xprt_req = best_xprt;
+ l_ctx->transport_ptr->ops->tx_cmd_ch_open(l_ctx->transport_ptr->ops,
+ l_ctx->lcid, l_ctx->name, best_xprt);
+
+ migrated = true;
+exit:
+ rwref_put(&l_ctx->ch_state_lhc0);
+ rwref_put(&r_ctx->ch_state_lhc0);
+
+ return migrated;
+}
+
+/**
+ * calculate_xprt_resp() - calculate the response to a remote xprt request
+ * @r_ctx: The channel the remote xprt request is for.
+ *
+ * Stores the computed response in @r_ctx->remote_xprt_resp as well as
+ * returning it.
+ *
+ * Return: The calculated response.
+ */
+static uint16_t calculate_xprt_resp(struct channel_ctx *r_ctx)
+{
+ struct channel_ctx *l_ctx = find_l_ctx_get(r_ctx);
+
+ if (!l_ctx) {
+ /* no local counterpart: stay on the receiving transport */
+ r_ctx->remote_xprt_resp = r_ctx->transport_ptr->id;
+ } else if (r_ctx->remote_xprt_req == r_ctx->transport_ptr->id) {
+ r_ctx->remote_xprt_resp = r_ctx->remote_xprt_req;
+ } else if (!l_ctx->local_xprt_req) {
+ r_ctx->remote_xprt_resp = r_ctx->remote_xprt_req;
+ } else if (l_ctx->no_migrate) {
+ r_ctx->remote_xprt_resp = l_ctx->local_xprt_req;
+ } else {
+ r_ctx->remote_xprt_resp = max(l_ctx->local_xprt_req,
+ r_ctx->remote_xprt_req);
+ }
+
+ if (l_ctx)
+ rwref_put(&l_ctx->ch_state_lhc0);
+
+ return r_ctx->remote_xprt_resp;
+}
+
+/**
+ * glink_core_rx_cmd_ch_remote_open() - Remote-initiated open command
+ *
+ * @if_ptr: Pointer to transport instance
+ * @rcid: Remote Channel ID
+ * @name: Channel name
+ * @req_xprt: Requested transport to migrate to
+ *
+ * Finds (or creates) the channel context for @name, records the remote
+ * open, acks it with the calculated transport response, and either
+ * notifies GLINK_CONNECTED or starts a migration.
+ */
+static void glink_core_rx_cmd_ch_remote_open(struct glink_transport_if *if_ptr,
+ uint32_t rcid, const char *name, uint16_t req_xprt)
+{
+ struct channel_ctx *ctx;
+ uint16_t xprt_resp;
+ bool do_migrate;
+
+ ctx = ch_name_to_ch_ctx_create(if_ptr->glink_core_priv, name);
+ if (ctx == NULL) {
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: invalid rcid %u received, name '%s'\n",
+ __func__, rcid, name);
+ return;
+ }
+
+ /* port already exists */
+ if (ctx->remote_opened) {
+ /*
+ * NOTE(review): this early return performs no put on ctx —
+ * correct only if ch_name_to_ch_ctx_create() does not hand
+ * back a reference the caller must drop; confirm its contract.
+ */
+ GLINK_ERR_CH(ctx,
+ "%s: Duplicate remote open for rcid %u, name '%s'\n",
+ __func__, rcid, name);
+ return;
+ }
+
+ ctx->remote_opened = true;
+ ch_add_rcid(if_ptr->glink_core_priv, ctx, rcid);
+ ctx->transport_ptr = if_ptr->glink_core_priv;
+
+ ctx->remote_xprt_req = req_xprt;
+ xprt_resp = calculate_xprt_resp(ctx);
+
+ /* decide on migration before acking so the ack carries xprt_resp */
+ do_migrate = will_migrate(NULL, ctx);
+ GLINK_INFO_CH(ctx, "%s: remote: CLOSED->OPENED ; xprt req:resp %u:%u\n",
+ __func__, req_xprt, xprt_resp);
+
+ if_ptr->tx_cmd_ch_remote_open_ack(if_ptr, rcid, xprt_resp);
+ if (!do_migrate && ch_is_fully_opened(ctx))
+ ctx->notify_state(ctx, ctx->user_priv, GLINK_CONNECTED);
+
+
+ if (do_migrate)
+ ch_migrate(NULL, ctx);
+}
+
+/**
+ * glink_core_rx_cmd_ch_open_ack() - Receive ack to previously sent open request
+ *
+ * if_ptr: Pointer to transport instance
+ * lcid: Local Channel ID
+ * @xprt_resp: Response to the transport migration request
+ *
+ * If no migration results from @xprt_resp, the channel transitions to
+ * OPENED and, once both sides are open, GLINK_CONNECTED is delivered.
+ */
+static void glink_core_rx_cmd_ch_open_ack(struct glink_transport_if *if_ptr,
+ uint32_t lcid, uint16_t xprt_resp)
+{
+ struct channel_ctx *ctx;
+
+ ctx = xprt_lcid_to_ch_ctx_get(if_ptr->glink_core_priv, lcid);
+ if (!ctx) {
+ /* unknown LCID received - this shouldn't happen */
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: invalid lcid %u received\n", __func__,
+ (unsigned)lcid);
+ return;
+ }
+
+ if (ctx->local_open_state != GLINK_CHANNEL_OPENING) {
+ GLINK_ERR_CH(ctx,
+ "%s: unexpected open ack receive for lcid. Current state: %u. Thread: %u\n",
+ __func__, ctx->local_open_state, current->pid);
+ rwref_put(&ctx->ch_state_lhc0);
+ return;
+ }
+
+ ctx->local_xprt_resp = xprt_resp;
+ /* if a migration is triggered, the reopen happens on the new xprt */
+ if (!ch_migrate(ctx, NULL)) {
+ ctx->local_open_state = GLINK_CHANNEL_OPENED;
+ GLINK_INFO_PERF_CH(ctx,
+ "%s: local:GLINK_CHANNEL_OPENING_WAIT->GLINK_CHANNEL_OPENED\n",
+ __func__);
+
+ if (ch_is_fully_opened(ctx)) {
+ ctx->notify_state(ctx, ctx->user_priv, GLINK_CONNECTED);
+ GLINK_INFO_PERF_CH(ctx,
+ "%s: notify state: GLINK_CONNECTED\n",
+ __func__);
+ }
+ }
+ /* drop the reference taken by xprt_lcid_to_ch_ctx_get() */
+ rwref_put(&ctx->ch_state_lhc0);
+}
+
+/**
+ * glink_core_rx_cmd_ch_remote_close() - Receive remote close command
+ *
+ * if_ptr: Pointer to transport instance
+ * rcid: Remote Channel ID
+ *
+ * Marks the channel for deletion, acks the close, and if the channel is
+ * now fully closed removes it from the channel list.
+ */
+static void glink_core_rx_cmd_ch_remote_close(
+ struct glink_transport_if *if_ptr, uint32_t rcid)
+{
+ struct channel_ctx *ctx;
+ bool is_ch_fully_closed;
+
+ ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+ if (!ctx) {
+ /* unknown LCID received - this shouldn't happen */
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: invalid rcid %u received\n", __func__,
+ (unsigned)rcid);
+ return;
+ }
+
+ if (!ctx->remote_opened) {
+ GLINK_ERR_CH(ctx,
+ "%s: unexpected remote close receive for rcid %u\n",
+ __func__, (unsigned)rcid);
+ rwref_put(&ctx->ch_state_lhc0);
+ return;
+ }
+ GLINK_INFO_CH(ctx, "%s: remote: OPENED->CLOSED\n", __func__);
+
+ is_ch_fully_closed = glink_core_remote_close_common(ctx);
+
+ ctx->pending_delete = true;
+ if_ptr->tx_cmd_ch_remote_close_ack(if_ptr, rcid);
+
+ if (is_ch_fully_closed) {
+ glink_delete_ch_from_list(ctx, true);
+ /* let any in-flight tx work for this channel drain first */
+ flush_workqueue(ctx->transport_ptr->tx_wq);
+ }
+ /* drop the reference taken by xprt_rcid_to_ch_ctx_get() */
+ rwref_put(&ctx->ch_state_lhc0);
+}
+
+/**
+ * glink_core_rx_cmd_ch_close_ack() - Receive locally-requested close ack
+ *
+ * @if_ptr: Pointer to transport instance
+ * @lcid: Local Channel ID
+ *
+ * Completes a close initiated by the local side; if the channel is then
+ * fully closed, it is removed from the channel list.
+ */
+static void glink_core_rx_cmd_ch_close_ack(struct glink_transport_if *if_ptr,
+ uint32_t lcid)
+{
+ struct channel_ctx *ctx;
+ bool is_ch_fully_closed;
+
+ /* lookup takes a reference on ctx; released via rwref_put() below */
+ ctx = xprt_lcid_to_ch_ctx_get(if_ptr->glink_core_priv, lcid);
+ if (!ctx) {
+ /* unknown LCID received - this shouldn't happen */
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: invalid lcid %u received\n", __func__,
+ (unsigned)lcid);
+ return;
+ }
+
+ /* a close ack is only valid while the local side is closing */
+ if (ctx->local_open_state != GLINK_CHANNEL_CLOSING) {
+ GLINK_ERR_CH(ctx,
+ "%s: unexpected close ack receive for lcid %u\n",
+ __func__, (unsigned)lcid);
+ rwref_put(&ctx->ch_state_lhc0);
+ return;
+ }
+
+ is_ch_fully_closed = glink_core_ch_close_ack_common(ctx);
+ if (is_ch_fully_closed) {
+ glink_delete_ch_from_list(ctx, true);
+ /* drain any tx work still referencing this channel */
+ flush_workqueue(ctx->transport_ptr->tx_wq);
+ }
+ rwref_put(&ctx->ch_state_lhc0);
+}
+
+/**
+ * glink_core_remote_rx_intent_put() - Receive a remote RX intent
+ *
+ * @if_ptr: Pointer to transport instance
+ * @rcid: Remote Channel ID
+ * @riid: Remote Intent ID
+ * @size: Size of the remote intent
+ *
+ * Queues the advertised remote intent on the channel so that future local
+ * transmits can consume it.
+ */
+static void glink_core_remote_rx_intent_put(struct glink_transport_if *if_ptr,
+ uint32_t rcid, uint32_t riid, size_t size)
+{
+ struct channel_ctx *ctx;
+
+ ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+ if (!ctx) {
+ /* unknown rcid received - this shouldn't happen */
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: invalid rcid received %u\n", __func__,
+ (unsigned)rcid);
+ return;
+ }
+
+ ch_push_remote_rx_intent(ctx, size, riid);
+ rwref_put(&ctx->ch_state_lhc0);
+}
+
+/**
+ * glink_core_rx_cmd_remote_rx_intent_req() - Receive a request for rx_intent
+ * from remote side
+ * @if_ptr: Pointer to the transport interface
+ * @rcid: Remote channel ID
+ * @size: size of the intent
+ *
+ * The function searches for the local channel to which the request for
+ * rx_intent has arrived and informs this request to the local channel through
+ * notify_rx_intent_req callback registered by the local channel. The
+ * client's answer is sent back to the remote side as an ack.
+ */
+static void glink_core_rx_cmd_remote_rx_intent_req(
+ struct glink_transport_if *if_ptr, uint32_t rcid, size_t size)
+{
+ struct channel_ctx *ctx;
+ bool cb_ret;
+
+ ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+ if (!ctx) {
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: invalid rcid received %u\n", __func__,
+ (unsigned)rcid);
+ return;
+ }
+ /* the client must have registered a callback to answer the request */
+ if (!ctx->notify_rx_intent_req) {
+ GLINK_ERR_CH(ctx,
+ "%s: Notify function not defined for local channel",
+ __func__);
+ rwref_put(&ctx->ch_state_lhc0);
+ return;
+ }
+
+ cb_ret = ctx->notify_rx_intent_req(ctx, ctx->user_priv, size);
+ if_ptr->tx_cmd_remote_rx_intent_req_ack(if_ptr, ctx->lcid, cb_ret);
+ rwref_put(&ctx->ch_state_lhc0);
+}
+
+/**
+ * glink_core_rx_cmd_rx_intent_req_ack() - Receive ack from remote side
+ * for a local rx_intent request
+ * @if_ptr: Pointer to the transport interface
+ * @rcid: Remote channel ID
+ * @granted: true if the remote side granted the rx_intent request
+ *
+ * Records the remote answer and wakes any waiter blocked on
+ * int_req_ack_complete.
+ */
+static void glink_core_rx_cmd_rx_intent_req_ack(struct glink_transport_if
+ *if_ptr, uint32_t rcid, bool granted)
+{
+ struct channel_ctx *ctx;
+
+ ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+ if (!ctx) {
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: Invalid rcid received %u\n", __func__,
+ (unsigned)rcid);
+ return;
+ }
+ ctx->int_req_ack = granted;
+ complete_all(&ctx->int_req_ack_complete);
+ rwref_put(&ctx->ch_state_lhc0);
+}
+
+/**
+ * glink_core_rx_get_pkt_ctx() - lookup RX intent structure
+ *
+ * @if_ptr: Pointer to the transport interface
+ * @rcid: Remote channel ID
+ * @liid: Local RX Intent ID
+ *
+ * Note that this function is designed to always be followed by a call to
+ * glink_core_rx_put_pkt_ctx() to complete an RX operation by the transport.
+ *
+ * Return: Pointer to RX intent structure (or NULL if none found)
+ */
+static struct glink_core_rx_intent *glink_core_rx_get_pkt_ctx(
+ struct glink_transport_if *if_ptr, uint32_t rcid, uint32_t liid)
+{
+ struct channel_ctx *ctx;
+ struct glink_core_rx_intent *intent_ptr;
+
+ ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+ if (!ctx) {
+ /* unknown RCID received - this shouldn't happen */
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: invalid rcid received %u\n", __func__,
+ (unsigned)rcid);
+ return NULL;
+ }
+
+ /* match pending intent */
+ intent_ptr = ch_get_local_rx_intent(ctx, liid);
+ if (intent_ptr == NULL) {
+ GLINK_ERR_CH(ctx,
+ "%s: L[%u]: No matching rx intent\n",
+ __func__, liid);
+ rwref_put(&ctx->ch_state_lhc0);
+ return NULL;
+ }
+
+ /* the intent itself keeps the data alive; channel ref can be dropped */
+ rwref_put(&ctx->ch_state_lhc0);
+ return intent_ptr;
+}
+
+/**
+ * glink_core_rx_put_pkt_ctx() - Complete an RX operation on an intent
+ *
+ * @if_ptr: Pointer to the transport interface
+ * @rcid: Remote channel ID
+ * @intent_ptr: Pointer to the RX intent
+ * @complete: Packet has been completely received
+ *
+ * Note that this function should always be preceded by a call to
+ * glink_core_rx_get_pkt_ctx(). If the packet is complete, the client is
+ * notified through one of notify_rx_tracer_pkt, notify_rx or notify_rxv.
+ */
+void glink_core_rx_put_pkt_ctx(struct glink_transport_if *if_ptr,
+ uint32_t rcid, struct glink_core_rx_intent *intent_ptr, bool complete)
+{
+ struct channel_ctx *ctx;
+
+ if (!complete) {
+ /* partial packet: wait for the remaining fragments */
+ GLINK_DBG_XPRT(if_ptr->glink_core_priv,
+ "%s: rcid[%u] liid[%u] pkt_size[%zu] write_offset[%zu] Fragment received\n",
+ __func__, rcid, intent_ptr->id,
+ intent_ptr->pkt_size,
+ intent_ptr->write_offset);
+ return;
+ }
+
+ /* packet complete */
+ ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+ if (!ctx) {
+ /* unknown RCID received - this shouldn't happen */
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: invalid rcid received %u\n", __func__,
+ (unsigned)rcid);
+ return;
+ }
+
+ if (unlikely(intent_ptr->tracer_pkt)) {
+ tracer_pkt_log_event(intent_ptr->data, GLINK_CORE_RX);
+ ch_set_local_rx_intent_notified(ctx, intent_ptr);
+ if (ctx->notify_rx_tracer_pkt)
+ ctx->notify_rx_tracer_pkt(ctx, ctx->user_priv,
+ intent_ptr->pkt_priv, intent_ptr->data,
+ intent_ptr->pkt_size);
+ rwref_put(&ctx->ch_state_lhc0);
+ return;
+ }
+
+ GLINK_PERF_CH(ctx, "%s: L[%u]: data[%p] size[%zu]\n",
+ __func__, intent_ptr->id,
+ intent_ptr->data ? intent_ptr->data : intent_ptr->iovec,
+ intent_ptr->write_offset);
+ if (!intent_ptr->data && !ctx->notify_rxv) {
+ /* Received a vector, but client can't handle a vector */
+ intent_ptr->bounce_buf = linearize_vector(intent_ptr->iovec,
+ intent_ptr->pkt_size,
+ intent_ptr->vprovider,
+ intent_ptr->pprovider);
+ if (IS_ERR_OR_NULL(intent_ptr->bounce_buf)) {
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: Error %ld linearizing vector\n", __func__,
+ PTR_ERR(intent_ptr->bounce_buf));
+ BUG();
+ /* reached only if CONFIG_BUG compiles BUG() out */
+ rwref_put(&ctx->ch_state_lhc0);
+ return;
+ }
+ }
+
+ ch_set_local_rx_intent_notified(ctx, intent_ptr);
+ if (ctx->notify_rx && (intent_ptr->data || intent_ptr->bounce_buf)) {
+ /*
+ * Pass user_priv and pkt_priv exactly like the tracer-pkt
+ * and rxv notification paths above/below do; the original
+ * call dropped both arguments.
+ */
+ ctx->notify_rx(ctx, ctx->user_priv, intent_ptr->pkt_priv,
+ intent_ptr->data ?
+ intent_ptr->data : intent_ptr->bounce_buf,
+ intent_ptr->pkt_size);
+ } else if (ctx->notify_rxv) {
+ ctx->notify_rxv(ctx, ctx->user_priv, intent_ptr->pkt_priv,
+ intent_ptr->iovec, intent_ptr->pkt_size,
+ intent_ptr->vprovider, intent_ptr->pprovider);
+ } else {
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: Unable to process rx data\n", __func__);
+ BUG();
+ }
+ rwref_put(&ctx->ch_state_lhc0);
+}
+
+/**
+ * glink_core_rx_cmd_tx_done() - Receive Transmit Done Command
+ * @if_ptr: Pointer to the transport interface
+ * @rcid: Remote channel ID
+ * @riid: Remote intent ID
+ * @reuse: Reuse the consumed intent
+ *
+ * Notifies the client that its packet was consumed by the remote side and,
+ * if requested, re-queues the consumed remote intent for reuse.
+ */
+void glink_core_rx_cmd_tx_done(struct glink_transport_if *if_ptr,
+ uint32_t rcid, uint32_t riid, bool reuse)
+{
+ struct channel_ctx *ctx;
+ struct glink_core_tx_pkt *tx_pkt;
+ unsigned long flags;
+ size_t intent_size;
+
+ ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+ if (!ctx) {
+ /* unknown RCID received - this shouldn't happen */
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: invalid rcid %u received\n", __func__,
+ rcid);
+ return;
+ }
+
+ spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
+ tx_pkt = ch_get_tx_pending_remote_done(ctx, riid);
+ if (IS_ERR_OR_NULL(tx_pkt)) {
+ /*
+ * FUTURE - in the case of a zero-copy transport, this is a
+ * fatal protocol failure since memory corruption could occur
+ * in this case. Prevent this by adding code in glink_close()
+ * to recall any buffers in flight / wait for them to be
+ * returned.
+ */
+ GLINK_ERR_CH(ctx, "%s: R[%u]: No matching tx\n",
+ __func__,
+ (unsigned)riid);
+ spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
+ rwref_put(&ctx->ch_state_lhc0);
+ return;
+ }
+
+ /* notify client; note this runs with tx_lists_lock_lhc3 held */
+ ctx->notify_tx_done(ctx, ctx->user_priv, tx_pkt->pkt_priv,
+ tx_pkt->data ? tx_pkt->data : tx_pkt->iovec);
+ /* snapshot size before the pkt is removed (and possibly freed) */
+ intent_size = tx_pkt->intent_size;
+ ch_remove_tx_pending_remote_done(ctx, tx_pkt);
+ spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
+
+ if (reuse)
+ ch_push_remote_rx_intent(ctx, intent_size, riid);
+ rwref_put(&ctx->ch_state_lhc0);
+}
+
+/**
+ * xprt_schedule_tx() - Schedules packet for transmit.
+ * @xprt_ptr: Transport to send packet on.
+ * @ch_ptr: Channel to send packet on.
+ * @tx_info: Packet to transmit; ownership is taken (freed on error,
+ * queued on success).
+ *
+ * Adds the channel to its priority bin's tx_ready list (if not already
+ * there), queues the packet on the channel and kicks the tx worker.
+ */
+static void xprt_schedule_tx(struct glink_core_xprt_ctx *xprt_ptr,
+ struct channel_ctx *ch_ptr,
+ struct glink_core_tx_pkt *tx_info)
+{
+ unsigned long flags;
+
+ if (unlikely(xprt_ptr->local_state == GLINK_XPRT_DOWN)) {
+ GLINK_ERR_CH(ch_ptr, "%s: Error XPRT is down\n", __func__);
+ kfree(tx_info);
+ return;
+ }
+
+ spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb2, flags);
+ /* re-check under the lock: channel may have closed meanwhile */
+ if (unlikely(!ch_is_fully_opened(ch_ptr))) {
+ spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb2, flags);
+ GLINK_ERR_CH(ch_ptr, "%s: Channel closed before tx\n",
+ __func__);
+ kfree(tx_info);
+ return;
+ }
+ if (list_empty(&ch_ptr->tx_ready_list_node))
+ list_add_tail(&ch_ptr->tx_ready_list_node,
+ &xprt_ptr->prio_bin[ch_ptr->curr_priority].tx_ready);
+
+ /* lock order: tx_ready_lock_lhb2 (xprt) then tx_lists_lock_lhc3 (ch) */
+ spin_lock(&ch_ptr->tx_lists_lock_lhc3);
+ list_add_tail(&tx_info->list_node, &ch_ptr->tx_active);
+ glink_qos_do_ch_tx(ch_ptr);
+ if (unlikely(tx_info->tracer_pkt))
+ tracer_pkt_log_event((void *)(tx_info->data),
+ GLINK_QUEUE_TO_SCHEDULER);
+
+ spin_unlock(&ch_ptr->tx_lists_lock_lhc3);
+ spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb2, flags);
+
+ queue_work(xprt_ptr->tx_wq, &xprt_ptr->tx_work);
+}
+
+/**
+ * xprt_single_threaded_tx() - Transmit in the context of sender.
+ * @xprt_ptr: Transport to send packet on.
+ * @ch_ptr: Channel to send packet on.
+ * @tx_info: Packet to transmit; freed on failure, queued on the
+ * pending-remote-done list on success.
+ *
+ * Return: 0 on success, negative transport error code on failure.
+ */
+static int xprt_single_threaded_tx(struct glink_core_xprt_ctx *xprt_ptr,
+ struct channel_ctx *ch_ptr,
+ struct glink_core_tx_pkt *tx_info)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch_ptr->tx_pending_rmt_done_lock_lhc4, flags);
+ /*
+ * NOTE(review): busy-waits on -EAGAIN while holding a spinlock with
+ * interrupts disabled - acceptable only if the transport can make
+ * progress without this CPU; confirm against transport contract.
+ */
+ do {
+ ret = xprt_ptr->ops->tx(ch_ptr->transport_ptr->ops,
+ ch_ptr->lcid, tx_info);
+ } while (ret == -EAGAIN);
+ if (ret < 0 || tx_info->size_remaining) {
+ GLINK_ERR_CH(ch_ptr, "%s: Error %d writing data\n",
+ __func__, ret);
+ kfree(tx_info);
+ } else {
+ /* remote must still ack; park the packet until tx_done */
+ list_add_tail(&tx_info->list_done,
+ &ch_ptr->tx_pending_remote_done);
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&ch_ptr->tx_pending_rmt_done_lock_lhc4, flags);
+ return ret;
+}
+
+/**
+ * glink_scheduler_eval_prio() - Evaluate the channel priority
+ * @ctx: Channel whose priority is evaluated.
+ * @xprt_ctx: Transport in which the channel is part of.
+ *
+ * This function is called by the packet scheduler to measure the traffic
+ * rate observed in the channel and compare it against the traffic rate
+ * requested by the channel. The comparison result is used to evaluate the
+ * priority of the channel.
+ */
+static void glink_scheduler_eval_prio(struct channel_ctx *ctx,
+ struct glink_core_xprt_ctx *xprt_ctx)
+{
+ unsigned long token_end_time;
+ unsigned long token_consume_time, rem;
+ unsigned long obs_rate_kBps;
+
+ /* channels with no QoS request (initial_priority == 0) are skipped */
+ if (ctx->initial_priority == 0)
+ return;
+
+ /* only re-evaluate once the current token allotment is used up */
+ if (ctx->token_count)
+ return;
+
+ token_end_time = arch_counter_get_cntpct();
+
+ /*
+ * Convert the consumed counter ticks to microseconds:
+ * ns-per-tick * ticks / 1000.
+ */
+ token_consume_time = NSEC_PER_SEC;
+ rem = do_div(token_consume_time, arch_timer_get_rate());
+ token_consume_time = (token_end_time - ctx->token_start_time) *
+ token_consume_time;
+ rem = do_div(token_consume_time, 1000);
+ obs_rate_kBps = glink_qos_calc_rate_kBps(ctx->txd_len,
+ token_consume_time);
+ if (obs_rate_kBps > ctx->req_rate_kBps) {
+ /* channel exceeded its requested rate: demote to priority 0 */
+ GLINK_INFO_CH(ctx, "%s: Obs. Rate (%lu) > Req. Rate (%lu)\n",
+ __func__, obs_rate_kBps, ctx->req_rate_kBps);
+ glink_qos_update_ch_prio(ctx, 0);
+ } else {
+ glink_qos_update_ch_prio(ctx, ctx->initial_priority);
+ }
+
+ /* start a fresh measurement window */
+ ctx->token_count = xprt_ctx->token_count;
+ ctx->txd_len = 0;
+ ctx->token_start_time = arch_counter_get_cntpct();
+}
+
+/**
+ * glink_scheduler_tx() - Transmit operation by the scheduler
+ * @ctx: Channel which is scheduled for transmission.
+ * @xprt_ctx: Transport context in which the transmission is performed.
+ *
+ * This function is called by the scheduler after scheduling a channel for
+ * transmission over the transport. Up to one MTU worth of data is drained
+ * from the channel's tx_active list.
+ *
+ * Return: return value as returned by the transport on success,
+ * standard Linux error codes on failure, 0 if there was nothing
+ * to transmit.
+ */
+static int glink_scheduler_tx(struct channel_ctx *ctx,
+ struct glink_core_xprt_ctx *xprt_ctx)
+{
+ unsigned long flags;
+ struct glink_core_tx_pkt *tx_info;
+ size_t txd_len = 0;
+ size_t tx_len = 0;
+ uint32_t num_pkts = 0;
+ /*
+ * Initialize to 0: if tx_active is already empty the loop body never
+ * runs and ret would otherwise be returned uninitialized.
+ */
+ int ret = 0;
+
+ spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
+ while (txd_len < xprt_ctx->mtu &&
+ !list_empty(&ctx->tx_active)) {
+ tx_info = list_first_entry(&ctx->tx_active,
+ struct glink_core_tx_pkt, list_node);
+ rwref_get(&tx_info->pkt_ref);
+
+ /* park the packet for the remote tx_done ack */
+ spin_lock(&ctx->tx_pending_rmt_done_lock_lhc4);
+ if (list_empty(&tx_info->list_done))
+ list_add(&tx_info->list_done,
+ &ctx->tx_pending_remote_done);
+ spin_unlock(&ctx->tx_pending_rmt_done_lock_lhc4);
+ /* drop the list lock across the (potentially slow) tx call */
+ spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
+
+ if (unlikely(tx_info->tracer_pkt)) {
+ tracer_pkt_log_event((void *)(tx_info->data),
+ GLINK_SCHEDULER_TX);
+ ret = xprt_ctx->ops->tx_cmd_tracer_pkt(xprt_ctx->ops,
+ ctx->lcid, tx_info);
+ } else {
+ /* clamp this chunk to the MTU budget left */
+ tx_len = tx_info->size_remaining <
+ (xprt_ctx->mtu - txd_len) ?
+ tx_info->size_remaining :
+ (xprt_ctx->mtu - txd_len);
+ tx_info->tx_len = tx_len;
+ ret = xprt_ctx->ops->tx(xprt_ctx->ops,
+ ctx->lcid, tx_info);
+ }
+ spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
+ if (ret == -EAGAIN) {
+ /*
+ * transport unable to send at the moment and will call
+ * tx_resume() when it can send again.
+ */
+ rwref_put(&tx_info->pkt_ref);
+ break;
+ } else if (ret < 0) {
+ /*
+ * General failure code that indicates that the
+ * transport is unable to recover. In this case, the
+ * communication failure will be detected at a higher
+ * level and a subsystem restart of the affected system
+ * will be triggered.
+ */
+ GLINK_ERR_XPRT(xprt_ctx,
+ "%s: unrecoverable xprt failure %d\n",
+ __func__, ret);
+ rwref_put(&tx_info->pkt_ref);
+ break;
+ } else if (!ret && tx_info->size_remaining) {
+ /*
+ * Transport unable to send any data on this channel.
+ * Break out of the loop so that the scheduler can
+ * continue with the next channel.
+ */
+ break;
+ } else {
+ txd_len += tx_len;
+ }
+
+ if (!tx_info->size_remaining) {
+ /* packet fully sent: retire it from tx_active */
+ num_pkts++;
+ list_del_init(&tx_info->list_node);
+ rwref_put(&tx_info->pkt_ref);
+ }
+ }
+
+ /* charge the transmitted bytes and packets against the QoS tokens */
+ ctx->txd_len += txd_len;
+ if (txd_len) {
+ if (num_pkts >= ctx->token_count)
+ ctx->token_count = 0;
+ else if (num_pkts)
+ ctx->token_count -= num_pkts;
+ else
+ ctx->token_count--;
+ }
+ spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
+
+ return ret;
+}
+
+/**
+ * tx_work_func() - Transmit worker
+ * @work: Linux work structure
+ *
+ * Repeatedly picks the highest-priority non-empty tx_ready bin and asks
+ * glink_scheduler_tx() to drain its first channel. Exits when all bins are
+ * empty, on transport backpressure (-EAGAIN), on unrecoverable transport
+ * failure, or when a full rotation of a bin made no progress.
+ */
+static void tx_work_func(struct work_struct *work)
+{
+ struct glink_core_xprt_ctx *xprt_ptr =
+ container_of(work, struct glink_core_xprt_ctx, tx_work);
+ struct channel_ctx *ch_ptr;
+ uint32_t prio;
+ uint32_t tx_ready_head_prio = 0;
+ /* first channel tried in the current rotation; livelock detector */
+ struct channel_ctx *tx_ready_head = NULL;
+ bool transmitted_successfully = true;
+ unsigned long flags;
+ int ret = 0;
+
+ GLINK_PERF("%s: worker starting\n", __func__);
+
+ while (1) {
+ /* scan bins from highest priority down to 0 */
+ prio = xprt_ptr->num_priority - 1;
+ spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb2, flags);
+ while (list_empty(&xprt_ptr->prio_bin[prio].tx_ready)) {
+ if (prio == 0) {
+ /* nothing ready anywhere: worker is done */
+ spin_unlock_irqrestore(
+ &xprt_ptr->tx_ready_lock_lhb2, flags);
+ return;
+ }
+ prio--;
+ }
+ glink_pm_qos_vote(xprt_ptr);
+ ch_ptr = list_first_entry(&xprt_ptr->prio_bin[prio].tx_ready,
+ struct channel_ctx, tx_ready_list_node);
+ spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb2, flags);
+
+ if (tx_ready_head == NULL || tx_ready_head_prio < prio) {
+ tx_ready_head = ch_ptr;
+ tx_ready_head_prio = prio;
+ }
+
+ /* wrapped around to the rotation start without progress */
+ if (ch_ptr == tx_ready_head && !transmitted_successfully) {
+ GLINK_ERR_XPRT(xprt_ptr,
+ "%s: Unable to send data on this transport.\n",
+ __func__);
+ break;
+ }
+ transmitted_successfully = false;
+
+ ret = glink_scheduler_tx(ch_ptr, xprt_ptr);
+ if (ret == -EAGAIN) {
+ /*
+ * transport unable to send at the moment and will call
+ * tx_resume() when it can send again.
+ */
+ break;
+ } else if (ret < 0) {
+ /*
+ * General failure code that indicates that the
+ * transport is unable to recover. In this case, the
+ * communication failure will be detected at a higher
+ * level and a subsystem restart of the affected system
+ * will be triggered.
+ */
+ GLINK_ERR_XPRT(xprt_ptr,
+ "%s: unrecoverable xprt failure %d\n",
+ __func__, ret);
+ break;
+ } else if (!ret) {
+ /*
+ * Transport unable to send any data on this channel,
+ * but didn't return an error. Move to the next channel
+ * and continue.
+ */
+ spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb2, flags);
+ list_rotate_left(&xprt_ptr->prio_bin[prio].tx_ready);
+ spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb2,
+ flags);
+ continue;
+ }
+
+ spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb2, flags);
+ spin_lock(&ch_ptr->tx_lists_lock_lhc3);
+
+ glink_scheduler_eval_prio(ch_ptr, xprt_ptr);
+ if (list_empty(&ch_ptr->tx_active)) {
+ /* channel drained: remove it from its tx_ready bin */
+ list_del_init(&ch_ptr->tx_ready_list_node);
+ glink_qos_done_ch_tx(ch_ptr);
+ }
+
+ spin_unlock(&ch_ptr->tx_lists_lock_lhc3);
+ spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb2, flags);
+
+ /* progress made: restart the rotation tracking */
+ tx_ready_head = NULL;
+ transmitted_successfully = true;
+ }
+ /*
+ * NOTE(review): glink_pm_qos_unvote() documents that it requires
+ * tx_ready_lock_lhb2 to be held, but it is called here without the
+ * lock - confirm whether this is intentional.
+ */
+ glink_pm_qos_unvote(xprt_ptr);
+ GLINK_PERF("%s: worker exiting\n", __func__);
+}
+
+/* Transport can send again: re-kick the tx worker. */
+static void glink_core_tx_resume(struct glink_transport_if *if_ptr)
+{
+ queue_work(if_ptr->glink_core_priv->tx_wq,
+ &if_ptr->glink_core_priv->tx_work);
+}
+
+/**
+ * glink_pm_qos_vote() - Add Power Management QoS Vote
+ * @xprt_ptr: Transport for power vote
+ *
+ * Note - must be called with tx_ready_lock_lhb2 locked.
+ */
+static void glink_pm_qos_vote(struct glink_core_xprt_ctx *xprt_ptr)
+{
+ /* glink_pm_qos == 0 disables PM QoS voting entirely */
+ if (glink_pm_qos && !xprt_ptr->qos_req_active) {
+ GLINK_PERF("%s: qos vote %u us\n", __func__, glink_pm_qos);
+ pm_qos_update_request(&xprt_ptr->pm_qos_req, glink_pm_qos);
+ xprt_ptr->qos_req_active = true;
+ }
+ /* record activity so a pending unvote worker keeps the vote */
+ xprt_ptr->tx_path_activity = true;
+}
+
+/**
+ * glink_pm_qos_unvote() - Schedule Power Management QoS Vote Removal
+ * @xprt_ptr: Transport for power vote removal
+ *
+ * The vote is not dropped immediately; removal is deferred by
+ * GLINK_PM_QOS_HOLDOFF_MS so back-to-back transmits keep the vote.
+ *
+ * Note - must be called with tx_ready_lock_lhb2 locked.
+ */
+static void glink_pm_qos_unvote(struct glink_core_xprt_ctx *xprt_ptr)
+{
+ xprt_ptr->tx_path_activity = false;
+ if (xprt_ptr->qos_req_active) {
+ GLINK_PERF("%s: qos unvote\n", __func__);
+ schedule_delayed_work(&xprt_ptr->pm_qos_work,
+ msecs_to_jiffies(GLINK_PM_QOS_HOLDOFF_MS));
+ }
+}
+
+/**
+ * glink_pm_qos_cancel_worker() - Remove Power Management QoS Vote
+ * @work: Delayed work structure
+ *
+ * Removes PM QoS vote if no additional transmit activity has occurred between
+ * the unvote and when this worker runs.
+ */
+static void glink_pm_qos_cancel_worker(struct work_struct *work)
+{
+ struct glink_core_xprt_ctx *xprt_ptr;
+ unsigned long flags;
+
+ xprt_ptr = container_of(to_delayed_work(work),
+ struct glink_core_xprt_ctx, pm_qos_work);
+
+ spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb2, flags);
+ if (!xprt_ptr->tx_path_activity) {
+ /* no more tx activity */
+ GLINK_PERF("%s: qos off\n", __func__);
+ pm_qos_update_request(&xprt_ptr->pm_qos_req,
+ PM_QOS_DEFAULT_VALUE);
+ xprt_ptr->qos_req_active = false;
+ }
+ /* consume the activity flag so the next holdoff starts clean */
+ xprt_ptr->tx_path_activity = false;
+ spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb2, flags);
+}
+
+/**
+ * glink_core_rx_cmd_remote_sigs() - Receive remote channel signal command
+ *
+ * @if_ptr: Pointer to transport instance
+ * @rcid: Remote Channel ID
+ * @sigs: New remote signal state
+ *
+ * Updates the cached remote signals and notifies the client of the change.
+ */
+static void glink_core_rx_cmd_remote_sigs(struct glink_transport_if *if_ptr,
+ uint32_t rcid, uint32_t sigs)
+{
+ struct channel_ctx *ctx;
+ uint32_t old_sigs;
+
+ ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+ if (!ctx) {
+ /* unknown RCID received - this shouldn't happen */
+ GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+ "%s: invalid rcid %u received\n", __func__,
+ (unsigned)rcid);
+ return;
+ }
+
+ if (!ch_is_fully_opened(ctx)) {
+ GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+ __func__);
+ rwref_put(&ctx->ch_state_lhc0);
+ return;
+ }
+
+ old_sigs = ctx->rsigs;
+ ctx->rsigs = sigs;
+ if (ctx->notify_rx_sigs) {
+ ctx->notify_rx_sigs(ctx, ctx->user_priv, old_sigs, ctx->rsigs);
+ GLINK_INFO_CH(ctx, "%s: notify rx sigs old:0x%x new:0x%x\n",
+ __func__, old_sigs, ctx->rsigs);
+ }
+ rwref_put(&ctx->ch_state_lhc0);
+}
+
+/* Core-side callback table handed to transports (see glink_core_if.h). */
+static struct glink_core_if core_impl = {
+ .link_up = glink_core_link_up,
+ .link_down = glink_core_link_down,
+ .rx_cmd_version = glink_core_rx_cmd_version,
+ .rx_cmd_version_ack = glink_core_rx_cmd_version_ack,
+ .rx_cmd_ch_remote_open = glink_core_rx_cmd_ch_remote_open,
+ .rx_cmd_ch_open_ack = glink_core_rx_cmd_ch_open_ack,
+ .rx_cmd_ch_remote_close = glink_core_rx_cmd_ch_remote_close,
+ .rx_cmd_ch_close_ack = glink_core_rx_cmd_ch_close_ack,
+ .rx_get_pkt_ctx = glink_core_rx_get_pkt_ctx,
+ .rx_put_pkt_ctx = glink_core_rx_put_pkt_ctx,
+ .rx_cmd_remote_rx_intent_put = glink_core_remote_rx_intent_put,
+ .rx_cmd_remote_rx_intent_req = glink_core_rx_cmd_remote_rx_intent_req,
+ .rx_cmd_rx_intent_req_ack = glink_core_rx_cmd_rx_intent_req_ack,
+ .rx_cmd_tx_done = glink_core_rx_cmd_tx_done,
+ .tx_resume = glink_core_tx_resume,
+ .rx_cmd_remote_sigs = glink_core_rx_cmd_remote_sigs,
+};
+
+/**
+ * glink_xprt_ctx_iterator_init() - Initializes the transport context list
+ * iterator
+ * @xprt_i: pointer to the transport context iterator.
+ *
+ * This function acquires the transport context lock which must then be
+ * released by glink_xprt_ctx_iterator_end()
+ */
+void glink_xprt_ctx_iterator_init(struct xprt_ctx_iterator *xprt_i)
+{
+ if (xprt_i == NULL)
+ return;
+
+ mutex_lock(&transport_list_lock_lha0);
+ xprt_i->xprt_list = &transport_list;
+ /*
+ * Point i_curr at the list head cast as an entry so that
+ * list_for_each_entry_continue() in _next() starts at the first
+ * real element; i_curr must not be dereferenced directly here.
+ */
+ xprt_i->i_curr = list_entry(&transport_list,
+ struct glink_core_xprt_ctx, list_node);
+}
+EXPORT_SYMBOL(glink_xprt_ctx_iterator_init);
+
+/**
+ * glink_xprt_ctx_iterator_end() - Ends the transport context list iteration
+ * @xprt_i: pointer to the transport context iterator.
+ *
+ * Releases the transport list lock taken by glink_xprt_ctx_iterator_init().
+ */
+void glink_xprt_ctx_iterator_end(struct xprt_ctx_iterator *xprt_i)
+{
+ if (xprt_i == NULL)
+ return;
+
+ xprt_i->xprt_list = NULL;
+ xprt_i->i_curr = NULL;
+ mutex_unlock(&transport_list_lock_lha0);
+}
+EXPORT_SYMBOL(glink_xprt_ctx_iterator_end);
+
+/**
+ * glink_xprt_ctx_iterator_next() - iterates element by element in transport
+ * context list
+ * @xprt_i: pointer to the transport context iterator.
+ *
+ * Return: pointer to the next transport context, or NULL when the list is
+ * exhausted (or on invalid input)
+ */
+struct glink_core_xprt_ctx *glink_xprt_ctx_iterator_next(
+ struct xprt_ctx_iterator *xprt_i)
+{
+ struct glink_core_xprt_ctx *xprt_ctx = NULL;
+
+ if (xprt_i == NULL)
+ return xprt_ctx;
+
+ if (list_empty(xprt_i->xprt_list))
+ return xprt_ctx;
+
+ /* advance i_curr one element; loop body runs at most once */
+ list_for_each_entry_continue(xprt_i->i_curr,
+ xprt_i->xprt_list, list_node) {
+ xprt_ctx = xprt_i->i_curr;
+ break;
+ }
+ return xprt_ctx;
+}
+EXPORT_SYMBOL(glink_xprt_ctx_iterator_next);
+
+/**
+ * glink_get_xprt_name() - get the transport name
+ * @xprt_ctx: pointer to the transport context.
+ *
+ * Return: name of the transport, NULL in case of invalid input
+ */
+char *glink_get_xprt_name(struct glink_core_xprt_ctx *xprt_ctx)
+{
+ return xprt_ctx ? xprt_ctx->name : NULL;
+}
+EXPORT_SYMBOL(glink_get_xprt_name);
+
+/**
+ * glink_get_xprt_edge_name() - get the name of the remote processor/edge
+ * of the transport
+ * @xprt_ctx: pointer to the transport context.
+ *
+ * Return: Name of the remote processor/edge, NULL in case of invalid input
+ */
+char *glink_get_xprt_edge_name(struct glink_core_xprt_ctx *xprt_ctx)
+{
+ if (xprt_ctx == NULL)
+ return NULL;
+ return xprt_ctx->edge;
+}
+EXPORT_SYMBOL(glink_get_xprt_edge_name);
+
+/**
+ * glink_get_xprt_state() - get the state of the transport
+ * @xprt_ctx: pointer to the transport context.
+ *
+ * Return: Name of the transport state, NULL in case of invalid input
+ */
+const char *glink_get_xprt_state(struct glink_core_xprt_ctx *xprt_ctx)
+{
+ if (!xprt_ctx)
+ return NULL;
+ return glink_get_xprt_state_string(xprt_ctx->local_state);
+}
+EXPORT_SYMBOL(glink_get_xprt_state);
+
+/**
+ * glink_get_xprt_version_features() - get the version and feature set
+ * of local transport in glink
+ * @xprt_ctx: pointer to the transport context.
+ *
+ * Return: pointer to the currently negotiated glink_core_version entry,
+ * NULL in case of invalid input
+ */
+const struct glink_core_version *glink_get_xprt_version_features(
+ struct glink_core_xprt_ctx *xprt_ctx)
+{
+ if (!xprt_ctx)
+ return NULL;
+
+ return &xprt_ctx->versions[xprt_ctx->local_version_idx];
+}
+EXPORT_SYMBOL(glink_get_xprt_version_features);
+
+/**
+ * glink_ch_ctx_iterator_init() - Initializes the channel context list iterator
+ * @ch_iter: pointer to the channel context iterator.
+ * @xprt: pointer to the transport context that holds the channel list
+ *
+ * This function acquires the channel context lock which must then be
+ * released by glink_ch_ctx_iterator_end()
+ */
+void glink_ch_ctx_iterator_init(struct ch_ctx_iterator *ch_iter,
+ struct glink_core_xprt_ctx *xprt)
+{
+ unsigned long flags;
+
+ if (ch_iter == NULL || xprt == NULL)
+ return;
+
+ spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
+ ch_iter->ch_list = &(xprt->channels);
+ /*
+ * Point i_curr at the list head cast as an entry so that
+ * list_for_each_entry_continue() in _next() starts at the first
+ * real element; i_curr must not be dereferenced directly here.
+ */
+ ch_iter->i_curr = list_entry(&(xprt->channels),
+ struct channel_ctx, port_list_node);
+ /* stash irq flags so _end() can restore them */
+ ch_iter->ch_list_flags = flags;
+}
+EXPORT_SYMBOL(glink_ch_ctx_iterator_init);
+
+/**
+ * glink_ch_ctx_iterator_end() - Ends the channel context list iteration
+ * @ch_iter: pointer to the channel context iterator.
+ * @xprt: pointer to the transport context whose lock was taken by
+ * glink_ch_ctx_iterator_init()
+ */
+void glink_ch_ctx_iterator_end(struct ch_ctx_iterator *ch_iter,
+ struct glink_core_xprt_ctx *xprt)
+{
+ if (ch_iter == NULL || xprt == NULL)
+ return;
+
+ spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1,
+ ch_iter->ch_list_flags);
+ ch_iter->ch_list = NULL;
+ ch_iter->i_curr = NULL;
+}
+EXPORT_SYMBOL(glink_ch_ctx_iterator_end);
+
+/**
+ * glink_ch_ctx_iterator_next() - iterates element by element in channel
+ * context list
+ * @c_i: pointer to the channel context iterator.
+ *
+ * Return: pointer to the next channel context, or NULL when the list is
+ * exhausted (or on invalid input)
+ */
+struct channel_ctx *glink_ch_ctx_iterator_next(struct ch_ctx_iterator *c_i)
+{
+ struct channel_ctx *ch_ctx = NULL;
+
+ if (c_i == NULL)
+ return ch_ctx;
+
+ if (list_empty(c_i->ch_list))
+ return ch_ctx;
+
+ /* advance i_curr one element; loop body runs at most once */
+ list_for_each_entry_continue(c_i->i_curr,
+ c_i->ch_list, port_list_node) {
+ ch_ctx = c_i->i_curr;
+ break;
+ }
+ return ch_ctx;
+}
+EXPORT_SYMBOL(glink_ch_ctx_iterator_next);
+
+/**
+ * glink_get_ch_name() - get the channel name
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: name of the channel, NULL in case of invalid input
+ */
+char *glink_get_ch_name(struct channel_ctx *ch_ctx)
+{
+ return ch_ctx ? ch_ctx->name : NULL;
+}
+EXPORT_SYMBOL(glink_get_ch_name);
+
+/**
+ * glink_get_ch_edge_name() - get the edge on which the channel is created
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: name of the edge, NULL in case of invalid input
+ */
+char *glink_get_ch_edge_name(struct channel_ctx *ch_ctx)
+{
+ return ch_ctx ? ch_ctx->transport_ptr->edge : NULL;
+}
+EXPORT_SYMBOL(glink_get_ch_edge_name);
+
+/**
+ * glink_get_ch_lcid() - get the local channel ID
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: local channel id, -EINVAL in case of invalid input
+ */
+int glink_get_ch_lcid(struct channel_ctx *ch_ctx)
+{
+ if (ch_ctx == NULL)
+ return -EINVAL;
+
+ return ch_ctx->lcid;
+}
+EXPORT_SYMBOL(glink_get_ch_lcid);
+
+/**
+ * glink_get_ch_rcid() - get the remote channel ID
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: remote channel id, -EINVAL in case of invalid input
+ */
+int glink_get_ch_rcid(struct channel_ctx *ch_ctx)
+{
+ if (!ch_ctx)
+ return -EINVAL;
+ return ch_ctx->rcid;
+}
+EXPORT_SYMBOL(glink_get_ch_rcid);
+
+/**
+ * glink_get_ch_lstate() - get the local channel state
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: Name of the local channel state, NULL in case of invalid input
+ */
+const char *glink_get_ch_lstate(struct channel_ctx *ch_ctx)
+{
+ if (ch_ctx == NULL)
+ return NULL;
+
+ return glink_get_ch_state_string(ch_ctx->local_open_state);
+}
+EXPORT_SYMBOL(glink_get_ch_lstate);
+
+/**
+ * glink_get_ch_rstate() - get the remote channel state
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: true if remote side is opened, false otherwise (including
+ * invalid input)
+ */
+bool glink_get_ch_rstate(struct channel_ctx *ch_ctx)
+{
+ /* return false (not NULL): this function returns bool, not a pointer */
+ if (ch_ctx == NULL)
+ return false;
+
+ return ch_ctx->remote_opened;
+}
+EXPORT_SYMBOL(glink_get_ch_rstate);
+
+/**
+ * glink_get_ch_xprt_name() - get the name of the transport to which
+ * the channel belongs
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: name of the transport, NULL in case of invalid input
+ */
+char *glink_get_ch_xprt_name(struct channel_ctx *ch_ctx)
+{
+ return ch_ctx ? ch_ctx->transport_ptr->name : NULL;
+}
+EXPORT_SYMBOL(glink_get_ch_xprt_name);
+
+/**
+ * glink_get_ch_tx_pkt_count() - get the total number of packets sent
+ * through this channel
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: -ENOSYS (stats not implemented), -EINVAL in case of invalid input
+ */
+int glink_get_ch_tx_pkt_count(struct channel_ctx *ch_ctx)
+{
+ if (ch_ctx == NULL)
+ return -EINVAL;
+
+ /* FUTURE: packet stats not yet implemented */
+
+ return -ENOSYS;
+}
+EXPORT_SYMBOL(glink_get_ch_tx_pkt_count);
+
+/**
+ * glink_get_ch_rx_pkt_count() - get the total number of packets
+ * received at this channel
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: -ENOSYS (stats not implemented), -EINVAL in case of invalid input
+ */
+int glink_get_ch_rx_pkt_count(struct channel_ctx *ch_ctx)
+{
+ if (ch_ctx == NULL)
+ return -EINVAL;
+
+ /* FUTURE: packet stats not yet implemented */
+
+ return -ENOSYS;
+}
+EXPORT_SYMBOL(glink_get_ch_rx_pkt_count);
+
+/**
+ * glink_get_ch_lintents_queued() - get the total number of intents queued
+ * at local side
+ * @ch_ctx: pointer to the channel context.
+ *
+ * NOTE(review): the list is walked without taking
+ * local_rx_intent_lst_lock_lhc1 - presumably the caller must hold it
+ * (e.g. via glink_get_ch_intent_info()); confirm.
+ *
+ * Return: number of intents queued, -EINVAL in case of invalid input
+ */
+int glink_get_ch_lintents_queued(struct channel_ctx *ch_ctx)
+{
+ struct glink_core_rx_intent *intent;
+ int ilrx_count = 0;
+
+ if (ch_ctx == NULL)
+ return -EINVAL;
+
+ list_for_each_entry(intent, &ch_ctx->local_rx_intent_list, list)
+ ilrx_count++;
+
+ return ilrx_count;
+}
+EXPORT_SYMBOL(glink_get_ch_lintents_queued);
+
+/**
+ * glink_get_ch_rintents_queued() - get the total number of intents queued
+ * from remote side
+ * @ch_ctx: pointer to the channel context.
+ *
+ * NOTE(review): the list is walked without taking
+ * rmt_rx_intent_lst_lock_lhc2 - presumably the caller must hold it
+ * (e.g. via glink_get_ch_intent_info()); confirm.
+ *
+ * Return: number of intents queued, -EINVAL in case of invalid input
+ */
+int glink_get_ch_rintents_queued(struct channel_ctx *ch_ctx)
+{
+ struct glink_core_rx_intent *intent;
+ int irrx_count = 0;
+
+ if (ch_ctx == NULL)
+ return -EINVAL;
+
+ list_for_each_entry(intent, &ch_ctx->rmt_rx_intent_list, list)
+ irrx_count++;
+
+ return irrx_count;
+}
+EXPORT_SYMBOL(glink_get_ch_rintents_queued);
+
+/**
+ * glink_get_ch_intent_info() - get the intent details of a channel
+ * @ch_ctx: pointer to the channel context.
+ * @ch_ctx_i: pointer to a structure that will contain intent details
+ *
+ * This function is used to get all the channel intent details including locks.
+ */
+void glink_get_ch_intent_info(struct channel_ctx *ch_ctx,
+ struct glink_ch_intent_info *ch_ctx_i)
+{
+ if (ch_ctx == NULL || ch_ctx_i == NULL)
+ return;
+
+ ch_ctx_i->li_lst_lock = &ch_ctx->local_rx_intent_lst_lock_lhc1;
+ ch_ctx_i->li_avail_list = &ch_ctx->local_rx_intent_list;
+ ch_ctx_i->li_used_list = &ch_ctx->local_rx_intent_ntfy_list;
+ ch_ctx_i->ri_lst_lock = &ch_ctx->rmt_rx_intent_lst_lock_lhc2;
+ ch_ctx_i->ri_list = &ch_ctx->rmt_rx_intent_list;
+}
+EXPORT_SYMBOL(glink_get_ch_intent_info);
+
+/**
+ * glink_get_debug_mask() - Return debug mask attribute
+ *
+ * Return: debug mask attribute
+ */
+unsigned glink_get_debug_mask(void)
+{
+ return glink_debug_mask;
+}
+EXPORT_SYMBOL(glink_get_debug_mask);
+
+/**
+ * glink_get_log_ctx() - Return log context for other GLINK modules.
+ *
+ * Return: Log context or NULL if none.
+ */
+void *glink_get_log_ctx(void)
+{
+ /* created in glink_init(); may be NULL if creation failed */
+ return log_ctx;
+}
+EXPORT_SYMBOL(glink_get_log_ctx);
+
+/**
+ * glink_get_xprt_log_ctx() - Return log context for GLINK xprts.
+ * @xprt: pointer to the transport context.
+ *
+ * Return: Log context or NULL if none.
+ */
+void *glink_get_xprt_log_ctx(struct glink_core_xprt_ctx *xprt)
+{
+ return xprt ? xprt->log_ctx : NULL;
+}
+EXPORT_SYMBOL(glink_get_xprt_log_ctx);
+
+/**
+ * qcom_glink_get_drvdata() - Return driver private data for a channel
+ * @ch: channel handle (struct channel_ctx, passed as opaque pointer)
+ *
+ * Return: driver data previously set with qcom_glink_set_drvdata()
+ */
+void *qcom_glink_get_drvdata(void *ch)
+{
+ struct channel_ctx *channel = ch;
+
+ return channel->drvdata;
+}
+EXPORT_SYMBOL(qcom_glink_get_drvdata);
+
+/**
+ * qcom_glink_set_drvdata() - Attach driver private data to a channel
+ * @ch: channel handle (struct channel_ctx, passed as opaque pointer)
+ * @data: driver data to store on the channel
+ */
+void qcom_glink_set_drvdata(void *ch, void *data)
+{
+ struct channel_ctx *channel = ch;
+
+ channel->drvdata = data;
+}
+EXPORT_SYMBOL(qcom_glink_set_drvdata);
+
+/* Module init: create the IPC log context and debugfs entries. */
+static int glink_init(void)
+{
+ log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "glink", 0);
+ if (!log_ctx)
+ /* logging is best-effort; continue without it */
+ GLINK_ERR("%s: unable to create log context\n", __func__);
+ glink_debugfs_init();
+
+ return 0;
+}
+arch_initcall(glink_init);
+
+MODULE_DESCRIPTION("MSM Generic Link (G-Link) Transport");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/glink_core_if.h b/drivers/soc/qcom/glink_core_if.h
new file mode 100644
index 0000000000000..93c59d9c4aa1f
--- /dev/null
+++ b/drivers/soc/qcom/glink_core_if.h
@@ -0,0 +1,213 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _SOC_QCOM_GLINK_CORE_IF_H_
+#define _SOC_QCOM_GLINK_CORE_IF_H_
+
+#include <linux/of.h>
+#include <linux/types.h>
+#include "glink_private.h"
+
+/* Local Channel state */
+enum local_channel_state_e {
+	GLINK_CHANNEL_CLOSED = 0,	/* value 0: zero-initialized contexts start CLOSED */
+	GLINK_CHANNEL_OPENING,
+	GLINK_CHANNEL_OPENED,
+	GLINK_CHANNEL_CLOSING,
+};
+
+/* Transport Negotiation State */
+enum transport_state_e {
+	GLINK_XPRT_DOWN,
+	GLINK_XPRT_NEGOTIATING,
+	GLINK_XPRT_OPENED,
+	GLINK_XPRT_FAILED,
+};
+
+struct channel_ctx;
+struct glink_core_xprt_ctx;
+struct glink_transport_if;
+struct glink_core_version;
+
+/**
+ * struct glink_core_version - Individual version element
+ * @version:	supported version
+ * @features:	all supported features for version
+ * @negotiate_features:	transport-supplied callback that returns the agreed
+ *			feature set for this version given the requested
+ *			@features (exact semantics defined by the transport)
+ */
+struct glink_core_version {
+	uint32_t version;
+	uint32_t features;
+
+	uint32_t (*negotiate_features)(struct glink_transport_if *if_ptr,
+			const struct glink_core_version *version_ptr,
+			uint32_t features);
+};
+
+/**
+ * struct glink_core_rx_intent - RX intent
+ *
+ * @data:	pointer to the data (may be NULL for zero-copy)
+ * @id:	remote or local intent ID
+ * @pkt_size:	total size of packet
+ * @write_offset:	next write offset (initially 0)
+ * @intent_size:	size of the original intent (do not modify)
+ * @tracer_pkt:	Flag to indicate if the data is a tracer packet
+ * @iovec:	Pointer to vector buffer if the transport passes a vector buffer
+ * @vprovider:	Virtual address-space buffer provider for a vector buffer
+ * @pprovider:	Physical address-space buffer provider for a vector buffer
+ * @list:	G-Link core owned list node
+ * @pkt_priv:	G-Link core owned packet-private data
+ * @bounce_buf:	Pointer to the temporary/internal bounce buffer
+ */
+struct glink_core_rx_intent {
+	void *data;
+	uint32_t id;
+	size_t pkt_size;
+	size_t write_offset;
+	size_t intent_size;
+	bool tracer_pkt;
+	void *iovec;
+	void * (*vprovider)(void *iovec, size_t offset, size_t *size);
+	void * (*pprovider)(void *iovec, size_t offset, size_t *size);
+
+	/* G-Link-Core-owned elements - please ignore */
+	struct list_head list;
+	const void *pkt_priv;
+	void *bounce_buf;
+};
+
+/**
+ * struct glink_core_flow_info - Flow specific Information
+ * @mtu_tx_time_us: Time to transmit an MTU in microseconds.
+ * @power_state: Power state associated with the traffic flow.
+ */
+struct glink_core_flow_info {
+ unsigned long mtu_tx_time_us;
+ uint32_t power_state;
+};
+
+/**
+ * struct glink_core_transport_cfg - configuration of a new transport
+ * @name: Name of the transport.
+ * @edge: Subsystem the transport connects to.
+ * @versions: Array of transport versions supported.
+ * @versions_entries: Number of entries in @versions.
+ * @max_cid: Maximum number of channel identifiers supported.
+ * @max_iid: Maximum number of intent identifiers supported.
+ * @mtu: MTU supported by this transport.
+ * @num_flows: Number of traffic flows/priority buckets.
+ * @flow_info: Information about each flow/priority.
+ * @token_count: Number of tokens per assignment.
+ */
+struct glink_core_transport_cfg {
+ const char *name;
+ const char *edge;
+ const struct glink_core_version *versions;
+ size_t versions_entries;
+ uint32_t max_cid;
+ uint32_t max_iid;
+
+ size_t mtu;
+ uint32_t num_flows;
+ struct glink_core_flow_info *flow_info;
+ uint32_t token_count;
+};
+
+/**
+ * struct glink_core_if - core-side callback table for transports
+ *
+ * Grouped by role: version negotiation, channel management, channel data
+ * reception, and TX scheduling. Judging by the rx_cmd_* naming, transports
+ * invoke these to report received commands/events up into the core —
+ * confirm against the implementations in glink.c.
+ */
+struct glink_core_if {
+	/* Negotiation */
+	void (*link_up)(struct glink_transport_if *if_ptr);
+	void (*link_down)(struct glink_transport_if *if_ptr);
+	void (*rx_cmd_version)(struct glink_transport_if *if_ptr,
+			uint32_t version,
+			uint32_t features);
+	void (*rx_cmd_version_ack)(struct glink_transport_if *if_ptr,
+			uint32_t version,
+			uint32_t features);
+
+	/* channel management */
+	void (*rx_cmd_ch_remote_open)(struct glink_transport_if *if_ptr,
+			uint32_t rcid, const char *name, uint16_t req_xprt);
+	void (*rx_cmd_ch_open_ack)(struct glink_transport_if *if_ptr,
+			uint32_t lcid, uint16_t xprt_resp);
+	void (*rx_cmd_ch_remote_close)(struct glink_transport_if *if_ptr,
+			uint32_t rcid);
+	void (*rx_cmd_ch_close_ack)(struct glink_transport_if *if_ptr,
+			uint32_t lcid);
+
+	/* channel data */
+	struct glink_core_rx_intent * (*rx_get_pkt_ctx)(
+			struct glink_transport_if *if_ptr,
+			uint32_t rcid, uint32_t liid);
+	void (*rx_put_pkt_ctx)(struct glink_transport_if *if_ptr, uint32_t rcid,
+			struct glink_core_rx_intent *intent_ptr, bool complete);
+	void (*rx_cmd_remote_rx_intent_put)(struct glink_transport_if *if_ptr,
+			uint32_t rcid, uint32_t riid, size_t size);
+	void (*rx_cmd_tx_done)(struct glink_transport_if *if_ptr, uint32_t rcid,
+			uint32_t riid, bool reuse);
+	void (*rx_cmd_remote_rx_intent_req)(struct glink_transport_if *if_ptr,
+			uint32_t rcid, size_t size);
+	void (*rx_cmd_rx_intent_req_ack)(struct glink_transport_if *if_ptr,
+			uint32_t rcid, bool granted);
+	void (*rx_cmd_remote_sigs)(struct glink_transport_if *if_ptr,
+			uint32_t rcid, uint32_t sigs);
+
+	/* channel scheduling */
+	void (*tx_resume)(struct glink_transport_if *if_ptr);
+};
+
+int glink_core_register_transport(struct glink_transport_if *if_ptr,
+ struct glink_core_transport_cfg *cfg);
+
+void glink_core_unregister_transport(struct glink_transport_if *if_ptr);
+
+/**
+ * of_get_glink_core_qos_cfg() - Parse the qos related dt entries
+ * @phandle: The handle to the qos related node in DT.
+ * @cfg: The transport configuration to be filled.
+ *
+ * Return: 0 on Success, standard Linux error otherwise.
+ */
+int of_get_glink_core_qos_cfg(struct device_node *phandle,
+ struct glink_core_transport_cfg *cfg);
+
+/**
+ * rx_linear_vbuf_provider() - Virtual Buffer Provider for linear buffers
+ * @iovec:	Pointer to the beginning of the linear buffer.
+ * @offset:	Offset into the buffer whose address is needed.
+ * @size:	Pointer to hold the length of the contiguous buffer space.
+ *
+ * This function is used when a linear buffer is received while the client has
+ * registered to receive vector buffers.
+ *
+ * Return: Address of the buffer which is at offset "offset" from the
+ *         beginning of the buffer, or NULL for invalid input.
+ */
+static inline void *rx_linear_vbuf_provider(void *iovec, size_t offset,
+	size_t *size)
+{
+	struct glink_core_rx_intent *rx_info =
+		(struct glink_core_rx_intent *)iovec;
+
+	if (unlikely(!iovec || !size))
+		return NULL;
+
+	if (unlikely(offset >= rx_info->pkt_size))
+		return NULL;
+
+	/* reject pointer arithmetic that would wrap; OVERFLOW_ADD_UNSIGNED
+	 * is presumably defined in glink_private.h — not visible in this hunk
+	 */
+	if (unlikely(OVERFLOW_ADD_UNSIGNED(void *, rx_info->data, offset)))
+		return NULL;
+
+	*size = rx_info->pkt_size - offset;
+	return rx_info->data + offset;
+}
+
+#endif /* _SOC_QCOM_GLINK_CORE_IF_H_ */
diff --git a/drivers/soc/qcom/glink_debugfs.c b/drivers/soc/qcom/glink_debugfs.c
new file mode 100644
index 0000000000000..8e65e4ac9b8ea
--- /dev/null
+++ b/drivers/soc/qcom/glink_debugfs.c
@@ -0,0 +1,783 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/ipc_logging.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <soc/qcom/glink.h>
+#include "glink_private.h"
+#include "glink_core_if.h"
+
+
+static const char * const ss_string[] = {
+ [GLINK_DBGFS_MPSS] = "mpss",
+ [GLINK_DBGFS_APSS] = "apss",
+ [GLINK_DBGFS_LPASS] = "lpass",
+ [GLINK_DBGFS_DSPS] = "dsps",
+ [GLINK_DBGFS_RPM] = "rpm",
+ [GLINK_DBGFS_WCNSS] = "wcnss",
+ [GLINK_DBGFS_LLOOP] = "lloop",
+ [GLINK_DBGFS_MOCK] = "mock"
+};
+
+static const char * const xprt_string[] = {
+ [GLINK_DBGFS_SMEM] = "smem",
+ [GLINK_DBGFS_SMD] = "smd",
+ [GLINK_DBGFS_XLLOOP] = "lloop",
+ [GLINK_DBGFS_XMOCK] = "mock",
+ [GLINK_DBGFS_XMOCK_LOW] = "mock_low",
+ [GLINK_DBGFS_XMOCK_HIGH] = "mock_high"
+};
+
+static const char * const ch_st_string[] = {
+ [GLINK_CHANNEL_CLOSED] = "CLOSED",
+ [GLINK_CHANNEL_OPENING] = "OPENING",
+ [GLINK_CHANNEL_OPENED] = "OPENED",
+ [GLINK_CHANNEL_CLOSING] = "CLOSING",
+};
+
+static const char * const xprt_st_string[] = {
+ [GLINK_XPRT_DOWN] = "DOWN",
+ [GLINK_XPRT_NEGOTIATING] = "NEGOT",
+ [GLINK_XPRT_OPENED] = "OPENED",
+ [GLINK_XPRT_FAILED] = "FAILED"
+};
+
+#if defined(CONFIG_DEBUG_FS)
+#define GLINK_DBGFS_NAME_SIZE (2 * GLINK_NAME_SIZE + 1)
+
+struct glink_dbgfs_dent {
+ struct list_head list_node;
+ char par_name[GLINK_DBGFS_NAME_SIZE];
+ char self_name[GLINK_DBGFS_NAME_SIZE];
+ struct dentry *parent;
+ struct dentry *self;
+ spinlock_t file_list_lock_lhb0;
+ struct list_head file_list;
+};
+
+static struct dentry *dent;
+static LIST_HEAD(dent_list);
+static DEFINE_MUTEX(dent_list_lock_lha0);
+
+/* seq_file show handler: dispatch to the per-file output callback. */
+static int debugfs_show(struct seq_file *s, void *data)
+{
+	struct glink_dbgfs_data *dfs_d = s->private;
+
+	dfs_d->o_func(s);
+	return 0;
+}
+
+/* Bind single_open() to debugfs_show with the file's private data. */
+static int debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, debugfs_show, inode->i_private);
+}
+
+static const struct file_operations debug_ops = {
+	.open = debug_open,
+	.release = single_release,
+	.read = seq_read,
+	.llseek = seq_lseek,
+};
+#endif
+
+/**
+ * glink_get_ss_enum_string() - get the name of the subsystem based on enum
+ *				value
+ * @enum_id:	enum id of a specific subsystem.
+ *
+ * Return: name of the subsystem, NULL in case of invalid input
+ */
+const char *glink_get_ss_enum_string(unsigned int enum_id)
+{
+	return enum_id < ARRAY_SIZE(ss_string) ? ss_string[enum_id] : NULL;
+}
+EXPORT_SYMBOL(glink_get_ss_enum_string);
+
+/**
+ * glink_get_xprt_enum_string() - get the name of the transport based on enum
+ *				  value
+ * @enum_id:	enum id of a specific transport (enum glink_dbgfs_xprt).
+ *
+ * Return: name of the transport, NULL in case of invalid input
+ */
+const char *glink_get_xprt_enum_string(unsigned int enum_id)
+{
+	if (enum_id >= ARRAY_SIZE(xprt_string))
+		return NULL;
+	return xprt_string[enum_id];
+}
+EXPORT_SYMBOL(glink_get_xprt_enum_string);
+
+/**
+ * glink_get_xprt_state_string() - get the name of the transport state based
+ *				   on enum value
+ * @enum_id:	enum id of the state of the transport.
+ *
+ * Return: name of the transport state, NULL in case of invalid input
+ */
+const char *glink_get_xprt_state_string(
+				enum transport_state_e enum_id)
+{
+	if (enum_id >= ARRAY_SIZE(xprt_st_string))
+		return NULL;
+
+	return xprt_st_string[enum_id];
+}
+EXPORT_SYMBOL(glink_get_xprt_state_string);
+
+/**
+ * glink_get_ch_state_string() - get the name of the channel state based on
+ *				 enum value
+ * @enum_id:	enum id of a specific state of the channel.
+ *
+ * Return: name of the channel state, NULL in case of invalid input
+ */
+const char *glink_get_ch_state_string(
+				enum local_channel_state_e enum_id)
+{
+	if (enum_id >= ARRAY_SIZE(ch_st_string))
+		return NULL;
+
+	return ch_st_string[enum_id];
+}
+EXPORT_SYMBOL(glink_get_ch_state_string);
+
+#if defined(CONFIG_DEBUG_FS)
+/**
+ * glink_dfs_create_file() - create a debugfs file
+ * @name:	debugfs file name
+ * @parent:	pointer to the parent dentry structure
+ * @show:	pointer to the actual function which will be invoked upon
+ *		opening this file.
+ * @dbgfs_data:	private data associated with the file (may be NULL)
+ * @b_free_req:	true if @dbgfs_data must be freed when the file goes away
+ *
+ * Return: pointer to the allocated glink_dbgfs_data structure or
+ *	   NULL in case of an error.
+ *
+ * This function actually creates a debugfs file under the parent directory.
+ */
+static struct glink_dbgfs_data *glink_dfs_create_file(const char *name,
+		struct dentry *parent, void (*show)(struct seq_file *s),
+		void *dbgfs_data, bool b_free_req)
+{
+	struct dentry *file;
+	struct glink_dbgfs_data *dfs_d;
+
+	dfs_d = kzalloc(sizeof(struct glink_dbgfs_data), GFP_KERNEL);
+	if (dfs_d == NULL)
+		return NULL;
+
+	dfs_d->o_func = show;
+	if (dbgfs_data != NULL) {
+		dfs_d->priv_data = dbgfs_data;
+		dfs_d->b_priv_free_req = b_free_req;
+	}
+	file = debugfs_create_file(name, 0400, parent, dfs_d, &debug_ops);
+	if (IS_ERR_OR_NULL(file)) {
+		/* don't leak (or later dereference) the bookkeeping struct
+		 * when debugfs refuses to create the file
+		 */
+		GLINK_DBG("%s: unable to create file '%s'\n", __func__,
+				name);
+		kfree(dfs_d);
+		return NULL;
+	}
+	dfs_d->dent = file;
+	return dfs_d;
+}
+
+/**
+ * write_ch_intent() - write channel intent details
+ * @s:	pointer to the sequential file
+ * @intent:	pointer to a glink core intent structure
+ * @i_type:	type of intent (list label; printed only on the first row)
+ * @count:	serial number of the intent.
+ *
+ * This function is a helper function of glink_dfs_update_ch_intent()
+ * that prints out details of any specific intent.
+ */
+static void write_ch_intent(struct seq_file *s,
+			struct glink_core_rx_intent *intent,
+			char *i_type, unsigned int count)
+{
+	char *intent_type;
+	/*
+	 * formatted, human readable channel state output, ie:
+	 * TYPE      |SN |ID |PKT_SIZE|W_OFFSET|INT_SIZE|
+	 * --------------------------------------------------------------
+	 * LOCAL_LIST|#2 |1  |0       |0       |8       |
+	 */
+	if (count == 1) {
+		/* first entry of a list: emit the label and a separator */
+		intent_type = i_type;
+		seq_puts(s,
+		"\n--------------------------------------------------------\n");
+	} else {
+		intent_type = "";
+	}
+	seq_printf(s, "%-20s|#%-5d|%-6u|%-10zu|%-10zu|%-10zu|\n",
+			intent_type,
+			count,
+			intent->id,
+			intent->pkt_size,
+			intent->write_offset,
+			intent->intent_size);
+}
+
+/**
+ * glink_dfs_update_ch_intent() - writes the intent details of a specific
+ *				  channel to the corresponding debugfs file
+ * @s:	pointer to the sequential file
+ *
+ * This function extracts the intent details of a channel & prints them to
+ * the corresponding debugfs file of that channel.
+ */
+static void glink_dfs_update_ch_intent(struct seq_file *s)
+{
+	struct glink_dbgfs_data *dfs_d;
+	struct channel_ctx *ch_ctx;
+	struct glink_core_rx_intent *intent;
+	struct glink_core_rx_intent *intent_temp;
+	struct glink_ch_intent_info ch_intent_info;
+	unsigned long flags;
+	unsigned int count = 0;
+
+	dfs_d = s->private;
+	ch_ctx = dfs_d->priv_data;
+	if (ch_ctx != NULL) {
+		glink_get_ch_intent_info(ch_ctx, &ch_intent_info);
+		seq_puts(s,
+		"---------------------------------------------------------------\n");
+		seq_printf(s, "%-20s|%-6s|%-6s|%-10s|%-10s|%-10s|\n",
+				"INTENT TYPE",
+				"SN",
+				"ID",
+				"PKT_SIZE",
+				"W_OFFSET",
+				"INT_SIZE");
+		seq_puts(s,
+		"---------------------------------------------------------------\n");
+		/* both local intent lists share li_lst_lock */
+		spin_lock_irqsave(ch_intent_info.li_lst_lock, flags);
+		list_for_each_entry_safe(intent, intent_temp,
+				ch_intent_info.li_avail_list, list) {
+			count++;
+			write_ch_intent(s, intent, "LOCAL_AVAIL_LIST", count);
+		}
+
+		count = 0;
+		list_for_each_entry_safe(intent, intent_temp,
+				ch_intent_info.li_used_list, list) {
+			count++;
+			write_ch_intent(s, intent, "LOCAL_USED_LIST", count);
+		}
+		spin_unlock_irqrestore(ch_intent_info.li_lst_lock, flags);
+
+		count = 0;
+		/* remote intents are protected by a separate lock */
+		spin_lock_irqsave(ch_intent_info.ri_lst_lock, flags);
+		list_for_each_entry_safe(intent, intent_temp,
+				ch_intent_info.ri_list, list) {
+			count++;
+			write_ch_intent(s, intent, "REMOTE_LIST", count);
+		}
+		spin_unlock_irqrestore(ch_intent_info.ri_lst_lock,
+					flags);
+		seq_puts(s,
+		"---------------------------------------------------------------\n");
+	}
+}
+
+/**
+ * glink_dfs_update_ch_stats() - writes statistics of a specific
+ *				 channel to the corresponding debugfs file
+ * @s:	pointer to the sequential file
+ *
+ * Placeholder: channel statistics are not implemented yet, so the file
+ * simply reports that to the reader.
+ */
+static void glink_dfs_update_ch_stats(struct seq_file *s)
+{
+	/* FUTURE: add channel statistics */
+	seq_puts(s, "not yet implemented\n");
+}
+
+/**
+ * glink_debugfs_remove_channel() - remove all channel specific files & folder
+ *				    in debugfs when channel is fully closed
+ * @ch_ctx:	pointer to the channel context
+ * @xprt_ctx:	pointer to the transport context
+ *
+ * This function is invoked when any channel is fully closed. It removes the
+ * folders & other files in debugfs for that channel.
+ */
+void glink_debugfs_remove_channel(struct channel_ctx *ch_ctx,
+			struct glink_core_xprt_ctx *xprt_ctx){
+
+	struct glink_dbgfs ch_rm_dbgfs;
+	char *edge_name;
+	char curr_dir_name[GLINK_DBGFS_NAME_SIZE];
+	char *xprt_name;
+
+	ch_rm_dbgfs.curr_name = glink_get_ch_name(ch_ctx);
+	edge_name = glink_get_xprt_edge_name(xprt_ctx);
+	xprt_name = glink_get_xprt_name(xprt_ctx);
+	if (!xprt_name || !edge_name) {
+		GLINK_ERR("%s: Invalid xprt_name or edge_name for ch '%s'\n",
+				__func__, ch_rm_dbgfs.curr_name);
+		return;
+	}
+	/* the channel's parent directory is named "<edge>_<xprt>" */
+	snprintf(curr_dir_name, sizeof(curr_dir_name), "%s_%s",
+			edge_name, xprt_name);
+	ch_rm_dbgfs.par_name = curr_dir_name;
+	glink_debugfs_remove_recur(&ch_rm_dbgfs);
+}
+EXPORT_SYMBOL(glink_debugfs_remove_channel);
+
+/**
+ * glink_debugfs_add_channel() - create channel specific files & folder in
+ *				 debugfs when channel is added
+ * @ch_ctx:	pointer to the channel context
+ * @xprt_ctx:	pointer to the transport context
+ *
+ * This function is invoked when a new channel is created. It creates the
+ * folders & other files in debugfs for that channel.
+ */
+void glink_debugfs_add_channel(struct channel_ctx *ch_ctx,
+		struct glink_core_xprt_ctx *xprt_ctx)
+{
+	struct glink_dbgfs ch_dbgfs;
+	char *ch_name;
+	char *edge_name;
+	char *xprt_name;
+	char curr_dir_name[GLINK_DBGFS_NAME_SIZE];
+
+	if (ch_ctx == NULL) {
+		GLINK_ERR("%s: Channel Context is NULL\n", __func__);
+		return;
+	}
+
+	ch_name = glink_get_ch_name(ch_ctx);
+	edge_name = glink_get_xprt_edge_name(xprt_ctx);
+	xprt_name = glink_get_xprt_name(xprt_ctx);
+	if (!xprt_name || !edge_name) {
+		GLINK_ERR("%s: Invalid xprt_name or edge_name for ch '%s'\n",
+				__func__, ch_name);
+		return;
+	}
+	snprintf(curr_dir_name, sizeof(curr_dir_name), "%s_%s",
+			edge_name, xprt_name);
+
+	/* create the "<ch_name>" directory under channel/<edge>_<xprt> */
+	ch_dbgfs.curr_name = curr_dir_name;
+	ch_dbgfs.par_name = "channel";
+	ch_dbgfs.b_dir_create = true;
+	glink_debugfs_create(ch_name, NULL, &ch_dbgfs, NULL, false);
+
+	/* then populate it with the per-channel "stats"/"intents" files */
+	ch_dbgfs.par_name = ch_dbgfs.curr_name;
+	ch_dbgfs.curr_name = ch_name;
+	ch_dbgfs.b_dir_create = false;
+	glink_debugfs_create("stats", glink_dfs_update_ch_stats,
+			&ch_dbgfs, (void *)ch_ctx, false);
+	glink_debugfs_create("intents", glink_dfs_update_ch_intent,
+			&ch_dbgfs, (void *)ch_ctx, false);
+}
+EXPORT_SYMBOL(glink_debugfs_add_channel);
+
+/**
+ * glink_debugfs_add_xprt() - create transport specific files & folder in
+ *			      debugfs when new transport is registered
+ * @xprt_ctx:	pointer to the transport_context
+ *
+ * This function is invoked when a new transport is registered. It creates the
+ * folders & other files in debugfs for that transport.
+ */
+void glink_debugfs_add_xprt(struct glink_core_xprt_ctx *xprt_ctx)
+{
+	struct glink_dbgfs xprt_dbgfs;
+	char *xprt_name;
+	char *edge_name;
+	char curr_dir_name[GLINK_DBGFS_NAME_SIZE];
+
+	if (xprt_ctx == NULL) {
+		GLINK_ERR("%s: Transport Context is NULL\n", __func__);
+		/* bail out instead of falling through and using the
+		 * NULL context below
+		 */
+		return;
+	}
+	xprt_name = glink_get_xprt_name(xprt_ctx);
+	edge_name = glink_get_xprt_edge_name(xprt_ctx);
+	if (!xprt_name || !edge_name) {
+		GLINK_ERR("%s: xprt name or edge name is NULL\n", __func__);
+		return;
+	}
+	/* "<edge>_<xprt>" is created under both glink/xprt and
+	 * glink/channel
+	 */
+	snprintf(curr_dir_name, sizeof(curr_dir_name), "%s_%s",
+			edge_name, xprt_name);
+	xprt_dbgfs.par_name = "glink";
+	xprt_dbgfs.curr_name = "xprt";
+	xprt_dbgfs.b_dir_create = true;
+	glink_debugfs_create(curr_dir_name, NULL, &xprt_dbgfs, NULL, false);
+	xprt_dbgfs.curr_name = "channel";
+	glink_debugfs_create(curr_dir_name, NULL, &xprt_dbgfs, NULL, false);
+}
+EXPORT_SYMBOL(glink_debugfs_add_xprt);
+
+/**
+ * glink_dfs_create_channel_list() - create & update the channel details
+ * @s:	pointer to seq_file
+ *
+ * This function updates channel details in the debugfs
+ * file present in /glink/channel/channels
+ */
+static void glink_dfs_create_channel_list(struct seq_file *s)
+{
+	struct xprt_ctx_iterator xprt_iter;
+	struct ch_ctx_iterator ch_iter;
+
+	struct glink_core_xprt_ctx *xprt_ctx;
+	struct channel_ctx *ch_ctx;
+	int count = 0;
+	/*
+	 * formatted, human readable channel state output, ie:
+	 * NAME               |LCID|RCID|XPRT |EDGE |LSTATE|RSTATE|LINT-Q|RINT-Q|
+	 * --------------------------------------------------------------------
+	 * LOCAL_LOOPBACK_CLNT|2   |1   |lloop|local|OPENED|OPENED|5     |6     |
+	 * N.B. Number of TX & RX Packets not implemented yet. -ENOSYS is
+	 * printed
+	 */
+	seq_printf(s, "%-20s|%-4s|%-4s|%-10s|%-6s|%-7s|%-7s|%-5s|%-5s|\n",
+			"NAME",
+			"LCID",
+			"RCID",
+			"XPRT",
+			"EDGE",
+			"LSTATE",
+			"RSTATE",
+			"LINTQ",
+			"RINTQ");
+	seq_puts(s,
+	"-------------------------------------------------------------------------------\n");
+	/* walk every transport, and every channel on each transport */
+	glink_xprt_ctx_iterator_init(&xprt_iter);
+	xprt_ctx = glink_xprt_ctx_iterator_next(&xprt_iter);
+
+	while (xprt_ctx != NULL) {
+		glink_ch_ctx_iterator_init(&ch_iter, xprt_ctx);
+		ch_ctx = glink_ch_ctx_iterator_next(&ch_iter);
+		while (ch_ctx != NULL) {
+			count++;
+			seq_printf(s, "%-20s|%-4i|%-4i|%-10s|%-6s|%-7s|",
+					glink_get_ch_name(ch_ctx),
+					glink_get_ch_lcid(ch_ctx),
+					glink_get_ch_rcid(ch_ctx),
+					glink_get_ch_xprt_name(ch_ctx),
+					glink_get_ch_edge_name(ch_ctx),
+					glink_get_ch_lstate(ch_ctx));
+			seq_printf(s, "%-7s|%-5i|%-5i|\n",
+			(glink_get_ch_rstate(ch_ctx) ? "OPENED" : "CLOSED"),
+				glink_get_ch_lintents_queued(ch_ctx),
+				glink_get_ch_rintents_queued(ch_ctx));
+
+			ch_ctx = glink_ch_ctx_iterator_next(&ch_iter);
+		}
+		glink_ch_ctx_iterator_end(&ch_iter, xprt_ctx);
+		xprt_ctx = glink_xprt_ctx_iterator_next(&xprt_iter);
+	}
+
+	glink_xprt_ctx_iterator_end(&xprt_iter);
+}
+
+/**
+ * glink_dfs_create_xprt_list() - create & update the transport details
+ * @s:	pointer to seq_file
+ *
+ * This function updates transport details in the debugfs file present
+ * in /glink/xprt/xprts
+ */
+static void glink_dfs_create_xprt_list(struct seq_file *s)
+{
+	struct xprt_ctx_iterator xprt_iter;
+	struct glink_core_xprt_ctx *xprt_ctx;
+	const struct glink_core_version *gver;
+	uint32_t version;
+	uint32_t features;
+	int count = 0;
+	/*
+	 * formatted, human readable transport state output, ie:
+	 * XPRT_NAME|REMOTE |STATE|VERSION |FEATURES|
+	 * ---------------------------------------------
+	 * smd_trans|lpass  |2    |0       |1       |
+	 * smem     |mpss   |0    |0       |0       |
+	 */
+	seq_printf(s, "%-20s|%-20s|%-6s|%-8s|%-8s|\n",
+			"XPRT_NAME",
+			"REMOTE",
+			"STATE",
+			"VERSION",
+			"FEATURES");
+	seq_puts(s,
+	"-------------------------------------------------------------------------------\n");
+	glink_xprt_ctx_iterator_init(&xprt_iter);
+	xprt_ctx = glink_xprt_ctx_iterator_next(&xprt_iter);
+
+	while (xprt_ctx != NULL) {
+		count++;
+		seq_printf(s, "%-20s|%-20s|",
+				glink_get_xprt_name(xprt_ctx),
+				glink_get_xprt_edge_name(xprt_ctx));
+		gver = glink_get_xprt_version_features(xprt_ctx);
+		if (gver != NULL) {
+			version = gver->version;
+			features = gver->features;
+			seq_printf(s, "%-6s|%-8i|%-8i|\n",
+					glink_get_xprt_state(xprt_ctx),
+					version,
+					features);
+		} else {
+			/* no negotiated version yet: print -ENODATA */
+			seq_printf(s, "%-6s|%-8i|%-8i|\n",
+					glink_get_xprt_state(xprt_ctx),
+					-ENODATA,
+					-ENODATA);
+		}
+		xprt_ctx = glink_xprt_ctx_iterator_next(&xprt_iter);
+
+	}
+
+	glink_xprt_ctx_iterator_end(&xprt_iter);
+}
+
+/**
+ * glink_dfs_update_list() - update the internally maintained dentry linked
+ *			     list
+ * @curr_dent:	pointer to the current dentry object
+ * @parent:	pointer to the parent dentry object
+ * @curr:	current directory name
+ * @par_dir:	parent directory name
+ */
+void glink_dfs_update_list(struct dentry *curr_dent, struct dentry *parent,
+			const char *curr, const char *par_dir)
+{
+	struct glink_dbgfs_dent *dbgfs_dent_s;
+
+	if (curr_dent == NULL) {
+		GLINK_DBG("%s:create directory failed for par:curr [%s:%s]\n",
+				__func__, par_dir, curr);
+		return;
+	}
+
+	dbgfs_dent_s = kzalloc(sizeof(struct glink_dbgfs_dent),
+				GFP_KERNEL);
+	if (dbgfs_dent_s == NULL)
+		return;
+
+	INIT_LIST_HEAD(&dbgfs_dent_s->file_list);
+	spin_lock_init(&dbgfs_dent_s->file_list_lock_lhb0);
+	dbgfs_dent_s->parent = parent;
+	dbgfs_dent_s->self = curr_dent;
+	/* bound the copies by the destination size, not the source length,
+	 * so an oversized name is truncated instead of overflowing the
+	 * fixed-size name buffers
+	 */
+	strlcpy(dbgfs_dent_s->self_name, curr,
+			sizeof(dbgfs_dent_s->self_name));
+	strlcpy(dbgfs_dent_s->par_name, par_dir,
+			sizeof(dbgfs_dent_s->par_name));
+	mutex_lock(&dent_list_lock_lha0);
+	list_add_tail(&dbgfs_dent_s->list_node, &dent_list);
+	mutex_unlock(&dent_list_lock_lha0);
+}
+
+/**
+ * glink_remove_dfs_entry() - remove an entry from dent_list
+ * @entry:	pointer to the glink_dbgfs_dent structure
+ *
+ * This function removes the entry from the internally maintained linked
+ * list of dentries. It also deletes the file list and associated memory
+ * if present.
+ */
+void glink_remove_dfs_entry(struct glink_dbgfs_dent *entry)
+{
+	struct glink_dbgfs_data *fentry, *fentry_temp;
+	unsigned long flags;
+
+	if (entry == NULL)
+		return;
+	/* NOTE(review): list_empty() is evaluated before taking
+	 * file_list_lock_lhb0 — confirm callers serialize against
+	 * concurrent additions via glink_debugfs_create().
+	 */
+	if (!list_empty(&entry->file_list)) {
+		spin_lock_irqsave(&entry->file_list_lock_lhb0, flags);
+		list_for_each_entry_safe(fentry, fentry_temp,
+				&entry->file_list, flist) {
+			if (fentry->b_priv_free_req)
+				kfree(fentry->priv_data);
+			list_del(&fentry->flist);
+			kfree(fentry);
+			fentry = NULL; /* dead store; loop reassigns */
+		}
+		spin_unlock_irqrestore(&entry->file_list_lock_lhb0, flags);
+	}
+	list_del(&entry->list_node);
+	kfree(entry);
+	entry = NULL; /* dead store; only the local copy is cleared */
+}
+
+/**
+ * glink_debugfs_remove_recur() - remove the directory & files recursively
+ * @rm_dfs:	pointer to the structure glink_dbgfs
+ *
+ * This function removes the files & directories below the given directory.
+ * This also takes care of freeing any memory associated with the debugfs
+ * files.
+ */
+void glink_debugfs_remove_recur(struct glink_dbgfs *rm_dfs)
+{
+	const char *c_dir_name;
+	const char *p_dir_name;
+	struct glink_dbgfs_dent *entry, *entry_temp;
+	struct dentry *par_dent = NULL;
+
+	if (rm_dfs == NULL)
+		return;
+
+	c_dir_name = rm_dfs->curr_name;
+	p_dir_name = rm_dfs->par_name;
+
+	mutex_lock(&dent_list_lock_lha0);
+	list_for_each_entry_safe(entry, entry_temp, &dent_list, list_node) {
+		if (!strcmp(entry->par_name, c_dir_name)) {
+			/* bookkeeping node for a child of the target dir */
+			glink_remove_dfs_entry(entry);
+		} else if (!strcmp(entry->self_name, c_dir_name)
+				&& !strcmp(entry->par_name, p_dir_name)) {
+			/* the target dir itself: remember its dentry so the
+			 * debugfs subtree can be removed after the list walk
+			 */
+			par_dent = entry->self;
+			glink_remove_dfs_entry(entry);
+		}
+	}
+	mutex_unlock(&dent_list_lock_lha0);
+	if (par_dent != NULL)
+		debugfs_remove_recursive(par_dent);
+}
+EXPORT_SYMBOL(glink_debugfs_remove_recur);
+
+/**
+ * glink_debugfs_create() - create a debugfs file or directory
+ * @name:	debugfs file name
+ * @show:	pointer to the actual function which will be invoked upon
+ *		opening this file.
+ * @dir:	pointer to a struct glink_dbgfs naming the parent location
+ * @dbgfs_data:	pointer to any private data to be associated with the file
+ * @b_free_req:	boolean value to decide to free the memory associated with
+ *		@dbgfs_data during deletion of the file
+ *
+ * Return: pointer to the file/directory created, NULL in case of error
+ *
+ * This function checks which directory will be used to create the debugfs
+ * file and calls glink_dfs_create_file. Anybody who intends to allocate some
+ * memory for the dbgfs_data and requires to free it in deletion, needs to
+ * set b_free_req to true. Otherwise, there will be a memory leak.
+ */
+struct dentry *glink_debugfs_create(const char *name,
+		void (*show)(struct seq_file *),
+		struct glink_dbgfs *dir, void *dbgfs_data, bool b_free_req)
+{
+	struct dentry *parent = NULL;
+	struct dentry *dent = NULL;
+	struct glink_dbgfs_dent *entry;
+	struct glink_dbgfs_data *file_data;
+	const char *c_dir_name;
+	const char *p_dir_name;
+	unsigned long flags;
+
+	if (dir == NULL) {
+		GLINK_ERR("%s: debugfs_dir strucutre is null\n", __func__);
+		return NULL;
+	}
+	c_dir_name = dir->curr_name;
+	p_dir_name = dir->par_name;
+
+	/* look up the parent dentry by its (parent-name, self-name) pair */
+	mutex_lock(&dent_list_lock_lha0);
+	list_for_each_entry(entry, &dent_list, list_node)
+		if (!strcmp(entry->par_name, p_dir_name)
+				&& !strcmp(entry->self_name, c_dir_name)) {
+			parent = entry->self;
+			break;
+		}
+	mutex_unlock(&dent_list_lock_lha0);
+	/* the new node's parent is the directory that was just looked up */
+	p_dir_name = c_dir_name;
+	c_dir_name = name;
+	if (parent != NULL) {
+		if (dir->b_dir_create) {
+			dent = debugfs_create_dir(name, parent);
+			if (dent != NULL)
+				glink_dfs_update_list(dent, parent,
+						c_dir_name, p_dir_name);
+		} else {
+			file_data = glink_dfs_create_file(name, parent, show,
+						dbgfs_data, b_free_req);
+			/* NOTE(review): "entry" is dereferenced after
+			 * dent_list_lock_lha0 was dropped; nothing in this
+			 * file prevents glink_remove_dfs_entry() from freeing
+			 * it in that window — confirm external serialization.
+			 */
+			spin_lock_irqsave(&entry->file_list_lock_lhb0, flags);
+			if (file_data != NULL)
+				list_add_tail(&file_data->flist,
+						&entry->file_list);
+			spin_unlock_irqrestore(&entry->file_list_lock_lhb0,
+					flags);
+		}
+	} else {
+		GLINK_DBG("%s: parent dentry is null for [%s]\n",
+				__func__, name);
+	}
+	return dent;
+}
+EXPORT_SYMBOL(glink_debugfs_create);
+
+/**
+ * glink_debugfs_init() - initialize the glink debugfs directory structure
+ *
+ * Return: 0 on success, otherwise an appropriate negative error code
+ *
+ * This function initializes the debugfs directory tree for glink.
+ */
+int glink_debugfs_init(void)
+{
+	struct glink_dbgfs dbgfs;
+
+	/* fake parent name */
+	dent = debugfs_create_dir("glink", NULL);
+	if (IS_ERR_OR_NULL(dent)) {
+		/* PTR_ERR(NULL) is 0, which would report false success;
+		 * map a NULL return to -ENOMEM instead
+		 */
+		return dent ? PTR_ERR(dent) : -ENOMEM;
+	}
+
+	glink_dfs_update_list(dent, NULL, "glink", "root");
+
+	dbgfs.b_dir_create = true;
+	dbgfs.curr_name = "glink";
+	dbgfs.par_name = "root";
+	glink_debugfs_create("xprt", NULL, &dbgfs, NULL, false);
+	glink_debugfs_create("channel", NULL, &dbgfs, NULL, false);
+
+	dbgfs.curr_name = "channel";
+	dbgfs.par_name = "glink";
+	dbgfs.b_dir_create = false;
+	glink_debugfs_create("channels", glink_dfs_create_channel_list,
+				&dbgfs, NULL, false);
+	dbgfs.curr_name = "xprt";
+	glink_debugfs_create("xprts", glink_dfs_create_xprt_list,
+				&dbgfs, NULL, false);
+
+	return 0;
+}
+EXPORT_SYMBOL(glink_debugfs_init);
+
+/**
+ * glink_debugfs_exit() - removes the glink debugfs directory
+ *
+ * This function recursively removes all the debugfs directories
+ * starting from dent, the top-level "glink" directory created in
+ * glink_debugfs_init().
+ */
+void glink_debugfs_exit(void)
+{
+	if (dent != NULL)
+		debugfs_remove_recursive(dent);
+}
+EXPORT_SYMBOL(glink_debugfs_exit);
+#else
+/* CONFIG_DEBUG_FS disabled: no-op stubs keep the rest of G-Link linking. */
+void glink_debugfs_remove_recur(struct glink_dbgfs *dfs) { }
+EXPORT_SYMBOL(glink_debugfs_remove_recur);
+
+void glink_debugfs_remove_channel(struct channel_ctx *ch_ctx,
+			struct glink_core_xprt_ctx *xprt_ctx) { }
+EXPORT_SYMBOL(glink_debugfs_remove_channel);
+
+void glink_debugfs_add_channel(struct channel_ctx *ch_ctx,
+			struct glink_core_xprt_ctx *xprt_ctx) { }
+EXPORT_SYMBOL(glink_debugfs_add_channel);
+
+void glink_debugfs_add_xprt(struct glink_core_xprt_ctx *xprt_ctx) { }
+EXPORT_SYMBOL(glink_debugfs_add_xprt);
+
+int glink_debugfs_init(void) { return 0; }
+EXPORT_SYMBOL(glink_debugfs_init);
+
+void glink_debugfs_exit(void) { }
+EXPORT_SYMBOL(glink_debugfs_exit);
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/soc/qcom/glink_private.h b/drivers/soc/qcom/glink_private.h
new file mode 100644
index 0000000000000..d79893227cd26
--- /dev/null
+++ b/drivers/soc/qcom/glink_private.h
@@ -0,0 +1,1044 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _SOC_QCOM_GLINK_PRIVATE_H_
+#define _SOC_QCOM_GLINK_PRIVATE_H_
+
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/dcache.h>
+#include <linux/ipc_logging.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/ratelimit.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <soc/qcom/glink.h>
+
+struct glink_core_xprt_ctx;
+struct channel_ctx;
+enum transport_state_e;
+enum local_channel_state_e;
+
+/* Logging Macros */
+enum {
+ QCOM_GLINK_INFO = 1U << 0,
+ QCOM_GLINK_DEBUG = 1U << 1,
+ QCOM_GLINK_GPIO = 1U << 2,
+ QCOM_GLINK_PERF = 1U << 3,
+};
+
+enum glink_dbgfs_ss {
+ GLINK_DBGFS_MPSS,
+ GLINK_DBGFS_APSS,
+ GLINK_DBGFS_LPASS,
+ GLINK_DBGFS_DSPS,
+ GLINK_DBGFS_RPM,
+ GLINK_DBGFS_WCNSS,
+ GLINK_DBGFS_LLOOP,
+ GLINK_DBGFS_MOCK,
+ GLINK_DBGFS_MAX_NUM_SUBS
+};
+
+enum glink_dbgfs_xprt {
+ GLINK_DBGFS_SMEM,
+ GLINK_DBGFS_SMD,
+ GLINK_DBGFS_XLLOOP,
+ GLINK_DBGFS_XMOCK,
+ GLINK_DBGFS_XMOCK_LOW,
+ GLINK_DBGFS_XMOCK_HIGH,
+ GLINK_DBGFS_MAX_NUM_XPRTS
+};
+
+struct glink_dbgfs {
+ const char *curr_name;
+ const char *par_name;
+ bool b_dir_create;
+};
+
+struct glink_dbgfs_data {
+ struct list_head flist;
+ struct dentry *dent;
+ void (*o_func)(struct seq_file *s);
+ void *priv_data;
+ bool b_priv_free_req;
+};
+
+struct xprt_ctx_iterator {
+ struct list_head *xprt_list;
+ struct glink_core_xprt_ctx *i_curr;
+ unsigned long xprt_list_flags;
+};
+
+struct ch_ctx_iterator {
+ struct list_head *ch_list;
+ struct channel_ctx *i_curr;
+ unsigned long ch_list_flags;
+};
+
+struct glink_ch_intent_info {
+ spinlock_t *li_lst_lock;
+ struct list_head *li_avail_list;
+ struct list_head *li_used_list;
+ spinlock_t *ri_lst_lock;
+ struct list_head *ri_list;
+};
+
+/* Tracer Packet Event IDs for G-Link */
+enum glink_tracer_pkt_events {
+ GLINK_CORE_TX = 1,
+ GLINK_QUEUE_TO_SCHEDULER = 2,
+ GLINK_SCHEDULER_TX = 3,
+ GLINK_XPRT_TX = 4,
+ GLINK_XPRT_RX = 5,
+ GLINK_CORE_RX = 6,
+};
+
+/**
+ * glink_get_ss_enum_string() - get the name of the subsystem based on enum value
+ * @enum_id: enum id of a specific subsystem.
+ *
+ * Return: name of the subsystem, NULL in case of invalid input
+ */
+const char *glink_get_ss_enum_string(unsigned int enum_id);
+
+/**
+ * glink_get_xprt_enum_string() - get the name of the transport based on enum value
+ * @enum_id: enum id of a specific transport.
+ *
+ * Return: name of the transport, NULL in case of invalid input
+ */
+const char *glink_get_xprt_enum_string(unsigned int enum_id);
+
+/**
+ * glink_get_xprt_state_string() - get the name of the transport based on enum value
+ * @enum_id: enum id of the state of the transport.
+ *
+ * Return: name of the transport state, NULL in case of invalid input
+ */
+const char *glink_get_xprt_state_string(enum transport_state_e enum_id);
+
+/**
+ * glink_get_ch_state_string() - get the name of the transport based on enum value
+ * @enum_id: enum id of a specific state of the channel.
+ *
+ * Return: name of the channel state, NULL in case of invalid input
+ */
+const char *glink_get_ch_state_string(enum local_channel_state_e enum_id);
+
+#define GLINK_IPC_LOG_STR(x...) do { \
+ if (glink_get_log_ctx()) \
+ ipc_log_string(glink_get_log_ctx(), x); \
+} while (0)
+
+#define GLINK_DBG(x...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \
+ GLINK_IPC_LOG_STR(x); \
+} while (0)
+
+#define GLINK_INFO(x...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_INFO) \
+ GLINK_IPC_LOG_STR(x); \
+} while (0)
+
+#define GLINK_INFO_PERF(x...) do { \
+ if (glink_get_debug_mask() & (QCOM_GLINK_INFO | QCOM_GLINK_PERF)) \
+ GLINK_IPC_LOG_STR(x); \
+} while (0)
+
+#define GLINK_PERF(x...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_PERF) \
+ GLINK_IPC_LOG_STR("<PERF> " x); \
+} while (0)
+
+#define GLINK_UT_ERR(x...) do { \
+ if (!(glink_get_debug_mask() & QCOM_GLINK_PERF)) \
+ pr_err("<UT> " x); \
+ GLINK_IPC_LOG_STR("<UT> " x); \
+} while (0)
+
+#define GLINK_UT_DBG(x...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \
+ GLINK_IPC_LOG_STR("<UT> " x); \
+} while (0)
+
+#define GLINK_UT_INFO(x...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_INFO) \
+ GLINK_IPC_LOG_STR("<UT> " x); \
+} while (0)
+
+#define GLINK_UT_INFO_PERF(x...) do { \
+ if (glink_get_debug_mask() & (QCOM_GLINK_INFO | QCOM_GLINK_PERF)) \
+ GLINK_IPC_LOG_STR("<UT> " x); \
+} while (0)
+
+#define GLINK_UT_PERF(x...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_PERF) \
+ GLINK_IPC_LOG_STR("<PERF> " x); \
+} while (0)
+
+#define GLINK_XPRT_IPC_LOG_STR(xprt, x...) do { \
+ if (glink_get_xprt_log_ctx(xprt)) \
+ ipc_log_string(glink_get_xprt_log_ctx(xprt), x); \
+} while (0)
+
+#define GLINK_XPRT_IF_INFO(xprt_if, x...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_INFO) \
+ GLINK_XPRT_IPC_LOG_STR(xprt_if.glink_core_priv, "<XPRT> " x); \
+} while (0)
+
+#define GLINK_XPRT_IF_DBG(xprt_if, x...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \
+ GLINK_XPRT_IPC_LOG_STR(xprt_if.glink_core_priv, "<XPRT> " x); \
+} while (0)
+
+#define GLINK_XPRT_IF_ERR(xprt_if, x...) do { \
+ pr_err("<XPRT> " x); \
+ GLINK_XPRT_IPC_LOG_STR(xprt_if.glink_core_priv, "<XPRT> " x); \
+} while (0)
+
+#define GLINK_PERF_XPRT(xprt, fmt, args...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_PERF) \
+ GLINK_XPRT_IPC_LOG_STR(xprt, "<PERF> %s:%s " fmt, \
+ xprt->name, xprt->edge, args); \
+} while (0)
+
+#define GLINK_PERF_CH(ctx, fmt, args...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_PERF) \
+ GLINK_XPRT_IPC_LOG_STR(ctx->transport_ptr, \
+ "<PERF> %s:%s:%s[%u:%u] " fmt, \
+ ctx->transport_ptr->name, \
+ ctx->transport_ptr->edge, \
+ ctx->name, \
+ ctx->lcid, \
+ ctx->rcid, args); \
+} while (0)
+
+#define GLINK_PERF_CH_XPRT(ctx, xprt, fmt, args...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_PERF) \
+ GLINK_XPRT_IPC_LOG_STR(xprt, \
+ "<PERF> %s:%s:%s[%u:%u] " fmt, \
+ xprt->name, \
+ xprt->edge, \
+ ctx->name, \
+ ctx->lcid, \
+ ctx->rcid, args); \
+} while (0)
+
+#define GLINK_INFO_PERF_XPRT(xprt, fmt, args...) do { \
+ if (glink_get_debug_mask() & (QCOM_GLINK_INFO | QCOM_GLINK_PERF)) \
+ GLINK_XPRT_IPC_LOG_STR(xprt, "<CORE> %s:%s " fmt, \
+ xprt->name, xprt->edge, args); \
+} while (0)
+
+#define GLINK_INFO_PERF_CH(ctx, fmt, args...) do { \
+ if (glink_get_debug_mask() & (QCOM_GLINK_INFO | QCOM_GLINK_PERF)) \
+ GLINK_XPRT_IPC_LOG_STR(ctx->transport_ptr, \
+ "<CORE> %s:%s:%s[%u:%u] " fmt, \
+ ctx->transport_ptr->name, \
+ ctx->transport_ptr->edge, \
+ ctx->name, \
+ ctx->lcid, \
+ ctx->rcid, args); \
+} while (0)
+
+#define GLINK_INFO_PERF_CH_XPRT(ctx, xprt, fmt, args...) do { \
+ if (glink_get_debug_mask() & (QCOM_GLINK_INFO | QCOM_GLINK_PERF)) \
+ GLINK_XPRT_IPC_LOG_STR(xprt,\
+ "<CORE> %s:%s:%s[%u:%u] " fmt, \
+ xprt->name, \
+ xprt->edge, \
+ ctx->name, \
+ ctx->lcid, \
+ ctx->rcid, args); \
+} while (0)
+
+#define GLINK_INFO_XPRT(xprt, fmt, args...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_INFO) \
+ GLINK_XPRT_IPC_LOG_STR(xprt, "<CORE> %s:%s " fmt, \
+ xprt->name, xprt->edge, args); \
+} while (0)
+
+#define GLINK_INFO_CH(ctx, fmt, args...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_INFO) \
+ GLINK_XPRT_IPC_LOG_STR(ctx->transport_ptr, \
+ "<CORE> %s:%s:%s[%u:%u] " fmt, \
+ ctx->transport_ptr->name, \
+ ctx->transport_ptr->edge, \
+ ctx->name, \
+ ctx->lcid, \
+ ctx->rcid, args); \
+} while (0)
+
+#define GLINK_INFO_CH_XPRT(ctx, xprt, fmt, args...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_INFO) \
+ GLINK_XPRT_IPC_LOG_STR(xprt, \
+ "<CORE> %s:%s:%s[%u:%u] " fmt, \
+ xprt->name, \
+ xprt->edge, \
+ ctx->name, \
+ ctx->lcid, \
+ ctx->rcid, args); \
+} while (0)
+
+#define GLINK_DBG_XPRT(xprt, fmt, args...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \
+ GLINK_XPRT_IPC_LOG_STR(xprt, "<CORE> %s:%s " fmt, \
+ xprt->name, xprt->edge, args); \
+} while (0)
+
+#define GLINK_DBG_CH(ctx, fmt, args...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \
+ GLINK_XPRT_IPC_LOG_STR(ctx->transport_ptr, \
+ "<CORE> %s:%s:%s[%u:%u] " fmt, \
+ ctx->transport_ptr->name, \
+ ctx->transport_ptr->edge, \
+ ctx->name, \
+ ctx->lcid, \
+ ctx->rcid, args); \
+} while (0)
+
+#define GLINK_DBG_CH_XPRT(ctx, xprt, fmt, args...) do { \
+ if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \
+ GLINK_XPRT_IPC_LOG_STR(xprt, \
+ "<CORE> %s:%s:%s[%u:%u] " fmt, \
+ xprt->name, \
+ xprt->edge, \
+ ctx->name, \
+ ctx->lcid, \
+ ctx->rcid, args); \
+} while (0)
+
+#define GLINK_ERR(x...) do { \
+ pr_err_ratelimited("<CORE> " x); \
+ GLINK_IPC_LOG_STR("<CORE> " x); \
+} while (0)
+
+#define GLINK_ERR_XPRT(xprt, fmt, args...) do { \
+ pr_err_ratelimited("<CORE> %s:%s " fmt, \
+ xprt->name, xprt->edge, args); \
+ GLINK_INFO_XPRT(xprt, fmt, args); \
+} while (0)
+
+#define GLINK_ERR_CH(ctx, fmt, args...) do { \
+ pr_err_ratelimited("<CORE> %s:%s:%s[%u:%u] " fmt, \
+ ctx->transport_ptr->name, \
+ ctx->transport_ptr->edge, \
+ ctx->name, \
+ ctx->lcid, \
+ ctx->rcid, args); \
+ GLINK_INFO_CH(ctx, fmt, args); \
+} while (0)
+
+#define GLINK_ERR_CH_XPRT(ctx, xprt, fmt, args...) do { \
+ pr_err_ratelimited("<CORE> %s:%s:%s[%u:%u] " fmt, \
+ xprt->name, \
+ xprt->edge, \
+ ctx->name, \
+ ctx->lcid, \
+ ctx->rcid, args); \
+ GLINK_INFO_CH_XPRT(ctx, xprt, fmt, args); \
+} while (0)
+
+/**
+ * OVERFLOW_ADD_UNSIGNED() - check for unsigned overflow
+ *
+ * type: type to check for overflow
+ * a: left value to use
+ * b: right value to use
+ * returns: true if a + b will result in overflow; false otherwise
+ */
+#define OVERFLOW_ADD_UNSIGNED(type, a, b) \
+ (((type)~0 - (a)) < (b) ? true : false)
+
+/**
+ * glink_get_debug_mask() - Return debug mask attribute
+ *
+ * Return: debug mask attribute
+ */
+unsigned glink_get_debug_mask(void);
+
+/**
+ * glink_get_log_ctx() - Return log context for other GLINK modules.
+ *
+ * Return: Log context or NULL if none.
+ */
+void *glink_get_log_ctx(void);
+
+/**
+ * glink_get_xprt_log_ctx() - Return log context for other GLINK modules.
+ *
+ * Return: Log context or NULL if none.
+ */
+void *glink_get_xprt_log_ctx(struct glink_core_xprt_ctx *xprt);
+
+/**
+ * glink_get_channel_id_for_handle() - Get logical channel ID
+ *
+ * @handle: handle of channel
+ *
+ * Used internally by G-Link debugfs.
+ *
+ * Return: Logical Channel ID or standard Linux error code
+ */
+int glink_get_channel_id_for_handle(void *handle);
+
+/**
+ * glink_get_channel_name_for_handle() - return channel name
+ *
+ * @handle: handle of channel
+ *
+ * Used internally by G-Link debugfs.
+ *
+ * Return: Channel name or NULL
+ */
+char *glink_get_channel_name_for_handle(void *handle);
+
+/**
+ * glink_debugfs_init() - initialize glink debugfs directory
+ *
+ * Return: error code or success.
+ */
+int glink_debugfs_init(void);
+
+/**
+ * glink_debugfs_exit() - removes glink debugfs directory
+ */
+void glink_debugfs_exit(void);
+
+/**
+ * glink_debugfs_create() - create the debugfs file
+ * @name: debugfs file name
+ * @show: pointer to the actual function which will be invoked upon
+ * opening this file.
+ * @dir: pointer to a structure debugfs_dir
+ * @dbgfs_data: pointer to any private data need to be associated with debugfs
+ * @b_free_req: boolean value to decide to free the memory associated with
+ * @dbgfs_data during deletion of the file
+ *
+ * Return: pointer to the file/directory created, NULL in case of error
+ *
+ * This function checks which directory will be used to create the debugfs file
+ * and calls glink_dfs_create_file. Anybody who intend to allocate some memory
+ * for the dbgfs_data and required to free it in deletion, need to set
+ * b_free_req to true. Otherwise, there will be a memory leak.
+ */
+struct dentry *glink_debugfs_create(const char *name,
+ void (*show)(struct seq_file *),
+ struct glink_dbgfs *dir, void *dbgfs_data, bool b_free_req);
+
+/**
+ * glink_debugfs_remove_recur() - remove the the directory & files recursively
+ * @rm_dfs: pointer to the structure glink_dbgfs
+ *
+ * This function removes the files & directories. This also takes care of
+ * freeing any memory associated with the debugfs file.
+ */
+void glink_debugfs_remove_recur(struct glink_dbgfs *dfs);
+
+/**
+ * glink_debugfs_remove_channel() - remove all channel specifc files & folder in
+ * debugfs when channel is fully closed
+ * @ch_ctx: pointer to the channel_contenxt
+ * @xprt_ctx: pointer to the transport_context
+ *
+ * This function is invoked when any channel is fully closed. It removes the
+ * folders & other files in debugfs for that channel.
+ */
+void glink_debugfs_remove_channel(struct channel_ctx *ch_ctx,
+ struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_debugfs_add_channel() - create channel specifc files & folder in
+ * debugfs when channel is added
+ * @ch_ctx: pointer to the channel_contenxt
+ * @xprt_ctx: pointer to the transport_context
+ *
+ * This function is invoked when a new channel is created. It creates the
+ * folders & other files in debugfs for that channel
+ */
+void glink_debugfs_add_channel(struct channel_ctx *ch_ctx,
+ struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_debugfs_add_xprt() - create transport specifc files & folder in
+ * debugfs when new transport is registerd
+ * @xprt_ctx: pointer to the transport_context
+ *
+ * This function is invoked when a new transport is registered. It creates the
+ * folders & other files in debugfs for that transport
+ */
+void glink_debugfs_add_xprt(struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_xprt_ctx_iterator_init() - Initializes the transport context list iterator
+ * @xprt_i: pointer to the transport context iterator.
+ *
+ * Return: None
+ *
+ * This function acquires the transport context lock which must then be
+ * released by glink_xprt_ctx_iterator_end()
+ */
+void glink_xprt_ctx_iterator_init(struct xprt_ctx_iterator *xprt_i);
+
+/**
+ * glink_xprt_ctx_iterator_end() - Ends the transport context list iteration
+ * @xprt_i: pointer to the transport context iterator.
+ *
+ * Return: None
+ */
+void glink_xprt_ctx_iterator_end(struct xprt_ctx_iterator *xprt_i);
+
+/**
+ * glink_xprt_ctx_iterator_next() - iterates element by element in transport context list
+ * @xprt_i: pointer to the transport context iterator.
+ *
+ * Return: pointer to the transport context structure
+ */
+struct glink_core_xprt_ctx *glink_xprt_ctx_iterator_next(
+ struct xprt_ctx_iterator *xprt_i);
+
+/**
+ * glink_get_xprt_name() - get the transport name
+ * @xprt_ctx: pointer to the transport context.
+ *
+ * Return: name of the transport
+ */
+char *glink_get_xprt_name(struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_get_xprt_edge_name() - get the name of the remote processor/edge
+ * of the transport
+ * @xprt_ctx: pointer to the transport context.
+ *
+ * Return: name of the remote processor/edge
+ */
+char *glink_get_xprt_edge_name(struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_get_xprt_state() - get the state of the transport
+ * @xprt_ctx: pointer to the transport context.
+ *
+ * Return: name of the transport state, NULL in case of invalid input
+ */
+const char *glink_get_xprt_state(struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_get_xprt_version_features() - get the version and feature set
+ * of local transport in glink
+ * @xprt_ctx: pointer to the transport context.
+ *
+ * Return: pointer to the glink_core_version
+ */
+const struct glink_core_version *glink_get_xprt_version_features(
+ struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_ch_ctx_iterator_init() - Initializes the channel context list iterator
+ * @ch_iter: pointer to the channel context iterator.
+ * @xprt: pointer to the transport context that holds the channel list
+ *
+ * This function acquires the channel context lock which must then be
+ * released by glink_ch_ctx_iterator_end()
+ */
+void glink_ch_ctx_iterator_init(struct ch_ctx_iterator *ch_iter,
+ struct glink_core_xprt_ctx *xprt);
+
+/**
+ * glink_ch_ctx_iterator_end() - Ends the channel context list iteration
+ * @ch_iter: pointer to the channel context iterator.
+ *
+ */
+void glink_ch_ctx_iterator_end(struct ch_ctx_iterator *ch_iter,
+ struct glink_core_xprt_ctx *xprt);
+
+/**
+ * glink_ch_ctx_iterator_next() - iterates element by element in channel context list
+ * @c_i: pointer to the channel context iterator.
+ *
+ * Return: pointer to the channel context structure
+ */
+struct channel_ctx *glink_ch_ctx_iterator_next(struct ch_ctx_iterator *ch_iter);
+
+/**
+ * glink_get_ch_name() - get the channel name
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: name of the channel, NULL in case of invalid input
+ */
+char *glink_get_ch_name(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_edge_name() - get the name of the remote processor/edge
+ * of the channel
+ * @xprt_ctx: pointer to the channel context.
+ *
+ * Return: name of the remote processor/edge
+ */
+char *glink_get_ch_edge_name(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_rcid() - get the remote channel ID
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: remote channel id, -EINVAL in case of invalid input
+ */
+int glink_get_ch_lcid(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_rcid() - get the remote channel ID
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: remote channel id, -EINVAL in case of invalid input
+ */
+int glink_get_ch_rcid(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_lstate() - get the local channel state
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: name of the local channel state, NULL in case of invalid input
+ */
+const char *glink_get_ch_lstate(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_rstate() - get the remote channel state
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: true if remote side is opened false otherwise
+ */
+bool glink_get_ch_rstate(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_xprt_name() - get the name of the transport to which
+ * the channel belongs
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: name of the export, NULL in case of invalid input
+ */
+char *glink_get_ch_xprt_name(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_tx_pkt_count() - get the total number of packets sent
+ * through this channel
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: number of packets transmitted, -EINVAL in case of invalid input
+ */
+int glink_get_ch_tx_pkt_count(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_rx_pkt_count() - get the total number of packets
+ * recieved at this channel
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: number of packets recieved, -EINVAL in case of invalid input
+ */
+int glink_get_ch_rx_pkt_count(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_lintents_queued() - get the total number of intents queued
+ * at local side
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: number of intents queued, -EINVAL in case of invalid input
+ */
+int glink_get_ch_lintents_queued(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_rintents_queued() - get the total number of intents queued
+ * from remote side
+ * @ch_ctx: pointer to the channel context.
+ *
+ * Return: number of intents queued
+ */
+int glink_get_ch_rintents_queued(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_intent_info() - get the intent details of a channel
+ * @ch_ctx: pointer to the channel context.
+ * @ch_ctx_i: pointer to a structure that will contain intent details
+ *
+ * This funcion is used to get all the channel intent details including locks.
+ */
+void glink_get_ch_intent_info(struct channel_ctx *ch_ctx,
+ struct glink_ch_intent_info *ch_ctx_i);
+
+/**
+ * enum ssr_command - G-Link SSR protocol commands
+ */
+enum ssr_command {
+ GLINK_SSR_DO_CLEANUP,
+ GLINK_SSR_CLEANUP_DONE,
+};
+
+/**
+ * struct ssr_notify_data - Contains private data used for client notifications
+ * from G-Link.
+ * tx_done: Indicates whether or not the tx_done notification has
+ * been received.
+ * event: The state notification event received.
+ * responded: Indicates whether or not a cleanup_done response was
+ * received.
+ * edge: The G-Link edge name for the channel associated with
+ * this callback data
+ * do_cleanup_data: Structure containing the G-Link SSR do_cleanup message.
+ */
+struct ssr_notify_data {
+ bool tx_done;
+ unsigned event;
+ bool responded;
+ const char *edge;
+ struct do_cleanup_msg *do_cleanup_data;
+};
+
+/**
+ * struct subsys_info - Subsystem info structure
+ * ssr_name: name of the subsystem recognized by the SSR framework
+ * edge: name of the G-Link edge
+ * xprt: name of the G-Link transport
+ * handle: glink_ssr channel used for this subsystem
+ * link_state_handle: link state handle for this edge, used to unregister
+ * from receiving link state callbacks
+ * link_info: Transport info used in link state callback registration
+ * cb_data: Private callback data structure for notification
+ * functions
+ * subsystem_list_node: used to chain this structure in a list of subsystem
+ * info structures
+ * notify_list: list of subsys_info_leaf structures, containing the
+ * subsystems to notify if this subsystem undergoes SSR
+ * notify_list_len: length of notify_list
+ * link_up: Flag indicating whether transport is up or not
+ * link_up_lock: Lock for protecting the link_up flag
+ */
+struct subsys_info {
+ const char *ssr_name;
+ const char *edge;
+ const char *xprt;
+ void *handle;
+ void *link_state_handle;
+ struct glink_link_info *link_info;
+ struct ssr_notify_data *cb_data;
+ struct list_head subsystem_list_node;
+ struct list_head notify_list;
+ int notify_list_len;
+ bool link_up;
+ spinlock_t link_up_lock;
+};
+
+/**
+ * struct subsys_info_leaf - Subsystem info leaf structure (a subsystem on the
+ * notify list of a subsys_info structure)
+ * ssr_name: Name of the subsystem recognized by the SSR framework
+ * edge: Name of the G-Link edge
+ * xprt: Name of the G-Link transport
+ * restarted: Indicates whether a restart has been triggered for this edge
+ * cb_data: Private callback data structure for notification functions
+ * notify_list_node: used to chain this structure in the notify list
+ */
+struct subsys_info_leaf {
+ const char *ssr_name;
+ const char *edge;
+ const char *xprt;
+ bool restarted;
+ struct ssr_notify_data *cb_data;
+ struct list_head notify_list_node;
+};
+
+/**
+ * struct do_cleanup_msg - The data structure for an SSR do_cleanup message
+ * version: The G-Link SSR protocol version
+ * command: The G-Link SSR command - do_cleanup
+ * seq_num: Sequence number
+ * name_len: Length of the name of the subsystem being restarted
+ * name: G-Link edge name of the subsystem being restarted
+ */
+struct do_cleanup_msg {
+ uint32_t version;
+ uint32_t command;
+ uint32_t seq_num;
+ uint32_t name_len;
+ char name[32];
+};
+
+/**
+ * struct cleanup_done_msg - The data structure for an SSR cleanup_done message
+ * version: The G-Link SSR protocol version
+ * response: The G-Link SSR response to a do_cleanup command, cleanup_done
+ * seq_num: Sequence number
+ */
+struct cleanup_done_msg {
+ uint32_t version;
+ uint32_t response;
+ uint32_t seq_num;
+};
+
+/**
+ * get_info_for_subsystem() - Retrieve information about a subsystem from the
+ * global subsystem_info_list
+ * @subsystem: The name of the subsystem recognized by the SSR
+ * framework
+ *
+ * Return: subsys_info structure containing info for the requested subsystem;
+ * NULL if no structure can be found for the requested subsystem
+ */
+struct subsys_info *get_info_for_subsystem(const char *subsystem);
+
+/**
+ * get_info_for_edge() - Retrieve information about a subsystem from the
+ * global subsystem_info_list
+ * @edge: The name of the edge recognized by G-Link
+ *
+ * Return: subsys_info structure containing info for the requested subsystem;
+ * NULL if no structure can be found for the requested subsystem
+ */
+struct subsys_info *get_info_for_edge(const char *edge);
+
+/**
+ * glink_ssr_get_seq_num() - Get the current SSR sequence number
+ *
+ * Return: The current SSR sequence number
+ */
+uint32_t glink_ssr_get_seq_num(void);
+
+/*
+ * glink_ssr() - SSR cleanup function.
+ *
+ * Return: Standard error code.
+ */
+int glink_ssr(const char *subsystem);
+
+/**
+ * notify for subsystem() - Notify other subsystems that a subsystem is being
+ * restarted
+ * @ss_info: Subsystem info structure for the subsystem being restarted
+ *
+ * This function sends notifications to affected subsystems that the subsystem
+ * in ss_info is being restarted, and waits for the cleanup done response from
+ * all of those subsystems. It also initiates any local cleanup that is
+ * necessary.
+ *
+ * Return: 0 on success, standard error codes otherwise
+ */
+int notify_for_subsystem(struct subsys_info *ss_info);
+
+/**
+ * glink_ssr_wait_cleanup_done() - Get the value of the
+ * notifications_successful flag in glink_ssr.
+ * @timeout_multiplier: timeout multiplier for waiting on all processors
+ *
+ *
+ * Return: True if cleanup_done received from all processors, false otherwise
+ */
+bool glink_ssr_wait_cleanup_done(unsigned ssr_timeout_multiplier);
+
+struct channel_lcid {
+ struct list_head list_node;
+ uint32_t lcid;
+};
+
+/**
+ * struct rwref_lock - Read/Write Reference Lock
+ *
+ * kref: reference count
+ * read_count: number of readers that own the lock
+ * write_count: number of writers (max 1) that own the lock
+ * count_zero: used for internal signaling for non-atomic locks
+ *
+ * A Read/Write Reference Lock is a combination of a read/write spinlock and a
+ * refence count. The main difference is that no locks are held in the
+ * critical section and the lifetime of the object is guaranteed.
+ *
+ * Read Locking
+ * Multiple readers may access the lock at any given time and a read lock will
+ * also ensure that the object exists for the life of the lock.
+ *
+ * rwref_read_get()
+ * use resource in "critical section" (no locks are held)
+ * rwref_read_put()
+ *
+ * Write Locking
+ * A single writer may access the lock at any given time and a write lock will
+ * also ensure that the object exists for the life of the lock.
+ *
+ * rwref_write_get()
+ * use resource in "critical section" (no locks are held)
+ * rwref_write_put()
+ *
+ * Reference Lock
+ * To ensure the lifetime of the lock (and not affect the read or write lock),
+ * a simple reference can be done. By default, rwref_lock_init() will set the
+ * reference count to 1.
+ *
+ * rwref_lock_init() Reference count is 1
+ * rwref_get() Reference count is 2
+ * rwref_put() Reference count is 1
+ * rwref_put() Reference count goes to 0 and object is destroyed
+ */
+struct rwref_lock {
+ struct kref kref;
+ unsigned read_count;
+ unsigned write_count;
+ spinlock_t lock;
+ struct completion count_zero;
+
+ void (*release)(struct rwref_lock *);
+};
+
+/**
+ * rwref_lock_release() - Initialize rwref_lock
+ * lock_ptr: pointer to lock structure
+ */
+static inline void rwref_lock_release(struct kref *kref_ptr)
+{
+ struct rwref_lock *lock_ptr;
+
+ BUG_ON(kref_ptr == NULL);
+
+ lock_ptr = container_of(kref_ptr, struct rwref_lock, kref);
+ if (lock_ptr->release)
+ lock_ptr->release(lock_ptr);
+}
+
+/**
+ * rwref_lock_init() - Initialize rwref_lock
+ * lock_ptr: pointer to lock structure
+ * release: release function called when reference count goes to 0
+ */
+static inline void rwref_lock_init(struct rwref_lock *lock_ptr,
+ void (*release)(struct rwref_lock *))
+{
+ BUG_ON(lock_ptr == NULL);
+
+ kref_init(&lock_ptr->kref);
+ lock_ptr->read_count = 0;
+ lock_ptr->write_count = 0;
+ spin_lock_init(&lock_ptr->lock);
+ init_completion(&lock_ptr->count_zero);
+ lock_ptr->release = release;
+}
+
+/**
+ * rwref_get() - gains a reference count for the object
+ * lock_ptr: pointer to lock structure
+ */
+static inline void rwref_get(struct rwref_lock *lock_ptr)
+{
+ BUG_ON(lock_ptr == NULL);
+
+ kref_get(&lock_ptr->kref);
+}
+
+/**
+ * rwref_put() - puts a reference count for the object
+ * lock_ptr: pointer to lock structure
+ *
+ * If the reference count goes to zero, the release function is called.
+ */
+static inline void rwref_put(struct rwref_lock *lock_ptr)
+{
+ BUG_ON(lock_ptr == NULL);
+
+ kref_put(&lock_ptr->kref, rwref_lock_release);
+}
+
+/**
+ * rwref_read_get() - gains a reference count for a read operation
+ * lock_ptr: pointer to lock structure
+ *
+ * Multiple readers may acquire the lock as long as the write count is zero.
+ */
+static inline void rwref_read_get(struct rwref_lock *lock_ptr)
+{
+ unsigned long flags;
+
+ BUG_ON(lock_ptr == NULL);
+
+ kref_get(&lock_ptr->kref);
+ while (1) {
+ spin_lock_irqsave(&lock_ptr->lock, flags);
+ if (lock_ptr->write_count == 0) {
+ lock_ptr->read_count++;
+ spin_unlock_irqrestore(&lock_ptr->lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(&lock_ptr->lock, flags);
+ wait_for_completion(&lock_ptr->count_zero);
+ }
+}
+
+/**
+ * rwref_read_put() - returns a reference count for a read operation
+ * lock_ptr: pointer to lock structure
+ *
+ * Must be preceded by a call to rwref_read_get().
+ */
+static inline void rwref_read_put(struct rwref_lock *lock_ptr)
+{
+ unsigned long flags;
+
+ BUG_ON(lock_ptr == NULL);
+
+ spin_lock_irqsave(&lock_ptr->lock, flags);
+ BUG_ON(lock_ptr->read_count == 0);
+ if (--lock_ptr->read_count == 0)
+ complete(&lock_ptr->count_zero);
+ spin_unlock_irqrestore(&lock_ptr->lock, flags);
+ kref_put(&lock_ptr->kref, rwref_lock_release);
+}
+
+/**
+ * rwref_write_get() - gains a reference count for a write operation
+ * lock_ptr: pointer to lock structure
+ *
+ * Only one writer may acquire the lock as long as the reader count is zero.
+ */
+static inline void rwref_write_get(struct rwref_lock *lock_ptr)
+{
+ unsigned long flags;
+
+ BUG_ON(lock_ptr == NULL);
+
+ kref_get(&lock_ptr->kref);
+ while (1) {
+ spin_lock_irqsave(&lock_ptr->lock, flags);
+ if (lock_ptr->read_count == 0 && lock_ptr->write_count == 0) {
+ lock_ptr->write_count++;
+ spin_unlock_irqrestore(&lock_ptr->lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(&lock_ptr->lock, flags);
+ wait_for_completion(&lock_ptr->count_zero);
+ }
+}
+
+/**
+ * rwref_write_put() - returns a reference count for a write operation
+ * lock_ptr: pointer to lock structure
+ *
+ * Must be preceded by a call to rwref_write_get().
+ */
+static inline void rwref_write_put(struct rwref_lock *lock_ptr)
+{
+ unsigned long flags;
+
+ BUG_ON(lock_ptr == NULL);
+
+ spin_lock_irqsave(&lock_ptr->lock, flags);
+ BUG_ON(lock_ptr->write_count != 1);
+ if (--lock_ptr->write_count == 0)
+ complete(&lock_ptr->count_zero);
+ spin_unlock_irqrestore(&lock_ptr->lock, flags);
+ kref_put(&lock_ptr->kref, rwref_lock_release);
+}
+
+#endif /* _SOC_QCOM_GLINK_PRIVATE_H_ */
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
new file mode 100644
index 0000000000000..2796493dfee2f
--- /dev/null
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -0,0 +1,2500 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ipc_logging.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/srcu.h>
+#include <linux/wait.h>
+#include <soc/qcom/tracer_pkt.h>
+#include "glink_core_if.h"
+#include "glink_private.h"
+#include "glink_xprt_if.h"
+#include <linux/soc/qcom/smd.h>
+#include <soc/qcom/glink.h>
+
+#define XPRT_NAME "smem"
+#define FIFO_FULL_RESERVE 8
+#define FIFO_ALIGNMENT 8
+#define TX_BLOCKED_CMD_RESERVE 8 /* size of struct read_notif_request */
+#define RPM_TOC_ID 0x67727430
+#define RPM_TX_FIFO_ID 0x61703272
+#define RPM_RX_FIFO_ID 0x72326170
+#define RPM_TOC_SIZE 256
+#define RPM_MAX_TOC_ENTRIES 20
+#define RPM_FIFO_ADDR_ALIGN_BYTES 3
+#define TRACER_PKT_FEATURE BIT(2)
+
+/*
+ * NOTE(review): none of these three file-scope objects is referenced by the
+ * code visible in this chunk — confirm they are used later in the file (or
+ * remove them; glink_ack in particular is never init_completion()'d here).
+ */
+static struct device *glink_dev;
+static struct completion glink_ack;
+static struct bus_type *ipc_bus;
+
+#define GLINK_RPM_REQUEST_TIMEOUT 5*HZ
+
+/**
+ * enum command_types - definition of the types of commands sent/received
+ * @VERSION_CMD: Version and feature set supported
+ * @VERSION_ACK_CMD: Response for @VERSION_CMD
+ * @OPEN_CMD: Open a channel
+ * @CLOSE_CMD: Close a channel
+ * @OPEN_ACK_CMD: Response to @OPEN_CMD
+ * @RX_INTENT_CMD: RX intent for a channel was queued
+ * @RX_DONE_CMD: Use of RX intent for a channel is complete
+ * @RX_INTENT_REQ_CMD: Request to have RX intent queued
+ * @RX_INTENT_REQ_ACK_CMD: Response for @RX_INTENT_REQ_CMD
+ * @TX_DATA_CMD: Start of a data transfer
+ * @ZERO_COPY_TX_DATA_CMD: Start of a data transfer with zero copy
+ * @CLOSE_ACK_CMD: Response for @CLOSE_CMD
+ * @TX_DATA_CONT_CMD: Continuation or end of a data transfer
+ * @READ_NOTIF_CMD: Request for a notification when this cmd is read
+ * @RX_DONE_W_REUSE_CMD: Same as @RX_DONE_CMD but also reuse the used intent
+ * @SIGNALS_CMD: Sideband signals
+ * @TRACER_PKT_CMD: Start of a Tracer Packet Command
+ * @TRACER_PKT_CONT_CMD: Continuation or end of a Tracer Packet Command
+ *
+ * The implicit enum values are the command IDs written to the shared fifo
+ * (see the tx_cmd_* helpers), so the order is part of the wire protocol and
+ * must not be changed.
+ */
+enum command_types {
+	VERSION_CMD,
+	VERSION_ACK_CMD,
+	OPEN_CMD,
+	CLOSE_CMD,
+	OPEN_ACK_CMD,
+	RX_INTENT_CMD,
+	RX_DONE_CMD,
+	RX_INTENT_REQ_CMD,
+	RX_INTENT_REQ_ACK_CMD,
+	TX_DATA_CMD,
+	ZERO_COPY_TX_DATA_CMD,
+	CLOSE_ACK_CMD,
+	TX_DATA_CONT_CMD,
+	READ_NOTIF_CMD,
+	RX_DONE_W_REUSE_CMD,
+	SIGNALS_CMD,
+	TRACER_PKT_CMD,
+	TRACER_PKT_CONT_CMD,
+};
+
+/**
+ * struct channel_desc - description of a channel fifo with a remote entity
+ * @read_index:		The read index for the fifo where data should be
+ *			consumed from.
+ * @write_index:	The write index for the fifo where data should be
+ *			produced to.
+ *
+ * This structure resides in SMEM and contains the control information for the
+ * fifo data pipes of the channel. There is one physical channel between us
+ * and a remote entity.
+ */
+struct channel_desc {
+	uint32_t read_index;
+	uint32_t write_index;
+};
+
+/**
+ * struct edge_info - local information for managing a single complete edge
+ * @xprt_if:		The transport interface registered with the
+ *			glink core associated with this edge.
+ * @xprt_cfg:		The transport configuration for the glink core
+ *			associated with this edge.
+ * @intentless:		True if this edge runs in intentless mode.
+ * @irq_disabled:	Flag indicating whether the interrupt is enabled
+ *			or disabled.
+ * @rx_reset_reg:	Reference to the register to reset the rx irq
+ *			line, if applicable.
+ * @out_irq_reg:	Reference to the register to send an irq to the
+ *			remote side.
+ * @out_irq_mask:	Mask written to @out_irq_reg to trigger the
+ *			correct irq.
+ * @irq_line:		The incoming interrupt line.
+ * @tx_irq_count:	Number of interrupts triggered.
+ * @rx_irq_count:	Number of interrupts received.
+ * @tx_ch_desc:		Reference to the channel description structure
+ *			for tx in SMEM for this edge.
+ * @rx_ch_desc:		Reference to the channel description structure
+ *			for rx in SMEM for this edge.
+ * @tx_fifo:		Reference to the transmit fifo in SMEM.
+ * @rx_fifo:		Reference to the receive fifo in SMEM.
+ * @tx_fifo_size:	Total size of @tx_fifo.
+ * @rx_fifo_size:	Total size of @rx_fifo.
+ * @read_from_fifo:	Memcpy for this edge.
+ * @write_to_fifo:	Memcpy for this edge.
+ * @write_lock:		Lock to serialize access to @tx_fifo.
+ * @tx_blocked_queue:	Queue of entities waiting for the remote side to
+ *			signal @tx_fifo has flushed and is now empty.
+ * @tx_resume_needed:	A tx resume signal needs to be sent to the glink
+ *			core once the remote side indicates @tx_fifo has
+ *			flushed.
+ * @tx_blocked_signal_sent:	Flag to indicate the flush signal has already
+ *			been sent, and a response is pending from the
+ *			remote side.  Protected by @write_lock.
+ * @kwork:		Work to be executed when an irq is received.
+ * @kworker:		Handle to the entity processing @kwork.
+ * @task:		Handle to the task context used to run @kworker.
+ * @use_ref:		Active uses of this transport use this to grab
+ *			a reference.  Used for ssr synchronization.
+ * @rx_lock:		Used to serialize concurrent instances of rx
+ *			processing.
+ * @deferred_cmds:	List of deferred commands that need to be
+ *			processed in process context.
+ * @num_pw_states:	Size of @ramp_time_us.
+ * @ramp_time_us:	Array of ramp times in microseconds where array
+ *			index position represents a power state.
+ */
+struct edge_info {
+	struct glink_transport_if xprt_if;
+	struct glink_core_transport_cfg xprt_cfg;
+	bool intentless;
+	bool irq_disabled;
+	void __iomem *rx_reset_reg;
+	void __iomem *out_irq_reg;
+	uint32_t out_irq_mask;
+	uint32_t irq_line;
+	uint32_t tx_irq_count;
+	uint32_t rx_irq_count;
+	struct channel_desc *tx_ch_desc;
+	struct channel_desc *rx_ch_desc;
+	void __iomem *tx_fifo;
+	void __iomem *rx_fifo;
+	uint32_t tx_fifo_size;
+	uint32_t rx_fifo_size;
+	void * (*read_from_fifo)(void *dest, const void *src, size_t num_bytes);
+	void * (*write_to_fifo)(void *dest, const void *src, size_t num_bytes);
+	spinlock_t write_lock;
+	wait_queue_head_t tx_blocked_queue;
+	bool tx_resume_needed;
+	bool tx_blocked_signal_sent;
+	struct kthread_work kwork;
+	struct kthread_worker kworker;
+	struct task_struct *task;
+	struct srcu_struct use_ref;
+	spinlock_t rx_lock;
+	struct list_head deferred_cmds;
+	uint32_t num_pw_states;
+	unsigned long *ramp_time_us;
+};
+
+/**
+ * struct deferred_cmd - description of a command to be processed later
+ * @list_node:	Used to put this command on a list in the edge.
+ * @id:		ID of the command.
+ * @param1:	Parameter one of the command.
+ * @param2:	Parameter two of the command.
+ * @data:	Extra data associated with the command, if applicable.
+ *		Ownership passes to whoever dequeues the command (see
+ *		__rx_worker(), which kfree()s it after use).
+ *
+ * This structure stores the relevant information of a command that was removed
+ * from the fifo but needs to be processed at a later time.
+ */
+struct deferred_cmd {
+	struct list_head list_node;
+	uint16_t id;
+	uint16_t param1;
+	uint32_t param2;
+	void *data;
+};
+
+static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
+				      const struct glink_core_version *version,
+				      uint32_t features);
+static void register_debugfs_info(struct edge_info *einfo);
+
+static DEFINE_MUTEX(probe_lock);
+/* Protocol versions offered to the core; only v1 (with tracer packets). */
+static struct glink_core_version versions[] = {
+	{1, TRACER_PKT_FEATURE, negotiate_features_v1},
+};
+
+/**
+ * send_irq() - send an irq to a remote entity as an event signal
+ * @einfo:	Which remote entity that should receive the irq.
+ */
+static void send_irq(struct edge_info *einfo)
+{
+	/*
+	 * Any data associated with this event must be visible to the remote
+	 * before the interrupt is triggered
+	 */
+	wmb();
+	writel_relaxed(einfo->out_irq_mask, einfo->out_irq_reg);
+	/* statistics only; exported via debugfs */
+	einfo->tx_irq_count++;
+}
+
+/**
+ * memcpy32_toio() - memcpy to word access only memory
+ * @dest:	Destination address.  Must be 32-bit aligned.
+ * @src:	Source address.  Must be 32-bit aligned.
+ * @num_bytes:	Number of bytes to copy.  Must be a multiple of 4.
+ *
+ * Return: Destination address.
+ */
+static void *memcpy32_toio(void *dest, const void *src, size_t num_bytes)
+{
+	uint32_t *dest_local = (uint32_t *)dest;
+	/* keep the const qualifier; the source is never written */
+	const uint32_t *src_local = (const uint32_t *)src;
+
+	BUG_ON(num_bytes & RPM_FIFO_ADDR_ALIGN_BYTES);
+	BUG_ON(!dest_local ||
+			((uintptr_t)dest_local & RPM_FIFO_ADDR_ALIGN_BYTES));
+	BUG_ON(!src_local ||
+			((uintptr_t)src_local & RPM_FIFO_ADDR_ALIGN_BYTES));
+	num_bytes /= sizeof(uint32_t);
+
+	/* word-at-a-time: the RPM message RAM faults on byte accesses */
+	while (num_bytes--)
+		__raw_writel(*src_local++, dest_local++);
+
+	return dest;
+}
+
+/**
+ * memcpy32_fromio() - memcpy from word access only memory
+ * @dest:	Destination address.  Must be 32-bit aligned.
+ * @src:	Source address.  Must be 32-bit aligned.
+ * @num_bytes:	Number of bytes to copy.  Must be a multiple of 4.
+ *
+ * Return: Destination address.
+ */
+static void *memcpy32_fromio(void *dest, const void *src, size_t num_bytes)
+{
+	uint32_t *dest_local = (uint32_t *)dest;
+	/* keep the const qualifier; the source is never written */
+	const uint32_t *src_local = (const uint32_t *)src;
+
+	BUG_ON(num_bytes & RPM_FIFO_ADDR_ALIGN_BYTES);
+	BUG_ON(!dest_local ||
+			((uintptr_t)dest_local & RPM_FIFO_ADDR_ALIGN_BYTES));
+	BUG_ON(!src_local ||
+			((uintptr_t)src_local & RPM_FIFO_ADDR_ALIGN_BYTES));
+	num_bytes /= sizeof(uint32_t);
+
+	/* word-at-a-time: the RPM message RAM faults on byte accesses */
+	while (num_bytes--)
+		*dest_local++ = __raw_readl(src_local++);
+
+	return dest;
+}
+
+/**
+ * fifo_read_avail() - how many bytes are available to be read from an edge
+ * @einfo:	The concerned edge to query.
+ *
+ * Return: The number of bytes available to be read from edge.
+ */
+static uint32_t fifo_read_avail(struct edge_info *einfo)
+{
+	uint32_t rd = einfo->rx_ch_desc->read_index;
+	uint32_t wr = einfo->rx_ch_desc->write_index;
+
+	/*
+	 * When the writer has wrapped past the end of the fifo (wr < rd), the
+	 * unsigned subtraction wr - rd goes "negative"; since unsigned
+	 * arithmetic is 2's complement, adding the fifo size back yields the
+	 * true byte count.
+	 *
+	 * Example ('-' = data, '.' = empty, fifo_size = 16):
+	 *	|-----w.....r----|
+	 *	wr = 5, rd = 11
+	 *	(wr - rd) + fifo_size = -6 + 16 = 10 bytes available
+	 */
+	if (wr < rd)
+		return einfo->rx_fifo_size + wr - rd;
+
+	return wr - rd;
+}
+
+/**
+ * fifo_write_avail() - how many bytes can be written to the edge
+ * @einfo:	The concerned edge to query.
+ *
+ * Calculates the number of bytes that can be transmitted at this time.
+ * Automatically reserves some space to maintain alignment when the fifo is
+ * completely full, and reserves space so that the flush command can always be
+ * transmitted when needed.
+ *
+ * Return: The number of bytes available to be written to the edge.
+ */
+static uint32_t fifo_write_avail(struct edge_info *einfo)
+{
+	const uint32_t reserve = FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE;
+	uint32_t rd = einfo->tx_ch_desc->read_index;
+	uint32_t wr = einfo->tx_ch_desc->write_index;
+	uint32_t avail;
+
+	/* rd == wr means empty, so the wrapped case includes equality */
+	avail = (rd <= wr) ? einfo->tx_fifo_size + rd - wr : rd - wr;
+
+	return (avail < reserve) ? 0 : avail - reserve;
+}
+
+/**
+ * fifo_read() - read data from an edge
+ * @einfo:	The concerned edge to read from.
+ * @_data:	Buffer to copy the read data into.
+ * @len:	The amount of data to read in bytes.
+ *
+ * Return: The number of bytes read.
+ */
+static int fifo_read(struct edge_info *einfo, void *_data, int len)
+{
+	void *dst = _data;
+	int remaining = len;
+	uint32_t rd = einfo->rx_ch_desc->read_index;
+	uint32_t wr = einfo->rx_ch_desc->write_index;
+	uint32_t size = einfo->rx_fifo_size;
+
+	while (remaining) {
+		uint32_t chunk;
+
+		/* largest contiguous run: up to the writer, or the fifo end */
+		chunk = (rd <= wr) ? wr - rd : size - rd;
+		if (chunk == 0)
+			break;
+		if (chunk > remaining)
+			chunk = remaining;
+
+		einfo->read_from_fifo(dst, einfo->rx_fifo + rd, chunk);
+
+		dst += chunk;
+		remaining -= chunk;
+		rd += chunk;
+		if (rd >= size)
+			rd -= size;
+	}
+	/* publish the new read position so the remote can reuse the space */
+	einfo->rx_ch_desc->read_index = rd;
+
+	return len - remaining;
+}
+
+/**
+ * fifo_write_body() - Copy transmit data into an edge
+ * @einfo:	The concerned edge to copy into.
+ * @_data:	Buffer of data to copy from.
+ * @len:	Size of data to copy in bytes.
+ * @write_index:	Index into the channel where the data should be copied.
+ *
+ * Return: Number of bytes remaining to be copied into the edge.
+ */
+static uint32_t fifo_write_body(struct edge_info *einfo, const void *_data,
+				int len, uint32_t *write_index)
+{
+	void *ptr;
+	const void *data = _data;
+	uint32_t read_index = einfo->tx_ch_desc->read_index;
+	uint32_t fifo_size = einfo->tx_fifo_size;
+	uint32_t n;
+
+	while (len) {
+		ptr = einfo->tx_fifo + *write_index;
+		/*
+		 * Compute the largest contiguous run we may write.  The
+		 * FIFO_FULL_RESERVE gap is kept between the write and read
+		 * indices so that a completely full fifo is distinguishable
+		 * from an empty one (write_index never catches read_index).
+		 */
+		if (*write_index < read_index) {
+			n = read_index - *write_index - FIFO_FULL_RESERVE;
+		} else {
+			if (read_index < FIFO_FULL_RESERVE)
+				n = fifo_size + read_index - *write_index -
+							FIFO_FULL_RESERVE;
+			else
+				n = fifo_size - *write_index;
+		}
+
+		if (n == 0)
+			break;
+		if (n > len)
+			n = len;
+
+		einfo->write_to_fifo(ptr, data, n);
+
+		data += n;
+		len -= n;
+		*write_index += n;
+		/* wrap back to the start of the fifo if we hit the end */
+		if (*write_index >= fifo_size)
+			*write_index -= fifo_size;
+	}
+	return len;
+}
+
+/**
+ * fifo_write() - Write data into an edge
+ * @einfo:	The concerned edge to write to.
+ * @data:	Buffer of data to write.
+ * @len:	Length of data to write, in bytes.
+ *
+ * Wrapper around fifo_write_body() to manage additional details that are
+ * necessary for a complete write event.  Does not manage concurrency.
+ * Clients should use fifo_write_avail() to check if there is sufficient space
+ * before calling fifo_write().
+ *
+ * Return: Number of bytes written to the edge.
+ */
+static int fifo_write(struct edge_info *einfo, const void *data, int len)
+{
+	uint32_t widx = einfo->tx_ch_desc->write_index;
+	int leftover;
+
+	leftover = fifo_write_body(einfo, data, len, &widx);
+	/* publish the new write position, then poke the remote side */
+	einfo->tx_ch_desc->write_index = widx;
+	send_irq(einfo);
+
+	return len - leftover;
+}
+
+/**
+ * fifo_write_complex() - writes a transaction of multiple buffers to an edge
+ * @einfo:	The concerned edge to write to.
+ * @data1:	The first buffer of data to write.
+ * @len1:	The length of the first buffer in bytes.
+ * @data2:	The second buffer of data to write.
+ * @len2:	The length of the second buffer in bytes.
+ * @data3:	The third buffer of data to write.
+ * @len3:	The length of the third buffer in bytes.
+ *
+ * A variant of fifo_write() which optimizes the usecase found in tx().  The
+ * remote side expects all or none of the transmitted data to be available,
+ * so the three segments are committed with a single write_index update and a
+ * single irq, without staging them through an intermediate allocation.
+ *
+ * Return: Number of bytes written to the edge.
+ */
+static int fifo_write_complex(struct edge_info *einfo,
+			      const void *data1, int len1,
+			      const void *data2, int len2,
+			      const void *data3, int len3)
+{
+	int total = len1 + len2 + len3;
+	uint32_t widx = einfo->tx_ch_desc->write_index;
+	int leftover;
+
+	leftover = fifo_write_body(einfo, data1, len1, &widx);
+	leftover += fifo_write_body(einfo, data2, len2, &widx);
+	leftover += fifo_write_body(einfo, data3, len3, &widx);
+
+	einfo->tx_ch_desc->write_index = widx;
+	send_irq(einfo);
+
+	return total - leftover;
+}
+
+/**
+ * send_tx_blocked_signal() - send the flush command as we are blocked from tx
+ * @einfo:	The concerned edge which is blocked.
+ *
+ * Used to send a signal to the remote side that we have no more space to
+ * transmit data and therefore need the remote side to signal us when they
+ * have cleared some space by reading some data.  This function relies upon
+ * the assumption that fifo_write_avail() will reserve some space so that the
+ * flush signal command can always be put into the transmit fifo, even when
+ * "everyone" else thinks that the transmit fifo is truly full.  This function
+ * assumes that it is called with the write_lock already locked.
+ */
+static void send_tx_blocked_signal(struct edge_info *einfo)
+{
+	struct read_notif_request {
+		uint16_t cmd;
+		uint16_t reserved;
+		uint32_t reserved2;
+	};
+	struct read_notif_request req = {
+		.cmd = READ_NOTIF_CMD,
+		.reserved = 0,
+		.reserved2 = 0,
+	};
+
+	/* only one outstanding flush request at a time */
+	if (einfo->tx_blocked_signal_sent)
+		return;
+
+	einfo->tx_blocked_signal_sent = true;
+	fifo_write(einfo, &req, sizeof(req));
+}
+
+/**
+ * fifo_tx() - transmit data on an edge
+ * @einfo:	The concerned edge to transmit on.
+ * @data:	Buffer of data to transmit.
+ * @len:	Length of data to transmit in bytes.
+ *
+ * This helper function is the preferred interface to fifo_write() and should
+ * be used in the normal case for transmitting entities.  fifo_tx() will block
+ * until there is sufficient room to transmit the requested amount of data.
+ * fifo_tx() will manage any concurrency between multiple transmitters on a
+ * channel.
+ *
+ * Return: Number of bytes transmitted.
+ */
+static int fifo_tx(struct edge_info *einfo, const void *data, int len)
+{
+	unsigned long flags;
+	int ret;
+
+	DEFINE_WAIT(wait);
+
+	spin_lock_irqsave(&einfo->write_lock, flags);
+	while (fifo_write_avail(einfo) < len) {
+		/* ask the remote to interrupt us once it has drained data */
+		send_tx_blocked_signal(einfo);
+		spin_unlock_irqrestore(&einfo->write_lock, flags);
+		/*
+		 * Classic prepare_to_wait/recheck/schedule pattern: the
+		 * condition is re-tested after queuing on tx_blocked_queue so
+		 * a wakeup between the check and schedule() is not lost.
+		 */
+		prepare_to_wait(&einfo->tx_blocked_queue, &wait,
+				TASK_UNINTERRUPTIBLE);
+		if (fifo_write_avail(einfo) < len)
+			schedule();
+		finish_wait(&einfo->tx_blocked_queue, &wait);
+		spin_lock_irqsave(&einfo->write_lock, flags);
+	}
+	ret = fifo_write(einfo, data, len);
+	spin_unlock_irqrestore(&einfo->write_lock, flags);
+
+	return ret;
+}
+
+/**
+ * process_rx_data() - process received data from an edge
+ * @einfo:	The edge the data was received on.
+ * @cmd_id:	ID to specify the type of data.
+ * @rcid:	The remote channel id associated with the data.
+ * @intent_id:	The intent the data should be put in.
+ */
+static void process_rx_data(struct edge_info *einfo, uint16_t cmd_id,
+			    uint32_t rcid, uint32_t intent_id)
+{
+	struct command {
+		uint32_t frag_size;
+		uint32_t size_remaining;
+	};
+	struct command cmd;
+	struct glink_core_rx_intent *intent;
+	char trash[FIFO_ALIGNMENT];
+	int alignment;
+	bool err = false;
+
+	/* the data command header follows the generic command in the fifo */
+	fifo_read(einfo, &cmd, sizeof(cmd));
+
+	intent = einfo->xprt_if.glink_core_if_ptr->rx_get_pkt_ctx(
+				&einfo->xprt_if, rcid, intent_id);
+	if (intent == NULL) {
+		GLINK_ERR("%s: no intent for ch %d liid %d\n", __func__, rcid,
+				intent_id);
+		err = true;
+	} else if (intent->data == NULL) {
+		if (einfo->intentless) {
+			/* intentless mode: allocate the rx buffer on demand */
+			intent->data = kmalloc(cmd.frag_size, GFP_ATOMIC);
+			if (!intent->data)
+				err = true;
+			else
+				intent->intent_size = cmd.frag_size;
+		} else {
+			GLINK_ERR(
+				"%s: intent for ch %d liid %d has no data buff\n",
+				__func__, rcid, intent_id);
+			err = true;
+		}
+	}
+
+	/* reject fragments that would overflow the intent buffer */
+	if (!err &&
+	    (intent->intent_size - intent->write_offset < cmd.frag_size ||
+	    intent->write_offset + cmd.size_remaining > intent->intent_size)) {
+		GLINK_ERR("%s: rx data size:%d and remaining:%d %s %d %s:%d\n",
+				__func__,
+				cmd.frag_size,
+				cmd.size_remaining,
+				"will overflow ch",
+				rcid,
+				"intent",
+				intent_id);
+		err = true;
+	}
+
+	if (err) {
+		/*
+		 * The fragment is already in the fifo and must be drained to
+		 * keep the stream in sync; consume it (plus its alignment
+		 * padding) into the trash buffer and drop it.
+		 */
+		alignment = ALIGN(cmd.frag_size, FIFO_ALIGNMENT);
+		alignment -= cmd.frag_size;
+		while (cmd.frag_size) {
+			if (cmd.frag_size > FIFO_ALIGNMENT) {
+				fifo_read(einfo, trash, FIFO_ALIGNMENT);
+				cmd.frag_size -= FIFO_ALIGNMENT;
+			} else {
+				fifo_read(einfo, trash, cmd.frag_size);
+				cmd.frag_size = 0;
+			}
+		}
+		if (alignment)
+			fifo_read(einfo, trash, alignment);
+		return;
+	}
+	fifo_read(einfo, intent->data + intent->write_offset, cmd.frag_size);
+	intent->write_offset += cmd.frag_size;
+	intent->pkt_size += cmd.frag_size;
+
+	/* fragments are padded in the fifo; discard the padding bytes */
+	alignment = ALIGN(cmd.frag_size, FIFO_ALIGNMENT);
+	alignment -= cmd.frag_size;
+	if (alignment)
+		fifo_read(einfo, trash, alignment);
+
+	/* log tracer packets once the final fragment has arrived */
+	if (unlikely((cmd_id == TRACER_PKT_CMD ||
+		      cmd_id == TRACER_PKT_CONT_CMD) && !cmd.size_remaining)) {
+		tracer_pkt_log_event(intent->data, GLINK_XPRT_RX);
+		intent->tracer_pkt = true;
+	}
+
+	einfo->xprt_if.glink_core_if_ptr->rx_put_pkt_ctx(&einfo->xprt_if,
+							rcid,
+							intent,
+							cmd.size_remaining ?
+								false : true);
+}
+
+/**
+ * queue_cmd() - queue a deferred command for later processing
+ * @einfo:	Edge to queue commands on.
+ * @cmd:	Command to queue.
+ * @data:	Command specific data to queue with the command.
+ *
+ * Return: True if queuing was successful, false otherwise.
+ */
+static bool queue_cmd(struct edge_info *einfo, void *cmd, void *data)
+{
+	struct command {
+		uint16_t id;
+		uint16_t param1;
+		uint32_t param2;
+	};
+	struct command *src = cmd;
+	struct deferred_cmd *dc;
+
+	dc = kmalloc(sizeof(*dc), GFP_ATOMIC);
+	if (!dc) {
+		GLINK_ERR("%s: Discarding cmd %d\n", __func__, src->id);
+		return false;
+	}
+
+	dc->id = src->id;
+	dc->param1 = src->param1;
+	dc->param2 = src->param2;
+	dc->data = data;
+
+	/* hand the copy to the worker thread for process-context handling */
+	list_add_tail(&dc->list_node, &einfo->deferred_cmds);
+	queue_kthread_work(&einfo->kworker, &einfo->kwork);
+	return true;
+}
+
+/**
+ * get_rx_fifo() - Find the rx fifo for an edge
+ * @einfo:	Edge to find the fifo for.
+ *
+ * Currently a stub that always reports success; @einfo is unused.
+ * NOTE(review): presumably the SMEM fifo discovery is performed elsewhere
+ * (e.g. at probe time) — confirm callers do not rely on this populating
+ * einfo->rx_fifo.
+ *
+ * Return: True if fifo was found, false otherwise.
+ */
+static bool get_rx_fifo(struct edge_info *einfo)
+{
+	return true;
+}
+
+/**
+ * __rx_worker() - process received commands on a specific edge
+ * @einfo:	Edge to process commands on.
+ * @atomic_ctx:	Indicates if the caller is in atomic context and requires any
+ *		non-atomic operations to be deferred.
+ *
+ * Drains the rx fifo (and, in process context, the deferred-command list),
+ * dispatching each command to the glink core.  Core callbacks that may sleep
+ * are either invoked with rx_lock temporarily dropped, or deferred via
+ * queue_cmd() when running in atomic context.
+ */
+static void __rx_worker(struct edge_info *einfo, bool atomic_ctx)
+{
+	struct command {
+		uint16_t id;
+		uint16_t param1;
+		uint32_t param2;
+	};
+	struct intent_desc {
+		uint32_t size;
+		uint32_t id;
+	};
+	struct command cmd;
+	struct intent_desc intent;
+	struct intent_desc *intents;
+	int i;
+	bool granted;
+	unsigned long flags;
+	bool trigger_wakeup = false;
+	int rcu_id;
+	uint16_t rcid;
+	uint32_t name_len;
+	uint32_t len;
+	char *name;
+	char trash[FIFO_ALIGNMENT];
+	struct deferred_cmd *d_cmd;
+	void *cmd_data;
+
+	/* hold off ssr teardown of this edge while we are using it */
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+
+	if (unlikely(!einfo->rx_fifo)) {
+		if (!get_rx_fifo(einfo)) {
+			srcu_read_unlock(&einfo->use_ref, rcu_id);
+			return;
+		}
+		einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
+	}
+
+	if (!atomic_ctx) {
+		/* tx space freed up: resume the core and wake blocked senders */
+		if (einfo->tx_resume_needed && fifo_write_avail(einfo)) {
+			einfo->tx_resume_needed = false;
+			einfo->xprt_if.glink_core_if_ptr->tx_resume(
+							&einfo->xprt_if);
+		}
+		spin_lock_irqsave(&einfo->write_lock, flags);
+		if (waitqueue_active(&einfo->tx_blocked_queue)) {
+			einfo->tx_blocked_signal_sent = false;
+			trigger_wakeup = true;
+		}
+		spin_unlock_irqrestore(&einfo->write_lock, flags);
+		if (trigger_wakeup)
+			wake_up_all(&einfo->tx_blocked_queue);
+	}
+
+
+	/*
+	 * Access to the fifo needs to be synchronized, however only the calls
+	 * into the core from process_rx_data() are compatible with an atomic
+	 * processing context.  For everything else, we need to do all the fifo
+	 * processing, then unlock the lock for the call into the core.  Data
+	 * in the fifo is allowed to be processed immediately instead of being
+	 * ordered with the commands because the channel open process prevents
+	 * intents from being queued (which prevents data from being sent)
+	 * until all the channel open commands are processed by the core, thus
+	 * eliminating a race.
+	 */
+	spin_lock_irqsave(&einfo->rx_lock, flags);
+	while (fifo_read_avail(einfo) ||
+			(!atomic_ctx && !list_empty(&einfo->deferred_cmds))) {
+
+		/* commands deferred from atomic context are processed first */
+		if (!atomic_ctx && !list_empty(&einfo->deferred_cmds)) {
+			d_cmd = list_first_entry(&einfo->deferred_cmds,
+						struct deferred_cmd, list_node);
+			list_del(&d_cmd->list_node);
+			cmd.id = d_cmd->id;
+			cmd.param1 = d_cmd->param1;
+			cmd.param2 = d_cmd->param2;
+			cmd_data = d_cmd->data;
+			kfree(d_cmd);
+		} else {
+			fifo_read(einfo, &cmd, sizeof(cmd));
+			cmd_data = NULL;
+		}
+
+		switch (cmd.id) {
+		case VERSION_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_version(
+								&einfo->xprt_if,
+								cmd.param1,
+								cmd.param2);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case VERSION_ACK_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_version_ack(
+								&einfo->xprt_if,
+								cmd.param1,
+								cmd.param2);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case OPEN_CMD:
+			rcid = cmd.param1;
+			name_len = cmd.param2;
+
+			/*
+			 * The channel name follows the command in the fifo.
+			 * If deferred, the buffer was already read and travels
+			 * in cmd_data; ownership ends with the kfree below or
+			 * passes to queue_cmd() on the atomic path.
+			 */
+			if (cmd_data) {
+				name = cmd_data;
+			} else {
+				len = ALIGN(name_len, FIFO_ALIGNMENT);
+				name = kmalloc(len, GFP_ATOMIC);
+				if (!name) {
+					pr_err("No memory available to rx ch open cmd name. Discarding cmd.\n");
+					while (len) {
+						fifo_read(einfo, trash,
+								FIFO_ALIGNMENT);
+						len -= FIFO_ALIGNMENT;
+					}
+					break;
+				}
+				fifo_read(einfo, name, len);
+			}
+			if (atomic_ctx) {
+				if (!queue_cmd(einfo, &cmd, name))
+					kfree(name);
+				break;
+			}
+
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_remote_open(
+								&einfo->xprt_if,
+								rcid,
+								name,
+								SMEM_XPRT_ID);
+			kfree(name);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case CLOSE_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->
+						rx_cmd_ch_remote_close(
+								&einfo->xprt_if,
+								cmd.param1);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case OPEN_ACK_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_open_ack(
+								&einfo->xprt_if,
+								cmd.param1,
+								SMEM_XPRT_ID);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case RX_INTENT_CMD:
+			/*
+			 * One intent listed with this command.  This is the
+			 * expected case and can be optimized over the general
+			 * case of an array of intents.
+			 */
+			if (cmd.param2 == 1) {
+				if (cmd_data) {
+					intent.id = ((struct intent_desc *)
+							cmd_data)->id;
+					intent.size = ((struct intent_desc *)
+							cmd_data)->size;
+					kfree(cmd_data);
+				} else {
+					fifo_read(einfo, &intent,
+								sizeof(intent));
+				}
+				if (atomic_ctx) {
+					cmd_data = kmalloc(sizeof(intent),
+								GFP_ATOMIC);
+					if (!cmd_data) {
+						pr_err("%s: dropping cmd %d\n",
+								__func__,
+								cmd.id);
+						break;
+					}
+					((struct intent_desc *)cmd_data)->id =
+								intent.id;
+					((struct intent_desc *)cmd_data)->size =
+								intent.size;
+					if (!queue_cmd(einfo, &cmd, cmd_data))
+						kfree(cmd_data);
+					break;
+				}
+				spin_unlock_irqrestore(&einfo->rx_lock, flags);
+				einfo->xprt_if.glink_core_if_ptr->
+						rx_cmd_remote_rx_intent_put(
+								&einfo->xprt_if,
+								cmd.param1,
+								intent.id,
+								intent.size);
+				spin_lock_irqsave(&einfo->rx_lock, flags);
+				break;
+			}
+
+			/* Array of intents to process */
+			if (cmd_data) {
+				intents = cmd_data;
+			} else {
+				intents = kmalloc(sizeof(*intents) * cmd.param2,
+								GFP_ATOMIC);
+				if (!intents) {
+					/* drain the fifo to stay in sync */
+					for (i = 0; i < cmd.param2; ++i)
+						fifo_read(einfo, &intent,
+								sizeof(intent));
+					break;
+				}
+				fifo_read(einfo, intents,
+						sizeof(*intents) * cmd.param2);
+			}
+			if (atomic_ctx) {
+				if (!queue_cmd(einfo, &cmd, intents))
+					kfree(intents);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			for (i = 0; i < cmd.param2; ++i) {
+				einfo->xprt_if.glink_core_if_ptr->
+						rx_cmd_remote_rx_intent_put(
+								&einfo->xprt_if,
+								cmd.param1,
+								intents[i].id,
+								intents[i].size);
+			}
+			kfree(intents);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case RX_DONE_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done(
+								&einfo->xprt_if,
+								cmd.param1,
+								cmd.param2,
+								false);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case RX_INTENT_REQ_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->
+						rx_cmd_remote_rx_intent_req(
+								&einfo->xprt_if,
+								cmd.param1,
+								cmd.param2);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case RX_INTENT_REQ_ACK_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			granted = false;
+			if (cmd.param2 == 1)
+				granted = true;
+			einfo->xprt_if.glink_core_if_ptr->
+						rx_cmd_rx_intent_req_ack(
+								&einfo->xprt_if,
+								cmd.param1,
+								granted);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case TX_DATA_CMD:
+		case TX_DATA_CONT_CMD:
+		case TRACER_PKT_CMD:
+		case TRACER_PKT_CONT_CMD:
+			/* data is safe to process under rx_lock in any context */
+			process_rx_data(einfo, cmd.id, cmd.param1, cmd.param2);
+			break;
+		case CLOSE_ACK_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_close_ack(
+								&einfo->xprt_if,
+								cmd.param1);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case READ_NOTIF_CMD:
+			/* remote asked to be interrupted once we've read */
+			send_irq(einfo);
+			break;
+		case SIGNALS_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_remote_sigs(
+								&einfo->xprt_if,
+								cmd.param1,
+								cmd.param2);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case RX_DONE_W_REUSE_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done(
+								&einfo->xprt_if,
+								cmd.param1,
+								cmd.param2,
+								true);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		default:
+			pr_err("Unrecognized command: %d\n", cmd.id);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&einfo->rx_lock, flags);
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * rx_worker() - worker function to process received commands
+ * @work:	kwork associated with the edge to process commands on.
+ */
+static void rx_worker(struct kthread_work *work)
+{
+	struct edge_info *einfo = container_of(work, struct edge_info, kwork);
+
+	/* process context: deferred commands may be handled here */
+	__rx_worker(einfo, false);
+}
+
+/**
+ * irq_handler() - handler for the incoming interrupt from the remote side
+ * @irq:	The interrupt line that fired.
+ * @priv:	The edge_info registered with this interrupt.
+ *
+ * Acknowledges the interrupt (when the edge has an rx reset register) and
+ * defers all command processing to the edge's kthread worker.
+ *
+ * Return: IRQ_HANDLED always.
+ */
+static irqreturn_t irq_handler(int irq, void *priv)
+{
+	struct edge_info *einfo = (struct edge_info *)priv;
+
+	if (einfo->rx_reset_reg)
+		writel_relaxed(einfo->out_irq_mask, einfo->rx_reset_reg);
+
+	queue_kthread_work(&einfo->kworker, &einfo->kwork);
+	/* statistics only; exported via debugfs */
+	einfo->rx_irq_count++;
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * tx_cmd_version() - convert a version cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @version:	The version number to encode.
+ * @features:	The features information to encode.
+ */
+static void tx_cmd_version(struct glink_transport_if *if_ptr, uint32_t version,
+			   uint32_t features)
+{
+	struct command {
+		uint16_t id;
+		uint16_t version;
+		uint32_t features;
+	};
+	struct edge_info *einfo =
+			container_of(if_ptr, struct edge_info, xprt_if);
+	struct command cmd;
+	int rcu_id;
+
+	/* hold off ssr teardown while transmitting on this edge */
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+
+	cmd.id = VERSION_CMD;
+	cmd.version = version;
+	cmd.features = features;
+	fifo_tx(einfo, &cmd, sizeof(cmd));
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * tx_cmd_version_ack() - convert a version ack cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @version: The version number to encode.
+ * @features: The features information to encode.
+ */
+static void tx_cmd_version_ack(struct glink_transport_if *if_ptr,
+ uint32_t version,
+ uint32_t features)
+{
+ struct command {
+ uint16_t id;
+ uint16_t version;
+ uint32_t features;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+
+ cmd.id = VERSION_ACK_CMD;
+ cmd.version = version;
+ cmd.features = features;
+
+ fifo_tx(einfo, &cmd, sizeof(cmd));
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * set_version() - activate a negotiated version and feature set
+ * @if_ptr: The transport to configure.
+ * @version: The version to use.
+ * @features: The features to use.
+ *
+ * Return: The supported capabilities of the transport.
+ */
+static uint32_t set_version(struct glink_transport_if *if_ptr, uint32_t version,
+ uint32_t features)
+{
+ struct edge_info *einfo;
+ uint32_t ret;
+ int rcu_id;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+
+ ret = einfo->intentless ?
+ GCAP_INTENTLESS | GCAP_SIGNALS : GCAP_SIGNALS;
+
+ if (features & TRACER_PKT_FEATURE)
+ ret |= GCAP_TRACER_PKT;
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return ret;
+}
+
+/**
+ * tx_cmd_ch_open() - convert a channel open cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @name: The channel name to encode.
+ * @req_xprt: The transport the core would like to migrate this channel to.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_ch_open(struct glink_transport_if *if_ptr, uint32_t lcid,
+ const char *name, uint16_t req_xprt)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t length;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ uint32_t buf_size;
+ void *buf;
+ int rcu_id;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+
+ cmd.id = OPEN_CMD;
+ cmd.lcid = lcid;
+ cmd.length = strlen(name) + 1;
+
+ buf_size = ALIGN(sizeof(cmd) + cmd.length, FIFO_ALIGNMENT);
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf) {
+ pr_err("%s: malloc fail for %d size buf\n", __func__, buf_size);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -ENOMEM;
+ }
+
+ memcpy(buf, &cmd, sizeof(cmd));
+ memcpy(buf + sizeof(cmd), name, cmd.length);
+
+ fifo_tx(einfo, buf, buf_size);
+
+ kfree(buf);
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * tx_cmd_ch_close() - convert a channel close cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_ch_close(struct glink_transport_if *if_ptr, uint32_t lcid)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t reserved;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+
+ cmd.id = CLOSE_CMD;
+ cmd.lcid = lcid;
+ cmd.reserved = 0;
+
+ fifo_tx(einfo, &cmd, sizeof(cmd));
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * tx_cmd_ch_remote_open_ack() - convert a channel open ack cmd to wire format
+ * and transmit
+ * @if_ptr: The transport to transmit on.
+ * @rcid: The remote channel id to encode.
+ * @xprt_resp: The response to a transport migration request.
+ */
+static void tx_cmd_ch_remote_open_ack(struct glink_transport_if *if_ptr,
+ uint32_t rcid, uint16_t xprt_resp)
+{
+ struct command {
+ uint16_t id;
+ uint16_t rcid;
+ uint32_t reserved;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+
+ cmd.id = OPEN_ACK_CMD;
+ cmd.rcid = rcid;
+ cmd.reserved = 0;
+
+ fifo_tx(einfo, &cmd, sizeof(cmd));
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * tx_cmd_ch_remote_close_ack() - convert a channel close ack cmd to wire format
+ * and transmit
+ * @if_ptr: The transport to transmit on.
+ * @rcid: The remote channel id to encode.
+ */
+static void tx_cmd_ch_remote_close_ack(struct glink_transport_if *if_ptr,
+ uint32_t rcid)
+{
+ struct command {
+ uint16_t id;
+ uint16_t rcid;
+ uint32_t reserved;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+
+ cmd.id = CLOSE_ACK_CMD;
+ cmd.rcid = rcid;
+ cmd.reserved = 0;
+
+ fifo_tx(einfo, &cmd, sizeof(cmd));
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * int wait_link_down() - Check status of read/write indices
+ * @if_ptr: The transport to check
+ *
+ * Return: 1 if indices are all zero, 0 otherwise
+ */
+int wait_link_down(struct glink_transport_if *if_ptr)
+{
+ struct edge_info *einfo;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ if (einfo->tx_ch_desc->write_index == 0 &&
+ einfo->tx_ch_desc->read_index == 0 &&
+ einfo->rx_ch_desc->write_index == 0 &&
+ einfo->rx_ch_desc->read_index == 0)
+ return 1;
+ else
+ return 0;
+}
+
+/**
+ * allocate_rx_intent() - allocate/reserve space for RX Intent
+ * @if_ptr: The transport the intent is associated with.
+ * @size: size of intent.
+ * @intent: Pointer to the intent structure.
+ *
+ * Assign "data" with the buffer created, since the transport creates
+ * a linear buffer and "iovec" with the "intent" itself, so that
+ * the data can be passed to a client that receives only vector buffer.
+ * Note that returning NULL for the pointer is valid (it means that space has
+ * been reserved, but the actual pointer will be provided later).
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int allocate_rx_intent(struct glink_transport_if *if_ptr, size_t size,
+ struct glink_core_rx_intent *intent)
+{
+ void *t;
+
+ t = kmalloc(size, GFP_KERNEL);
+ if (!t)
+ return -ENOMEM;
+
+ intent->data = t;
+ intent->iovec = (void *)intent;
+ intent->vprovider = rx_linear_vbuf_provider;
+ intent->pprovider = NULL;
+ return 0;
+}
+
+/**
+ * deallocate_rx_intent() - Deallocate space created for RX Intent
+ * @if_ptr: The transport the intent is associated with.
+ * @intent: Pointer to the intent structure.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int deallocate_rx_intent(struct glink_transport_if *if_ptr,
+ struct glink_core_rx_intent *intent)
+{
+ if (!intent || !intent->data)
+ return -EINVAL;
+
+ kfree(intent->data);
+ intent->data = NULL;
+ intent->iovec = NULL;
+ intent->vprovider = NULL;
+ return 0;
+}
+
+/**
+ * tx_cmd_local_rx_intent() - convert an rx intent cmd to wire format and
+ * transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @size: The intent size to encode.
+ * @liid: The local intent id to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_local_rx_intent(struct glink_transport_if *if_ptr,
+ uint32_t lcid, size_t size, uint32_t liid)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t count;
+ uint32_t size;
+ uint32_t liid;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ if (size > UINT_MAX) {
+ pr_err("%s: size %zu is too large to encode\n", __func__, size);
+ return -EMSGSIZE;
+ }
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ if (einfo->intentless)
+ return -EOPNOTSUPP;
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+
+ cmd.id = RX_INTENT_CMD;
+ cmd.lcid = lcid;
+ cmd.count = 1;
+ cmd.size = size;
+ cmd.liid = liid;
+
+ fifo_tx(einfo, &cmd, sizeof(cmd));
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * tx_cmd_local_rx_done() - convert an rx done cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @liid: The local intent id to encode.
+ * @reuse: Reuse the consumed intent.
+ */
+static void tx_cmd_local_rx_done(struct glink_transport_if *if_ptr,
+ uint32_t lcid, uint32_t liid, bool reuse)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t liid;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ if (einfo->intentless)
+ return;
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+
+ cmd.id = reuse ? RX_DONE_W_REUSE_CMD : RX_DONE_CMD;
+ cmd.lcid = lcid;
+ cmd.liid = liid;
+
+ fifo_tx(einfo, &cmd, sizeof(cmd));
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * tx_cmd_rx_intent_req() - convert an rx intent request cmd to wire format and
+ * transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @size: The requested intent size to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_rx_intent_req(struct glink_transport_if *if_ptr,
+ uint32_t lcid, size_t size)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t size;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ if (size > UINT_MAX) {
+ pr_err("%s: size %zu is too large to encode\n", __func__, size);
+ return -EMSGSIZE;
+ }
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ if (einfo->intentless)
+ return -EOPNOTSUPP;
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+
+ cmd.id = RX_INTENT_REQ_CMD,
+ cmd.lcid = lcid;
+ cmd.size = size;
+
+ fifo_tx(einfo, &cmd, sizeof(cmd));
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * tx_cmd_rx_intent_req_ack() - convert an rx intent request ack cmd to wire
+ * format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @granted: The request response to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_remote_rx_intent_req_ack(struct glink_transport_if *if_ptr,
+ uint32_t lcid, bool granted)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t response;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ if (einfo->intentless)
+ return -EOPNOTSUPP;
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+
+ cmd.id = RX_INTENT_REQ_ACK_CMD,
+ cmd.lcid = lcid;
+ if (granted)
+ cmd.response = 1;
+ else
+ cmd.response = 0;
+
+ fifo_tx(einfo, &cmd, sizeof(cmd));
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * tx_cmd_set_sigs() - convert a signals ack cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @sigs: The signals to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_set_sigs(struct glink_transport_if *if_ptr, uint32_t lcid,
+ uint32_t sigs)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t sigs;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ int rcu_id;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+
+ cmd.id = SIGNALS_CMD,
+ cmd.lcid = lcid;
+ cmd.sigs = sigs;
+
+ fifo_tx(einfo, &cmd, sizeof(cmd));
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * poll() - poll for data on a channel
+ * @if_ptr: The transport the channel exists on.
+ * @lcid: The local channel id.
+ *
+ * Return: 0 if no data available, 1 if data available.
+ */
+static int poll(struct glink_transport_if *if_ptr, uint32_t lcid)
+{
+ struct edge_info *einfo;
+ int rcu_id;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+
+ if (fifo_read_avail(einfo)) {
+ __rx_worker(einfo, true);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 1;
+ }
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * mask_rx_irq() - mask the receive irq for a channel
+ * @if_ptr: The transport the channel exists on.
+ * @lcid: The local channel id for the channel.
+ * @mask: True to mask the irq, false to unmask.
+ * @pstruct: Platform defined structure for handling the masking.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int mask_rx_irq(struct glink_transport_if *if_ptr, uint32_t lcid,
+ bool mask, void *pstruct)
+{
+ struct edge_info *einfo;
+ struct irq_chip *irq_chip;
+ struct irq_data *irq_data;
+ int rcu_id;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+
+ irq_chip = irq_get_chip(einfo->irq_line);
+ if (!irq_chip) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -ENODEV;
+ }
+
+ irq_data = irq_get_irq_data(einfo->irq_line);
+ if (!irq_data) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -ENODEV;
+ }
+
+ if (mask) {
+ irq_chip->irq_mask(irq_data);
+ einfo->irq_disabled = true;
+ if (pstruct)
+ irq_set_affinity(einfo->irq_line, pstruct);
+ } else {
+ irq_chip->irq_unmask(irq_data);
+ einfo->irq_disabled = false;
+ }
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return 0;
+}
+
+/**
+ * tx_data() - convert a data/tracer_pkt to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @cmd_id: The command ID to transmit.
+ * @lcid: The local channel id to encode.
+ * @pctx: The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx_data(struct glink_transport_if *if_ptr, uint16_t cmd_id,
+ uint32_t lcid, struct glink_core_tx_pkt *pctx)
+{
+ struct command {
+ uint16_t id;
+ uint16_t lcid;
+ uint32_t riid;
+ uint32_t size;
+ uint32_t size_left;
+ };
+ struct command cmd;
+ struct edge_info *einfo;
+ uint32_t size;
+ uint32_t zeros_size;
+ const void *data_start;
+ char zeros[FIFO_ALIGNMENT] = { 0 };
+ unsigned long flags;
+ size_t tx_size = 0;
+ int rcu_id;
+
+ if (pctx->size < pctx->size_remaining) {
+ GLINK_ERR("%s: size remaining exceeds size. Resetting.\n",
+ __func__);
+ pctx->size_remaining = pctx->size;
+ }
+ if (!pctx->size_remaining)
+ return 0;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ rcu_id = srcu_read_lock(&einfo->use_ref);
+
+ if (einfo->intentless &&
+ (pctx->size_remaining != pctx->size || cmd_id == TRACER_PKT_CMD)) {
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EINVAL;
+ }
+
+ if (cmd_id == TX_DATA_CMD) {
+ if (pctx->size_remaining == pctx->size)
+ cmd.id = TX_DATA_CMD;
+ else
+ cmd.id = TX_DATA_CONT_CMD;
+ } else {
+ if (pctx->size_remaining == pctx->size)
+ cmd.id = TRACER_PKT_CMD;
+ else
+ cmd.id = TRACER_PKT_CONT_CMD;
+ }
+ cmd.lcid = lcid;
+ cmd.riid = pctx->riid;
+ data_start = get_tx_vaddr(pctx, pctx->size - pctx->size_remaining,
+ &tx_size);
+ if (!data_start) {
+ GLINK_ERR("%s: invalid data_start\n", __func__);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&einfo->write_lock, flags);
+ size = fifo_write_avail(einfo);
+
+ /* Intentless clients expect a complete commit or instant failure */
+ if (einfo->intentless && size < sizeof(cmd) + pctx->size) {
+ spin_unlock_irqrestore(&einfo->write_lock, flags);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -ENOSPC;
+ }
+
+ /* Need enough space to write the command and some data */
+ if (size <= sizeof(cmd)) {
+ einfo->tx_resume_needed = true;
+ spin_unlock_irqrestore(&einfo->write_lock, flags);
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return -EAGAIN;
+ }
+ size -= sizeof(cmd);
+ if (size > tx_size)
+ size = tx_size;
+
+ cmd.size = size;
+ pctx->size_remaining -= size;
+ cmd.size_left = pctx->size_remaining;
+ zeros_size = ALIGN(size, FIFO_ALIGNMENT) - cmd.size;
+ if (cmd.id == TRACER_PKT_CMD)
+ tracer_pkt_log_event((void *)(pctx->data), GLINK_XPRT_TX);
+
+ fifo_write_complex(einfo, &cmd, sizeof(cmd), data_start, size, zeros,
+ zeros_size);
+ GLINK_DBG("%s %s: lcid[%u] riid[%u] cmd[%d], size[%d], size_left[%d]\n",
+ "<SMEM>", __func__, cmd.lcid, cmd.riid, cmd.id, cmd.size,
+ cmd.size_left);
+ spin_unlock_irqrestore(&einfo->write_lock, flags);
+
+ /* Fake tx_done for intentless since its not supported over the wire */
+ if (einfo->intentless) {
+ spin_lock_irqsave(&einfo->rx_lock, flags);
+ cmd.id = RX_DONE_CMD;
+ cmd.lcid = pctx->rcid;
+ queue_cmd(einfo, &cmd, NULL);
+ spin_unlock_irqrestore(&einfo->rx_lock, flags);
+ }
+
+ srcu_read_unlock(&einfo->use_ref, rcu_id);
+ return cmd.size;
+}
+
+/**
+ * tx() - convert a data transmit cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @pctx: The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx(struct glink_transport_if *if_ptr, uint32_t lcid,
+ struct glink_core_tx_pkt *pctx)
+{
+ return tx_data(if_ptr, TX_DATA_CMD, lcid, pctx);
+}
+
+/**
+ * tx_cmd_tracer_pkt() - convert a tracer packet cmd to wire format and transmit
+ * @if_ptr: The transport to transmit on.
+ * @lcid: The local channel id to encode.
+ * @pctx: The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx_cmd_tracer_pkt(struct glink_transport_if *if_ptr, uint32_t lcid,
+ struct glink_core_tx_pkt *pctx)
+{
+ return tx_data(if_ptr, TRACER_PKT_CMD, lcid, pctx);
+}
+
+/**
+ * get_power_vote_ramp_time() - Get the ramp time required for the power
+ * votes to be applied
+ * @if_ptr: The transport interface on which power voting is requested.
+ * @state: The power state for which ramp time is required.
+ *
+ * Return: The ramp time specific to the power state, standard error otherwise.
+ */
+static unsigned long get_power_vote_ramp_time(
+ struct glink_transport_if *if_ptr,
+ uint32_t state)
+{
+ struct edge_info *einfo;
+
+ einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+ if (state >= einfo->num_pw_states || !(einfo->ramp_time_us))
+ return (unsigned long)ERR_PTR(-EINVAL);
+
+ return einfo->ramp_time_us[state];
+}
+
+/**
+ * power_vote() - Update the power votes to meet qos requirement
+ * @if_ptr: The transport interface on which power voting is requested.
+ * @state: The power state for which the voting should be done.
+ *
+ * Return: 0 on Success, standard error otherwise.
+ */
+static int power_vote(struct glink_transport_if *if_ptr, uint32_t state)
+{
+ return 0;
+}
+
+/**
+ * power_unvote() - Remove the all the power votes
+ * @if_ptr: The transport interface on which power voting is requested.
+ *
+ * Return: 0 on Success, standard error otherwise.
+ */
+static int power_unvote(struct glink_transport_if *if_ptr)
+{
+ return 0;
+}
+
+/**
+ * negotiate_features_v1() - determine what features of a version can be used
+ * @if_ptr: The transport for which features are negotiated for.
+ * @version: The version negotiated.
+ * @features: The set of requested features.
+ *
+ * Return: What set of the requested features can be supported.
+ */
+static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
+ const struct glink_core_version *version,
+ uint32_t features)
+{
+ return features & version->features;
+}
+
+/**
+ * init_xprt_if() - initialize the xprt_if for an edge
+ * @einfo: The edge to initialize.
+ */
+static void init_xprt_if(struct edge_info *einfo)
+{
+ einfo->xprt_if.tx_cmd_version = tx_cmd_version;
+ einfo->xprt_if.tx_cmd_version_ack = tx_cmd_version_ack;
+ einfo->xprt_if.set_version = set_version;
+ einfo->xprt_if.tx_cmd_ch_open = tx_cmd_ch_open;
+ einfo->xprt_if.tx_cmd_ch_close = tx_cmd_ch_close;
+ einfo->xprt_if.tx_cmd_ch_remote_open_ack = tx_cmd_ch_remote_open_ack;
+ einfo->xprt_if.tx_cmd_ch_remote_close_ack = tx_cmd_ch_remote_close_ack;
+ einfo->xprt_if.allocate_rx_intent = allocate_rx_intent;
+ einfo->xprt_if.deallocate_rx_intent = deallocate_rx_intent;
+ einfo->xprt_if.tx_cmd_local_rx_intent = tx_cmd_local_rx_intent;
+ einfo->xprt_if.tx_cmd_local_rx_done = tx_cmd_local_rx_done;
+ einfo->xprt_if.tx = tx;
+ einfo->xprt_if.tx_cmd_rx_intent_req = tx_cmd_rx_intent_req;
+ einfo->xprt_if.tx_cmd_remote_rx_intent_req_ack =
+ tx_cmd_remote_rx_intent_req_ack;
+ einfo->xprt_if.tx_cmd_set_sigs = tx_cmd_set_sigs;
+ einfo->xprt_if.poll = poll;
+ einfo->xprt_if.mask_rx_irq = mask_rx_irq;
+ einfo->xprt_if.wait_link_down = wait_link_down;
+ einfo->xprt_if.tx_cmd_tracer_pkt = tx_cmd_tracer_pkt;
+ einfo->xprt_if.get_power_vote_ramp_time = get_power_vote_ramp_time;
+ einfo->xprt_if.power_vote = power_vote;
+ einfo->xprt_if.power_unvote = power_unvote;
+}
+
/* Convert a generic struct device back to its enclosing qcom_smd_device. */
static struct qcom_smd_device *to_ipc_device(struct device *dev)
{
	return container_of(dev, struct qcom_smd_device, dev);
}
+
/* Fetch the qcom_smd_driver bound to the device's driver. */
static struct qcom_smd_driver *to_ipc_driver(struct device *dev)
{
	struct qcom_smd_device *qidev = to_ipc_device(dev);

	return container_of(qidev->dev.driver, struct qcom_smd_driver, driver);
}
+
/* Bus match callback: match devices to drivers via OF compatible strings. */
static int qcom_ipc_dev_match(struct device *dev, struct device_driver *drv)
{
	return of_driver_match_device(dev, drv);
}
+
/* G-Link tx-done notification: nothing to do for this client. */
static void msm_rpm_trans_notify_tx_done(void *handle, const void *priv,
					 const void *pkt_priv, const void *ptr)
{
}
+
+static void msm_rpm_trans_notify_state(void *handle, const void *priv,
+ unsigned event)
+{
+ switch (event) {
+ case GLINK_CONNECTED:
+ if (IS_ERR_OR_NULL(handle)) {
+ pr_err("glink_handle %d\n",
+ (int)PTR_ERR(handle));
+ BUG_ON(1);
+ }
+ complete(&glink_ack);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Probe the ipc client.
+ */
+static int qcom_ipc_dev_probe(struct device *dev)
+{
+ struct qcom_smd_device *qidev = to_ipc_device(dev);
+ struct qcom_smd_driver *qidrv = to_ipc_driver(dev);
+ struct glink_open_config *open_config;
+ const char *channel_name, *key;
+ int ret;
+
+ key = "qcom,glink-channels";
+ ret = of_property_read_string(dev->of_node, key,
+ &channel_name);
+
+ if (ret) {
+ pr_err("Failed to read node: %s, key=%s\n",
+ dev->of_node->full_name, key);
+ return ret;
+ }
+
+ open_config = kzalloc(sizeof(*open_config), GFP_KERNEL);
+
+ /* open a glink channel */
+ open_config->name = channel_name;
+ open_config->priv = qidev;
+ open_config->edge = dev_get_drvdata(dev);
+ open_config->notify_rx = qidrv->callback;
+ open_config->notify_tx_done = msm_rpm_trans_notify_tx_done;
+ open_config->notify_state = msm_rpm_trans_notify_state;
+
+ qidev->channel = glink_open(open_config);
+ ret = wait_for_completion_timeout(&glink_ack, GLINK_RPM_REQUEST_TIMEOUT);
+ if (!ret)
+ return -ETIMEDOUT;
+
+ ret = qidrv->probe(qidev);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ dev_err(&qidev->dev, "probe failed\n");
+ return ret;
+}
+
+static int qcom_ipc_dev_remove(struct device *dev)
+{
+ struct qcom_smd_device *qidev = to_ipc_device(dev);
+ struct qcom_smd_driver *qidrv = to_ipc_driver(dev);
+ int ret;
+
+ ret = glink_close(qidev->channel);
+ if (ret)
+ dev_err(&qidev->dev, "glink_close failed");
+
+ qidrv->remove(qidev);
+
+ return ret;
+}
+
/*
 * Bus type for G-Link IPC client devices. Devices created per DT channel
 * node are matched to drivers via OF compatible matching.
 */
static struct bus_type qcom_ipc_bus = {
	.name = "qcom_ipc",
	.match = qcom_ipc_dev_match,
	.probe = qcom_ipc_dev_probe,
	.remove = qcom_ipc_dev_remove,
};
+
+/*
+ * Release function for the qcom_smd_device object.
+ */
+static void qcom_ipc_release_device(struct device *dev)
+{
+ struct qcom_smd_device *qidev = to_ipc_device(dev);
+
+ kfree(qidev);
+}
+
+/*
+ * Create a ipc client device for channel that is being opened.
+ */
+static int qcom_ipc_create_device(struct device_node *node,
+ const void *edge_name)
+{
+ struct qcom_smd_device *qidev;
+ const char *name = edge_name;
+ int ret;
+
+ qidev = kzalloc(sizeof(*qidev), GFP_KERNEL);
+ if (!qidev)
+ return -ENOMEM;
+
+ dev_set_name(&qidev->dev, "%s.%s", name, node->name);
+ qidev->dev.parent = glink_dev;
+ qidev->dev.bus = &qcom_ipc_bus;
+ qidev->dev.release = qcom_ipc_release_device;
+ qidev->dev.of_node = node;
+
+ dev_set_drvdata(&qidev->dev, (void *)edge_name);
+
+ ret = device_register(&qidev->dev);
+ if (ret) {
+ dev_err(&qidev->dev, "device_register failed: %d\n", ret);
+ put_device(&qidev->dev);
+ }
+
+ return ret;
+}
+
+/**
+ * init_xprt_cfg() - initialize the xprt_cfg for an edge
+ * @einfo: The edge to initialize.
+ * @name: The name of the remote side this edge communicates to.
+ */
+static void init_xprt_cfg(struct edge_info *einfo, const char *name)
+{
+ einfo->xprt_cfg.name = XPRT_NAME;
+ einfo->xprt_cfg.edge = name;
+ einfo->xprt_cfg.versions = versions;
+ einfo->xprt_cfg.versions_entries = ARRAY_SIZE(versions);
+ einfo->xprt_cfg.max_cid = SZ_64K;
+ einfo->xprt_cfg.max_iid = SZ_2G;
+}
+
/**
 * glink_edge_parse() - set up one G-Link edge described by a DT node
 * @node:	Device-tree node describing the edge (msgram, irq, irq mask).
 * @edge_name:	Name of the remote subsystem this edge talks to.
 *
 * Maps the message RAM and outgoing-interrupt register, locates the TX/RX
 * FIFOs via the RPM table of contents at the end of msgram, registers the
 * transport with the glink core, requests the incoming interrupt, and
 * creates child IPC devices for each channel node under the edge.
 *
 * Return: 0 on success, standard Linux error code otherwise.
 */
static int glink_edge_parse(struct device_node *node, const char *edge_name)
{
	struct device_node *child_node;
	struct edge_info *einfo;
	int rc;
	char *key;
	const char *subsys_name;
	uint32_t irq_line;
	uint32_t irq_mask;
	struct resource irq_r;
	struct resource msgram_r;
	void __iomem *msgram;
	char toc[RPM_TOC_SIZE];
	uint32_t *tocp;
	uint32_t num_toc_entries;

	einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
	if (!einfo) {
		pr_err("%s: edge_info allocation failed\n", __func__);
		rc = -ENOMEM;
		goto edge_info_alloc_fail;
	}

	subsys_name = edge_name;

	key = "interrupts";
	irq_line = irq_of_parse_and_map(node, 0);
	if (!irq_line) {
		pr_err("%s: missing key %s\n", __func__, key);
		rc = -ENODEV;
		goto missing_key;
	}

	key = "qcom,irq-mask";
	rc = of_property_read_u32(node, key, &irq_mask);
	if (rc) {
		pr_err("%s: missing key %s\n", __func__, key);
		rc = -ENODEV;
		goto missing_key;
	}

	/* Resource 1: the outgoing interrupt register. */
	/* NOTE(review): on failure this prints the stale "qcom,irq-mask"
	 * key rather than identifying the address resource -- confirm. */
	rc = of_address_to_resource(node, 1, &irq_r);
	if (rc || !irq_r.start) {
		pr_err("%s: missing key %s\n", __func__, key);
		rc = -ENODEV;
		goto missing_key;
	}

	/* Resource 0: the shared message RAM holding the FIFOs. */
	rc = of_address_to_resource(node, 0, &msgram_r);
	if (rc || !msgram_r.start) {
		pr_err("%s: missing key %s\n", __func__, key);
		rc = -ENODEV;
		goto missing_key;
	}

	init_xprt_cfg(einfo, subsys_name);
	init_xprt_if(einfo);
	spin_lock_init(&einfo->write_lock);
	init_waitqueue_head(&einfo->tx_blocked_queue);
	init_kthread_work(&einfo->kwork, rx_worker);
	init_kthread_worker(&einfo->kworker);
	/* RPM-style edges carry no intents; FIFO access is 32-bit MMIO. */
	einfo->intentless = true;
	einfo->read_from_fifo = memcpy32_fromio;
	einfo->write_to_fifo = memcpy32_toio;
	init_srcu_struct(&einfo->use_ref);
	spin_lock_init(&einfo->rx_lock);
	INIT_LIST_HEAD(&einfo->deferred_cmds);

	einfo->out_irq_mask = irq_mask;
	einfo->out_irq_reg = ioremap_nocache(irq_r.start,
					     resource_size(&irq_r));

	if (!einfo->out_irq_reg) {
		pr_err("%s: unable to map irq reg\n", __func__);
		rc = -ENOMEM;
		goto irq_ioremap_fail;
	}

	msgram = ioremap_nocache(msgram_r.start, resource_size(&msgram_r));
	if (!msgram) {
		pr_err("%s: unable to map msgram\n", __func__);
		rc = -ENOMEM;
		goto msgram_ioremap_fail;
	}

	einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker,
				  "smem_native_%s", subsys_name);
	if (IS_ERR(einfo->task)) {
		rc = PTR_ERR(einfo->task);
		pr_err("%s: kthread_run failed %d\n", __func__, rc);
		goto kthread_fail;
	}

	/* The RPM table of contents lives at the end of message RAM. */
	memcpy32_fromio(toc, msgram + resource_size(&msgram_r) - RPM_TOC_SIZE,
			RPM_TOC_SIZE);
	tocp = (uint32_t *)toc;
	if (*tocp != RPM_TOC_ID) {
		rc = -ENODEV;
		pr_err("%s: TOC id %d is not valid\n", __func__, *tocp);
		goto toc_init_fail;
	}
	++tocp;
	num_toc_entries = *tocp;
	if (num_toc_entries > RPM_MAX_TOC_ENTRIES) {
		rc = -ENODEV;
		pr_err("%s: %d is too many toc entries\n", __func__,
		       num_toc_entries);
		goto toc_init_fail;
	}
	++tocp;

	/*
	 * Scan the TOC (id/offset/size triples) for the TX FIFO entry,
	 * validating that the FIFO lies entirely within message RAM.
	 * NOTE(review): rc is reused here as the loop counter.
	 */
	for (rc = 0; rc < num_toc_entries; ++rc) {
		if (*tocp != RPM_TX_FIFO_ID) {
			tocp += 3;	/* skip id, offset, size */
			continue;
		}
		++tocp;
		einfo->tx_ch_desc = msgram + *tocp;
		/* Data starts immediately after the channel descriptor. */
		einfo->tx_fifo = einfo->tx_ch_desc + 1;
		if ((uintptr_t)einfo->tx_fifo >
		    (uintptr_t)(msgram + resource_size(&msgram_r))) {
			pr_err("%s: invalid tx fifo address\n", __func__);
			einfo->tx_fifo = NULL;
			break;
		}
		++tocp;
		einfo->tx_fifo_size = *tocp;
		if (einfo->tx_fifo_size > resource_size(&msgram_r) ||
		    (uintptr_t)(einfo->tx_fifo + einfo->tx_fifo_size) >
		    (uintptr_t)(msgram + resource_size(&msgram_r))) {
			pr_err("%s: invalid tx fifo size\n", __func__);
			einfo->tx_fifo = NULL;
			break;
		}
		break;
	}
	if (!einfo->tx_fifo) {
		rc = -ENODEV;
		pr_err("%s: tx fifo not found\n", __func__);
		goto toc_init_fail;
	}

	/* Rewind past the TOC header and repeat the scan for the RX FIFO. */
	tocp = (uint32_t *)toc;
	tocp += 2;
	for (rc = 0; rc < num_toc_entries; ++rc) {
		if (*tocp != RPM_RX_FIFO_ID) {
			tocp += 3;
			continue;
		}
		++tocp;
		einfo->rx_ch_desc = msgram + *tocp;
		einfo->rx_fifo = einfo->rx_ch_desc + 1;
		if ((uintptr_t)einfo->rx_fifo >
		    (uintptr_t)(msgram + resource_size(&msgram_r))) {
			pr_err("%s: invalid rx fifo address\n", __func__);
			einfo->rx_fifo = NULL;
			break;
		}
		++tocp;
		einfo->rx_fifo_size = *tocp;
		if (einfo->rx_fifo_size > resource_size(&msgram_r) ||
		    (uintptr_t)(einfo->rx_fifo + einfo->rx_fifo_size) >
		    (uintptr_t)(msgram + resource_size(&msgram_r))) {
			pr_err("%s: invalid rx fifo size\n", __func__);
			einfo->rx_fifo = NULL;
			break;
		}
		break;
	}
	if (!einfo->rx_fifo) {
		rc = -ENODEV;
		pr_err("%s: rx fifo not found\n", __func__);
		goto toc_init_fail;
	}

	/* Reset only the indices this side owns. */
	einfo->tx_ch_desc->write_index = 0;
	einfo->rx_ch_desc->read_index = 0;

	rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg);
	if (rc == -EPROBE_DEFER)
		goto reg_xprt_fail;
	if (rc) {
		pr_err("%s: glink core register transport failed: %d\n",
		       __func__, rc);
		goto reg_xprt_fail;
	}

	einfo->irq_line = irq_line;
	rc = request_irq(irq_line, irq_handler,
			 IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND | IRQF_SHARED,
			 node->name, einfo);
	if (rc < 0) {
		pr_err("%s: request_irq on %d failed: %d\n", __func__, irq_line,
		       rc);
		goto request_irq_fail;
	}
	/* Wake failure is non-fatal; the edge still functions. */
	rc = enable_irq_wake(irq_line);
	if (rc < 0)
		pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
		       irq_line);

	register_debugfs_info(einfo);
	einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);

	/* scan through all the edges available channels */
	for_each_available_child_of_node(node, child_node)
		qcom_ipc_create_device(child_node, edge_name);
	return 0;

request_irq_fail:
	glink_core_unregister_transport(&einfo->xprt_if);
reg_xprt_fail:
toc_init_fail:
	flush_kthread_worker(&einfo->kworker);
	kthread_stop(einfo->task);
	einfo->task = NULL;
kthread_fail:
	iounmap(msgram);
msgram_ioremap_fail:
	iounmap(einfo->out_irq_reg);
irq_ioremap_fail:
missing_key:
	kfree(einfo);
edge_info_alloc_fail:
	return rc;
}
+
+static int glink_native_probe(struct platform_device *pdev)
+{
+ struct device_node *node;
+ const char *edge_name, *key;
+ int ret;
+
+ glink_dev = &pdev->dev;
+
+ init_completion(&glink_ack);
+
+ for_each_available_child_of_node(pdev->dev.of_node, node) {
+ key = "qcom,glink-edge";
+ ret = of_property_read_string(node, key, &edge_name);
+ if (ret) {
+ dev_err(&pdev->dev, "edge missing %s property\n", key);
+ return -EINVAL;
+ }
+
+ glink_edge_parse(node, edge_name);
+ }
+
+ return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+/**
+ * debug_edge() - generates formatted text output displaying current edge state
+ * @s: File to send the output to.
+ */
static void debug_edge(struct seq_file *s)
{
	struct edge_info *einfo;
	struct glink_dbgfs_data *dfs_d;

	/* The edge to display was stashed when the dbgfs file was created. */
	dfs_d = s->private;
	einfo = dfs_d->priv_data;

/*
 * formatted, human readable edge state output, ie:
 * TX/RX fifo information:
ID|EDGE     |TX READ  |TX WRITE  |TX SIZE   |RX READ   |RX WRITE  |RX SIZE
-------------------------------------------------------------------------------
01|mpss     |0x00000128|0x00000128|0x00000800|0x00000256|0x00000256|0x00001000
 *
 * Interrupt information:
 * EDGE      |TX INT    |RX INT
 * --------------------------------
 * mpss      |0x00000006|0x00000008
 */
	seq_puts(s, "TX/RX fifo information:\n");
	seq_printf(s, "%2s|%-10s|%-10s|%-10s|%-10s|%-10s|%-10s|%-10s\n",
		   "ID",
		   "EDGE",
		   "TX READ",
		   "TX WRITE",
		   "TX SIZE",
		   "RX READ",
		   "RX WRITE",
		   "RX SIZE");
	seq_puts(s,
		 "-------------------------------------------------------------------------------\n");
	if (!einfo)
		return;

	if (!einfo->rx_fifo)
		seq_puts(s, "Link Not Up\n");
	else
		/*
		 * NOTE(review): the header above declares 8 columns
		 * (including ID and EDGE) but this row prints only the 6
		 * numeric values, so the output is misaligned -- confirm
		 * and add the ID/EDGE columns.
		 */
		seq_printf(s, "0x%08X|0x%08X|0x%08X|0x%08X|0x%08X|0x%08X\n",
			   einfo->tx_ch_desc->read_index,
			   einfo->tx_ch_desc->write_index,
			   einfo->tx_fifo_size,
			   einfo->rx_ch_desc->read_index,
			   einfo->rx_ch_desc->write_index,
			   einfo->rx_fifo_size);

	seq_puts(s, "\nInterrupt information:\n");
	seq_printf(s, "%-10s|%-10s|%-10s\n", "EDGE", "TX INT", "RX INT");
	seq_puts(s, "--------------------------------\n");
	seq_printf(s, "%-10s|0x%08X|0x%08X\n", einfo->xprt_cfg.edge,
		   einfo->tx_irq_count,
		   einfo->rx_irq_count);
}
+
+/**
+ * register_debugfs_info() - initialize debugfs device entries
+ * @einfo: Pointer to specific edge_info for which register is called.
+ */
+static void register_debugfs_info(struct edge_info *einfo)
+{
+ struct glink_dbgfs dfs;
+ char *curr_dir_name;
+ int dir_name_len;
+
+ dir_name_len = strlen(einfo->xprt_cfg.edge) +
+ strlen(einfo->xprt_cfg.name) + 2;
+ curr_dir_name = kmalloc(dir_name_len, GFP_KERNEL);
+ if (!curr_dir_name) {
+ GLINK_ERR("%s: Memory allocation failed\n", __func__);
+ return;
+ }
+
+ snprintf(curr_dir_name, dir_name_len, "%s_%s",
+ einfo->xprt_cfg.edge, einfo->xprt_cfg.name);
+ dfs.curr_name = curr_dir_name;
+ dfs.par_name = "xprt";
+ dfs.b_dir_create = false;
+ glink_debugfs_create("XPRT_INFO", debug_edge,
+ &dfs, einfo, false);
+ kfree(curr_dir_name);
+}
+
+#else
static void register_debugfs_info(struct edge_info *einfo)
{
	/* Debugfs support disabled: nothing to register. */
}
+#endif /* CONFIG_DEBUG_FS */
+
/*
 * OF match table for the top-level G-Link transport node.
 * NOTE(review): no MODULE_DEVICE_TABLE(of, ...) is emitted, so module
 * autoloading by OF alias will not work -- confirm whether this can be
 * built as a module.
 */
static struct of_device_id glink_match_table[] = {
	{ .compatible = "qcom,glink" },
	{},
};
+
/* Platform driver binding glink_native_probe() to "qcom,glink" nodes. */
static struct platform_driver glink_rpm_native_driver = {
	.probe = glink_native_probe,
	.driver = {
		.name = "qcom_glink",
		.owner = THIS_MODULE,
		.of_match_table = glink_match_table,
	},
};
+
/* Init-time match list used only to decide whether to register at all. */
static const struct of_device_id glink_of_device_ids[] __initconst = {
	{ .compatible = "qcom,glink" },
	{}
};
+
+static int __init glink_smem_native_xprt_init(void)
+{
+ int rc = 0;
+ struct device_node *np;
+
+ np = of_find_matching_node(NULL, glink_of_device_ids);
+ if (np) {
+ rc = bus_register(&qcom_ipc_bus);
+ if (rc) {
+ pr_err("failed to register smd bus: %d\n", rc);
+ return rc;
+ }
+ ipc_bus = &qcom_ipc_bus;
+
+ rc = platform_driver_register(&glink_rpm_native_driver);
+ if (rc) {
+ pr_err("%s: glink_rpm_native_driver register failed %d\n",
+ __func__, rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+postcore_initcall(glink_smem_native_xprt_init);
+
+/**
+ * qcom_smd_driver_register - register a smd driver
+ * @qidrv: qcom_smd_driver struct
+ */
+int qcom_glink_driver_register(void *drv)
+{
+ struct qcom_smd_driver *qidrv = drv;
+
+ if (!ipc_bus)
+ return 0;
+
+ qidrv->driver.bus = ipc_bus;
+ return driver_register(&qidrv->driver);
+}
+EXPORT_SYMBOL(qcom_glink_driver_register);
+
+/**
+ * qcom_smd_driver_register - register a smd driver
+ * @qidrv: qcom_smd_driver struct
+ */
+void qcom_glink_driver_unregister(void *drv)
+{
+ struct qcom_smd_driver *qidrv = drv;
+
+ if (!ipc_bus)
+ return;
+
+ driver_unregister(&qidrv->driver);
+}
+EXPORT_SYMBOL(qcom_glink_driver_unregister);
+MODULE_DESCRIPTION("MSM G-Link SMEM Native Transport");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/glink_xprt_if.h b/drivers/soc/qcom/glink_xprt_if.h
new file mode 100644
index 0000000000000..6242e867fe720
--- /dev/null
+++ b/drivers/soc/qcom/glink_xprt_if.h
@@ -0,0 +1,201 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _SOC_QCOM_GLINK_XPRT_IF_H_
+#define _SOC_QCOM_GLINK_XPRT_IF_H_
+
+#include <linux/bitops.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+struct glink_core_xprt_ctx;
+struct glink_core_if;
+struct channel_ctx;
+struct glink_core_rx_intent;
+
+enum buf_type {
+ LINEAR = 0,
+ VECTOR,
+};
+
+enum xprt_ids {
+ SMEM_XPRT_ID = 100,
+ SMD_TRANS_XPRT_ID = 200,
+ LLOOP_XPRT_ID = 300,
+ MOCK_XPRT_HIGH_ID = 390,
+ MOCK_XPRT_ID = 400,
+ MOCK_XPRT_LOW_ID = 410,
+};
+
+#define GCAP_SIGNALS BIT(0)
+#define GCAP_INTENTLESS BIT(1)
+#define GCAP_TRACER_PKT BIT(2)
+#define GCAP_AUTO_QUEUE_RX_INT BIT(3)
+
+/**
+ * struct glink_core_tx_pkt - Transmit Packet information
+ * @list_node: Index to the channel's transmit queue.
+ * @list_done: Index to the channel's acknowledgment queue.
+ * @pkt_priv: Private information specific to the packet.
+ * @data: Pointer to the buffer containing the data.
+ * @riid: Remote receive intent used to transmit the packet.
+ * @rcid: Remote channel receiving the packet.
+ * @size: Total size of the data in the packet.
+ * @tx_len: Data length to transmit in the current transmit slot.
+ * @size_remaining: Remaining size of the data in the packet.
+ * @intent_size: Receive intent size queued by the remote side.
+ * @tracer_pkt: Flag to indicate if the packet is a tracer packet.
+ * @iovec: Pointer to the vector buffer packet.
+ * @vprovider: Packet-specific virtual buffer provider function.
+ * @pprovider: Packet-specific physical buffer provider function.
+ * @pkt_ref: Active references to the packet.
+ */
+struct glink_core_tx_pkt {
+ struct list_head list_node;
+ struct list_head list_done;
+ const void *pkt_priv;
+ const void *data;
+ uint32_t riid;
+ uint32_t rcid;
+ uint32_t size;
+ uint32_t tx_len;
+ uint32_t size_remaining;
+ size_t intent_size;
+ bool tracer_pkt;
+ void *iovec;
+ void * (*vprovider)(void *iovec, size_t offset, size_t *size);
+ void * (*pprovider)(void *iovec, size_t offset, size_t *size);
+ struct rwref_lock pkt_ref;
+};
+
+/**
+ * Note - each call to register the interface must pass a unique
+ * instance of this data.
+ */
+struct glink_transport_if {
+ /* Negotiation */
+ void (*tx_cmd_version)(struct glink_transport_if *if_ptr,
+ uint32_t version,
+ uint32_t features);
+ void (*tx_cmd_version_ack)(struct glink_transport_if *if_ptr,
+ uint32_t version,
+ uint32_t features);
+ uint32_t (*set_version)(struct glink_transport_if *if_ptr,
+ uint32_t version,
+ uint32_t features);
+
+ /* channel state */
+ int (*tx_cmd_ch_open)(struct glink_transport_if *if_ptr, uint32_t lcid,
+ const char *name, uint16_t req_xprt);
+ int (*tx_cmd_ch_close)(struct glink_transport_if *if_ptr,
+ uint32_t lcid);
+ void (*tx_cmd_ch_remote_open_ack)(struct glink_transport_if *if_ptr,
+ uint32_t rcid, uint16_t xprt_resp);
+ void (*tx_cmd_ch_remote_close_ack)(struct glink_transport_if *if_ptr,
+ uint32_t rcid);
+ int (*ssr)(struct glink_transport_if *if_ptr);
+
+ /* channel data */
+ int (*allocate_rx_intent)(struct glink_transport_if *if_ptr,
+ size_t size,
+ struct glink_core_rx_intent *intent);
+ int (*deallocate_rx_intent)(struct glink_transport_if *if_ptr,
+ struct glink_core_rx_intent *intent);
+ /* Optional */
+ int (*reuse_rx_intent)(struct glink_transport_if *if_ptr,
+ struct glink_core_rx_intent *intent);
+
+ int (*tx_cmd_local_rx_intent)(struct glink_transport_if *if_ptr,
+ uint32_t lcid, size_t size, uint32_t liid);
+ void (*tx_cmd_local_rx_done)(struct glink_transport_if *if_ptr,
+ uint32_t lcid, uint32_t liid, bool reuse);
+ int (*tx)(struct glink_transport_if *if_ptr, uint32_t lcid,
+ struct glink_core_tx_pkt *pctx);
+ int (*tx_cmd_rx_intent_req)(struct glink_transport_if *if_ptr,
+ uint32_t lcid, size_t size);
+ int (*tx_cmd_remote_rx_intent_req_ack)(
+ struct glink_transport_if *if_ptr,
+ uint32_t lcid, bool granted);
+ int (*tx_cmd_set_sigs)(struct glink_transport_if *if_ptr,
+ uint32_t lcid, uint32_t sigs);
+
+ /* Optional. If NULL at xprt registration, dummies will be used */
+ int (*poll)(struct glink_transport_if *if_ptr, uint32_t lcid);
+ int (*mask_rx_irq)(struct glink_transport_if *if_ptr, uint32_t lcid,
+ bool mask, void *pstruct);
+ int (*wait_link_down)(struct glink_transport_if *if_ptr);
+ int (*tx_cmd_tracer_pkt)(struct glink_transport_if *if_ptr,
+ uint32_t lcid, struct glink_core_tx_pkt *pctx);
+ unsigned long (*get_power_vote_ramp_time)(
+ struct glink_transport_if *if_ptr, uint32_t state);
+ int (*power_vote)(struct glink_transport_if *if_ptr, uint32_t state);
+ int (*power_unvote)(struct glink_transport_if *if_ptr);
+ /*
+ * Keep data pointers at the end of the structure after all function
+ * pointer to allow for in-place initialization.
+ */
+
+ /* private pointer for core */
+ struct glink_core_xprt_ctx *glink_core_priv;
+
+ /* core pointer (set during transport registration) */
+ struct glink_core_if *glink_core_if_ptr;
+};
+
+#ifdef CONFIG_MSM_GLINK
+
+/**
+ * get_tx_vaddr() - Get the virtual address from which the tx has to be done
+ * @pctx: transmit packet context.
+ * @offset: offset into the packet.
+ * @tx_size: pointer to hold the length of the contiguous buffer
+ * space.
+ *
+ * Return: Address from which the tx has to be done.
+ */
+static inline void *get_tx_vaddr(struct glink_core_tx_pkt *pctx, size_t offset,
+ size_t *tx_size)
+{
+ void *pdata;
+
+ if (pctx->vprovider) {
+ return pctx->vprovider((void *)pctx->iovec, offset, tx_size);
+ } else if (pctx->pprovider) {
+ pdata = pctx->pprovider((void *)pctx->iovec, offset, tx_size);
+ return phys_to_virt((unsigned long)pdata);
+ }
+ return NULL;
+}
+
+/**
+ * glink_xprt_name_to_id() - convert transport name to id
+ * @name: Name of the transport.
+ * @id: Assigned id.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+int glink_xprt_name_to_id(const char *name, uint16_t *id);
+
+
+#else /* CONFIG_MSM_GLINK */
+static inline void *get_tx_vaddr(struct glink_core_tx_pkt *pctx, size_t offset,
+ size_t *tx_size)
+{
+ return NULL;
+}
+
+static inline int glink_xprt_name_to_id(const char *name, uint16_t *id)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_MSM_GLINK */
+#endif /* _SOC_QCOM_GLINK_XPRT_IF_H_ */
diff --git a/drivers/soc/qcom/msm_bus/Makefile b/drivers/soc/qcom/msm_bus/Makefile
new file mode 100644
index 0000000000000..2fbdc70114641
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/Makefile
@@ -0,0 +1,27 @@
+#
+# Makefile for msm-bus driver specific files
+#
+obj-y += msm_bus_bimc.o msm_bus_noc.o msm_bus_core.o msm_bus_client_api.o
+obj-$(CONFIG_OF) += msm_bus_of.o
+obj-$(CONFIG_QCOM_SMD_RPM) += msm_bus_rpm_smd.o
+obj-$(CONFIG_QCOM_SMD_RPM) += qcom_rpm_msm_bus.o
+
+ifdef CONFIG_BUS_TOPOLOGY_ADHOC
+ obj-y += msm_bus_fabric_adhoc.o msm_bus_arb_adhoc.o msm_bus_rules.o
+ obj-$(CONFIG_OF) += msm_bus_of_adhoc.o
+ obj-$(CONFIG_DEBUG_BUS_VOTER) += msm_bus_dbg_voter.o
+ # FIXME remove it temporarily till this driver is ported and tested
+ #obj-$(CONFIG_CORESIGHT) += msm_buspm_coresight_adhoc.o
+else
+ obj-y += msm_bus_fabric.o msm_bus_config.o msm_bus_arb.o
+ obj-$(CONFIG_CORESIGHT) += msm_buspm_coresight.o
+endif
+
+ifdef CONFIG_ARCH_MSM8974
+ obj-$(CONFIG_ARCH_MSM8974) += msm_bus_board_8974.o
+else
+ obj-y += msm_bus_id.o
+endif
+
+obj-$(CONFIG_DEBUG_FS) += msm_bus_dbg.o
+obj-$(CONFIG_MSM_BUSPM_DEV) += msm-buspm-dev.o
diff --git a/drivers/soc/qcom/msm_bus/msm-buspm-dev.c b/drivers/soc/qcom/msm_bus/msm-buspm-dev.c
new file mode 100644
index 0000000000000..867e7378448c3
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm-buspm-dev.c
@@ -0,0 +1,366 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* #define DEBUG */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/dma-mapping.h>
+#include <soc/qcom/rpm-smd.h>
+#include <uapi/linux/msm-buspm-dev.h>
+
+#define MSM_BUSPM_DRV_NAME "msm-buspm-dev"
+
+#ifdef CONFIG_COMPAT
+static long
+msm_buspm_dev_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
+#else
+#define msm_buspm_dev_compat_ioctl NULL
+#endif
+
+static long
+msm_buspm_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+static int msm_buspm_dev_mmap(struct file *filp, struct vm_area_struct *vma);
+static int msm_buspm_dev_release(struct inode *inode, struct file *filp);
+static int msm_buspm_dev_open(struct inode *inode, struct file *filp);
+
+static const struct file_operations msm_buspm_dev_fops = {
+ .owner = THIS_MODULE,
+ .mmap = msm_buspm_dev_mmap,
+ .open = msm_buspm_dev_open,
+ .unlocked_ioctl = msm_buspm_dev_ioctl,
+ .compat_ioctl = msm_buspm_dev_compat_ioctl,
+ .llseek = noop_llseek,
+ .release = msm_buspm_dev_release,
+};
+
+struct miscdevice msm_buspm_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = MSM_BUSPM_DRV_NAME,
+ .fops = &msm_buspm_dev_fops,
+};
+
+
+enum msm_buspm_spdm_res {
+ SPDM_RES_ID = 0,
+ SPDM_RES_TYPE = 0x63707362,
+ SPDM_KEY = 0x00006e65,
+ SPDM_SIZE = 4,
+};
+/*
+ * Allocate kernel buffer.
+ * Currently limited to one buffer per file descriptor. If alloc() is
+ * called twice for the same descriptor, the original buffer is freed.
+ * There is also no locking protection so the same descriptor can not be shared.
+ */
+
+static inline void *msm_buspm_dev_get_vaddr(struct file *filp)
+{
+ struct msm_buspm_map_dev *dev = filp->private_data;
+
+ return (dev) ? dev->vaddr : NULL;
+}
+
+static inline unsigned int msm_buspm_dev_get_buflen(struct file *filp)
+{
+ struct msm_buspm_map_dev *dev = filp->private_data;
+
+ return dev ? dev->buflen : 0;
+}
+
+static inline unsigned long msm_buspm_dev_get_paddr(struct file *filp)
+{
+ struct msm_buspm_map_dev *dev = filp->private_data;
+
+ return (dev) ? dev->paddr : 0L;
+}
+
+static void msm_buspm_dev_free(struct file *filp)
+{
+ struct msm_buspm_map_dev *dev = filp->private_data;
+
+ if (dev && dev->vaddr) {
+ pr_debug("freeing memory at 0x%p\n", dev->vaddr);
+ dma_free_coherent(msm_buspm_misc.this_device, dev->buflen,
+ dev->vaddr, dev->paddr);
+ dev->paddr = 0L;
+ dev->vaddr = NULL;
+ }
+}
+
+static int msm_buspm_dev_open(struct inode *inode, struct file *filp)
+{
+ struct msm_buspm_map_dev *dev;
+
+ if (capable(CAP_SYS_ADMIN)) {
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (dev)
+ filp->private_data = dev;
+ else
+ return -ENOMEM;
+ } else {
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static int
+msm_buspm_dev_alloc(struct file *filp, struct buspm_alloc_params data)
+{
+ dma_addr_t paddr;
+ void *vaddr;
+ struct msm_buspm_map_dev *dev = filp->private_data;
+
+ /* If buffer already allocated, then free it */
+ if (dev->vaddr)
+ msm_buspm_dev_free(filp);
+
+ /* Allocate uncached memory */
+ vaddr = dma_alloc_coherent(msm_buspm_misc.this_device, data.size,
+ &paddr, GFP_KERNEL);
+
+ if (vaddr == NULL) {
+ pr_err("allocation of 0x%zu bytes failed", data.size);
+ return -ENOMEM;
+ }
+
+ dev->vaddr = vaddr;
+ dev->paddr = paddr;
+ dev->buflen = data.size;
+ filp->f_pos = 0;
+ pr_debug("virt addr = 0x%p\n", dev->vaddr);
+ pr_debug("phys addr = 0x%lx\n", dev->paddr);
+
+ return 0;
+}
+
+static int msm_bus_rpm_req(u32 rsc_type, u32 key, u32 hwid,
+ int ctx, u32 val)
+{
+ struct msm_rpm_request *rpm_req;
+ int ret, msg_id;
+
+ rpm_req = msm_rpm_create_request(ctx, rsc_type, SPDM_RES_ID, 1);
+ if (rpm_req == NULL) {
+ pr_err("RPM: Couldn't create RPM Request\n");
+ return -ENXIO;
+ }
+
+ ret = msm_rpm_add_kvp_data(rpm_req, key, (const uint8_t *)&val,
+ (int)(sizeof(uint32_t)));
+ if (ret) {
+ pr_err("RPM: Add KVP failed for RPM Req:%u\n",
+ rsc_type);
+ goto err;
+ }
+
+ pr_debug("Added Key: %d, Val: %u, size: %zu\n", key,
+ (uint32_t)val, sizeof(uint32_t));
+ msg_id = msm_rpm_send_request(rpm_req);
+ if (!msg_id) {
+ pr_err("RPM: No message ID for req\n");
+ ret = -ENXIO;
+ goto err;
+ }
+
+ ret = msm_rpm_wait_for_ack(msg_id);
+ if (ret) {
+ pr_err("RPM: Ack failed\n");
+ goto err;
+ }
+
+err:
+ msm_rpm_free_request(rpm_req);
+ return ret;
+}
+
+static int msm_buspm_ioc_cmds(uint32_t arg)
+{
+ switch (arg) {
+ case MSM_BUSPM_SPDM_CLK_DIS:
+ case MSM_BUSPM_SPDM_CLK_EN:
+ return msm_bus_rpm_req(SPDM_RES_TYPE, SPDM_KEY, 0,
+ MSM_RPM_CTX_ACTIVE_SET, arg);
+ default:
+ pr_warn("Unsupported ioctl command: %d\n", arg);
+ return -EINVAL;
+ }
+}
+
+
+
+static long
+msm_buspm_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct buspm_xfer_req xfer;
+ struct buspm_alloc_params alloc_data;
+ unsigned long paddr;
+ int retval = 0;
+ void *buf = msm_buspm_dev_get_vaddr(filp);
+ unsigned int buflen = msm_buspm_dev_get_buflen(filp);
+ unsigned char *dbgbuf = buf;
+
+ if (_IOC_TYPE(cmd) != MSM_BUSPM_IOC_MAGIC) {
+ pr_err("Wrong IOC_MAGIC.Exiting\n");
+ return -ENOTTY;
+ }
+
+ switch (cmd) {
+ case MSM_BUSPM_IOC_FREE:
+ pr_debug("cmd = 0x%x (FREE)\n", cmd);
+ msm_buspm_dev_free(filp);
+ break;
+
+ case MSM_BUSPM_IOC_ALLOC:
+ pr_debug("cmd = 0x%x (ALLOC)\n", cmd);
+ retval = __get_user(alloc_data.size, (uint32_t __user *)arg);
+
+ if (retval == 0)
+ retval = msm_buspm_dev_alloc(filp, alloc_data);
+ break;
+
+ case MSM_BUSPM_IOC_RD_PHYS_ADDR:
+ pr_debug("Read Physical Address\n");
+ paddr = msm_buspm_dev_get_paddr(filp);
+ if (paddr == 0L) {
+ retval = -EINVAL;
+ } else {
+ pr_debug("phys addr = 0x%lx\n", paddr);
+ retval = __put_user(paddr,
+ (unsigned long __user *)arg);
+ }
+ break;
+
+ case MSM_BUSPM_IOC_RDBUF:
+ if (!buf) {
+ retval = -EINVAL;
+ break;
+ }
+
+ pr_debug("Read Buffer: 0x%x%x%x%x\n",
+ dbgbuf[0], dbgbuf[1], dbgbuf[2], dbgbuf[3]);
+
+ if (copy_from_user(&xfer, (void __user *)arg, sizeof(xfer))) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if ((xfer.size <= buflen) &&
+ (copy_to_user((void __user *)xfer.data, buf,
+ xfer.size))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case MSM_BUSPM_IOC_WRBUF:
+ pr_debug("Write Buffer\n");
+
+ if (!buf) {
+ retval = -EINVAL;
+ break;
+ }
+
+ if (copy_from_user(&xfer, (void __user *)arg, sizeof(xfer))) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if ((buflen <= xfer.size) &&
+ (copy_from_user(buf, (void __user *)xfer.data,
+ xfer.size))) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case MSM_BUSPM_IOC_CMD:
+ pr_debug("IOCTL command: cmd: %d arg: %lu\n", cmd, arg);
+ retval = msm_buspm_ioc_cmds(arg);
+ break;
+
+ default:
+ pr_debug("Unknown command 0x%x\n", cmd);
+ retval = -EINVAL;
+ break;
+ }
+
+ return retval;
+}
+
+static int msm_buspm_dev_release(struct inode *inode, struct file *filp)
+{
+ struct msm_buspm_map_dev *dev = filp->private_data;
+
+ msm_buspm_dev_free(filp);
+ kfree(dev);
+ filp->private_data = NULL;
+
+ return 0;
+}
+
+static int msm_buspm_dev_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ pr_debug("vma = 0x%p\n", vma);
+
+ /* Mappings are uncached */
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot))
+ return -EFAULT;
+
+ return 0;
+}
+
+#ifdef CONFIG_COMPAT
+static long
+msm_buspm_dev_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ return msm_buspm_dev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static int __init msm_buspm_dev_init(void)
+{
+ int ret = 0;
+
+ ret = misc_register(&msm_buspm_misc);
+ if (ret < 0)
+ pr_err("%s: Cannot register misc device\n", __func__);
+
+ if (msm_buspm_misc.this_device->coherent_dma_mask == 0)
+ msm_buspm_misc.this_device->coherent_dma_mask =
+ DMA_BIT_MASK(32);
+
+ return ret;
+}
+
+static void __exit msm_buspm_dev_exit(void)
+{
+ misc_deregister(&msm_buspm_misc);
+}
+module_init(msm_buspm_dev_init);
+module_exit(msm_buspm_dev_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("platform:"MSM_BUSPM_DRV_NAME);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_adhoc.h b/drivers/soc/qcom/msm_bus/msm_bus_adhoc.h
new file mode 100644
index 0000000000000..cad0c53f2c737
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_adhoc.h
@@ -0,0 +1,147 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_ADHOC_H
+#define _ARCH_ARM_MACH_MSM_BUS_ADHOC_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+#include <linux/msm_bus_rules.h>
+#include "msm_bus_core.h"
+
+struct msm_bus_node_device_type;
+struct link_node {
+ uint64_t lnode_ib[NUM_CTX];
+ uint64_t lnode_ab[NUM_CTX];
+ int next;
+ struct device *next_dev;
+ struct list_head link;
+ uint32_t in_use;
+};
+
+/* New types introduced for adhoc topology */
+struct msm_bus_noc_ops {
+ int (*qos_init)(struct msm_bus_node_device_type *dev,
+ void __iomem *qos_base, uint32_t qos_off,
+ uint32_t qos_delta, uint32_t qos_freq);
+ int (*set_bw)(struct msm_bus_node_device_type *dev,
+ void __iomem *qos_base, uint32_t qos_off,
+ uint32_t qos_delta, uint32_t qos_freq);
+ int (*limit_mport)(struct msm_bus_node_device_type *dev,
+ void __iomem *qos_base, uint32_t qos_off,
+ uint32_t qos_delta, uint32_t qos_freq, bool enable_lim,
+ uint64_t lim_bw);
+ bool (*update_bw_reg)(int mode);
+};
+
+struct nodebw {
+ uint64_t ab[NUM_CTX];
+ bool dirty;
+};
+
+struct msm_bus_fab_device_type {
+ void __iomem *qos_base;
+ phys_addr_t pqos_base;
+ size_t qos_range;
+ uint32_t base_offset;
+ uint32_t qos_freq;
+ uint32_t qos_off;
+ uint32_t util_fact;
+ uint32_t vrail_comp;
+ struct msm_bus_noc_ops noc_ops;
+ enum msm_bus_hw_sel bus_type;
+ bool bypass_qos_prg;
+};
+
+struct qos_params_type {
+ int mode;
+ unsigned int prio_lvl;
+ unsigned int prio_rd;
+ unsigned int prio_wr;
+ unsigned int prio1;
+ unsigned int prio0;
+ unsigned int gp;
+ unsigned int thmp;
+ unsigned int ws;
+ int cur_mode;
+ u64 bw_buffer;
+};
+
+struct msm_bus_node_info_type {
+ const char *name;
+ unsigned int id;
+ int mas_rpm_id;
+ int slv_rpm_id;
+ int num_ports;
+ int num_qports;
+ int *qport;
+ struct qos_params_type qos_params;
+ unsigned int num_connections;
+ unsigned int num_blist;
+ bool is_fab_dev;
+ bool virt_dev;
+ bool is_traversed;
+ unsigned int *connections;
+ unsigned int *black_listed_connections;
+ struct device **dev_connections;
+ struct device **black_connections;
+ unsigned int bus_device_id;
+ struct device *bus_device;
+ unsigned int buswidth;
+ struct rule_update_path_info rule;
+ uint64_t lim_bw;
+};
+
+struct msm_bus_node_device_type {
+ struct msm_bus_node_info_type *node_info;
+ struct msm_bus_fab_device_type *fabdev;
+ int num_lnodes;
+ struct link_node *lnode_list;
+ uint64_t cur_clk_hz[NUM_CTX];
+ struct nodebw node_ab;
+ struct list_head link;
+ unsigned int ap_owned;
+ struct nodeclk clk[NUM_CTX];
+ struct nodeclk qos_clk;
+};
+
+int msm_bus_enable_limiter(struct msm_bus_node_device_type *nodedev,
+ bool throttle_en, uint64_t lim_bw);
+int msm_bus_update_clks(struct msm_bus_node_device_type *nodedev,
+ int ctx, int **dirty_nodes, int *num_dirty);
+int msm_bus_commit_data(int *dirty_nodes, int ctx, int num_dirty);
+int msm_bus_update_bw(struct msm_bus_node_device_type *nodedev, int ctx,
+ int64_t add_bw, int **dirty_nodes, int *num_dirty);
+void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size,
+ size_t new_size, gfp_t flags);
+
+extern struct msm_bus_device_node_registration
+ *msm_bus_of_to_pdata(struct platform_device *pdev);
+extern void msm_bus_arb_setops_adhoc(struct msm_bus_arb_ops *arb_ops);
+extern int msm_bus_bimc_set_ops(struct msm_bus_node_device_type *bus_dev);
+extern int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev);
+extern int msm_bus_of_get_static_rules(struct platform_device *pdev,
+ struct bus_rule_type **static_rule);
+extern int msm_rules_update_path(struct list_head *input_list,
+ struct list_head *output_list);
+extern void print_all_rules(void);
+#ifdef CONFIG_DEBUG_BUS_VOTER
+int msm_bus_floor_init(struct device *dev);
+#else
+static inline int msm_bus_floor_init(struct device *dev)
+{
+ return 0;
+}
+#endif /* CONFIG_DEBUG_BUS_VOTER */
+#endif /* _ARCH_ARM_MACH_MSM_BUS_ADHOC_H */
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb.c b/drivers/soc/qcom/msm_bus/msm_bus_arb.c
new file mode 100644
index 0000000000000..0a92e182210d6
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_arb.c
@@ -0,0 +1,1137 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/radix-tree.h>
+#include <linux/clk.h>
+#include <linux/msm-bus.h>
+#include "msm_bus_core.h"
+#include <trace/events/trace_msm_bus.h>
+
+#define INDEX_MASK 0x0000FFFF
+#define PNODE_MASK 0xFFFF0000
+#define SHIFT_VAL 16
+#define CREATE_PNODE_ID(n, i) (((n) << SHIFT_VAL) | (i))
+#define GET_INDEX(n) ((n) & INDEX_MASK)
+#define GET_NODE(n) ((n) >> SHIFT_VAL)
+#define IS_NODE(n) ((n) % FABRIC_ID_KEY)
+#define SEL_FAB_CLK 1
+#define SEL_SLAVE_CLK 0
+/*
+ * To get to BIMC BW convert Hz to bytes by multiplying bus width(8),
+ * double-data-rate(2) * ddr-channels(2).
+ */
+#define GET_BIMC_BW(clk) (clk * 8 * 2 * 2)
+
+#define BW_TO_CLK_FREQ_HZ(width, bw) \
+ msm_bus_div64(width, bw)
+
+#define IS_MASTER_VALID(mas) \
+ (((mas >= MSM_BUS_MASTER_FIRST) && (mas <= MSM_BUS_MASTER_LAST)) \
+ ? 1 : 0)
+
+#define IS_SLAVE_VALID(slv) \
+ (((slv >= MSM_BUS_SLAVE_FIRST) && (slv <= MSM_BUS_SLAVE_LAST)) ? 1 : 0)
+
+static DEFINE_MUTEX(msm_bus_lock);
+
+/* This function uses shift operations to divide 64 bit value for higher
+ * efficiency. The divisor expected are number of ports or bus-width.
+ * These are expected to be 1, 2, 4, 8, 16 and 32 in most cases.
+ *
+ * To account for exception to the above divisor values, the standard
+ * do_div function is used.
+ * */
+uint64_t msm_bus_div64(unsigned int w, uint64_t bw)
+{
+ uint64_t *b = &bw;
+
+ if ((bw > 0) && (bw < w))
+ return 1;
+
+ switch (w) {
+ case 0:
+ WARN(1, "AXI: Divide by 0 attempted\n");
+ case 1: return bw;
+ case 2: return (bw >> 1);
+ case 4: return (bw >> 2);
+ case 8: return (bw >> 3);
+ case 16: return (bw >> 4);
+ case 32: return (bw >> 5);
+ }
+
+ do_div(*b, w);
+ return *b;
+}
+
+/**
+ * add_path_node: Adds the path information to the current node
+ * @info: Internal node info structure
+ * @next: Combination of the id and index of the next node
+ * Function returns: Number of pnodes (path_nodes) on success,
+ * error on failure.
+ *
+ * Every node maintains the list of path nodes. A path node is
+ * reached by finding the node-id and index stored at the current
+ * node. This makes updating the paths with requested bw and clock
+ * values efficient, as it avoids lookup for each update-path request.
+ */
+static int add_path_node(struct msm_bus_inode_info *info, int next)
+{
+ struct path_node *pnode;
+ int i;
+ if (ZERO_OR_NULL_PTR(info)) {
+ MSM_BUS_ERR("Cannot find node info!: id :%d\n",
+ info->node_info->priv_id);
+ return -ENXIO;
+ }
+
+ for (i = 0; i <= info->num_pnodes; i++) {
+ if (info->pnode[i].next == -2) {
+ MSM_BUS_DBG("Reusing pnode for info: %d at index: %d\n",
+ info->node_info->priv_id, i);
+ info->pnode[i].clk[DUAL_CTX] = 0;
+ info->pnode[i].clk[ACTIVE_CTX] = 0;
+ info->pnode[i].bw[DUAL_CTX] = 0;
+ info->pnode[i].bw[ACTIVE_CTX] = 0;
+ info->pnode[i].next = next;
+ MSM_BUS_DBG("%d[%d] : (%d, %d)\n",
+ info->node_info->priv_id, i, GET_NODE(next),
+ GET_INDEX(next));
+ return i;
+ }
+ }
+
+ info->num_pnodes++;
+ pnode = krealloc(info->pnode,
+ ((info->num_pnodes + 1) * sizeof(struct path_node))
+ , GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(pnode)) {
+ MSM_BUS_ERR("Error creating path node!\n");
+ info->num_pnodes--;
+ return -ENOMEM;
+ }
+ info->pnode = pnode;
+ info->pnode[info->num_pnodes].clk[DUAL_CTX] = 0;
+ info->pnode[info->num_pnodes].clk[ACTIVE_CTX] = 0;
+ info->pnode[info->num_pnodes].bw[DUAL_CTX] = 0;
+ info->pnode[info->num_pnodes].bw[ACTIVE_CTX] = 0;
+ info->pnode[info->num_pnodes].next = next;
+ MSM_BUS_DBG("%d[%d] : (%d, %d)\n", info->node_info->priv_id,
+ info->num_pnodes, GET_NODE(next), GET_INDEX(next));
+ return info->num_pnodes;
+}
+
+static int clearvisitedflag(struct device *dev, void *data)
+{
+ struct msm_bus_fabric_device *fabdev = to_msm_bus_fabric_device(dev);
+ fabdev->visited = false;
+ return 0;
+}
+
+/**
+ * getpath() - Finds the path from the topology between src and dest
+ * @src: Source. This is the master from which the request originates
+ * @dest: Destination. This is the slave to which we're trying to reach
+ *
+ * Function returns: next_pnode_id. The higher 16 bits of the next_pnode_id
+ * represent the src id of the next node on path. The lower 16 bits of the
+ * next_pnode_id represent the "index", which is the next entry in the array
+ * of pnodes for that node to fill in clk and bw values. This is created using
+ * CREATE_PNODE_ID. The return value is stored in ret_pnode, and this is added
+ * to the list of path nodes.
+ *
+ * This function recursively finds the path by updating the src to the
+ * closest possible node to dest.
+ */
+static int getpath(int src, int dest)
+{
+ int pnode_num = -1, i;
+ struct msm_bus_fabnodeinfo *fabnodeinfo;
+ struct msm_bus_fabric_device *fabdev;
+ int next_pnode_id = -1;
+ struct msm_bus_inode_info *info = NULL;
+ int _src = src/FABRIC_ID_KEY;
+ int _dst = dest/FABRIC_ID_KEY;
+ int ret_pnode = -1;
+ int fabid = GET_FABID(src);
+
+ /* Find the location of fabric for the src */
+ MSM_BUS_DBG("%d --> %d\n", src, dest);
+
+ fabdev = msm_bus_get_fabric_device(fabid);
+ if (!fabdev) {
+ MSM_BUS_WARN("Fabric Not yet registered. Try again\n");
+ return -ENXIO;
+ }
+
+ /* Are we there yet? */
+ if (src == dest) {
+ info = fabdev->algo->find_node(fabdev, src);
+ if (ZERO_OR_NULL_PTR(info)) {
+ MSM_BUS_ERR("Node %d not found\n", dest);
+ return -ENXIO;
+ }
+
+ for (i = 0; i <= info->num_pnodes; i++) {
+ if (info->pnode[i].next == -2) {
+ MSM_BUS_DBG("src = dst Reusing pnode for"
+ " info: %d at index: %d\n",
+ info->node_info->priv_id, i);
+ next_pnode_id = CREATE_PNODE_ID(src, i);
+ info->pnode[i].clk[DUAL_CTX] = 0;
+ info->pnode[i].bw[DUAL_CTX] = 0;
+ info->pnode[i].next = next_pnode_id;
+ MSM_BUS_DBG("returning: %d, %d\n", GET_NODE
+ (next_pnode_id), GET_INDEX(next_pnode_id));
+ return next_pnode_id;
+ }
+ }
+ next_pnode_id = CREATE_PNODE_ID(src, (info->num_pnodes + 1));
+ pnode_num = add_path_node(info, next_pnode_id);
+ if (pnode_num < 0) {
+ MSM_BUS_ERR("Error adding path node\n");
+ return -ENXIO;
+ }
+ MSM_BUS_DBG("returning: %d, %d\n", GET_NODE(next_pnode_id),
+ GET_INDEX(next_pnode_id));
+ return next_pnode_id;
+ } else if (_src == _dst) {
+ /*
+ * src and dest belong to same fabric, find the destination
+ * from the radix tree
+ */
+ info = fabdev->algo->find_node(fabdev, dest);
+ if (ZERO_OR_NULL_PTR(info)) {
+ MSM_BUS_ERR("Node %d not found\n", dest);
+ return -ENXIO;
+ }
+
+ ret_pnode = getpath(info->node_info->priv_id, dest);
+ next_pnode_id = ret_pnode;
+ } else {
+ /* find the dest fabric */
+ int trynextgw = true;
+ struct list_head *gateways = fabdev->algo->get_gw_list(fabdev);
+ list_for_each_entry(fabnodeinfo, gateways, list) {
+ /* see if the destination is at a connected fabric */
+ if (_dst == (fabnodeinfo->info->node_info->priv_id /
+ FABRIC_ID_KEY)) {
+ /* Found the fab on which the device exists */
+ info = fabnodeinfo->info;
+ trynextgw = false;
+ ret_pnode = getpath(info->node_info->priv_id,
+ dest);
+ pnode_num = add_path_node(info, ret_pnode);
+ if (pnode_num < 0) {
+ MSM_BUS_ERR("Error adding path node\n");
+ return -ENXIO;
+ }
+ next_pnode_id = CREATE_PNODE_ID(
+ info->node_info->priv_id, pnode_num);
+ break;
+ }
+ }
+
+ /* find the gateway */
+ if (trynextgw) {
+ gateways = fabdev->algo->get_gw_list(fabdev);
+ list_for_each_entry(fabnodeinfo, gateways, list) {
+ struct msm_bus_fabric_device *gwfab =
+ msm_bus_get_fabric_device(fabnodeinfo->
+ info->node_info->priv_id);
+ if (!gwfab) {
+ MSM_BUS_ERR("Err: No gateway found\n");
+ return -ENXIO;
+ }
+
+ if (!gwfab->visited) {
+ MSM_BUS_DBG("VISITED ID: %d\n",
+ gwfab->id);
+ gwfab->visited = true;
+ info = fabnodeinfo->info;
+ ret_pnode = getpath(info->
+ node_info->priv_id, dest);
+ pnode_num = add_path_node(info,
+ ret_pnode);
+ if (pnode_num < 0) {
+ MSM_BUS_ERR("Malloc failure in"
+ " adding path node\n");
+ return -ENXIO;
+ }
+ next_pnode_id = CREATE_PNODE_ID(
+ info->node_info->priv_id, pnode_num);
+ break;
+ }
+ }
+ if (next_pnode_id < 0)
+ return -ENXIO;
+ }
+ }
+
+ if (!IS_NODE(src)) {
+ MSM_BUS_DBG("Returning next_pnode_id:%d[%d]\n", GET_NODE(
+ next_pnode_id), GET_INDEX(next_pnode_id));
+ return next_pnode_id;
+ }
+ info = fabdev->algo->find_node(fabdev, src);
+ if (!info) {
+ MSM_BUS_ERR("Node info not found.\n");
+ return -ENXIO;
+ }
+
+ pnode_num = add_path_node(info, next_pnode_id);
+ MSM_BUS_DBG(" Last: %d[%d] = (%d, %d)\n",
+ src, info->num_pnodes, GET_NODE(next_pnode_id),
+ GET_INDEX(next_pnode_id));
+ MSM_BUS_DBG("returning: %d, %d\n", src, pnode_num);
+ return CREATE_PNODE_ID(src, pnode_num);
+}
+
+static uint64_t get_node_maxib(struct msm_bus_inode_info *info)
+{
+ int i, ctx;
+ uint64_t maxib = 0;
+
+ for (i = 0; i <= info->num_pnodes; i++) {
+ for (ctx = 0; ctx < NUM_CTX; ctx++)
+ maxib = max(info->pnode[i].clk[ctx], maxib);
+ }
+
+ MSM_BUS_DBG("%s: Node %d numpnodes %d maxib %llu", __func__,
+ info->num_pnodes, info->node_info->id, maxib);
+ return maxib;
+}
+
+
+static uint64_t get_node_sumab(struct msm_bus_inode_info *info)
+{
+ int i;
+ uint64_t maxab = 0;
+
+ for (i = 0; i <= info->num_pnodes; i++)
+ maxab += info->pnode[i].bw[DUAL_CTX];
+
+ MSM_BUS_DBG("%s: Node %d numpnodes %d maxib %llu", __func__,
+ info->num_pnodes, info->node_info->id, maxab);
+ return maxab;
+}
+
+/* Return the VFE master's aggregate AB vote, or 0 when it can't be read. */
+static uint64_t get_vfe_bw(void)
+{
+	int vfe_id = MSM_BUS_MASTER_VFE;
+	int iid = msm_bus_board_get_iid(vfe_id);
+	int fabid;
+	struct msm_bus_fabric_device *fabdev;
+	struct msm_bus_inode_info *info;
+	uint64_t vfe_bw = 0;
+
+	fabid = GET_FABID(iid);
+	fabdev = msm_bus_get_fabric_device(fabid);
+
+	/* Fix: fabdev was dereferenced without checking for NULL */
+	if (!fabdev) {
+		MSM_BUS_ERR("%s: Fabric not found for fabid %d", __func__,
+			fabid);
+		goto exit_get_vfe_bw;
+	}
+
+	info = fabdev->algo->find_node(fabdev, iid);
+	if (!info) {
+		MSM_BUS_ERR("%s: Can't find node %d", __func__,
+			vfe_id);
+		goto exit_get_vfe_bw;
+	}
+
+	vfe_bw = get_node_sumab(info);
+	MSM_BUS_DBG("vfe_ab %llu", vfe_bw);
+
+exit_get_vfe_bw:
+	return vfe_bw;
+}
+
+/* Return the derated aggregate AB of both MDP ports, scaled by the
+ * node's fudge-factor (ff). Returns 0 if no port reports a ff.
+ */
+static uint64_t get_mdp_bw(void)
+{
+	int ids[] = {MSM_BUS_MASTER_MDP_PORT0, MSM_BUS_MASTER_MDP_PORT1};
+	int i;
+	uint64_t mdp_ab = 0;
+	uint32_t ff = 0;
+
+	for (i = 0; i < ARRAY_SIZE(ids); i++) {
+		int iid = msm_bus_board_get_iid(ids[i]);
+		int fabid;
+		struct msm_bus_fabric_device *fabdev;
+		struct msm_bus_inode_info *info;
+
+		fabid = GET_FABID(iid);
+		fabdev = msm_bus_get_fabric_device(fabid);
+
+		/* Fix: fabdev was dereferenced without checking for NULL */
+		if (!fabdev) {
+			MSM_BUS_ERR("%s: Fabric not found for fabid %d",
+				__func__, fabid);
+			continue;
+		}
+
+		info = fabdev->algo->find_node(fabdev, iid);
+
+		if (!info) {
+			MSM_BUS_ERR("%s: Can't find node %d", __func__,
+				ids[i]);
+			continue;
+		}
+
+		mdp_ab += get_node_sumab(info);
+		MSM_BUS_DBG("mdp_ab %llu", mdp_ab);
+		ff = info->node_info->ff;
+	}
+
+	/* Derate: mdp_ab * 2 / ff is the effective demand in percent terms */
+	if (ff) {
+		mdp_ab = msm_bus_div64(2 * ff, 100 * mdp_ab);
+	} else {
+		MSM_BUS_ERR("MDP FF is 0");
+		mdp_ab = 0;
+	}
+
+	MSM_BUS_DBG("MDP BW %llu\n", mdp_ab);
+	return mdp_ab;
+}
+
+/* Total real-time bandwidth demand: MDP plus VFE contributions. */
+static uint64_t get_rt_bw(void)
+{
+	return get_mdp_bw() + get_vfe_bw();
+}
+
+/* Compute the bandwidth left on @fabdev for non-real-time masters after
+ * reserving the real-time (MDP/VFE) demand. Returns 0 when nothing needs
+ * limiting or when the required tunables are missing.
+ */
+static uint64_t get_avail_bw(struct msm_bus_fabric_device *fabdev)
+{
+	uint64_t fabclk_rate = 0;
+	int i;
+	uint64_t avail_bw = 0;
+	uint64_t rt_bw = get_rt_bw();
+	struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
+
+	/* No RT votes: nothing to reserve against. */
+	if (!rt_bw)
+		goto exit_get_avail_bw;
+
+	/* Use the fastest of the per-context fabric clock rates. */
+	for (i = 0; i < NUM_CTX; i++) {
+		uint64_t ctx_rate;
+		ctx_rate =
+			fabric->info.nodeclk[i].rate;
+		fabclk_rate = max(ctx_rate, fabclk_rate);
+	}
+
+	if (!fabdev->eff_fact || !fabdev->nr_lim_thresh) {
+		MSM_BUS_ERR("Error: Eff-fact %d; nr_thresh %llu",
+				fabdev->eff_fact, fabdev->nr_lim_thresh);
+		return 0;
+	}
+
+	/* Derate theoretical BIMC bandwidth by the efficiency factor (%). */
+	avail_bw = msm_bus_div64(100,
+				(GET_BIMC_BW(fabclk_rate) * fabdev->eff_fact));
+
+	/* Above the threshold there is enough headroom: no limiting. */
+	if (avail_bw >= fabdev->nr_lim_thresh)
+		return 0;
+
+	MSM_BUS_DBG("%s: Total_avail_bw %llu, rt_bw %llu\n",
+		__func__, avail_bw, rt_bw);
+	trace_bus_avail_bw(avail_bw, rt_bw);
+
+	/* RT demand exceeds supply: nothing left for NR masters. */
+	if (avail_bw < rt_bw) {
+		MSM_BUS_ERR("\n%s: ERROR avail BW %llu < MDP %llu",
+			__func__, avail_bw, rt_bw);
+		avail_bw = 0;
+		goto exit_get_avail_bw;
+	}
+	avail_bw -= rt_bw;
+
+exit_get_avail_bw:
+	return avail_bw;
+}
+
+/* Push the previously computed limiter settings to every NR-limited
+ * master node on @fabdev's fabric.
+ */
+static void program_nr_limits(struct msm_bus_fabric_device *fabdev)
+{
+	int num_nr_lim = 0;
+	int i;
+	/* VLA sized by the fabric's NR-limited node count. */
+	struct msm_bus_inode_info *info[fabdev->num_nr_lim];
+	struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
+
+	/* Gather all master nodes tagged MASTER_NODE in the fabric tree. */
+	num_nr_lim = radix_tree_gang_lookup_tag(&fabric->fab_tree,
+		(void **)&info, fabric->fabdev.id, fabdev->num_nr_lim,
+		MASTER_NODE);
+
+	for (i = 0; i < num_nr_lim; i++)
+		fabdev->algo->config_limiter(fabdev, info[i]);
+}
+
+/* bus_for_each_dev() callback: reprogram the NR limiters of one fabric. */
+static int msm_bus_commit_limiter(struct device *dev, void *data)
+{
+	struct msm_bus_fabric_device *fabdev = to_msm_bus_fabric_device(dev);
+
+	MSM_BUS_DBG("fabid: %d\n", fabdev->id);
+	program_nr_limits(fabdev);
+	return 0;
+}
+
+/* Split the spare (non-real-time) bandwidth of @fabdev among its NR-limited
+ * master nodes, proportionally to each node's requested IB, clipping to the
+ * request and honouring any per-node floor. Results land in cur_lim_bw.
+ */
+static void compute_nr_limits(struct msm_bus_fabric_device *fabdev, int pnode)
+{
+	uint64_t total_ib = 0;
+	int num_nr_lim = 0;
+	uint64_t avail_bw = 0;
+	struct msm_bus_inode_info *info[fabdev->num_nr_lim];
+	struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
+	int i;
+
+	num_nr_lim = radix_tree_gang_lookup_tag(&fabric->fab_tree,
+		(void **)&info, fabric->fabdev.id, fabdev->num_nr_lim,
+		MASTER_NODE);
+
+	MSM_BUS_DBG("%s: Found %d NR LIM nodes", __func__, num_nr_lim);
+	for (i = 0; i < num_nr_lim; i++)
+		total_ib += get_node_maxib(info[i]);
+
+	avail_bw = get_avail_bw(fabdev);
+	MSM_BUS_DBG("\n %s: Avail BW %llu", __func__, avail_bw);
+
+	for (i = 0; i < num_nr_lim; i++) {
+		uint32_t node_pct = 0;
+		uint64_t new_lim_bw = 0;
+		uint64_t node_max_ib = 0;
+		uint32_t node_max_ib_kB = 0;
+		uint32_t total_ib_kB = 0;
+		uint64_t bw_node;
+
+		node_max_ib = get_node_maxib(info[i]);
+		node_max_ib_kB = msm_bus_div64(1024, node_max_ib);
+		total_ib_kB = msm_bus_div64(1024, total_ib);
+
+		/*
+		 * Fix: when no NR master has a non-zero IB vote,
+		 * total_ib_kB is 0 and the division below would fault.
+		 */
+		if (!total_ib_kB) {
+			MSM_BUS_DBG("%s: Zero total IB, skip node %d",
+				__func__, info[i]->node_info->id);
+			continue;
+		}
+
+		node_pct = (node_max_ib_kB * 100) / total_ib_kB;
+		bw_node = node_pct * avail_bw;
+		new_lim_bw = msm_bus_div64(100, bw_node);
+
+		/*
+		 * if limiter bw is more than the requested IB clip to
+		 requested IB.
+		 */
+		if (new_lim_bw >= node_max_ib)
+			new_lim_bw = node_max_ib;
+
+		/*
+		 * if there is a floor bw for this nr lim node and
+		 * if there is available bw to divy up among the nr masters
+		 * and if the nr lim masters have a non zero vote and
+		 * if the limited bw is below the floor for this node.
+		 * then limit this node to the floor bw.
+		 */
+		if (info[i]->node_info->floor_bw && node_max_ib && avail_bw &&
+			(new_lim_bw <= info[i]->node_info->floor_bw)) {
+			MSM_BUS_ERR("\nNode %d:Limiting BW:%llu < floor:%llu",
+				info[i]->node_info->id, new_lim_bw,
+				info[i]->node_info->floor_bw);
+			new_lim_bw = info[i]->node_info->floor_bw;
+		}
+
+		if (new_lim_bw != info[i]->cur_lim_bw) {
+			info[i]->cur_lim_bw = new_lim_bw;
+			MSM_BUS_DBG("NodeId %d: Requested IB %llu",
+				info[i]->node_info->id, node_max_ib);
+			MSM_BUS_DBG("Limited to %llu(%d pct of Avail %llu )\n",
+				new_lim_bw, node_pct, avail_bw);
+		} else {
+			MSM_BUS_DBG("NodeId %d: No change Limited to %llu\n",
+				info[i]->node_info->id, info[i]->cur_lim_bw);
+		}
+	}
+}
+
+/* Entry point for static NR limiting: validate that @curr lives on the
+ * default (BIMC) fabric and then recompute the per-node limits.
+ */
+static void setup_nr_limits(int curr, int pnode)
+{
+	struct msm_bus_fabric_device *fabdev =
+		msm_bus_get_fabric_device(GET_FABID(curr));
+	struct msm_bus_inode_info *info;
+
+	if (!fabdev) {
+		MSM_BUS_WARN("Fabric Not yet registered. Try again\n");
+		goto exit_setup_nr_limits;
+	}
+
+	/* This logic is currently applicable to BIMC masters only */
+	if (fabdev->id != MSM_BUS_FAB_DEFAULT) {
+		MSM_BUS_ERR("Static limiting of NR masters only for BIMC\n");
+		goto exit_setup_nr_limits;
+	}
+
+	/* Sanity-check that the node exists before computing limits. */
+	info = fabdev->algo->find_node(fabdev, curr);
+	if (!info) {
+		MSM_BUS_ERR("Cannot find node info!\n");
+		goto exit_setup_nr_limits;
+	}
+
+	compute_nr_limits(fabdev, pnode);
+exit_setup_nr_limits:
+	return;
+}
+
+/* True when @id resolves to a node flagged NR-limited or real-time master. */
+static bool is_nr_lim(int id)
+{
+	struct msm_bus_inode_info *info;
+	struct msm_bus_fabric_device *fabdev = msm_bus_get_fabric_device
+		(GET_FABID(id));
+
+	if (!fabdev) {
+		MSM_BUS_ERR("Bus device for bus ID: %d not found!\n",
+			GET_FABID(id));
+		return false;
+	}
+
+	info = fabdev->algo->find_node(fabdev, id);
+	if (!info) {
+		MSM_BUS_ERR("Cannot find node info %d!\n", id);
+		return false;
+	}
+
+	return info->node_info->nr_lim || info->node_info->rt_mas;
+}
+
+/**
+ * update_path() - Update the path with the bandwidth and clock values, as
+ * requested by the client.
+ *
+ * @curr: Current source node, as specified in the client vector (master)
+ * @pnode: The first-hop node on the path, stored in the internal client struct
+ * @req_clk: Requested clock value from the vector
+ * @req_bw: Requested bandwidth value from the vector
+ * @curr_clk: Current clock frequency
+ * @curr_bw: Currently allocated bandwidth
+ *
+ * This function updates the nodes on the path calculated using getpath(), with
+ * clock and bandwidth values. The sum of bandwidths, and the max of clock
+ * frequencies is calculated at each node on the path. Commit data to be sent
+ * to RPM for each master and slave is also calculated here.
+ */
+static int update_path(int curr, int pnode, uint64_t req_clk, uint64_t req_bw,
+	uint64_t curr_clk, uint64_t curr_bw, unsigned int ctx, unsigned int
+	cl_active_flag)
+{
+	int index, ret = 0;
+	struct msm_bus_inode_info *info;
+	struct msm_bus_inode_info *src_info;
+	int next_pnode;
+	int64_t add_bw = req_bw - curr_bw;
+	uint64_t bwsum = 0;
+	uint64_t req_clk_hz, curr_clk_hz, bwsum_hz;
+	int *master_tiers;
+	struct msm_bus_fabric_device *fabdev = msm_bus_get_fabric_device
+		(GET_FABID(curr));
+
+	if (!fabdev) {
+		MSM_BUS_ERR("Bus device for bus ID: %d not found!\n",
+			GET_FABID(curr));
+		return -ENXIO;
+	}
+
+	MSM_BUS_DBG("args: %d %d %d %llu %llu %llu %llu %u\n",
+		curr, GET_NODE(pnode), GET_INDEX(pnode), req_clk, req_bw,
+		curr_clk, curr_bw, ctx);
+	index = GET_INDEX(pnode);
+	MSM_BUS_DBG("Client passed index :%d\n", index);
+	info = fabdev->algo->find_node(fabdev, curr);
+	if (!info) {
+		MSM_BUS_ERR("Cannot find node info!\n");
+		return -ENXIO;
+	}
+	src_info = info;
+
+	/* Apply the bandwidth delta to the source (master) node first. */
+	info->link_info.sel_bw = &info->link_info.bw[ctx];
+	info->link_info.sel_clk = &info->link_info.clk[ctx];
+	*info->link_info.sel_bw += add_bw;
+
+	info->pnode[index].sel_bw = &info->pnode[index].bw[ctx];
+
+	/**
+	 * To select the right clock, AND the context with
+	 * client active flag.
+	 */
+	info->pnode[index].sel_clk = &info->pnode[index].clk[ctx &
+		cl_active_flag];
+	*info->pnode[index].sel_bw += add_bw;
+	*info->pnode[index].sel_clk = req_clk;
+
+	/**
+	 * If master supports dual configuration, check if
+	 * the configuration needs to be changed based on
+	 * incoming requests
+	 */
+	if (info->node_info->dual_conf) {
+		uint64_t node_maxib = 0;
+		node_maxib = get_node_maxib(info);
+		fabdev->algo->config_master(fabdev, info,
+			node_maxib, req_bw);
+	}
+
+	info->link_info.num_tiers = info->node_info->num_tiers;
+	info->link_info.tier = info->node_info->tier;
+	master_tiers = info->node_info->tier;
+
+	/* Walk the stored path hop by hop until it loops back onto itself
+	 * (the last node's "next" points at its own priv_id).
+	 */
+	do {
+		struct msm_bus_inode_info *hop;
+		fabdev = msm_bus_get_fabric_device(GET_FABID(curr));
+		if (!fabdev) {
+			MSM_BUS_ERR("Fabric not found\n");
+			return -ENXIO;
+		}
+		MSM_BUS_DBG("id: %d\n", info->node_info->priv_id);
+
+		/* find next node and index */
+		next_pnode = info->pnode[index].next;
+		curr = GET_NODE(next_pnode);
+		index = GET_INDEX(next_pnode);
+		MSM_BUS_DBG("id:%d, next: %d\n", info->
+			node_info->priv_id, curr);
+
+		/* Get hop */
+		/* check if we are here as gateway, or does the hop belong to
+		 * this fabric */
+		if (IS_NODE(curr))
+			hop = fabdev->algo->find_node(fabdev, curr);
+		else
+			hop = fabdev->algo->find_gw_node(fabdev, curr);
+		if (!hop) {
+			MSM_BUS_ERR("Null Info found for hop\n");
+			return -ENXIO;
+		}
+
+		hop->link_info.sel_bw = &hop->link_info.bw[ctx];
+		hop->link_info.sel_clk = &hop->link_info.clk[ctx];
+		*hop->link_info.sel_bw += add_bw;
+
+		hop->pnode[index].sel_bw = &hop->pnode[index].bw[ctx];
+		hop->pnode[index].sel_clk = &hop->pnode[index].clk[ctx &
+			cl_active_flag];
+
+		if (!hop->node_info->buswidth) {
+			MSM_BUS_WARN("No bus width found. Using default\n");
+			hop->node_info->buswidth = 8;
+		}
+		*hop->pnode[index].sel_clk = BW_TO_CLK_FREQ_HZ(hop->node_info->
+			buswidth, req_clk);
+		*hop->pnode[index].sel_bw += add_bw;
+		MSM_BUS_DBG("fabric: %d slave: %d, slave-width: %d info: %d\n",
+			fabdev->id, hop->node_info->priv_id, hop->node_info->
+			buswidth, info->node_info->priv_id);
+		/* Update Bandwidth */
+		fabdev->algo->update_bw(fabdev, hop, info, add_bw,
+			master_tiers, ctx);
+		bwsum = *hop->link_info.sel_bw;
+		/* Update Fabric clocks */
+		curr_clk_hz = BW_TO_CLK_FREQ_HZ(hop->node_info->buswidth,
+			curr_clk);
+		req_clk_hz = BW_TO_CLK_FREQ_HZ(hop->node_info->buswidth,
+			req_clk);
+		bwsum_hz = BW_TO_CLK_FREQ_HZ(hop->node_info->buswidth,
+			bwsum);
+		/* Account for multiple channels if any */
+		if (hop->node_info->num_sports > 1)
+			bwsum_hz = msm_bus_div64(hop->node_info->num_sports,
+				bwsum_hz);
+		MSM_BUS_DBG("AXI: Hop: %d, ports: %d, bwsum_hz: %llu\n",
+			hop->node_info->id, hop->node_info->num_sports,
+			bwsum_hz);
+		MSM_BUS_DBG("up-clk: curr_hz: %llu, req_hz: %llu, bw_hz %llu\n",
+			curr_clk, req_clk, bwsum_hz);
+		ret = fabdev->algo->update_clks(fabdev, hop, index,
+			curr_clk_hz, req_clk_hz, bwsum_hz, SEL_FAB_CLK,
+			ctx, cl_active_flag);
+		if (ret)
+			MSM_BUS_WARN("Failed to update clk\n");
+		info = hop;
+	} while (GET_NODE(info->pnode[index].next) != info->node_info->priv_id);
+
+	/* Update BW, clk after exiting the loop for the last one */
+	/* NOTE(review): info is always the last non-NULL hop here, so this
+	 * check can never fire — harmless, but dead code. */
+	if (!info) {
+		MSM_BUS_ERR("Cannot find node info!\n");
+		return -ENXIO;
+	}
+
+	/* Update slave clocks */
+	ret = fabdev->algo->update_clks(fabdev, info, index, curr_clk_hz,
+		req_clk_hz, bwsum_hz, SEL_SLAVE_CLK, ctx, cl_active_flag);
+	if (ret)
+		MSM_BUS_ERR("Failed to update clk\n");
+
+	/* Recompute NR limits when an NR/RT source voted in active context. */
+	if ((ctx == cl_active_flag) &&
+		((src_info->node_info->nr_lim || src_info->node_info->rt_mas)))
+		setup_nr_limits(curr, pnode);
+
+	/* If freq is going down , apply the changes now before
+	 * we commit clk data.
+	 */
+	if ((req_clk < curr_clk) || (req_bw < curr_bw))
+		bus_for_each_dev(&msm_bus_type, NULL, NULL,
+			msm_bus_commit_limiter);
+	return ret;
+}
+
+/**
+ * msm_bus_commit_fn() - Commits the data for fabric to rpm
+ * @dev: fabric device
+ * @data: NULL
+ */
+/* bus_for_each_dev() callback: flush one fabric's pending state to RPM. */
+static int msm_bus_commit_fn(struct device *dev, void *data)
+{
+	struct msm_bus_fabric_device *fabdev = to_msm_bus_fabric_device(dev);
+
+	MSM_BUS_DBG("Committing: fabid: %d\n", fabdev->id);
+	return fabdev->algo->commit(fabdev);
+}
+
+/* Register a legacy bus-scaling client described by @pdata. Resolves and
+ * caches the first-hop path for every vector. Returns an opaque handle
+ * (the client pointer) on success, 0 on failure.
+ */
+static uint32_t register_client_legacy(struct msm_bus_scale_pdata *pdata)
+{
+	struct msm_bus_client *client = NULL;
+	int i;
+	int src, dest, nfab;
+	struct msm_bus_fabric_device *deffab;
+
+	deffab = msm_bus_get_fabric_device(MSM_BUS_FAB_DEFAULT);
+	if (!deffab) {
+		MSM_BUS_ERR("Error finding default fabric\n");
+		return 0;
+	}
+
+	nfab = msm_bus_get_num_fab();
+	if (nfab < deffab->board_algo->board_nfab) {
+		MSM_BUS_ERR("Can't register client!\n"
+				"Num of fabrics up: %d\n",
+				nfab);
+		return 0;
+	}
+
+	/* Fix: validate pdata for ERR/NULL *before* dereferencing it —
+	 * the old order read pdata->usecase on an IS_ERR() pointer. */
+	if (IS_ERR_OR_NULL(pdata) || (pdata->usecase->num_paths == 0)) {
+		MSM_BUS_ERR("Cannot register client with null data\n");
+		return 0;
+	}
+
+	client = kzalloc(sizeof(struct msm_bus_client), GFP_KERNEL);
+	if (!client) {
+		MSM_BUS_ERR("Error allocating client\n");
+		return 0;
+	}
+
+	mutex_lock(&msm_bus_lock);
+	client->pdata = pdata;
+	client->curr = -1;
+	for (i = 0; i < pdata->usecase->num_paths; i++) {
+		int *pnode;
+		struct msm_bus_fabric_device *srcfab;
+		pnode = krealloc(client->src_pnode, ((i + 1) * sizeof(int)),
+			GFP_KERNEL);
+		if (ZERO_OR_NULL_PTR(pnode)) {
+			MSM_BUS_ERR("Invalid Pnode ptr!\n");
+			continue;
+		} else
+			client->src_pnode = pnode;
+
+		if (!IS_MASTER_VALID(pdata->usecase->vectors[i].src)) {
+			MSM_BUS_ERR("Invalid Master ID %d in request!\n",
+				pdata->usecase->vectors[i].src);
+			goto err;
+		}
+
+		if (!IS_SLAVE_VALID(pdata->usecase->vectors[i].dst)) {
+			MSM_BUS_ERR("Invalid Slave ID %d in request!\n",
+				pdata->usecase->vectors[i].dst);
+			goto err;
+		}
+
+		src = msm_bus_board_get_iid(pdata->usecase->vectors[i].src);
+		if (src == -ENXIO) {
+			MSM_BUS_ERR("Master %d not supported. Client cannot be"
+				" registered\n",
+				pdata->usecase->vectors[i].src);
+			goto err;
+		}
+		dest = msm_bus_board_get_iid(pdata->usecase->vectors[i].dst);
+		if (dest == -ENXIO) {
+			MSM_BUS_ERR("Slave %d not supported. Client cannot be"
+				" registered\n",
+				pdata->usecase->vectors[i].dst);
+			goto err;
+		}
+		srcfab = msm_bus_get_fabric_device(GET_FABID(src));
+		if (!srcfab) {
+			MSM_BUS_ERR("Fabric not found\n");
+			goto err;
+		}
+
+		srcfab->visited = true;
+		pnode[i] = getpath(src, dest);
+		bus_for_each_dev(&msm_bus_type, NULL, NULL, clearvisitedflag);
+		if (pnode[i] == -ENXIO) {
+			MSM_BUS_ERR("Cannot register client now! Try again!\n");
+			goto err;
+		}
+	}
+	msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_REGISTER,
+		(uint32_t)client);
+	mutex_unlock(&msm_bus_lock);
+	/* NOTE(review): casting a pointer to uint32_t truncates on 64-bit
+	 * builds; kept for ABI compatibility with existing callers. */
+	MSM_BUS_DBG("ret: %u num_paths: %d\n", (uint32_t)client,
+		pdata->usecase->num_paths);
+	return (uint32_t)(client);
+err:
+	kfree(client->src_pnode);
+	kfree(client);
+	mutex_unlock(&msm_bus_lock);
+	return 0;
+}
+
+/* Switch client @cl to usecase @index: re-vote bandwidth/clock along every
+ * stored path in both dual and active contexts, then commit to RPM.
+ * Returns 0 on success or a negative errno.
+ */
+static int update_request_legacy(uint32_t cl, unsigned index)
+{
+	int i, ret = 0;
+	struct msm_bus_scale_pdata *pdata;
+	int pnode, src = 0, curr, ctx;
+	uint64_t req_clk = 0, req_bw = 0, curr_clk = 0, curr_bw = 0;
+	struct msm_bus_client *client = (struct msm_bus_client *)cl;
+	if (IS_ERR_OR_NULL(client)) {
+		MSM_BUS_ERR("msm_bus_scale_client update req error %d\n",
+				(uint32_t)client);
+		return -ENXIO;
+	}
+
+	mutex_lock(&msm_bus_lock);
+	/* Already at the requested usecase: nothing to do. */
+	if (client->curr == index)
+		goto err;
+
+	curr = client->curr;
+	pdata = client->pdata;
+	if (!pdata) {
+		MSM_BUS_ERR("Null pdata passed to update-request\n");
+		ret = -ENXIO;
+		goto err;
+	}
+
+	if (index >= pdata->num_usecases) {
+		MSM_BUS_ERR("Client %u passed invalid index: %d\n",
+			(uint32_t)client, index);
+		ret = -ENXIO;
+		goto err;
+	}
+
+	MSM_BUS_DBG("cl: %u index: %d curr: %d num_paths: %d\n",
+		cl, index, client->curr, client->pdata->usecase->num_paths);
+
+	for (i = 0; i < pdata->usecase->num_paths; i++) {
+		src = msm_bus_board_get_iid(client->pdata->usecase[index].
+			vectors[i].src);
+		if (src == -ENXIO) {
+			MSM_BUS_ERR("Master %d not supported. Request cannot"
+				" be updated\n", client->pdata->usecase->
+				vectors[i].src);
+			/* Fix: propagate failure — this path returned 0 */
+			ret = -ENXIO;
+			goto err;
+		}
+
+		/* NOTE(review): a bad slave is only logged, not fatal —
+		 * presumably deliberate best-effort; confirm. */
+		if (msm_bus_board_get_iid(client->pdata->usecase[index].
+			vectors[i].dst) == -ENXIO) {
+			MSM_BUS_ERR("Slave %d not supported. Request cannot"
+				" be updated\n", client->pdata->usecase->
+				vectors[i].dst);
+		}
+
+		pnode = client->src_pnode[i];
+		req_clk = client->pdata->usecase[index].vectors[i].ib;
+		req_bw = client->pdata->usecase[index].vectors[i].ab;
+		if (curr < 0) {
+			curr_clk = 0;
+			curr_bw = 0;
+		} else {
+			curr_clk = client->pdata->usecase[curr].vectors[i].ib;
+			curr_bw = client->pdata->usecase[curr].vectors[i].ab;
+			MSM_BUS_DBG("ab: %llu ib: %llu\n", curr_bw, curr_clk);
+		}
+
+		if (!pdata->active_only) {
+			ret = update_path(src, pnode, req_clk, req_bw,
+				curr_clk, curr_bw, 0, pdata->active_only);
+			if (ret) {
+				MSM_BUS_ERR("Update path failed! %d\n", ret);
+				goto err;
+			}
+		}
+
+		ret = update_path(src, pnode, req_clk, req_bw, curr_clk,
+				curr_bw, ACTIVE_CTX, pdata->active_only);
+		if (ret) {
+			MSM_BUS_ERR("Update Path failed! %d\n", ret);
+			goto err;
+		}
+	}
+
+	client->curr = index;
+	ctx = ACTIVE_CTX;
+	msm_bus_dbg_client_data(client->pdata, index, cl);
+	bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_commit_fn);
+
+	/* For NR/RT limited masters, if freq is going up , apply the changes
+	 * after we commit clk data.
+	 */
+	if (is_nr_lim(src) && ((req_clk > curr_clk) || (req_bw > curr_bw)))
+		bus_for_each_dev(&msm_bus_type, NULL, NULL,
+			msm_bus_commit_limiter);
+
+err:
+	mutex_unlock(&msm_bus_lock);
+	return ret;
+}
+
+/* Walk the stored path starting at (@curr, @pnode) and mark every hop's
+ * "next" slot as free (-2), severing the cached route.
+ */
+static int reset_pnodes(int curr, int pnode)
+{
+	struct msm_bus_inode_info *info;
+	struct msm_bus_fabric_device *fabdev;
+	int index, next_pnode;
+	fabdev = msm_bus_get_fabric_device(GET_FABID(curr));
+	if (!fabdev) {
+		MSM_BUS_ERR("Fabric not found for: %d\n",
+				(GET_FABID(curr)));
+		return -ENXIO;
+	}
+
+	index = GET_INDEX(pnode);
+	info = fabdev->algo->find_node(fabdev, curr);
+	if (!info) {
+		MSM_BUS_ERR("Cannot find node info!\n");
+		return -ENXIO;
+	}
+
+	MSM_BUS_DBG("Starting the loop--remove\n");
+	/* Same loop-termination convention as update_path(): the last node's
+	 * "next" points back at its own priv_id. */
+	do {
+		struct msm_bus_inode_info *hop;
+		fabdev = msm_bus_get_fabric_device(GET_FABID(curr));
+		if (!fabdev) {
+			MSM_BUS_ERR("Fabric not found\n");
+			return -ENXIO;
+		}
+
+		/* Capture the link, then mark this slot free (-2). */
+		next_pnode = info->pnode[index].next;
+		info->pnode[index].next = -2;
+		curr = GET_NODE(next_pnode);
+		index = GET_INDEX(next_pnode);
+		if (IS_NODE(curr))
+			hop = fabdev->algo->find_node(fabdev, curr);
+		else
+			hop = fabdev->algo->find_gw_node(fabdev, curr);
+		if (!hop) {
+			MSM_BUS_ERR("Null Info found for hop\n");
+			return -ENXIO;
+		}
+
+		MSM_BUS_DBG("%d[%d] = %d\n", info->node_info->priv_id, index,
+			info->pnode[index].next);
+		MSM_BUS_DBG("num_pnodes: %d: %d\n", info->node_info->priv_id,
+			info->num_pnodes);
+		info = hop;
+	} while (GET_NODE(info->pnode[index].next) != info->node_info->priv_id);
+
+	/* Free the terminal node's slot as well. */
+	info->pnode[index].next = -2;
+	MSM_BUS_DBG("%d[%d] = %d\n", info->node_info->priv_id, index,
+		info->pnode[index].next);
+	MSM_BUS_DBG("num_pnodes: %d: %d\n", info->node_info->priv_id,
+		info->num_pnodes);
+	return 0;
+}
+
+/* Translate a board-level master/slave ID into its fabric-internal ID via
+ * the default fabric's board algorithm. Returns -ENXIO when the default
+ * fabric is not registered.
+ */
+int msm_bus_board_get_iid(int id)
+{
+	struct msm_bus_fabric_device *deffab =
+		msm_bus_get_fabric_device(MSM_BUS_FAB_DEFAULT);
+
+	if (!deffab) {
+		MSM_BUS_ERR("Error finding default fabric\n");
+		return -ENXIO;
+	}
+
+	return deffab->board_algo->get_iid(id);
+}
+
+/* Tear down the cached path of every vector belonging to client @cl. */
+void msm_bus_scale_client_reset_pnodes(uint32_t cl)
+{
+	int i, src, pnode, index;
+	struct msm_bus_client *client = (struct msm_bus_client *)(cl);
+	if (IS_ERR_OR_NULL(client)) {
+		MSM_BUS_ERR("msm_bus_scale_reset_pnodes error\n");
+		return;
+	}
+	/* NOTE(review): index stays 0, so sources always come from
+	 * usecase[0] — presumably paths are identical across usecases;
+	 * confirm against pdata layout. */
+	index = 0;
+	for (i = 0; i < client->pdata->usecase->num_paths; i++) {
+		src = msm_bus_board_get_iid(
+			client->pdata->usecase[index].vectors[i].src);
+		pnode = client->src_pnode[i];
+		MSM_BUS_DBG("(%d, %d)\n", GET_NODE(pnode), GET_INDEX(pnode));
+		reset_pnodes(src, pnode);
+	}
+}
+
+/* Unregister client @cl. If it still holds non-zero votes, warn, zero the
+ * votes, flush a zero request, then restore the vectors so the caller can
+ * re-register later. Finally frees the client and its path cache.
+ */
+static void unregister_client_legacy(uint32_t cl)
+{
+	int i;
+	struct msm_bus_client *client = (struct msm_bus_client *)(cl);
+	bool warn = false;
+	if (IS_ERR_OR_NULL(client))
+		return;
+
+	/* Detect any outstanding ab/ib vote in the baseline usecase. */
+	for (i = 0; i < client->pdata->usecase->num_paths; i++) {
+		if ((client->pdata->usecase[0].vectors[i].ab) ||
+			(client->pdata->usecase[0].vectors[i].ib)) {
+			warn = true;
+			break;
+		}
+	}
+
+	if (warn) {
+		int num_paths = client->pdata->usecase->num_paths;
+		/* VLAs sized by the client's path count (small in practice). */
+		int ab[num_paths], ib[num_paths];
+		WARN(1, "%s called unregister with non-zero vectors\n",
+			client->pdata->name);
+
+		/*
+		 * Save client values and zero them out to
+		 * cleanly unregister
+		 */
+		for (i = 0; i < num_paths; i++) {
+			ab[i] = client->pdata->usecase[0].vectors[i].ab;
+			ib[i] = client->pdata->usecase[0].vectors[i].ib;
+			client->pdata->usecase[0].vectors[i].ab = 0;
+			client->pdata->usecase[0].vectors[i].ib = 0;
+		}
+
+		msm_bus_scale_client_update_request(cl, 0);
+
+		/* Restore client vectors if required for re-registering. */
+		for (i = 0; i < num_paths; i++) {
+			client->pdata->usecase[0].vectors[i].ab = ab[i];
+			client->pdata->usecase[0].vectors[i].ib = ib[i];
+		}
+	} else if (client->curr != 0)
+		msm_bus_scale_client_update_request(cl, 0);
+
+	MSM_BUS_DBG("Unregistering client %d\n", cl);
+	mutex_lock(&msm_bus_lock);
+	msm_bus_scale_client_reset_pnodes(cl);
+	msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_UNREGISTER, cl);
+	mutex_unlock(&msm_bus_lock);
+	kfree(client->src_pnode);
+	kfree(client);
+}
+
+/* Install the legacy arbitration entry points into @arb_ops. */
+void msm_bus_arb_setops_legacy(struct msm_bus_arb_ops *arb_ops)
+{
+	arb_ops->unregister_client = unregister_client_legacy;
+	arb_ops->update_request = update_request_legacy;
+	arb_ops->register_client = register_client_legacy;
+}
+
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c
new file mode 100644
index 0000000000000..324d66663d777
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c
@@ -0,0 +1,1120 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/msm-bus.h>
+#include "msm_bus_core.h"
+#include "msm_bus_adhoc.h"
+#include <trace/events/trace_msm_bus.h>
+
+#define NUM_CL_HANDLES 50
+#define NUM_LNODES 3
+#define MAX_STR_CL 50
+
+/* Set of nodes discovered at one BFS depth during path search. */
+struct bus_search_type {
+	struct list_head link;		/* position in the route list */
+	struct list_head node_list;	/* nodes found at this depth */
+};
+
+/* Table of client handles handed out to users. */
+struct handle_type {
+	int num_entries;
+	struct msm_bus_client **cl_list;
+};
+
+static struct handle_type handle_list;
+/* Scratch lists used while evaluating and applying bus rules. */
+struct list_head input_list;
+struct list_head apply_list;
+
+DEFINE_MUTEX(msm_bus_adhoc_lock);
+
+/* Return true when node @id appears on @black_list. */
+static bool chk_bl_list(struct list_head *black_list, unsigned int id)
+{
+	struct msm_bus_node_device_type *cur;
+
+	list_for_each_entry(cur, black_list, link) {
+		if (id == cur->node_info->id)
+			return true;
+	}
+
+	return false;
+}
+
+/* Fold any nodes left on @edge_list/@traverse_list into a final search
+ * record appended to @route_list, so prune_path() can reset their
+ * is_traversed flags.
+ */
+static void copy_remaining_nodes(struct list_head *edge_list, struct list_head
+	*traverse_list, struct list_head *route_list)
+{
+	struct bus_search_type *search_node;
+
+	if (list_empty(edge_list) && list_empty(traverse_list))
+		return;
+
+	search_node = kzalloc(sizeof(struct bus_search_type), GFP_KERNEL);
+	/* Fix: a failed allocation was dereferenced unconditionally. */
+	if (!search_node)
+		return;
+
+	INIT_LIST_HEAD(&search_node->node_list);
+	list_splice_init(edge_list, traverse_list);
+	list_splice_init(traverse_list, &search_node->node_list);
+	list_add_tail(&search_node->link, route_list);
+}
+
+/*
+ * Duplicate instantiaion from msm_bus_arb.c. Todo there needs to be a
+ * "util" file for these common func/macros.
+ *
+ * */
+/* Divide @bw by @w, rounding any non-zero quotient below 1 up to 1.
+ * Power-of-two divisors are handled with shifts; others use do_div().
+ */
+uint64_t msm_bus_div64(unsigned int w, uint64_t bw)
+{
+	uint64_t *b = &bw;
+
+	/* Non-zero values smaller than the divisor round up to 1. */
+	if ((bw > 0) && (bw < w))
+		return 1;
+
+	switch (w) {
+	case 0:
+		WARN(1, "AXI: Divide by 0 attempted\n");
+		/* fallthrough: after warning, a 0 divisor behaves like 1 */
+	case 1: return bw;
+	case 2: return (bw >> 1);
+	case 4: return (bw >> 2);
+	case 8: return (bw >> 3);
+	case 16: return (bw >> 4);
+	case 32: return (bw >> 5);
+	}
+
+	do_div(*b, w);
+	return *b;
+}
+
+/* bus_find_device() matcher: compare a node's id against *(unsigned int *)id. */
+int msm_bus_device_match_adhoc(struct device *dev, void *id)
+{
+	struct msm_bus_node_device_type *bnode = dev->platform_data;
+
+	if (!bnode)
+		return 0;
+
+	return bnode->node_info->id == *(unsigned int *)id;
+}
+
+/* Allocate (or reuse) a link-node slot on @dev's node and chain it toward
+ * @next_hop, recording @prev_idx as the downstream slot index.
+ * Returns the slot index, or -1 on failure.
+ */
+static int gen_lnode(struct device *dev,
+	int next_hop, int prev_idx)
+{
+	struct link_node *lnode;
+	struct msm_bus_node_device_type *cur_dev = NULL;
+	int lnode_idx = -1;
+
+	if (!dev)
+		goto exit_gen_lnode;
+
+	cur_dev = dev->platform_data;
+	if (!cur_dev) {
+		MSM_BUS_ERR("%s: Null device ptr", __func__);
+		goto exit_gen_lnode;
+	}
+
+	/* First use: allocate an initial batch of NUM_LNODES slots. */
+	if (!cur_dev->num_lnodes) {
+		cur_dev->lnode_list = devm_kzalloc(dev,
+				sizeof(struct link_node) * NUM_LNODES,
+				GFP_KERNEL);
+		if (!cur_dev->lnode_list)
+			goto exit_gen_lnode;
+
+		lnode = cur_dev->lnode_list;
+		cur_dev->num_lnodes = NUM_LNODES;
+		lnode_idx = 0;
+	} else {
+		int i;
+		/* Scan for a free slot before growing the list. */
+		for (i = 0; i < cur_dev->num_lnodes; i++) {
+			if (!cur_dev->lnode_list[i].in_use)
+				break;
+		}
+
+		if (i < cur_dev->num_lnodes) {
+			lnode = &cur_dev->lnode_list[i];
+			lnode_idx = i;
+		} else {
+			/* All slots busy: grow by NUM_LNODES; i (== old
+			 * count) indexes the first slot of the new batch. */
+			struct link_node *realloc_list;
+			size_t cur_size = sizeof(struct link_node) *
+					cur_dev->num_lnodes;
+
+			cur_dev->num_lnodes += NUM_LNODES;
+			realloc_list = msm_bus_realloc_devmem(
+					dev,
+					cur_dev->lnode_list,
+					cur_size,
+					sizeof(struct link_node) *
+					cur_dev->num_lnodes, GFP_KERNEL);
+
+			if (!realloc_list)
+				goto exit_gen_lnode;
+
+			cur_dev->lnode_list = realloc_list;
+			lnode = &cur_dev->lnode_list[i];
+			lnode_idx = i;
+		}
+	}
+
+	lnode->in_use = 1;
+	/* A self-referencing hop terminates the chain. */
+	if (next_hop == cur_dev->node_info->id) {
+		lnode->next = -1;
+		lnode->next_dev = NULL;
+	} else {
+		lnode->next = prev_idx;
+		lnode->next_dev = bus_find_device(&msm_bus_type, NULL,
+					(void *) &next_hop,
+					msm_bus_device_match_adhoc);
+	}
+
+	/* Fresh slot: clear any stale bandwidth votes. */
+	memset(lnode->lnode_ib, 0, sizeof(uint64_t) * NUM_CTX);
+	memset(lnode->lnode_ab, 0, sizeof(uint64_t) * NUM_CTX);
+
+exit_gen_lnode:
+	return lnode_idx;
+}
+
+/* Release link-node slot @lnode_idx on @cur_dev. A -1 index is a no-op.
+ * Returns 0 on success, -ENODEV on a NULL device or out-of-range index.
+ */
+static int remove_lnode(struct msm_bus_node_device_type *cur_dev,
+				int lnode_idx)
+{
+	if (!cur_dev) {
+		MSM_BUS_ERR("%s: Null device ptr", __func__);
+		return -ENODEV;
+	}
+
+	if (lnode_idx == -1)
+		return 0;
+
+	if (!cur_dev->num_lnodes ||
+			(lnode_idx > (cur_dev->num_lnodes - 1))) {
+		MSM_BUS_ERR("%s: Invalid Idx %d, num_lnodes %d",
+			__func__, lnode_idx, cur_dev->num_lnodes);
+		return -ENODEV;
+	}
+
+	cur_dev->lnode_list[lnode_idx].next = -1;
+	cur_dev->lnode_list[lnode_idx].next_dev = NULL;
+	cur_dev->lnode_list[lnode_idx].in_use = 0;
+
+	return 0;
+}
+
+/* Walk @route_list backwards from @dest to reconstruct the found path,
+ * creating a link node at each hop, then reset all traversal state and
+ * drain @black_list. Returns the first-hop link-node id, or -1.
+ */
+static int prune_path(struct list_head *route_list, int dest, int src,
+				struct list_head *black_list, int found)
+{
+	struct bus_search_type *search_node, *temp_search_node;
+	struct msm_bus_node_device_type *bus_node;
+	struct list_head *bl_list;
+	struct list_head *temp_bl_list;
+	int search_dev_id = dest;
+	struct device *dest_dev = bus_find_device(&msm_bus_type, NULL,
+					(void *) &dest,
+					msm_bus_device_match_adhoc);
+	int lnode_hop = -1;
+
+	if (!found)
+		goto reset_links;
+
+	if (!dest_dev) {
+		MSM_BUS_ERR("%s: Can't find dest dev %d", __func__, dest);
+		goto exit_prune_path;
+	}
+
+	/* Terminal link node on the destination itself. */
+	lnode_hop = gen_lnode(dest_dev, search_dev_id, lnode_hop);
+
+	/* From the deepest BFS level back to the source, find at each level
+	 * the node that connects to the current search target. */
+	list_for_each_entry_reverse(search_node, route_list, link) {
+		list_for_each_entry(bus_node, &search_node->node_list, link) {
+			unsigned int i;
+			for (i = 0; i < bus_node->node_info->num_connections;
+									i++) {
+				if (bus_node->node_info->connections[i] ==
+								search_dev_id) {
+					dest_dev = bus_find_device(
+						&msm_bus_type,
+						NULL,
+						(void *)
+						&bus_node->node_info->
+						id,
+						msm_bus_device_match_adhoc);
+
+					if (!dest_dev) {
+						lnode_hop = -1;
+						goto reset_links;
+					}
+
+					lnode_hop = gen_lnode(dest_dev,
+							search_dev_id,
+							lnode_hop);
+					search_dev_id =
+						bus_node->node_info->id;
+					break;
+				}
+			}
+		}
+	}
+reset_links:
+	/* Clear is_traversed on every visited node and free the records. */
+	list_for_each_entry_safe(search_node, temp_search_node, route_list,
+									link) {
+		list_for_each_entry(bus_node, &search_node->node_list,
+									link)
+			bus_node->node_info->is_traversed = false;
+
+		list_del(&search_node->link);
+		kfree(search_node);
+	}
+
+	/* Drain the black list (entries are owned elsewhere; just unlink). */
+	list_for_each_safe(bl_list, temp_bl_list, black_list)
+		list_del(bl_list);
+
+exit_prune_path:
+	return lnode_hop;
+}
+
+/* Append every black-listed connection of @node to @black_list. */
+static void setup_bl_list(struct msm_bus_node_device_type *node,
+		struct list_head *black_list)
+{
+	unsigned int idx;
+
+	for (idx = 0; idx < node->node_info->num_blist; idx++) {
+		struct msm_bus_node_device_type *blocked =
+			node->node_info->black_connections[idx]->platform_data;
+
+		list_add_tail(&blocked->link, black_list);
+	}
+}
+
+/* Breadth-first search from @src to @dest over the bus topology, honouring
+ * per-node black lists. On success the path is materialised as link nodes
+ * and the first-hop link-node id is returned; -1 on failure.
+ */
+static int getpath(int src, int dest)
+{
+	struct list_head traverse_list;	/* nodes at the current BFS depth */
+	struct list_head edge_list;	/* nodes for the next depth */
+	struct list_head route_list;	/* per-depth records for prune_path */
+	struct list_head black_list;	/* nodes excluded from this search */
+	struct device *src_dev = bus_find_device(&msm_bus_type, NULL,
+					(void *) &src,
+					msm_bus_device_match_adhoc);
+	struct msm_bus_node_device_type *src_node;
+	struct bus_search_type *search_node;
+	int found = 0;
+	int depth_index = 0;
+	int first_hop = -1;
+
+	INIT_LIST_HEAD(&traverse_list);
+	INIT_LIST_HEAD(&edge_list);
+	INIT_LIST_HEAD(&route_list);
+	INIT_LIST_HEAD(&black_list);
+
+	if (!src_dev) {
+		MSM_BUS_ERR("%s: Cannot locate src dev %d", __func__, src);
+		goto exit_getpath;
+	}
+
+	src_node = src_dev->platform_data;
+	if (!src_node) {
+		MSM_BUS_ERR("%s:Fatal, Source dev %d not found", __func__, src);
+		goto exit_getpath;
+	}
+	list_add_tail(&src_node->link, &traverse_list);
+
+	while ((!found && !list_empty(&traverse_list))) {
+		struct msm_bus_node_device_type *bus_node = NULL;
+		/* Locate dest_id in the traverse list */
+		list_for_each_entry(bus_node, &traverse_list, link) {
+			if (bus_node->node_info->id == dest) {
+				found = 1;
+				break;
+			}
+		}
+
+		if (!found) {
+			unsigned int i;
+			/* Setup the new edge list */
+			list_for_each_entry(bus_node, &traverse_list, link) {
+				/* Setup list of black-listed nodes */
+				setup_bl_list(bus_node, &black_list);
+
+				for (i = 0; i < bus_node->node_info->
+						num_connections; i++) {
+					bool skip;
+					struct msm_bus_node_device_type
+							*node_conn;
+					node_conn = bus_node->node_info->
+						dev_connections[i]->
+						platform_data;
+					/* A node seen twice means a cycle. */
+					if (node_conn->node_info->
+							is_traversed) {
+						MSM_BUS_ERR("Circ Path %d\n",
+						node_conn->node_info->id);
+						goto reset_traversed;
+					}
+					skip = chk_bl_list(&black_list,
+							bus_node->node_info->
+							connections[i]);
+					if (!skip) {
+						list_add_tail(&node_conn->link,
+							&edge_list);
+						node_conn->node_info->
+							is_traversed = true;
+					}
+				}
+			}
+
+			/* Keep tabs of the previous search list */
+			/* NOTE(review): kzalloc result is used unchecked —
+			 * an OOM here would oops; consider a NULL check. */
+			search_node = kzalloc(sizeof(struct bus_search_type),
+					 GFP_KERNEL);
+			INIT_LIST_HEAD(&search_node->node_list);
+			list_splice_init(&traverse_list,
+					&search_node->node_list);
+			/* Add the previous search list to a route list */
+			list_add_tail(&search_node->link, &route_list);
+			/* Advancing the list depth */
+			depth_index++;
+			list_splice_init(&edge_list, &traverse_list);
+		}
+	}
+reset_traversed:
+	copy_remaining_nodes(&edge_list, &traverse_list, &route_list);
+	first_hop = prune_path(&route_list, dest, src, &black_list, found);
+
+exit_getpath:
+	return first_hop;
+}
+
+/* Aggregate the votes on @bus_dev for context @ctx into a clock request:
+ * Freq_hz = max((sum(ab) * util_fact)/num_chan, max(ib)/vrail_comp)
+ *           / bus-width.
+ */
+static uint64_t arbitrate_bus_req(struct msm_bus_node_device_type *bus_dev,
+				int ctx)
+{
+	int i;
+	uint64_t max_ib = 0;
+	uint64_t sum_ab = 0;
+	uint64_t bw_max_hz;
+	struct msm_bus_node_device_type *fab_dev = NULL;
+
+	/* Find max ib */
+	for (i = 0; i < bus_dev->num_lnodes; i++) {
+		max_ib = max(max_ib, bus_dev->lnode_list[i].lnode_ib[ctx]);
+		sum_ab += bus_dev->lnode_list[i].lnode_ab[ctx];
+	}
+	/*
+	 * Account for Util factor and vrail comp. The new aggregation
+	 * formula is:
+	 * Freq_hz = max((sum(ab) * util_fact)/num_chan, max(ib)/vrail_comp)
+	 * / bus-width
+	 * util_fact and vrail comp are obtained from fabric's dts properties.
+	 * They default to 100 if absent.
+	 */
+	fab_dev = bus_dev->node_info->bus_device->platform_data;
+
+	/* Don't do this for virtual fabrics */
+	if (fab_dev && fab_dev->fabdev) {
+		sum_ab *= fab_dev->fabdev->util_fact;
+		sum_ab = msm_bus_div64(100, sum_ab);
+		max_ib *= 100;
+		max_ib = msm_bus_div64(fab_dev->fabdev->vrail_comp, max_ib);
+	}
+
+	/* Account for multiple channels if any */
+	if (bus_dev->node_info->num_qports > 1)
+		sum_ab = msm_bus_div64(bus_dev->node_info->num_qports,
+					sum_ab);
+
+	if (!bus_dev->node_info->buswidth) {
+		MSM_BUS_WARN("No bus width found for %d. Using default\n",
+					bus_dev->node_info->id);
+		bus_dev->node_info->buswidth = 8;
+	}
+
+	/* Convert the winning bandwidth into a clock rate. */
+	bw_max_hz = max(max_ib, sum_ab);
+	bw_max_hz = msm_bus_div64(bus_dev->node_info->buswidth,
+					bw_max_hz);
+
+	return bw_max_hz;
+}
+
+static void del_inp_list(struct list_head *list)
+{
+ struct rule_update_path_info *rule_node;
+ struct rule_update_path_info *rule_node_tmp;
+
+ list_for_each_entry_safe(rule_node, rule_node_tmp, list, link)
+ list_del(&rule_node->link);
+}
+
+static void del_op_list(struct list_head *list)
+{
+ struct rule_apply_rcm_info *rule;
+ struct rule_apply_rcm_info *rule_tmp;
+
+ list_for_each_entry_safe(rule, rule_tmp, list, link)
+ list_del(&rule->link);
+}
+
+static int msm_bus_apply_rules(struct list_head *list, bool after_clk_commit)
+{
+ struct rule_apply_rcm_info *rule;
+ struct device *dev = NULL;
+ struct msm_bus_node_device_type *dev_info = NULL;
+ int ret = 0;
+ bool throttle_en = false;
+
+ list_for_each_entry(rule, list, link) {
+ if (!rule)
+ continue;
+
+ if (rule && (rule->after_clk_commit != after_clk_commit))
+ continue;
+
+ dev = bus_find_device(&msm_bus_type, NULL,
+ (void *) &rule->id,
+ msm_bus_device_match_adhoc);
+
+ if (!dev) {
+ MSM_BUS_ERR("Can't find dev node for %d", rule->id);
+ continue;
+ }
+ dev_info = dev->platform_data;
+
+ throttle_en = ((rule->throttle == THROTTLE_ON) ? true : false);
+ ret = msm_bus_enable_limiter(dev_info, throttle_en,
+ rule->lim_bw);
+ if (ret)
+ MSM_BUS_ERR("Failed to set limiter for %d", rule->id);
+ }
+
+ return ret;
+}
+
+static uint64_t get_node_aggab(struct msm_bus_node_device_type *bus_dev)
+{
+ int i;
+ int ctx;
+ uint64_t max_agg_ab = 0;
+ uint64_t agg_ab = 0;
+
+ for (ctx = 0; ctx < NUM_CTX; ctx++) {
+ for (i = 0; i < bus_dev->num_lnodes; i++)
+ agg_ab += bus_dev->lnode_list[i].lnode_ab[ctx];
+
+ if (bus_dev->node_info->num_qports > 1)
+ agg_ab = msm_bus_div64(bus_dev->node_info->num_qports,
+ agg_ab);
+
+ max_agg_ab = max(max_agg_ab, agg_ab);
+ }
+
+ return max_agg_ab;
+}
+
+static uint64_t get_node_ib(struct msm_bus_node_device_type *bus_dev)
+{
+ int i;
+ int ctx;
+ uint64_t max_ib = 0;
+
+ for (ctx = 0; ctx < NUM_CTX; ctx++) {
+ for (i = 0; i < bus_dev->num_lnodes; i++)
+ max_ib = max(max_ib,
+ bus_dev->lnode_list[i].lnode_ib[ctx]);
+ }
+ return max_ib;
+}
+
+static int update_path(int src, int dest, uint64_t req_ib, uint64_t req_bw,
+ uint64_t cur_ib, uint64_t cur_bw, int src_idx, int ctx)
+{
+ struct device *src_dev = NULL;
+ struct device *next_dev = NULL;
+ struct link_node *lnode = NULL;
+ struct msm_bus_node_device_type *dev_info = NULL;
+ int curr_idx;
+ int ret = 0;
+ int *dirty_nodes = NULL;
+ int num_dirty = 0;
+ struct rule_update_path_info *rule_node;
+ bool rules_registered = msm_rule_are_rules_registered();
+
+ src_dev = bus_find_device(&msm_bus_type, NULL,
+ (void *) &src,
+ msm_bus_device_match_adhoc);
+
+ if (!src_dev) {
+ MSM_BUS_ERR("%s: Can't find source device %d", __func__, src);
+ ret = -ENODEV;
+ goto exit_update_path;
+ }
+
+ next_dev = src_dev;
+
+ if (src_idx < 0) {
+ MSM_BUS_ERR("%s: Invalid lnode idx %d", __func__, src_idx);
+ ret = -ENXIO;
+ goto exit_update_path;
+ }
+ curr_idx = src_idx;
+
+ INIT_LIST_HEAD(&input_list);
+ INIT_LIST_HEAD(&apply_list);
+
+ while (next_dev) {
+ dev_info = next_dev->platform_data;
+
+ if (curr_idx >= dev_info->num_lnodes) {
+ MSM_BUS_ERR("%s: Invalid lnode Idx %d num lnodes %d",
+ __func__, curr_idx, dev_info->num_lnodes);
+ ret = -ENXIO;
+ goto exit_update_path;
+ }
+
+ lnode = &dev_info->lnode_list[curr_idx];
+ lnode->lnode_ib[ctx] = req_ib;
+ lnode->lnode_ab[ctx] = req_bw;
+
+ dev_info->cur_clk_hz[ctx] = arbitrate_bus_req(dev_info, ctx);
+
+		/* Start updating the clocks at the first hop.
+		 * It's OK to figure out the aggregated
+		 * request at this node.
+		 */
+ if (src_dev != next_dev) {
+ ret = msm_bus_update_clks(dev_info, ctx, &dirty_nodes,
+ &num_dirty);
+ if (ret) {
+ MSM_BUS_ERR("%s: Failed to update clks dev %d",
+ __func__, dev_info->node_info->id);
+ goto exit_update_path;
+ }
+ }
+
+ ret = msm_bus_update_bw(dev_info, ctx, req_bw, &dirty_nodes,
+ &num_dirty);
+ if (ret) {
+ MSM_BUS_ERR("%s: Failed to update bw dev %d",
+ __func__, dev_info->node_info->id);
+ goto exit_update_path;
+ }
+
+ if (rules_registered) {
+ rule_node = &dev_info->node_info->rule;
+ rule_node->id = dev_info->node_info->id;
+ rule_node->ib = get_node_ib(dev_info);
+ rule_node->ab = get_node_aggab(dev_info);
+ rule_node->clk = max(dev_info->cur_clk_hz[ACTIVE_CTX],
+ dev_info->cur_clk_hz[DUAL_CTX]);
+ list_add_tail(&rule_node->link, &input_list);
+ }
+
+ next_dev = lnode->next_dev;
+ curr_idx = lnode->next;
+ }
+
+ if (rules_registered) {
+ msm_rules_update_path(&input_list, &apply_list);
+ msm_bus_apply_rules(&apply_list, false);
+ }
+
+ msm_bus_commit_data(dirty_nodes, ctx, num_dirty);
+
+ if (rules_registered) {
+ msm_bus_apply_rules(&apply_list, true);
+ del_inp_list(&input_list);
+ del_op_list(&apply_list);
+ }
+exit_update_path:
+ return ret;
+}
+
+static int remove_path(int src, int dst, uint64_t cur_ib, uint64_t cur_ab,
+ int src_idx, int active_only)
+{
+ struct device *src_dev = NULL;
+ struct device *next_dev = NULL;
+ struct link_node *lnode = NULL;
+ struct msm_bus_node_device_type *dev_info = NULL;
+ int ret = 0;
+ int cur_idx = src_idx;
+ int next_idx;
+
+	/* Update the current path to zero out all requests from
+	 * this client on all paths
+	 */
+
+ ret = update_path(src, dst, 0, 0, cur_ib, cur_ab, src_idx,
+ active_only);
+ if (ret) {
+ MSM_BUS_ERR("%s: Error zeroing out path ctx %d",
+ __func__, ACTIVE_CTX);
+ goto exit_remove_path;
+ }
+
+ src_dev = bus_find_device(&msm_bus_type, NULL,
+ (void *) &src,
+ msm_bus_device_match_adhoc);
+ if (!src_dev) {
+ MSM_BUS_ERR("%s: Can't find source device %d", __func__, src);
+ ret = -ENODEV;
+ goto exit_remove_path;
+ }
+
+ next_dev = src_dev;
+
+ while (next_dev) {
+ dev_info = next_dev->platform_data;
+ lnode = &dev_info->lnode_list[cur_idx];
+ next_idx = lnode->next;
+ next_dev = lnode->next_dev;
+ remove_lnode(dev_info, cur_idx);
+ cur_idx = next_idx;
+ }
+
+exit_remove_path:
+ return ret;
+}
+
+static void getpath_debug(int src, int curr, int active_only)
+{
+ struct device *dev_node;
+ struct device *dev_it;
+ unsigned int hop = 1;
+ int idx;
+ struct msm_bus_node_device_type *devinfo;
+ int i;
+
+ dev_node = bus_find_device(&msm_bus_type, NULL,
+ (void *) &src,
+ msm_bus_device_match_adhoc);
+
+ if (!dev_node) {
+ MSM_BUS_ERR("SRC NOT FOUND %d", src);
+ return;
+ }
+
+ idx = curr;
+ devinfo = dev_node->platform_data;
+ dev_it = dev_node;
+
+ MSM_BUS_ERR("Route list Src %d", src);
+ while (dev_it) {
+ struct msm_bus_node_device_type *busdev =
+ devinfo->node_info->bus_device->platform_data;
+
+ MSM_BUS_ERR("Hop[%d] at Device %d ctx %d", hop,
+ devinfo->node_info->id, active_only);
+
+ for (i = 0; i < NUM_CTX; i++) {
+ MSM_BUS_ERR("dev info sel ib %llu",
+ devinfo->cur_clk_hz[i]);
+ MSM_BUS_ERR("dev info sel ab %llu",
+ devinfo->node_ab.ab[i]);
+ }
+
+ dev_it = devinfo->lnode_list[idx].next_dev;
+ idx = devinfo->lnode_list[idx].next;
+ if (dev_it)
+ devinfo = dev_it->platform_data;
+
+ MSM_BUS_ERR("Bus Device %d", busdev->node_info->id);
+ MSM_BUS_ERR("Bus Clock %llu", busdev->clk[active_only].rate);
+
+ if (idx < 0)
+ break;
+ hop++;
+ }
+}
+
+static void unregister_client_adhoc(uint32_t cl)
+{
+ int i;
+ struct msm_bus_scale_pdata *pdata;
+ int lnode, src, curr, dest;
+ uint64_t cur_clk, cur_bw;
+ struct msm_bus_client *client;
+
+ mutex_lock(&msm_bus_adhoc_lock);
+ if (!cl) {
+ MSM_BUS_ERR("%s: Null cl handle passed unregister\n",
+ __func__);
+ goto exit_unregister_client;
+ }
+ client = handle_list.cl_list[cl];
+ pdata = client->pdata;
+ if (!pdata) {
+ MSM_BUS_ERR("%s: Null pdata passed to unregister\n",
+ __func__);
+ goto exit_unregister_client;
+ }
+
+ curr = client->curr;
+ if (curr >= pdata->num_usecases) {
+ MSM_BUS_ERR("Invalid index Defaulting curr to 0");
+ curr = 0;
+ }
+
+ MSM_BUS_DBG("%s: Unregistering client %p", __func__, client);
+
+ for (i = 0; i < pdata->usecase->num_paths; i++) {
+ src = client->pdata->usecase[curr].vectors[i].src;
+ dest = client->pdata->usecase[curr].vectors[i].dst;
+
+ lnode = client->src_pnode[i];
+ cur_clk = client->pdata->usecase[curr].vectors[i].ib;
+ cur_bw = client->pdata->usecase[curr].vectors[i].ab;
+ remove_path(src, dest, cur_clk, cur_bw, lnode,
+ pdata->active_only);
+ }
+ msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_UNREGISTER, cl);
+ kfree(client->src_pnode);
+ kfree(client);
+ handle_list.cl_list[cl] = NULL;
+exit_unregister_client:
+ mutex_unlock(&msm_bus_adhoc_lock);
+ return;
+}
+
+static int alloc_handle_lst(int size)
+{
+ int ret = 0;
+ struct msm_bus_client **t_cl_list;
+
+ if (!handle_list.num_entries) {
+ t_cl_list = kzalloc(sizeof(struct msm_bus_client *)
+ * NUM_CL_HANDLES, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(t_cl_list)) {
+ ret = -ENOMEM;
+ MSM_BUS_ERR("%s: Failed to allocate handles list",
+ __func__);
+ goto exit_alloc_handle_lst;
+ }
+ handle_list.cl_list = t_cl_list;
+ handle_list.num_entries += NUM_CL_HANDLES;
+ } else {
+ t_cl_list = krealloc(handle_list.cl_list,
+ sizeof(struct msm_bus_client *) *
+ handle_list.num_entries + NUM_CL_HANDLES,
+ GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(t_cl_list)) {
+ ret = -ENOMEM;
+ MSM_BUS_ERR("%s: Failed to allocate handles list",
+ __func__);
+ goto exit_alloc_handle_lst;
+ }
+
+ memset(&handle_list.cl_list[handle_list.num_entries], 0,
+ NUM_CL_HANDLES * sizeof(struct msm_bus_client *));
+ handle_list.num_entries += NUM_CL_HANDLES;
+ handle_list.cl_list = t_cl_list;
+ }
+exit_alloc_handle_lst:
+ return ret;
+}
+
+static uint32_t gen_handle(struct msm_bus_client *client)
+{
+ uint32_t handle = 0;
+ int i;
+ int ret = 0;
+
+ for (i = 0; i < handle_list.num_entries; i++) {
+ if (i && !handle_list.cl_list[i]) {
+ handle = i;
+ break;
+ }
+ }
+
+ if (!handle) {
+ ret = alloc_handle_lst(NUM_CL_HANDLES);
+
+ if (ret) {
+ MSM_BUS_ERR("%s: Failed to allocate handle list",
+ __func__);
+ goto exit_gen_handle;
+ }
+ handle = i + 1;
+ }
+ handle_list.cl_list[handle] = client;
+exit_gen_handle:
+ return handle;
+}
+
+static uint32_t register_client_adhoc(struct msm_bus_scale_pdata *pdata)
+{
+ int src, dest;
+ int i;
+ struct msm_bus_client *client = NULL;
+ int *lnode;
+ uint32_t handle = 0;
+
+ mutex_lock(&msm_bus_adhoc_lock);
+ client = kzalloc(sizeof(struct msm_bus_client), GFP_KERNEL);
+ if (!client) {
+ MSM_BUS_ERR("%s: Error allocating client data", __func__);
+ goto exit_register_client;
+ }
+ client->pdata = pdata;
+
+ lnode = kzalloc(pdata->usecase->num_paths * sizeof(int), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(lnode)) {
+ MSM_BUS_ERR("%s: Error allocating pathnode ptr!", __func__);
+ goto exit_register_client;
+ }
+ client->src_pnode = lnode;
+
+ for (i = 0; i < pdata->usecase->num_paths; i++) {
+ src = pdata->usecase->vectors[i].src;
+ dest = pdata->usecase->vectors[i].dst;
+
+ if ((src < 0) || (dest < 0)) {
+ MSM_BUS_ERR("%s:Invalid src/dst.src %d dest %d",
+ __func__, src, dest);
+ goto exit_register_client;
+ }
+
+ lnode[i] = getpath(src, dest);
+ if (lnode[i] < 0) {
+ MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+ __func__, src, dest);
+ goto exit_register_client;
+ }
+ }
+
+ handle = gen_handle(client);
+ msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_REGISTER,
+ handle);
+ MSM_BUS_DBG("%s:Client handle %d %s", __func__, handle,
+ client->pdata->name);
+exit_register_client:
+ mutex_unlock(&msm_bus_adhoc_lock);
+ return handle;
+}
+
+static int update_request_adhoc(uint32_t cl, unsigned int index)
+{
+ int i, ret = 0;
+ struct msm_bus_scale_pdata *pdata;
+ int lnode, src, curr, dest;
+ uint64_t req_clk, req_bw, curr_clk, curr_bw;
+ struct msm_bus_client *client;
+ const char *test_cl = "Null";
+ bool log_transaction = false;
+
+ mutex_lock(&msm_bus_adhoc_lock);
+
+ if (!cl) {
+ MSM_BUS_ERR("%s: Invalid client handle %d", __func__, cl);
+ ret = -ENXIO;
+ goto exit_update_request;
+ }
+
+ client = handle_list.cl_list[cl];
+ pdata = client->pdata;
+ if (!pdata) {
+ MSM_BUS_ERR("%s: Client data Null.[client didn't register]",
+ __func__);
+ ret = -ENXIO;
+ goto exit_update_request;
+ }
+
+ if (index >= pdata->num_usecases) {
+ MSM_BUS_ERR("Client %u passed invalid index: %d\n",
+ cl, index);
+ ret = -ENXIO;
+ goto exit_update_request;
+ }
+
+ if (client->curr == index) {
+ MSM_BUS_DBG("%s: Not updating client request idx %d unchanged",
+ __func__, index);
+ goto exit_update_request;
+ }
+
+ curr = client->curr;
+ client->curr = index;
+
+ if (!strcmp(test_cl, pdata->name))
+ log_transaction = true;
+
+ MSM_BUS_DBG("%s: cl: %u index: %d curr: %d num_paths: %d\n", __func__,
+ cl, index, client->curr, client->pdata->usecase->num_paths);
+
+ msm_bus_dbg_client_data(client->pdata, index , cl);
+ for (i = 0; i < pdata->usecase->num_paths; i++) {
+ src = client->pdata->usecase[index].vectors[i].src;
+ dest = client->pdata->usecase[index].vectors[i].dst;
+
+ lnode = client->src_pnode[i];
+ req_clk = client->pdata->usecase[index].vectors[i].ib;
+ req_bw = client->pdata->usecase[index].vectors[i].ab;
+ if (curr < 0) {
+ curr_clk = 0;
+ curr_bw = 0;
+ } else {
+ curr_clk = client->pdata->usecase[curr].vectors[i].ib;
+ curr_bw = client->pdata->usecase[curr].vectors[i].ab;
+ MSM_BUS_DBG("%s:ab: %llu ib: %llu\n", __func__,
+ curr_bw, curr_clk);
+ }
+
+ ret = update_path(src, dest, req_clk, req_bw,
+ curr_clk, curr_bw, lnode, pdata->active_only);
+
+ if (ret) {
+ MSM_BUS_ERR("%s: Update path failed! %d ctx %d\n",
+ __func__, ret, ACTIVE_CTX);
+ goto exit_update_request;
+ }
+
+ if (log_transaction)
+ getpath_debug(src, lnode, pdata->active_only);
+ }
+ trace_bus_update_request_end(pdata->name);
+exit_update_request:
+ mutex_unlock(&msm_bus_adhoc_lock);
+ return ret;
+}
+
+static void free_cl_mem(struct msm_bus_client_handle *cl)
+{
+ if (cl) {
+ kfree(cl->name);
+ kfree(cl);
+ cl = NULL;
+ }
+}
+
+static int update_bw_adhoc(struct msm_bus_client_handle *cl, u64 ab, u64 ib)
+{
+ int ret = 0;
+ char *test_cl = "test-client";
+ bool log_transaction = false;
+
+ mutex_lock(&msm_bus_adhoc_lock);
+
+ if (!cl) {
+ MSM_BUS_ERR("%s: Invalid client handle %p", __func__, cl);
+ ret = -ENXIO;
+ goto exit_update_request;
+ }
+
+ if (!strcmp(test_cl, cl->name))
+ log_transaction = true;
+
+ msm_bus_dbg_rec_transaction(cl, ab, ib);
+
+ if ((cl->cur_ib == ib) && (cl->cur_ab == ab)) {
+ MSM_BUS_DBG("%s:no change in request", cl->name);
+ goto exit_update_request;
+ }
+
+ ret = update_path(cl->mas, cl->slv, ib, ab, cl->cur_ib, cl->cur_ab,
+ cl->first_hop, cl->active_only);
+
+ if (ret) {
+ MSM_BUS_ERR("%s: Update path failed! %d active_only %d\n",
+ __func__, ret, cl->active_only);
+ goto exit_update_request;
+ }
+
+ cl->cur_ib = ib;
+ cl->cur_ab = ab;
+
+ if (log_transaction)
+ getpath_debug(cl->mas, cl->first_hop, cl->active_only);
+ trace_bus_update_request_end(cl->name);
+exit_update_request:
+ mutex_unlock(&msm_bus_adhoc_lock);
+
+ return ret;
+}
+
+static void unregister_adhoc(struct msm_bus_client_handle *cl)
+{
+ mutex_lock(&msm_bus_adhoc_lock);
+ if (!cl) {
+ MSM_BUS_ERR("%s: Null cl handle passed unregister\n",
+ __func__);
+ goto exit_unregister_client;
+ }
+
+ MSM_BUS_DBG("%s: Unregistering client %p", __func__, cl);
+
+ remove_path(cl->mas, cl->slv, cl->cur_ib, cl->cur_ab,
+ cl->first_hop, cl->active_only);
+
+ msm_bus_dbg_remove_client(cl);
+ kfree(cl);
+exit_unregister_client:
+ mutex_unlock(&msm_bus_adhoc_lock);
+ return;
+}
+
+
+static struct msm_bus_client_handle*
+register_adhoc(uint32_t mas, uint32_t slv, char *name, bool active_only)
+{
+ struct msm_bus_client_handle *client = NULL;
+ int len = 0;
+
+ mutex_lock(&msm_bus_adhoc_lock);
+
+ if (!(mas && slv && name)) {
+ pr_err("%s: Error: src dst name num_paths are required",
+ __func__);
+ goto exit_register;
+ }
+
+ client = kzalloc(sizeof(struct msm_bus_client_handle), GFP_KERNEL);
+ if (!client) {
+ MSM_BUS_ERR("%s: Error allocating client data", __func__);
+ goto exit_register;
+ }
+
+ len = strnlen(name, MAX_STR_CL);
+ client->name = kzalloc(len, GFP_KERNEL);
+ if (!client->name) {
+ MSM_BUS_ERR("%s: Error allocating client name buf", __func__);
+ free_cl_mem(client);
+ goto exit_register;
+ }
+ strlcpy(client->name, name, MAX_STR_CL);
+ client->active_only = active_only;
+
+ client->mas = mas;
+ client->slv = slv;
+ client->first_hop = getpath(client->mas, client->slv);
+ if (client->first_hop < 0) {
+ MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+ __func__, client->mas, client->slv);
+ free_cl_mem(client);
+ goto exit_register;
+ }
+
+ MSM_BUS_DBG("%s:Client handle %p %s", __func__, client,
+ client->name);
+ msm_bus_dbg_add_client(client);
+exit_register:
+ mutex_unlock(&msm_bus_adhoc_lock);
+ return client;
+}
+/**
+ * msm_bus_arb_setops_adhoc() : Setup the bus arbitration ops
+ * @ arb_ops: pointer to the arb ops.
+ */
+void msm_bus_arb_setops_adhoc(struct msm_bus_arb_ops *arb_ops)
+{
+ arb_ops->register_client = register_client_adhoc;
+ arb_ops->update_request = update_request_adhoc;
+ arb_ops->unregister_client = unregister_client_adhoc;
+
+ arb_ops->register_cl = register_adhoc;
+ arb_ops->unregister = unregister_adhoc;
+ arb_ops->update_bw = update_bw_adhoc;
+}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_bimc.c b/drivers/soc/qcom/msm_bus/msm_bus_bimc.c
new file mode 100644
index 0000000000000..78bfeb09fddc6
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_bimc.c
@@ -0,0 +1,2112 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "AXI: BIMC: %s(): " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/msm-bus-board.h>
+#include "msm_bus_core.h"
+#include "msm_bus_bimc.h"
+#include "msm_bus_adhoc.h"
+#include <trace/events/trace_msm_bus.h>
+
+enum msm_bus_bimc_slave_block {
+ SLAVE_BLOCK_RESERVED = 0,
+ SLAVE_BLOCK_SLAVE_WAY,
+ SLAVE_BLOCK_XPU,
+ SLAVE_BLOCK_ARBITER,
+ SLAVE_BLOCK_SCMO,
+};
+
+enum bke_sw {
+ BKE_OFF = 0,
+ BKE_ON = 1,
+};
+
+/* M_Generic */
+
+#define M_REG_BASE(b) ((b) + 0x00008000)
+
+#define M_COMPONENT_INFO_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000000)
+enum bimc_m_component_info {
+ M_COMPONENT_INFO_RMSK = 0xffffff,
+ M_COMPONENT_INFO_INSTANCE_BMSK = 0xff0000,
+ M_COMPONENT_INFO_INSTANCE_SHFT = 0x10,
+ M_COMPONENT_INFO_SUB_TYPE_BMSK = 0xff00,
+ M_COMPONENT_INFO_SUB_TYPE_SHFT = 0x8,
+ M_COMPONENT_INFO_TYPE_BMSK = 0xff,
+ M_COMPONENT_INFO_TYPE_SHFT = 0x0,
+};
+
+#define M_CONFIG_INFO_0_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000020)
+enum bimc_m_config_info_0 {
+ M_CONFIG_INFO_0_RMSK = 0xff00ffff,
+ M_CONFIG_INFO_0_SYNC_MODE_BMSK = 0xff000000,
+ M_CONFIG_INFO_0_SYNC_MODE_SHFT = 0x18,
+ M_CONFIG_INFO_0_CONNECTION_TYPE_BMSK = 0xff00,
+ M_CONFIG_INFO_0_CONNECTION_TYPE_SHFT = 0x8,
+ M_CONFIG_INFO_0_FUNC_BMSK = 0xff,
+ M_CONFIG_INFO_0_FUNC_SHFT = 0x0,
+};
+
+#define M_CONFIG_INFO_1_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000030)
+enum bimc_m_config_info_1 {
+ M_CONFIG_INFO_1_RMSK = 0xffffffff,
+ M_CONFIG_INFO_1_SWAY_CONNECTIVITY_BMSK = 0xffffffff,
+ M_CONFIG_INFO_1_SWAY_CONNECTIVITY_SHFT = 0x0,
+};
+
+#define M_CONFIG_INFO_2_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000040)
+enum bimc_m_config_info_2 {
+ M_CONFIG_INFO_2_RMSK = 0xffffffff,
+ M_CONFIG_INFO_2_M_DATA_WIDTH_BMSK = 0xffff0000,
+ M_CONFIG_INFO_2_M_DATA_WIDTH_SHFT = 0x10,
+ M_CONFIG_INFO_2_M_TID_WIDTH_BMSK = 0xff00,
+ M_CONFIG_INFO_2_M_TID_WIDTH_SHFT = 0x8,
+ M_CONFIG_INFO_2_M_MID_WIDTH_BMSK = 0xff,
+ M_CONFIG_INFO_2_M_MID_WIDTH_SHFT = 0x0,
+};
+
+#define M_CONFIG_INFO_3_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000050)
+enum bimc_m_config_info_3 {
+ M_CONFIG_INFO_3_RMSK = 0xffffffff,
+ M_CONFIG_INFO_3_RCH_DEPTH_BMSK = 0xff000000,
+ M_CONFIG_INFO_3_RCH_DEPTH_SHFT = 0x18,
+ M_CONFIG_INFO_3_BCH_DEPTH_BMSK = 0xff0000,
+ M_CONFIG_INFO_3_BCH_DEPTH_SHFT = 0x10,
+ M_CONFIG_INFO_3_WCH_DEPTH_BMSK = 0xff00,
+ M_CONFIG_INFO_3_WCH_DEPTH_SHFT = 0x8,
+ M_CONFIG_INFO_3_ACH_DEPTH_BMSK = 0xff,
+ M_CONFIG_INFO_3_ACH_DEPTH_SHFT = 0x0,
+};
+
+#define M_CONFIG_INFO_4_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000060)
+enum bimc_m_config_info_4 {
+ M_CONFIG_INFO_4_RMSK = 0xffff,
+ M_CONFIG_INFO_4_REORDER_BUF_DEPTH_BMSK = 0xff00,
+ M_CONFIG_INFO_4_REORDER_BUF_DEPTH_SHFT = 0x8,
+ M_CONFIG_INFO_4_REORDER_TABLE_DEPTH_BMSK = 0xff,
+ M_CONFIG_INFO_4_REORDER_TABLE_DEPTH_SHFT = 0x0,
+};
+
+#define M_CONFIG_INFO_5_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000070)
+enum bimc_m_config_info_5 {
+ M_CONFIG_INFO_5_RMSK = 0x111,
+ M_CONFIG_INFO_5_MP2ARB_PIPELINE_EN_BMSK = 0x100,
+ M_CONFIG_INFO_5_MP2ARB_PIPELINE_EN_SHFT = 0x8,
+ M_CONFIG_INFO_5_MPBUF_PIPELINE_EN_BMSK = 0x10,
+ M_CONFIG_INFO_5_MPBUF_PIPELINE_EN_SHFT = 0x4,
+ M_CONFIG_INFO_5_M2MP_PIPELINE_EN_BMSK = 0x1,
+ M_CONFIG_INFO_5_M2MP_PIPELINE_EN_SHFT = 0x0,
+};
+
+#define M_INT_STATUS_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000100)
+enum bimc_m_int_status {
+ M_INT_STATUS_RMSK = 0x3,
+};
+
+#define M_INT_CLR_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000108)
+enum bimc_m_int_clr {
+ M_INT_CLR_RMSK = 0x3,
+};
+
+#define M_INT_EN_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x0000010c)
+enum bimc_m_int_en {
+ M_INT_EN_RMSK = 0x3,
+};
+
+#define M_CLK_CTRL_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000200)
+enum bimc_m_clk_ctrl {
+ M_CLK_CTRL_RMSK = 0x3,
+ M_CLK_CTRL_MAS_CLK_GATING_EN_BMSK = 0x2,
+ M_CLK_CTRL_MAS_CLK_GATING_EN_SHFT = 0x1,
+ M_CLK_CTRL_CORE_CLK_GATING_EN_BMSK = 0x1,
+ M_CLK_CTRL_CORE_CLK_GATING_EN_SHFT = 0x0,
+};
+
+#define M_MODE_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000210)
+enum bimc_m_mode {
+ M_MODE_RMSK = 0xf0000011,
+ M_MODE_WR_GATHER_BEATS_BMSK = 0xf0000000,
+ M_MODE_WR_GATHER_BEATS_SHFT = 0x1c,
+ M_MODE_NARROW_WR_BMSK = 0x10,
+ M_MODE_NARROW_WR_SHFT = 0x4,
+ M_MODE_ORDERING_MODEL_BMSK = 0x1,
+ M_MODE_ORDERING_MODEL_SHFT = 0x0,
+};
+
+#define M_PRIOLVL_OVERRIDE_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000230)
+enum bimc_m_priolvl_override {
+ M_PRIOLVL_OVERRIDE_RMSK = 0x301,
+ M_PRIOLVL_OVERRIDE_BMSK = 0x300,
+ M_PRIOLVL_OVERRIDE_SHFT = 0x8,
+ M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_BMSK = 0x1,
+ M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_SHFT = 0x0,
+};
+
+#define M_RD_CMD_OVERRIDE_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000240)
+enum bimc_m_read_command_override {
+ M_RD_CMD_OVERRIDE_RMSK = 0x3071f7f,
+ M_RD_CMD_OVERRIDE_AREQPRIO_BMSK = 0x3000000,
+ M_RD_CMD_OVERRIDE_AREQPRIO_SHFT = 0x18,
+ M_RD_CMD_OVERRIDE_AMEMTYPE_BMSK = 0x70000,
+ M_RD_CMD_OVERRIDE_AMEMTYPE_SHFT = 0x10,
+ M_RD_CMD_OVERRIDE_ATRANSIENT_BMSK = 0x1000,
+ M_RD_CMD_OVERRIDE_ATRANSIENT_SHFT = 0xc,
+ M_RD_CMD_OVERRIDE_ASHARED_BMSK = 0x800,
+ M_RD_CMD_OVERRIDE_ASHARED_SHFT = 0xb,
+ M_RD_CMD_OVERRIDE_AREDIRECT_BMSK = 0x400,
+ M_RD_CMD_OVERRIDE_AREDIRECT_SHFT = 0xa,
+ M_RD_CMD_OVERRIDE_AOOO_BMSK = 0x200,
+ M_RD_CMD_OVERRIDE_AOOO_SHFT = 0x9,
+ M_RD_CMD_OVERRIDE_AINNERSHARED_BMSK = 0x100,
+ M_RD_CMD_OVERRIDE_AINNERSHARED_SHFT = 0x8,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK = 0x40,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT = 0x6,
+ M_RD_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK = 0x20,
+ M_RD_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT = 0x5,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK = 0x10,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT = 0x4,
+ M_RD_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK = 0x8,
+ M_RD_CMD_OVERRIDE_OVERRIDE_ASHARED_SHFT = 0x3,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AREDIRECT_BMSK = 0x4,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AREDIRECT_SHFT = 0x2,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AOOO_BMSK = 0x2,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AOOO_SHFT = 0x1,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_BMSK = 0x1,
+ M_RD_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_SHFT = 0x0,
+};
+
+#define M_WR_CMD_OVERRIDE_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000250)
+enum bimc_m_write_command_override {
+ M_WR_CMD_OVERRIDE_RMSK = 0x3071f7f,
+ M_WR_CMD_OVERRIDE_AREQPRIO_BMSK = 0x3000000,
+ M_WR_CMD_OVERRIDE_AREQPRIO_SHFT = 0x18,
+ M_WR_CMD_OVERRIDE_AMEMTYPE_BMSK = 0x70000,
+ M_WR_CMD_OVERRIDE_AMEMTYPE_SHFT = 0x10,
+ M_WR_CMD_OVERRIDE_ATRANSIENT_BMSK = 0x1000,
+ M_WR_CMD_OVERRIDE_ATRANSIENT_SHFT = 0xc,
+ M_WR_CMD_OVERRIDE_ASHARED_BMSK = 0x800,
+ M_WR_CMD_OVERRIDE_ASHARED_SHFT = 0xb,
+ M_WR_CMD_OVERRIDE_AREDIRECT_BMSK = 0x400,
+ M_WR_CMD_OVERRIDE_AREDIRECT_SHFT = 0xa,
+ M_WR_CMD_OVERRIDE_AOOO_BMSK = 0x200,
+ M_WR_CMD_OVERRIDE_AOOO_SHFT = 0x9,
+ M_WR_CMD_OVERRIDE_AINNERSHARED_BMSK = 0x100,
+ M_WR_CMD_OVERRIDE_AINNERSHARED_SHFT = 0x8,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK = 0x40,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT = 0x6,
+ M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK = 0x20,
+ M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT = 0x5,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK = 0x10,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT = 0x4,
+ M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK = 0x8,
+ M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_SHFT = 0x3,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AREDIRECT_BMSK = 0x4,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AREDIRECT_SHFT = 0x2,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AOOO_BMSK = 0x2,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AOOO_SHFT = 0x1,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_BMSK = 0x1,
+ M_WR_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_SHFT = 0x0,
+};
+
+#define M_BKE_EN_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000300)
+enum bimc_m_bke_en {
+ M_BKE_EN_RMSK = 0x1,
+ M_BKE_EN_EN_BMSK = 0x1,
+ M_BKE_EN_EN_SHFT = 0x0,
+};
+
+/* Grant Period registers */
+#define M_BKE_GP_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000304)
+enum bimc_m_bke_grant_period {
+ M_BKE_GP_RMSK = 0x3ff,
+ M_BKE_GP_GP_BMSK = 0x3ff,
+ M_BKE_GP_GP_SHFT = 0x0,
+};
+
+/* Grant count register.
+ * The Grant count register represents a signed 16-bit
+ * value, range 0-0x7fff
+ */
+#define M_BKE_GC_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000308)
+enum bimc_m_bke_grant_count {
+ M_BKE_GC_RMSK = 0xffff,
+ M_BKE_GC_GC_BMSK = 0xffff,
+ M_BKE_GC_GC_SHFT = 0x0,
+};
+
+/* Threshold High Registers */
+#define M_BKE_THH_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000320)
+enum bimc_m_bke_thresh_high {
+ M_BKE_THH_RMSK = 0xffff,
+ M_BKE_THH_THRESH_BMSK = 0xffff,
+ M_BKE_THH_THRESH_SHFT = 0x0,
+};
+
+/* Threshold Medium Registers */
+#define M_BKE_THM_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000324)
+enum bimc_m_bke_thresh_medium {
+ M_BKE_THM_RMSK = 0xffff,
+ M_BKE_THM_THRESH_BMSK = 0xffff,
+ M_BKE_THM_THRESH_SHFT = 0x0,
+};
+
+/* Threshold Low Registers */
+#define M_BKE_THL_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000328)
+enum bimc_m_bke_thresh_low {
+ M_BKE_THL_RMSK = 0xffff,
+ M_BKE_THL_THRESH_BMSK = 0xffff,
+ M_BKE_THL_THRESH_SHFT = 0x0,
+};
+
+#define M_BKE_HEALTH_0_CONFIG_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000340)
+enum bimc_m_bke_health_0 {
+ M_BKE_HEALTH_0_CONFIG_RMSK = 0x80000303,
+ M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK = 0x80000000,
+ M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_SHFT = 0x1f,
+ M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK = 0x300,
+ M_BKE_HEALTH_0_CONFIG_AREQPRIO_SHFT = 0x8,
+ M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK = 0x3,
+ M_BKE_HEALTH_0_CONFIG_PRIOLVL_SHFT = 0x0,
+};
+
+#define M_BKE_HEALTH_1_CONFIG_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000344)
+enum bimc_m_bke_health_1 {
+ M_BKE_HEALTH_1_CONFIG_RMSK = 0x80000303,
+ M_BKE_HEALTH_1_CONFIG_LIMIT_CMDS_BMSK = 0x80000000,
+ M_BKE_HEALTH_1_CONFIG_LIMIT_CMDS_SHFT = 0x1f,
+ M_BKE_HEALTH_1_CONFIG_AREQPRIO_BMSK = 0x300,
+ M_BKE_HEALTH_1_CONFIG_AREQPRIO_SHFT = 0x8,
+ M_BKE_HEALTH_1_CONFIG_PRIOLVL_BMSK = 0x3,
+ M_BKE_HEALTH_1_CONFIG_PRIOLVL_SHFT = 0x0,
+};
+
+#define M_BKE_HEALTH_2_CONFIG_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000348)
+enum bimc_m_bke_health_2 {
+ M_BKE_HEALTH_2_CONFIG_RMSK = 0x80000303,
+ M_BKE_HEALTH_2_CONFIG_LIMIT_CMDS_BMSK = 0x80000000,
+ M_BKE_HEALTH_2_CONFIG_LIMIT_CMDS_SHFT = 0x1f,
+ M_BKE_HEALTH_2_CONFIG_AREQPRIO_BMSK = 0x300,
+ M_BKE_HEALTH_2_CONFIG_AREQPRIO_SHFT = 0x8,
+ M_BKE_HEALTH_2_CONFIG_PRIOLVL_BMSK = 0x3,
+ M_BKE_HEALTH_2_CONFIG_PRIOLVL_SHFT = 0x0,
+};
+
+#define M_BKE_HEALTH_3_CONFIG_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x0000034c)
+enum bimc_m_bke_health_3 {
+ M_BKE_HEALTH_3_CONFIG_RMSK = 0x303,
+ M_BKE_HEALTH_3_CONFIG_AREQPRIO_BMSK = 0x300,
+ M_BKE_HEALTH_3_CONFIG_AREQPRIO_SHFT = 0x8,
+ M_BKE_HEALTH_3_CONFIG_PRIOLVL_BMSK = 0x3,
+ M_BKE_HEALTH_3_CONFIG_PRIOLVL_SHFT = 0x0,
+};
+
+#define M_BUF_STATUS_ADDR(b, n) \
+ (M_REG_BASE(b) + (0x4000 * (n)) + 0x00000400)
+enum bimc_m_buf_status {
+ M_BUF_STATUS_RMSK = 0xf03f030,
+ M_BUF_STATUS_RCH_DATA_WR_FULL_BMSK = 0x8000000,
+ M_BUF_STATUS_RCH_DATA_WR_FULL_SHFT = 0x1b,
+ M_BUF_STATUS_RCH_DATA_WR_EMPTY_BMSK = 0x4000000,
+ M_BUF_STATUS_RCH_DATA_WR_EMPTY_SHFT = 0x1a,
+ M_BUF_STATUS_RCH_CTRL_WR_FULL_BMSK = 0x2000000,
+ M_BUF_STATUS_RCH_CTRL_WR_FULL_SHFT = 0x19,
+ M_BUF_STATUS_RCH_CTRL_WR_EMPTY_BMSK = 0x1000000,
+ M_BUF_STATUS_RCH_CTRL_WR_EMPTY_SHFT = 0x18,
+ M_BUF_STATUS_BCH_WR_FULL_BMSK = 0x20000,
+ M_BUF_STATUS_BCH_WR_FULL_SHFT = 0x11,
+ M_BUF_STATUS_BCH_WR_EMPTY_BMSK = 0x10000,
+ M_BUF_STATUS_BCH_WR_EMPTY_SHFT = 0x10,
+ M_BUF_STATUS_WCH_DATA_RD_FULL_BMSK = 0x8000,
+ M_BUF_STATUS_WCH_DATA_RD_FULL_SHFT = 0xf,
+ M_BUF_STATUS_WCH_DATA_RD_EMPTY_BMSK = 0x4000,
+ M_BUF_STATUS_WCH_DATA_RD_EMPTY_SHFT = 0xe,
+ M_BUF_STATUS_WCH_CTRL_RD_FULL_BMSK = 0x2000,
+ M_BUF_STATUS_WCH_CTRL_RD_FULL_SHFT = 0xd,
+ M_BUF_STATUS_WCH_CTRL_RD_EMPTY_BMSK = 0x1000,
+ M_BUF_STATUS_WCH_CTRL_RD_EMPTY_SHFT = 0xc,
+ M_BUF_STATUS_ACH_RD_FULL_BMSK = 0x20,
+ M_BUF_STATUS_ACH_RD_FULL_SHFT = 0x5,
+ M_BUF_STATUS_ACH_RD_EMPTY_BMSK = 0x10,
+ M_BUF_STATUS_ACH_RD_EMPTY_SHFT = 0x4,
+};
+/*BIMC Generic */
+
+#define S_REG_BASE(b) ((b) + 0x00048000)
+
+#define S_COMPONENT_INFO_ADDR(b, n) \
+ (S_REG_BASE(b) + (0x8000 * (n)) + 0x00000000)
+enum bimc_s_component_info {
+ S_COMPONENT_INFO_RMSK = 0xffffff,
+ S_COMPONENT_INFO_INSTANCE_BMSK = 0xff0000,
+ S_COMPONENT_INFO_INSTANCE_SHFT = 0x10,
+ S_COMPONENT_INFO_SUB_TYPE_BMSK = 0xff00,
+ S_COMPONENT_INFO_SUB_TYPE_SHFT = 0x8,
+ S_COMPONENT_INFO_TYPE_BMSK = 0xff,
+ S_COMPONENT_INFO_TYPE_SHFT = 0x0,
+};
+
+#define S_HW_INFO_ADDR(b, n) \
+ (S_REG_BASE(b) + (0x80000 * (n)) + 0x00000010)
+enum bimc_s_hw_info {
+ S_HW_INFO_RMSK = 0xffffffff,
+ S_HW_INFO_MAJOR_BMSK = 0xff000000,
+ S_HW_INFO_MAJOR_SHFT = 0x18,
+ S_HW_INFO_BRANCH_BMSK = 0xff0000,
+ S_HW_INFO_BRANCH_SHFT = 0x10,
+ S_HW_INFO_MINOR_BMSK = 0xff00,
+ S_HW_INFO_MINOR_SHFT = 0x8,
+ S_HW_INFO_ECO_BMSK = 0xff,
+ S_HW_INFO_ECO_SHFT = 0x0,
+};
+
+
/* S_SCMO_GENERIC: slave-side DDR controller (SCMO) register window.
 * All registers are one 0x8000-byte slave window apart; (b) is the
 * BIMC base, (n) the slave index.
 */

#define S_SCMO_REG_BASE(b) ((b) + 0x00048000)

/* CONFIG_INFO_0..6: read-only build configuration of the SCMO instance */
#define S_SCMO_CONFIG_INFO_0_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000020)
enum bimc_s_scmo_config_info_0 {
	S_SCMO_CONFIG_INFO_0_RMSK = 0xffffffff,
	S_SCMO_CONFIG_INFO_0_DATA_WIDTH_BMSK = 0xffff0000,
	S_SCMO_CONFIG_INFO_0_DATA_WIDTH_SHFT = 0x10,
	S_SCMO_CONFIG_INFO_0_TID_WIDTH_BMSK = 0xff00,
	S_SCMO_CONFIG_INFO_0_TID_WIDTH_SHFT = 0x8,
	S_SCMO_CONFIG_INFO_0_MID_WIDTH_BMSK = 0xff,
	S_SCMO_CONFIG_INFO_0_MID_WIDTH_SHFT = 0x0,
};

#define S_SCMO_CONFIG_INFO_1_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000030)
enum bimc_s_scmo_config_info_1 {
	S_SCMO_CONFIG_INFO_1_RMSK = 0xffffffff,
	S_SCMO_CONFIG_INFO_1_MPORT_CONNECTIVITY_BMSK = 0xffffffff,
	S_SCMO_CONFIG_INFO_1_MPORT_CONNECTIVITY_SHFT = 0x0,
};

#define S_SCMO_CONFIG_INFO_2_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000040)
enum bimc_s_scmo_config_info_2 {
	S_SCMO_CONFIG_INFO_2_RMSK = 0xff00ff,
	S_SCMO_CONFIG_INFO_2_NUM_GLOBAL_MONS_BMSK = 0xff0000,
	S_SCMO_CONFIG_INFO_2_NUM_GLOBAL_MONS_SHFT = 0x10,
	S_SCMO_CONFIG_INFO_2_VMID_WIDTH_BMSK = 0xff,
	S_SCMO_CONFIG_INFO_2_VMID_WIDTH_SHFT = 0x0,
};

#define S_SCMO_CONFIG_INFO_3_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000050)
enum bimc_s_scmo_config_info_3 {
	S_SCMO_CONFIG_INFO_3_RMSK = 0xffffffff,
	S_SCMO_CONFIG_INFO_3_RCH0_CTRL_DEPTH_BMSK = 0xff000000,
	S_SCMO_CONFIG_INFO_3_RCH0_CTRL_DEPTH_SHFT = 0x18,
	S_SCMO_CONFIG_INFO_3_RCH0_DEPTH_BMSK = 0xff0000,
	S_SCMO_CONFIG_INFO_3_RCH0_DEPTH_SHFT = 0x10,
	S_SCMO_CONFIG_INFO_3_BCH_DEPTH_BMSK = 0xff00,
	S_SCMO_CONFIG_INFO_3_BCH_DEPTH_SHFT = 0x8,
	S_SCMO_CONFIG_INFO_3_WCH_DEPTH_BMSK = 0xff,
	S_SCMO_CONFIG_INFO_3_WCH_DEPTH_SHFT = 0x0,
};

#define S_SCMO_CONFIG_INFO_4_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000060)
enum bimc_s_scmo_config_info_4 {
	S_SCMO_CONFIG_INFO_4_RMSK = 0xffff,
	S_SCMO_CONFIG_INFO_4_RCH1_CTRL_DEPTH_BMSK = 0xff00,
	S_SCMO_CONFIG_INFO_4_RCH1_CTRL_DEPTH_SHFT = 0x8,
	S_SCMO_CONFIG_INFO_4_RCH1_DEPTH_BMSK = 0xff,
	S_SCMO_CONFIG_INFO_4_RCH1_DEPTH_SHFT = 0x0,
};

#define S_SCMO_CONFIG_INFO_5_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000070)
enum bimc_s_scmo_config_info_5 {
	S_SCMO_CONFIG_INFO_5_RMSK = 0xffff,
	S_SCMO_CONFIG_INFO_5_DPE_CQ_DEPTH_BMSK = 0xff00,
	S_SCMO_CONFIG_INFO_5_DPE_CQ_DEPTH_SHFT = 0x8,
	S_SCMO_CONFIG_INFO_5_DDR_BUS_WIDTH_BMSK = 0xff,
	S_SCMO_CONFIG_INFO_5_DDR_BUS_WIDTH_SHFT = 0x0,
};

#define S_SCMO_CONFIG_INFO_6_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000080)
enum bimc_s_scmo_config_info_6 {
	S_SCMO_CONFIG_INFO_6_RMSK = 0x1111,
	S_SCMO_CONFIG_INFO_6_WBUFC_PIPE_BMSK = 0x1000,
	S_SCMO_CONFIG_INFO_6_WBUFC_PIPE_SHFT = 0xc,
	S_SCMO_CONFIG_INFO_6_RDOPT_PIPE_BMSK = 0x100,
	S_SCMO_CONFIG_INFO_6_RDOPT_PIPE_SHFT = 0x8,
	S_SCMO_CONFIG_INFO_6_ACHAN_INTF_PIPE_BMSK = 0x10,
	S_SCMO_CONFIG_INFO_6_ACHAN_INTF_PIPE_SHFT = 0x4,
	S_SCMO_CONFIG_INFO_6_ADDR_DECODE_HT_BMSK = 0x1,
	S_SCMO_CONFIG_INFO_6_ADDR_DECODE_HT_SHFT = 0x0,
};

/* Error interrupt status / clear / enable (single ERR_OCCURED bit) */
#define S_SCMO_INT_STATUS_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000100)
enum bimc_s_scmo_int_status {
	S_SCMO_INT_STATUS_RMSK = 0x1,
	S_SCMO_INT_STATUS_ERR_OCCURED_BMSK = 0x1,
	S_SCMO_INT_STATUS_ERR_OCCURED_SHFT = 0x0,
};

#define S_SCMO_INT_CLR_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000108)
enum bimc_s_scmo_int_clr {
	S_SCMO_INT_CLR_RMSK = 0x1,
	S_SCMO_INT_CLR_IRQ_CLR_BMSK = 0x1,
	S_SCMO_INT_CLR_IRQ_CLR_SHFT = 0x0,
};

#define S_SCMO_INT_EN_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x0000010c)
enum bimc_s_scmo_int_en {
	S_SCMO_INT_EN_RMSK = 0x1,
	S_SCMO_INT_EN_IRQ_EN_BMSK = 0x1,
	S_SCMO_INT_EN_IRQ_EN_SHFT = 0x0,
};

/* Error syndrome: faulting address plus request attributes */
#define S_SCMO_ESYN_ADDR_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000120)
enum bimc_s_scmo_esyn_addr {
	S_SCMO_ESYN_ADDR_RMSK = 0xffffffff,
	S_SCMO_ESYN_ADDR_ESYN_ADDR_ERR_ADDR_BMSK = 0xffffffff,
	S_SCMO_ESYN_ADDR_ESYN_ADDR_ERR_ADDR_SHFT = 0x0,
};

#define S_SCMO_ESYN_APACKET_0_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000128)
enum bimc_s_scmo_esyn_apacket_0 {
	S_SCMO_ESYN_APACKET_0_RMSK = 0xff1fffff,
	S_SCMO_ESYN_APACKET_0_ERR_ATID_BMSK = 0xff000000,
	S_SCMO_ESYN_APACKET_0_ERR_ATID_SHFT = 0x18,
	S_SCMO_ESYN_APACKET_0_ERR_AVMID_BMSK = 0x1f0000,
	S_SCMO_ESYN_APACKET_0_ERR_AVMID_SHFT = 0x10,
	S_SCMO_ESYN_APACKET_0_ERR_AMID_BMSK = 0xffff,
	S_SCMO_ESYN_APACKET_0_ERR_AMID_SHFT = 0x0,
};

#define S_SCMO_ESYN_APACKET_1_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x0000012c)
enum bimc_s_scmo_esyn_apacket_1 {
	S_SCMO_ESYN_APACKET_1_RMSK = 0x10ff117,
	S_SCMO_ESYN_APACKET_1_ERR_CODE_BMSK = 0x1000000,
	S_SCMO_ESYN_APACKET_1_ERR_CODE_SHFT = 0x18,
	S_SCMO_ESYN_APACKET_1_ERR_ALEN_BMSK = 0xf0000,
	S_SCMO_ESYN_APACKET_1_ERR_ALEN_SHFT = 0x10,
	S_SCMO_ESYN_APACKET_1_ERR_ASIZE_BMSK = 0xe000,
	S_SCMO_ESYN_APACKET_1_ERR_ASIZE_SHFT = 0xd,
	S_SCMO_ESYN_APACKET_1_ERR_ABURST_BMSK = 0x1000,
	S_SCMO_ESYN_APACKET_1_ERR_ABURST_SHFT = 0xc,
	S_SCMO_ESYN_APACKET_1_ERR_AEXCLUSIVE_BMSK = 0x100,
	S_SCMO_ESYN_APACKET_1_ERR_AEXCLUSIVE_SHFT = 0x8,
	S_SCMO_ESYN_APACKET_1_ERR_APRONTS_BMSK = 0x10,
	S_SCMO_ESYN_APACKET_1_ERR_APRONTS_SHFT = 0x4,
	S_SCMO_ESYN_APACKET_1_ERR_AOOORD_BMSK = 0x4,
	S_SCMO_ESYN_APACKET_1_ERR_AOOORD_SHFT = 0x2,
	S_SCMO_ESYN_APACKET_1_ERR_AOOOWR_BMSK = 0x2,
	S_SCMO_ESYN_APACKET_1_ERR_AOOOWR_SHFT = 0x1,
	S_SCMO_ESYN_APACKET_1_ERR_AWRITE_BMSK = 0x1,
	S_SCMO_ESYN_APACKET_1_ERR_AWRITE_SHFT = 0x0,
};

/* Per-channel clock gating enables.
 * NOTE(review): RMSK (0xffff1111) covers more bits than the listed
 * fields (0x11111) -- confirm against the IP spec.
 */
#define S_SCMO_CLK_CTRL_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000200)
enum bimc_s_scmo_clk_ctrl {
	S_SCMO_CLK_CTRL_RMSK = 0xffff1111,
	S_SCMO_CLK_CTRL_PEN_CMD_CG_EN_BMSK = 0x10000,
	S_SCMO_CLK_CTRL_PEN_CMD_CG_EN_SHFT = 0x10,
	S_SCMO_CLK_CTRL_RCH_CG_EN_BMSK = 0x1000,
	S_SCMO_CLK_CTRL_RCH_CG_EN_SHFT = 0xc,
	S_SCMO_CLK_CTRL_FLUSH_CG_EN_BMSK = 0x100,
	S_SCMO_CLK_CTRL_FLUSH_CG_EN_SHFT = 0x8,
	S_SCMO_CLK_CTRL_WCH_CG_EN_BMSK = 0x10,
	S_SCMO_CLK_CTRL_WCH_CG_EN_SHFT = 0x4,
	S_SCMO_CLK_CTRL_ACH_CG_EN_BMSK = 0x1,
	S_SCMO_CLK_CTRL_ACH_CG_EN_SHFT = 0x0,
};

/* DDR rank (chip-select) interleave and address decode configuration */
#define S_SCMO_SLV_INTERLEAVE_CFG_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000400)
enum bimc_s_scmo_slv_interleave_cfg {
	S_SCMO_SLV_INTERLEAVE_CFG_RMSK = 0xff,
	S_SCMO_SLV_INTERLEAVE_CFG_INTERLEAVE_CS1_BMSK = 0x10,
	S_SCMO_SLV_INTERLEAVE_CFG_INTERLEAVE_CS1_SHFT = 0x4,
	S_SCMO_SLV_INTERLEAVE_CFG_INTERLEAVE_CS0_BMSK = 0x1,
	S_SCMO_SLV_INTERLEAVE_CFG_INTERLEAVE_CS0_SHFT = 0x0,
};

/* Per-chip-select base address; (o) selects CS0/CS1 (MAXn = 1) */
#define S_SCMO_ADDR_BASE_CSn_ADDR(b, n, o) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000410 + 0x4 * (o))
enum bimc_s_scmo_addr_base_csn {
	S_SCMO_ADDR_BASE_CSn_RMSK = 0xffff,
	S_SCMO_ADDR_BASE_CSn_MAXn = 1,
	S_SCMO_ADDR_BASE_CSn_ADDR_BASE_BMSK = 0xfc,
	S_SCMO_ADDR_BASE_CSn_ADDR_BASE_SHFT = 0x2,
};

/* Per-chip-select DRAM geometry (rank enable, bank/row/column sizes) */
#define S_SCMO_ADDR_MAP_CSn_ADDR(b, n, o) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000420 + 0x4 * (o))
enum bimc_s_scmo_addr_map_csn {
	S_SCMO_ADDR_MAP_CSn_RMSK = 0xffff,
	S_SCMO_ADDR_MAP_CSn_MAXn = 1,
	S_SCMO_ADDR_MAP_CSn_RANK_EN_BMSK = 0x8000,
	S_SCMO_ADDR_MAP_CSn_RANK_EN_SHFT = 0xf,
	S_SCMO_ADDR_MAP_CSn_ADDR_MODE_BMSK = 0x1000,
	S_SCMO_ADDR_MAP_CSn_ADDR_MODE_SHFT = 0xc,
	S_SCMO_ADDR_MAP_CSn_BANK_SIZE_BMSK = 0x100,
	S_SCMO_ADDR_MAP_CSn_BANK_SIZE_SHFT = 0x8,
	S_SCMO_ADDR_MAP_CSn_ROW_SIZE_BMSK = 0x30,
	S_SCMO_ADDR_MAP_CSn_ROW_SIZE_SHFT = 0x4,
	S_SCMO_ADDR_MAP_CSn_COL_SIZE_BMSK = 0x3,
	S_SCMO_ADDR_MAP_CSn_COL_SIZE_SHFT = 0x0,
};
+
/*
 * Per-chip-select address mask; (o) selects CS0/CS1 (MAXn = 1).
 * Fix: the rank offset multiplied 0x4 by the literal (0) instead of
 * the rank argument (o), so the CS1 register aliased CS0's.
 */
#define S_SCMO_ADDR_MASK_CSn_ADDR(b, n, o) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000430 + 0x4 * (o))
enum bimc_s_scmo_addr_mask_csn {
	S_SCMO_ADDR_MASK_CSn_RMSK = 0xffff,
	S_SCMO_ADDR_MASK_CSn_MAXn = 1,
	S_SCMO_ADDR_MASK_CSn_ADDR_MASK_BMSK = 0xfc,
	S_SCMO_ADDR_MASK_CSn_ADDR_MASK_SHFT = 0x2,
};
+
/* SCMO runtime status and buffer/flush configuration registers */
#define S_SCMO_SLV_STATUS_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000450)
enum bimc_s_scmo_slv_status {
	S_SCMO_SLV_STATUS_RMSK = 0xff3,
	S_SCMO_SLV_STATUS_GLOBAL_MONS_IN_USE_BMSK = 0xff0,
	S_SCMO_SLV_STATUS_GLOBAL_MONS_IN_USE_SHFT = 0x4,
	S_SCMO_SLV_STATUS_SLAVE_IDLE_BMSK = 0x3,
	S_SCMO_SLV_STATUS_SLAVE_IDLE_SHFT = 0x0,
};

#define S_SCMO_CMD_BUF_CFG_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000500)
enum bimc_s_scmo_cmd_buf_cfg {
	S_SCMO_CMD_BUF_CFG_RMSK = 0xf1f,
	S_SCMO_CMD_BUF_CFG_CMD_ORDERING_BMSK = 0x300,
	S_SCMO_CMD_BUF_CFG_CMD_ORDERING_SHFT = 0x8,
	S_SCMO_CMD_BUF_CFG_HP_CMD_AREQPRIO_MAP_BMSK = 0x10,
	S_SCMO_CMD_BUF_CFG_HP_CMD_AREQPRIO_MAP_SHFT = 0x4,
	S_SCMO_CMD_BUF_CFG_HP_CMD_Q_DEPTH_BMSK = 0x7,
	S_SCMO_CMD_BUF_CFG_HP_CMD_Q_DEPTH_SHFT = 0x0,
};

/* NOTE(review): macro is named S_SCM_... while its enum constants use
 * the S_SCMO_ prefix -- inconsistent, but renaming would break callers.
 */
#define S_SCM_CMD_BUF_STATUS_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000520)
enum bimc_s_scm_cmd_buf_status {
	S_SCMO_CMD_BUF_STATUS_RMSK = 0x77,
	S_SCMO_CMD_BUF_STATUS_HP_CMD_BUF_ENTRIES_IN_USE_BMSK = 0x70,
	S_SCMO_CMD_BUF_STATUS_HP_CMD_BUF_ENTRIES_IN_USE_SHFT = 0x4,
	S_SCMO_CMD_BUF_STATUS_LP_CMD_BUF_ENTRIES_IN_USE_BMSK = 0x7,
	S_SCMO_CMD_BUF_STATUS_LP_CMD_BUF_ENTRIES_IN_USE_SHFT = 0x0,
};

/* NOTE(review): the RCH_SEL field constants are (mis)named
 * S_SCMO_CMD_BUF_STATUS_RCH_PORTS_* -- left as-is for compatibility.
 */
#define S_SCMO_RCH_SEL_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000540)
enum bimc_s_scmo_rch_sel {
	S_SCMO_RCH_SEL_RMSK = 0xffffffff,
	S_SCMO_CMD_BUF_STATUS_RCH_PORTS_BMSK = 0xffffffff,
	S_SCMO_CMD_BUF_STATUS_RCH_PORTS_SHFT = 0x0,
};

/* Read-channel FIFO back-pressure thresholds */
#define S_SCMO_RCH_BKPR_CFG_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000544)
enum bimc_s_scmo_rch_bkpr_cfg {
	S_SCMO_RCH_BKPR_CFG_RMSK = 0xffffffff,
	S_SCMO_RCH_BKPR_CFG_RCH1_FIFO_BKPR_HI_TH_BMSK = 0x3f000000,
	S_SCMO_RCH_BKPR_CFG_RCH1_FIFO_BKPR_HI_TH_SHFT = 0x18,
	S_SCMO_RCH_BKPR_CFG_RCH1_FIFO_BKPR_LO_TH_BMSK = 0x3f0000,
	S_SCMO_RCH_BKPR_CFG_RCH1_FIFO_BKPR_LO_TH_SHFT = 0x10,
	S_SCMO_RCH_BKPR_CFG_RCH0_FIFO_BKPR_HI_TH_BMSK = 0x3f00,
	S_SCMO_RCH_BKPR_CFG_RCH0_FIFO_BKPR_HI_TH_SHFT = 0x8,
	S_SCMO_RCH_BKPR_CFG_RCH0_FIFO_BKPR_LO_TH_BMSK = 0x3f,
	S_SCMO_RCH_BKPR_CFG_RCH0_FIFO_BKPR_LO_TH_SHFT = 0x0,
};

/* Read-channel FIFO full/empty status */
#define S_SCMO_RCH_STATUS_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000560)
enum bimc_s_scmo_rch_status {
	S_SCMO_RCH_STATUS_RMSK = 0x33333,
	S_SCMO_RCH_STATUS_PRQ_FIFO_FULL_BMSK = 0x20000,
	S_SCMO_RCH_STATUS_PRQ_FIFO_FULL_SHFT = 0x11,
	S_SCMO_RCH_STATUS_PRQ_FIFO_EMPTY_BMSK = 0x10000,
	S_SCMO_RCH_STATUS_PRQ_FIFO_EMPTY_SHFT = 0x10,
	S_SCMO_RCH_STATUS_RCH1_QUAL_FIFO_FULL_BMSK = 0x2000,
	S_SCMO_RCH_STATUS_RCH1_QUAL_FIFO_FULL_SHFT = 0xd,
	S_SCMO_RCH_STATUS_RCH1_QUAL_FIFO_EMPTY_BMSK = 0x1000,
	S_SCMO_RCH_STATUS_RCH1_QUAL_FIFO_EMPTY_SHFT = 0xc,
	S_SCMO_RCH_STATUS_RCH1_DATA_FIFO_FULL_BMSK = 0x200,
	S_SCMO_RCH_STATUS_RCH1_DATA_FIFO_FULL_SHFT = 0x9,
	S_SCMO_RCH_STATUS_RCH1_DATA_FIFO_EMPTY_BMSK = 0x100,
	S_SCMO_RCH_STATUS_RCH1_DATA_FIFO_EMPTY_SHFT = 0x8,
	S_SCMO_RCH_STATUS_RCH0_QUAL_FIFO_FULL_BMSK = 0x20,
	S_SCMO_RCH_STATUS_RCH0_QUAL_FIFO_FULL_SHFT = 0x5,
	S_SCMO_RCH_STATUS_RCH0_QUAL_FIFO_EMPTY_BMSK = 0x10,
	S_SCMO_RCH_STATUS_RCH0_QUAL_FIFO_EMPTY_SHFT = 0x4,
	S_SCMO_RCH_STATUS_RCH0_DATA_FIFO_FULL_BMSK = 0x2,
	S_SCMO_RCH_STATUS_RCH0_DATA_FIFO_FULL_SHFT = 0x1,
	S_SCMO_RCH_STATUS_RCH0_DATA_FIFO_EMPTY_BMSK = 0x1,
	S_SCMO_RCH_STATUS_RCH0_DATA_FIFO_EMPTY_SHFT = 0x0,
};

/* Write-channel buffering behaviour (coalescing, write-blocks-read) */
#define S_SCMO_WCH_BUF_CFG_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000580)
enum bimc_s_scmo_wch_buf_cfg {
	S_SCMO_WCH_BUF_CFG_RMSK = 0xff,
	S_SCMO_WCH_BUF_CFG_WRITE_BLOCK_READ_BMSK = 0x10,
	S_SCMO_WCH_BUF_CFG_WRITE_BLOCK_READ_SHFT = 0x4,
	S_SCMO_WCH_BUF_CFG_COALESCE_EN_BMSK = 0x1,
	S_SCMO_WCH_BUF_CFG_COALESCE_EN_SHFT = 0x0,
};

#define S_SCMO_WCH_STATUS_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x000005a0)
enum bimc_s_scmo_wch_status {
	S_SCMO_WCH_STATUS_RMSK = 0x333,
	S_SCMO_WCH_STATUS_BRESP_FIFO_FULL_BMSK = 0x200,
	S_SCMO_WCH_STATUS_BRESP_FIFO_FULL_SHFT = 0x9,
	S_SCMO_WCH_STATUS_BRESP_FIFO_EMPTY_BMSK = 0x100,
	S_SCMO_WCH_STATUS_BRESP_FIFO_EMPTY_SHFT = 0x8,
	S_SCMO_WCH_STATUS_WDATA_FIFO_FULL_BMSK = 0x20,
	S_SCMO_WCH_STATUS_WDATA_FIFO_FULL_SHFT = 0x5,
	S_SCMO_WCH_STATUS_WDATA_FIFO_EMPTY_BMSK = 0x10,
	S_SCMO_WCH_STATUS_WDATA_FIFO_EMPTY_SHFT = 0x4,
	S_SCMO_WCH_STATUS_WBUF_FULL_BMSK = 0x2,
	S_SCMO_WCH_STATUS_WBUF_FULL_SHFT = 0x1,
	S_SCMO_WCH_STATUS_WBUF_EMPTY_BMSK = 0x1,
	S_SCMO_WCH_STATUS_WBUF_EMPTY_SHFT = 0x0,
};

/* Write-buffer flush thresholds/ordering and manual flush trigger */
#define S_SCMO_FLUSH_CFG_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x000005c0)
enum bimc_s_scmo_flush_cfg {
	S_SCMO_FLUSH_CFG_RMSK = 0xffffffff,
	S_SCMO_FLUSH_CFG_FLUSH_IN_ORDER_BMSK = 0x10000000,
	S_SCMO_FLUSH_CFG_FLUSH_IN_ORDER_SHFT = 0x1c,
	S_SCMO_FLUSH_CFG_FLUSH_IDLE_DELAY_BMSK = 0x3ff0000,
	S_SCMO_FLUSH_CFG_FLUSH_IDLE_DELAY_SHFT = 0x10,
	S_SCMO_FLUSH_CFG_FLUSH_UPPER_LIMIT_BMSK = 0xf00,
	S_SCMO_FLUSH_CFG_FLUSH_UPPER_LIMIT_SHFT = 0x8,
	S_SCMO_FLUSH_CFG_FLUSH_LOWER_LIMIT_BMSK = 0xf,
	S_SCMO_FLUSH_CFG_FLUSH_LOWER_LIMIT_SHFT = 0x0,
};

#define S_SCMO_FLUSH_CMD_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x000005c4)
enum bimc_s_scmo_flush_cmd {
	S_SCMO_FLUSH_CMD_RMSK = 0xf,
	S_SCMO_FLUSH_CMD_FLUSH_ALL_BUF_BMSK = 0x3,
	S_SCMO_FLUSH_CMD_FLUSH_ALL_BUF_SHFT = 0x0,
};
+
/*
 * Command-optimisation configuration 0.
 * Fix: the base macro was spelled S_SCM0_REG_BASE (digit zero), which
 * is not defined anywhere -- the correct macro is S_SCMO_REG_BASE
 * (letter O), as used by every other S_SCMO_* register.
 */
#define S_SCMO_CMD_OPT_CFG0_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000700)
enum bimc_s_scmo_cmd_opt_cfg0 {
	S_SCMO_CMD_OPT_CFG0_RMSK = 0xffffff,
	S_SCMO_CMD_OPT_CFG0_IGNORE_BANK_UNAVL_BMSK = 0x100000,
	S_SCMO_CMD_OPT_CFG0_IGNORE_BANK_UNAVL_SHFT = 0x14,
	S_SCMO_CMD_OPT_CFG0_MASK_CMDOUT_PRI_BMSK = 0x10000,
	S_SCMO_CMD_OPT_CFG0_MASK_CMDOUT_PRI_SHFT = 0x10,
	S_SCMO_CMD_OPT_CFG0_DPE_CMD_REORDERING_BMSK = 0x1000,
	S_SCMO_CMD_OPT_CFG0_DPE_CMD_REORDERING_SHFT = 0xc,
	S_SCMO_CMD_OPT_CFG0_WR_OPT_EN_BMSK = 0x100,
	S_SCMO_CMD_OPT_CFG0_WR_OPT_EN_SHFT = 0x8,
	S_SCMO_CMD_OPT_CFG0_RD_OPT_EN_BMSK = 0x10,
	S_SCMO_CMD_OPT_CFG0_RD_OPT_EN_SHFT = 0x4,
	S_SCMO_CMD_OPT_CFG0_PAGE_MGMT_POLICY_BMSK = 0x1,
	S_SCMO_CMD_OPT_CFG0_PAGE_MGMT_POLICY_SHFT = 0x0,
};
+
/* Per-priority command timeout values */
#define S_SCMO_CMD_OPT_CFG1_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000704)
enum bimc_s_scmo_cmd_opt_cfg1 {
	S_SCMO_CMD_OPT_CFG1_RMSK = 0xffffffff,
	S_SCMO_CMD_OPT_CFG1_HSTP_CMD_TIMEOUT_BMSK = 0x1f000000,
	S_SCMO_CMD_OPT_CFG1_HSTP_CMD_TIMEOUT_SHFT = 0x18,
	S_SCMO_CMD_OPT_CFG1_HP_CMD_TIMEOUT_BMSK = 0x1f0000,
	S_SCMO_CMD_OPT_CFG1_HP_CMD_TIMEOUT_SHFT = 0x10,
	S_SCMO_CMD_OPT_CFG1_MP_CMD_TIMEOUT_BMSK = 0x1f00,
	S_SCMO_CMD_OPT_CFG1_MP_CMD_TIMEOUT_SHFT = 0x8,
	S_SCMO_CMD_OPT_CFG1_LP_CMD_TIMEOUT_BMSK = 0x1f,
	S_SCMO_CMD_OPT_CFG1_LP_CMD_TIMEOUT_SHFT = 0x0,
};

#define S_SCMO_CMD_OPT_CFG2_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x00000708)
enum bimc_s_scmo_cmd_opt_cfg2 {
	S_SCMO_CMD_OPT_CFG2_RMSK = 0xff,
	S_SCMO_CMD_OPT_CFG2_RWOPT_CMD_TIMEOUT_BMSK = 0xf,
	S_SCMO_CMD_OPT_CFG2_RWOPT_CMD_TIMEOUT_SHFT = 0x0,
};

#define S_SCMO_CMD_OPT_CFG3_ADDR(b, n) \
	(S_SCMO_REG_BASE(b) + (0x8000 * (n)) + 0x0000070c)
enum bimc_s_scmo_cmd_opt_cfg3 {
	S_SCMO_CMD_OPT_CFG3_RMSK = 0xff,
	S_SCMO_CMD_OPT_CFG3_FLUSH_CMD_TIMEOUT_BMSK = 0xf,
	S_SCMO_CMD_OPT_CFG3_FLUSH_CMD_TIMEOUT_SHFT = 0x0,
};

/* S_SWAY_GENERIC: slave-way register window (same base/stride as SCMO) */
#define S_SWAY_REG_BASE(b) ((b) + 0x00048000)

#define S_SWAY_CONFIG_INFO_0_ADDR(b, n) \
	(S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000020)
enum bimc_s_sway_config_info_0 {
	S_SWAY_CONFIG_INFO_0_RMSK = 0xff0000ff,
	S_SWAY_CONFIG_INFO_0_SYNC_MODE_BMSK = 0xff000000,
	S_SWAY_CONFIG_INFO_0_SYNC_MODE_SHFT = 0x18,
	S_SWAY_CONFIG_INFO_0_FUNC_BMSK = 0xff,
	S_SWAY_CONFIG_INFO_0_FUNC_SHFT = 0x0,
};

#define S_SWAY_CONFIG_INFO_1_ADDR(b, n) \
	(S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000030)
enum bimc_s_sway_config_info_1 {
	S_SWAY_CONFIG_INFO_1_RMSK = 0xffffffff,
	S_SWAY_CONFIG_INFO_1_MPORT_CONNECTIVITY_BMSK = 0xffffffff,
	S_SWAY_CONFIG_INFO_1_MPORT_CONNECTIVITY_SHFT = 0x0,
};

#define S_SWAY_CONFIG_INFO_2_ADDR(b, n) \
	(S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000040)
enum bimc_s_sway_config_info_2 {
	S_SWAY_CONFIG_INFO_2_RMSK = 0xffff0000,
	S_SWAY_CONFIG_INFO_2_MPORT_CONNECTIVITY_BMSK = 0xffff0000,
	S_SWAY_CONFIG_INFO_2_MPORT_CONNECTIVITY_SHFT = 0x10,
};

#define S_SWAY_CONFIG_INFO_3_ADDR(b, n) \
	(S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000050)
enum bimc_s_sway_config_info_3 {
	S_SWAY_CONFIG_INFO_3_RMSK = 0xffffffff,
	S_SWAY_CONFIG_INFO_3_RCH0_DEPTH_BMSK = 0xff000000,
	S_SWAY_CONFIG_INFO_3_RCH0_DEPTH_SHFT = 0x18,
	S_SWAY_CONFIG_INFO_3_BCH_DEPTH_BMSK = 0xff0000,
	S_SWAY_CONFIG_INFO_3_BCH_DEPTH_SHFT = 0x10,
	S_SWAY_CONFIG_INFO_3_WCH_DEPTH_BMSK = 0xff,
	S_SWAY_CONFIG_INFO_3_WCH_DEPTH_SHFT = 0x0,
};

#define S_SWAY_CONFIG_INFO_4_ADDR(b, n) \
	(S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000060)
enum bimc_s_sway_config_info_4 {
	S_SWAY_CONFIG_INFO_4_RMSK = 0x800000ff,
	S_SWAY_CONFIG_INFO_4_DUAL_RCH_EN_BMSK = 0x80000000,
	S_SWAY_CONFIG_INFO_4_DUAL_RCH_EN_SHFT = 0x1f,
	S_SWAY_CONFIG_INFO_4_RCH1_DEPTH_BMSK = 0xff,
	S_SWAY_CONFIG_INFO_4_RCH1_DEPTH_SHFT = 0x0,
};

#define S_SWAY_CONFIG_INFO_5_ADDR(b, n) \
	(S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000070)
enum bimc_s_sway_config_info_5 {
	S_SWAY_CONFIG_INFO_5_RMSK = 0x800000ff,
	S_SWAY_CONFIG_INFO_5_QCH_EN_BMSK = 0x80000000,
	S_SWAY_CONFIG_INFO_5_QCH_EN_SHFT = 0x1f,
	S_SWAY_CONFIG_INFO_5_QCH_DEPTH_BMSK = 0xff,
	S_SWAY_CONFIG_INFO_5_QCH_DEPTH_SHFT = 0x0,
};

#define S_SWAY_CONFIG_INFO_6_ADDR(b, n) \
	(S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000080)
enum bimc_s_sway_config_info_6 {
	S_SWAY_CONFIG_INFO_6_RMSK = 0x1,
	S_SWAY_CONFIG_INFO_6_S2SW_PIPELINE_EN_BMSK = 0x1,
	S_SWAY_CONFIG_INFO_6_S2SW_PIPELINE_EN_SHFT = 0x0,
};

/* SWAY interrupt status / clear / enable (fields reserved for future use) */
#define S_SWAY_INT_STATUS_ADDR(b, n) \
	(S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000100)
enum bimc_s_sway_int_status {
	S_SWAY_INT_STATUS_RMSK = 0x3,
	S_SWAY_INT_STATUS_RFU_BMSK = 0x3,
	S_SWAY_INT_STATUS_RFU_SHFT = 0x0,
};

#define S_SWAY_INT_CLR_ADDR(b, n) \
	(S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000108)
enum bimc_s_sway_int_clr {
	S_SWAY_INT_CLR_RMSK = 0x3,
	S_SWAY_INT_CLR_RFU_BMSK = 0x3,
	S_SWAY_INT_CLR_RFU_SHFT = 0x0,
};


#define S_SWAY_INT_EN_ADDR(b, n) \
	(S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x0000010c)
enum bimc_s_sway_int_en {
	S_SWAY_INT_EN_RMSK = 0x3,
	S_SWAY_INT_EN_RFU_BMSK = 0x3,
	S_SWAY_INT_EN_RFU_SHFT = 0x0,
};

#define S_SWAY_CLK_CTRL_ADDR(b, n) \
	(S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000200)
enum bimc_s_sway_clk_ctrl {
	S_SWAY_CLK_CTRL_RMSK = 0x3,
	S_SWAY_CLK_CTRL_SLAVE_CLK_GATING_EN_BMSK = 0x2,
	S_SWAY_CLK_CTRL_SLAVE_CLK_GATING_EN_SHFT = 0x1,
	S_SWAY_CLK_CTRL_CORE_CLK_GATING_EN_BMSK = 0x1,
	S_SWAY_CLK_CTRL_CORE_CLK_GATING_EN_SHFT = 0x0,
};

#define S_SWAY_RCH_SEL_ADDR(b, n) \
	(S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000210)
enum bimc_s_sway_rch_sel {
	S_SWAY_RCH_SEL_RMSK = 0x7f,
	S_SWAY_RCH_SEL_UNUSED_BMSK = 0x7f,
	S_SWAY_RCH_SEL_UNUSED_SHFT = 0x0,
};


#define S_SWAY_MAX_OUTSTANDING_REQS_ADDR(b, n) \
	(S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000220)
enum bimc_s_sway_max_outstanding_reqs {
	S_SWAY_MAX_OUTSTANDING_REQS_RMSK = 0xffff,
	S_SWAY_MAX_OUTSTANDING_REQS_WRITE_BMSK = 0xff00,
	S_SWAY_MAX_OUTSTANDING_REQS_WRITE_SHFT = 0x8,
	S_SWAY_MAX_OUTSTANDING_REQS_READ_BMSK = 0xff,
	S_SWAY_MAX_OUTSTANDING_REQS_READ_SHFT = 0x0,
};


/* SWAY channel-buffer full/empty status registers */
#define S_SWAY_BUF_STATUS_0_ADDR(b, n) \
	(S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000400)
enum bimc_s_sway_buf_status_0 {
	S_SWAY_BUF_STATUS_0_RMSK = 0xf0300f03,
	S_SWAY_BUF_STATUS_0_RCH0_DATA_RD_FULL_BMSK = 0x80000000,
	S_SWAY_BUF_STATUS_0_RCH0_DATA_RD_FULL_SHFT = 0x1f,
	S_SWAY_BUF_STATUS_0_RCH0_DATA_RD_EMPTY_BMSK = 0x40000000,
	S_SWAY_BUF_STATUS_0_RCH0_DATA_RD_EMPTY_SHFT = 0x1e,
	S_SWAY_BUF_STATUS_0_RCH0_CTRL_RD_FULL_BMSK = 0x20000000,
	S_SWAY_BUF_STATUS_0_RCH0_CTRL_RD_FULL_SHFT = 0x1d,
	S_SWAY_BUF_STATUS_0_RCH0_CTRL_RD_EMPTY_BMSK = 0x10000000,
	S_SWAY_BUF_STATUS_0_RCH0_CTRL_RD_EMPTY_SHFT = 0x1c,
	S_SWAY_BUF_STATUS_0_BCH_RD_FULL_BMSK = 0x200000,
	S_SWAY_BUF_STATUS_0_BCH_RD_FULL_SHFT = 0x15,
	S_SWAY_BUF_STATUS_0_BCH_RD_EMPTY_BMSK = 0x100000,
	S_SWAY_BUF_STATUS_0_BCH_RD_EMPTY_SHFT = 0x14,
	S_SWAY_BUF_STATUS_0_WCH_DATA_WR_FULL_BMSK = 0x800,
	S_SWAY_BUF_STATUS_0_WCH_DATA_WR_FULL_SHFT = 0xb,
	S_SWAY_BUF_STATUS_0_WCH_DATA_WR_EMPTY_BMSK = 0x400,
	S_SWAY_BUF_STATUS_0_WCH_DATA_WR_EMPTY_SHFT = 0xa,
	S_SWAY_BUF_STATUS_0_WCH_CTRL_WR_FULL_BMSK = 0x200,
	S_SWAY_BUF_STATUS_0_WCH_CTRL_WR_FULL_SHFT = 0x9,
	S_SWAY_BUF_STATUS_0_WCH_CTRL_WR_EMPTY_BMSK = 0x100,
	S_SWAY_BUF_STATUS_0_WCH_CTRL_WR_EMPTY_SHFT = 0x8,
	S_SWAY_BUF_STATUS_0_ACH_WR_FULL_BMSK = 0x2,
	S_SWAY_BUF_STATUS_0_ACH_WR_FULL_SHFT = 0x1,
	S_SWAY_BUF_STATUS_0_ACH_WR_EMPTY_BMSK = 0x1,
	S_SWAY_BUF_STATUS_0_ACH_WR_EMPTY_SHFT = 0x0,
};

#define S_SWAY_BUF_STATUS_1_ADDR(b, n) \
	(S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000410)
enum bimc_s_sway_buf_status_1 {
	S_SWAY_BUF_STATUS_1_RMSK = 0xf0,
	S_SWAY_BUF_STATUS_1_RCH1_DATA_RD_FULL_BMSK = 0x80,
	S_SWAY_BUF_STATUS_1_RCH1_DATA_RD_FULL_SHFT = 0x7,
	S_SWAY_BUF_STATUS_1_RCH1_DATA_RD_EMPTY_BMSK = 0x40,
	S_SWAY_BUF_STATUS_1_RCH1_DATA_RD_EMPTY_SHFT = 0x6,
	S_SWAY_BUF_STATUS_1_RCH1_CTRL_RD_FULL_BMSK = 0x20,
	S_SWAY_BUF_STATUS_1_RCH1_CTRL_RD_FULL_SHFT = 0x5,
	S_SWAY_BUF_STATUS_1_RCH1_CTRL_RD_EMPTY_BMSK = 0x10,
	S_SWAY_BUF_STATUS_1_RCH1_CTRL_RD_EMPTY_SHFT = 0x4,
};

#define S_SWAY_BUF_STATUS_2_ADDR(b, n) \
	(S_SWAY_REG_BASE(b) + (0x8000 * (n)) + 0x00000420)
enum bimc_s_sway_buf_status_2 {
	S_SWAY_BUF_STATUS_2_RMSK = 0x30,
	S_SWAY_BUF_STATUS_2_QCH_RD_FULL_BMSK = 0x20,
	S_SWAY_BUF_STATUS_2_QCH_RD_FULL_SHFT = 0x5,
	S_SWAY_BUF_STATUS_2_QCH_RD_EMPTY_BMSK = 0x10,
	S_SWAY_BUF_STATUS_2_QCH_RD_EMPTY_SHFT = 0x4,
};
+
/* S_ARB_GENERIC: slave arbiter register window */

#define S_ARB_REG_BASE(b) ((b) + 0x00049000)

/*
 * COMPONENT_INFO: arbiter identification.
 * Fix: the address macro used S_SWAY_REG_BASE (0x48000) instead of the
 * S_ARB_REG_BASE (0x49000) defined immediately above for this section.
 */
#define S_ARB_COMPONENT_INFO_ADDR(b, n) \
	(S_ARB_REG_BASE(b) + (0x8000 * (n)) + 0x00000000)
enum bimc_s_arb_component_info {
	S_ARB_COMPONENT_INFO_RMSK = 0xffffff,
	S_ARB_COMPONENT_INFO_INSTANCE_BMSK = 0xff0000,
	S_ARB_COMPONENT_INFO_INSTANCE_SHFT = 0x10,
	S_ARB_COMPONENT_INFO_SUB_TYPE_BMSK = 0xff00,
	S_ARB_COMPONENT_INFO_SUB_TYPE_SHFT = 0x8,
	S_ARB_COMPONENT_INFO_TYPE_BMSK = 0xff,
	S_ARB_COMPONENT_INFO_TYPE_SHFT = 0x0,
};
+
/* Arbiter build configuration: function type and master connectivity */
#define S_ARB_CONFIG_INFO_0_ADDR(b, n) \
	(S_ARB_REG_BASE(b) + (0x8000 * (n)) + 0x00000020)
enum bimc_s_arb_config_info_0 {
	S_ARB_CONFIG_INFO_0_RMSK = 0x800000ff,
	S_ARB_CONFIG_INFO_0_ARB2SW_PIPELINE_EN_BMSK = 0x80000000,
	S_ARB_CONFIG_INFO_0_ARB2SW_PIPELINE_EN_SHFT = 0x1f,
	S_ARB_CONFIG_INFO_0_FUNC_BMSK = 0xff,
	S_ARB_CONFIG_INFO_0_FUNC_SHFT = 0x0,
};

#define S_ARB_CONFIG_INFO_1_ADDR(b, n) \
	(S_ARB_REG_BASE(b) + (0x8000 * (n)) + 0x00000030)
enum bimc_s_arb_config_info_1 {
	S_ARB_CONFIG_INFO_1_RMSK = 0xffffffff,
	S_ARB_CONFIG_INFO_1_MPORT_CONNECTIVITY_BMSK = 0xffffffff,
	S_ARB_CONFIG_INFO_1_MPORT_CONNECTIVITY_SHFT = 0x0,
};
+
/*
 * Arbiter clock-gating control.
 * Fix: the macro's parameter list was (b) while its expansion uses n,
 * which would silently capture an unrelated local variable named `n`
 * at any call site (or fail to compile).  It now takes (b, n) like
 * every other per-instance register macro in this file.
 * NOTE(review): RMSK (0x1) does not cover the 0x2 SLAVE_CLK_GATING_EN
 * bit, and the trailing CLK_GATING_EN pair duplicates bit 0 -- values
 * kept as-is; confirm against the IP spec.
 */
#define S_ARB_CLK_CTRL_ADDR(b, n) \
	(S_ARB_REG_BASE(b) + (0x8000 * (n)) + 0x00000200)
enum bimc_s_arb_clk_ctrl {
	S_ARB_CLK_CTRL_RMSK = 0x1,
	S_ARB_CLK_CTRL_SLAVE_CLK_GATING_EN_BMSK = 0x2,
	S_ARB_CLK_CTRL_SLAVE_CLK_GATING_EN_SHFT = 0x1,
	S_ARB_CLK_CTRL_CORE_CLK_GATING_EN_BMSK = 0x1,
	S_ARB_CLK_CTRL_CORE_CLK_GATING_EN_SHFT = 0x0,
	S_ARB_CLK_CTRL_CLK_GATING_EN_BMSK = 0x1,
	S_ARB_CLK_CTRL_CLK_GATING_EN_SHFT = 0x0,
};
+
/* Arbitration mode: write-grants-ahead count and priority round-robin */
#define S_ARB_MODE_ADDR(b, n) \
	(S_ARB_REG_BASE(b) + (0x8000 * (n)) + 0x00000210)
enum bimc_s_arb_mode {
	S_ARB_MODE_RMSK = 0xf0000001,
	S_ARB_MODE_WR_GRANTS_AHEAD_BMSK = 0xf0000000,
	S_ARB_MODE_WR_GRANTS_AHEAD_SHFT = 0x1c,
	S_ARB_MODE_PRIO_RR_EN_BMSK = 0x1,
	S_ARB_MODE_PRIO_RR_EN_SHFT = 0x0,
};

/* Combined mask of the three health-config fields (limit/areq/prio).
 * The M_BKE_HEALTH_0_* field layout is shared by all four HEALTH
 * registers, so one mask/value builder serves them all.
 */
#define BKE_HEALTH_MASK \
	(M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK |\
	M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK |\
	M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK)

/* Pack limit-commands / areq-priority / priority-level into one value */
#define BKE_HEALTH_VAL(limit, areq, plvl) \
	((((limit) << M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_SHFT) & \
	M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK) | \
	(((areq) << M_BKE_HEALTH_0_CONFIG_AREQPRIO_SHFT) & \
	M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK) | \
	(((plvl) << M_BKE_HEALTH_0_CONFIG_PRIOLVL_SHFT) & \
	M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK))

/* Largest programmable BKE grant period (full GP field) */
#define MAX_GRANT_PERIOD \
	(M_BKE_GP_GP_BMSK >> \
	M_BKE_GP_GP_SHFT)

/* Largest grant count used by the driver.
 * NOTE(review): the "+ 1" halves the GC field's range relative to the
 * hardware maximum -- presumably deliberate headroom; confirm.
 */
#define MAX_GC \
	(M_BKE_GC_GC_BMSK >> \
	(M_BKE_GC_GC_SHFT + 1))
+
/*
 * Divide *a by b in place (quotient left in *a) and return the
 * remainder, via the kernel's do_div() which needs a 64-bit lvalue.
 * The 0 < *a < b short-cut avoids calling do_div() when the quotient
 * truncates to zero; note it returns 1 rather than the true remainder
 * (*a) in that case -- callers in this file only use the quotient.
 * NOTE(review): do_div() treats the dividend as unsigned 64-bit;
 * behaviour for negative *a is suspect -- confirm callers never pass
 * negative values.
 */
static int bimc_div(int64_t *a, uint32_t b)
{
	if ((*a > 0) && (*a < b)) {
		*a = 0;
		return 1;
	} else {
		return do_div(*a, b);
	}
}
+
/* Normalise a truthy flag to exactly 0 or 1 for register fields */
#define ENABLE(val) ((val) == 1 ? 1 : 0)

/*
 * Program the master-port clock gating enables in M_CLK_CTRL for
 * master @mas_index, preserving all other register bits.
 * NOTE(review): core_clk_gate_en is shifted into the MAS_CLK_GATING_EN
 * field and port_clk_gate_en lands in bit 0 (CORE_CLK_GATING_EN) --
 * the mapping looks swapped relative to the field names; confirm
 * against the IP documentation before changing.
 */
void msm_bus_bimc_set_mas_clk_gate(struct msm_bus_bimc_info *binfo,
	uint32_t mas_index, struct msm_bus_bimc_clk_gate *bgate)
{
	uint32_t val, mask, reg_val;
	void __iomem *addr;

	reg_val = readl_relaxed(M_CLK_CTRL_ADDR(binfo->base,
		mas_index)) & M_CLK_CTRL_RMSK;
	addr = M_CLK_CTRL_ADDR(binfo->base, mas_index);
	mask = (M_CLK_CTRL_MAS_CLK_GATING_EN_BMSK |
		M_CLK_CTRL_CORE_CLK_GATING_EN_BMSK);
	val = (bgate->core_clk_gate_en <<
		M_CLK_CTRL_MAS_CLK_GATING_EN_SHFT) |
		bgate->port_clk_gate_en;
	writel_relaxed(((reg_val & (~mask)) | (val & mask)), addr);
	/* Ensure clock gating enable mask is set before exiting */
	wmb();
}
+
+void msm_bus_bimc_arb_en(struct msm_bus_bimc_info *binfo,
+ uint32_t slv_index, bool en)
+{
+ uint32_t reg_val, reg_mask_val, enable, val;
+
+ reg_mask_val = (readl_relaxed(S_ARB_CONFIG_INFO_0_ADDR(binfo->
+ base, slv_index)) & S_ARB_CONFIG_INFO_0_FUNC_BMSK)
+ >> S_ARB_CONFIG_INFO_0_FUNC_SHFT;
+ enable = ENABLE(en);
+ val = enable << S_ARB_MODE_PRIO_RR_EN_SHFT;
+ if (reg_mask_val == BIMC_ARB_MODE_PRIORITY_RR) {
+ reg_val = readl_relaxed(S_ARB_CONFIG_INFO_0_ADDR(binfo->
+ base, slv_index)) & S_ARB_MODE_RMSK;
+ writel_relaxed(((reg_val & (~(S_ARB_MODE_PRIO_RR_EN_BMSK))) |
+ (val & S_ARB_MODE_PRIO_RR_EN_BMSK)),
+ S_ARB_MODE_ADDR(binfo->base, slv_index));
+ /* Ensure arbitration mode is set before returning */
+ wmb();
+ }
+}
+
/*
 * Program the three per-master QoS override registers for master
 * @index: val0 -> PRIOLVL override, val1 -> read-command AREQPRIO
 * override, val2 -> write-command AREQPRIO override.  Each register is
 * updated read-modify-write so the remaining bits are preserved; the
 * trailing wmb() orders the MMIO writes before return.
 */
static void set_qos_mode(void __iomem *baddr, uint32_t index, uint32_t val0,
	uint32_t val1, uint32_t val2)
{
	uint32_t reg_val, val;

	reg_val = readl_relaxed(M_PRIOLVL_OVERRIDE_ADDR(baddr,
		index)) & M_PRIOLVL_OVERRIDE_RMSK;
	val = val0 << M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_SHFT;
	writel_relaxed(((reg_val & ~(M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_BMSK))
		| (val & M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_BMSK)),
		M_PRIOLVL_OVERRIDE_ADDR(baddr, index));
	reg_val = readl_relaxed(M_RD_CMD_OVERRIDE_ADDR(baddr, index)) &
		M_RD_CMD_OVERRIDE_RMSK;
	val = val1 << M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT;
	writel_relaxed(((reg_val & ~(M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK
		)) | (val & M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK)),
		M_RD_CMD_OVERRIDE_ADDR(baddr, index));
	reg_val = readl_relaxed(M_WR_CMD_OVERRIDE_ADDR(baddr, index)) &
		M_WR_CMD_OVERRIDE_RMSK;
	val = val2 << M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT;
	writel_relaxed(((reg_val & ~(M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK
		)) | (val & M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK)),
		M_WR_CMD_OVERRIDE_ADDR(baddr, index));
	/* Ensure the priority register writes go through */
	wmb();
}
+
/*
 * Select the QoS mode for master @mas_index:
 *  - FIXED:  BKE disabled, all three overrides forced on (1,1,1);
 *  - BYPASS: BKE disabled, overrides cleared (0,0,0);
 *  - REGULATOR/LIMITER: overrides cleared, then the BKE (bandwidth
 *    book-keeping engine) enable bit set.
 * Unknown modes are ignored.  The wmb() between BKE and override
 * writes enforces the ordering the hardware requires.
 */
static void msm_bus_bimc_set_qos_mode(void __iomem *base,
	uint32_t mas_index, uint8_t qmode_sel)
{
	uint32_t reg_val, val;

	switch (qmode_sel) {
	case BIMC_QOS_MODE_FIXED:
		reg_val = readl_relaxed(M_BKE_EN_ADDR(base,
			mas_index));
		writel_relaxed((reg_val & (~M_BKE_EN_EN_BMSK)),
			M_BKE_EN_ADDR(base, mas_index));
		/* Ensure that the book-keeping register writes
		 * go through before setting QoS mode.
		 * QoS mode registers might write beyond 1K
		 * boundary in future
		 */
		wmb();
		set_qos_mode(base, mas_index, 1, 1, 1);
		break;

	case BIMC_QOS_MODE_BYPASS:
		reg_val = readl_relaxed(M_BKE_EN_ADDR(base,
			mas_index));
		writel_relaxed((reg_val & (~M_BKE_EN_EN_BMSK)),
			M_BKE_EN_ADDR(base, mas_index));
		/* Ensure that the book-keeping register writes
		 * go through before setting QoS mode.
		 * QoS mode registers might write beyond 1K
		 * boundary in future
		 */
		wmb();
		set_qos_mode(base, mas_index, 0, 0, 0);
		break;

	case BIMC_QOS_MODE_REGULATOR:
	case BIMC_QOS_MODE_LIMITER:
		set_qos_mode(base, mas_index, 0, 0, 0);
		reg_val = readl_relaxed(M_BKE_EN_ADDR(base,
			mas_index));
		val = 1 << M_BKE_EN_EN_SHFT;
		/* Ensure that the book-keeping register writes
		 * go through before setting QoS mode.
		 * QoS mode registers might write beyond 1K
		 * boundary in future
		 */
		wmb();
		writel_relaxed(((reg_val & (~M_BKE_EN_EN_BMSK)) | (val &
			M_BKE_EN_EN_BMSK)), M_BKE_EN_ADDR(base,
			mas_index));
		break;
	default:
		break;
	}
}
+
/*
 * Write one BKE HEALTH_n register for regulator/limiter mode.
 * @addr already points at the correct HEALTH register for the master;
 * @rmsk is that register's readable-bits mask; @index selects which
 * qhealth[] entry (0..3) supplies limit_commands/areq_prio/prio_level.
 * All HEALTH registers share the HEALTH_0 field layout, so
 * BKE_HEALTH_VAL/BKE_HEALTH_MASK apply to each of them.
 */
static void set_qos_prio_rl(void __iomem *addr, uint32_t rmsk,
	uint8_t index, struct msm_bus_bimc_qos_mode *qmode)
{
	uint32_t reg_val, val0, val;

	/* Note, addr is already passed with right mas_index */
	reg_val = readl_relaxed(addr) & rmsk;
	val0 = BKE_HEALTH_VAL(qmode->rl.qhealth[index].limit_commands,
		qmode->rl.qhealth[index].areq_prio,
		qmode->rl.qhealth[index].prio_level);
	val = ((reg_val & (~(BKE_HEALTH_MASK))) | (val0 & BKE_HEALTH_MASK));
	writel_relaxed(val, addr);
	/* Ensure that priority for regulator/limiter modes are
	 * set before returning
	 */
	wmb();

}
+
/*
 * Program the QoS priority fields for master @mas_index according to
 * @qmode_sel: FIXED writes the static priority-level and read/write
 * AREQPRIO fields; REGULATOR/LIMITER programs all four BKE HEALTH
 * registers from qmode->rl.qhealth[3..0]; BYPASS needs no priority.
 * NOTE(review): this function uses M_PRIOLVL_OVERRIDE_BMSK/_SHFT and
 * M_*_CMD_OVERRIDE_AREQPRIO_*, while set_qos_mode() uses the
 * *_OVERRIDE_OVERRIDE_* variants -- confirm both macro families exist
 * and address the intended (value vs. override-enable) fields.
 */
static void msm_bus_bimc_set_qos_prio(void __iomem *base,
	uint32_t mas_index, uint8_t qmode_sel,
	struct msm_bus_bimc_qos_mode *qmode)
{
	uint32_t reg_val, val;

	switch (qmode_sel) {
	case BIMC_QOS_MODE_FIXED:
		reg_val = readl_relaxed(M_PRIOLVL_OVERRIDE_ADDR(
			base, mas_index)) & M_PRIOLVL_OVERRIDE_RMSK;
		val = qmode->fixed.prio_level <<
			M_PRIOLVL_OVERRIDE_SHFT;
		writel_relaxed(((reg_val &
			~(M_PRIOLVL_OVERRIDE_BMSK)) | (val
			& M_PRIOLVL_OVERRIDE_BMSK)),
			M_PRIOLVL_OVERRIDE_ADDR(base, mas_index));

		reg_val = readl_relaxed(M_RD_CMD_OVERRIDE_ADDR(
			base, mas_index)) & M_RD_CMD_OVERRIDE_RMSK;
		val = qmode->fixed.areq_prio_rd <<
			M_RD_CMD_OVERRIDE_AREQPRIO_SHFT;
		writel_relaxed(((reg_val & ~(M_RD_CMD_OVERRIDE_AREQPRIO_BMSK))
			| (val & M_RD_CMD_OVERRIDE_AREQPRIO_BMSK)),
			M_RD_CMD_OVERRIDE_ADDR(base, mas_index));

		reg_val = readl_relaxed(M_WR_CMD_OVERRIDE_ADDR(
			base, mas_index)) & M_WR_CMD_OVERRIDE_RMSK;
		val = qmode->fixed.areq_prio_wr <<
			M_WR_CMD_OVERRIDE_AREQPRIO_SHFT;
		writel_relaxed(((reg_val & ~(M_WR_CMD_OVERRIDE_AREQPRIO_BMSK))
			| (val & M_WR_CMD_OVERRIDE_AREQPRIO_BMSK)),
			M_WR_CMD_OVERRIDE_ADDR(base, mas_index));
		/* Ensure that fixed mode register writes go through
		 * before returning
		 */
		wmb();
		break;

	case BIMC_QOS_MODE_REGULATOR:
	case BIMC_QOS_MODE_LIMITER:
		set_qos_prio_rl(M_BKE_HEALTH_3_CONFIG_ADDR(base,
			mas_index), M_BKE_HEALTH_3_CONFIG_RMSK, 3, qmode);
		set_qos_prio_rl(M_BKE_HEALTH_2_CONFIG_ADDR(base,
			mas_index), M_BKE_HEALTH_2_CONFIG_RMSK, 2, qmode);
		set_qos_prio_rl(M_BKE_HEALTH_1_CONFIG_ADDR(base,
			mas_index), M_BKE_HEALTH_1_CONFIG_RMSK, 1, qmode);
		set_qos_prio_rl(M_BKE_HEALTH_0_CONFIG_ADDR(base,
			mas_index), M_BKE_HEALTH_0_CONFIG_RMSK, 0 , qmode);
		break;
	case BIMC_QOS_MODE_BYPASS:
	default:
		break;
	}
}
+
/*
 * Write the BKE bandwidth registers for master @mas_index: grant
 * period @gp, grant count @gc, and the high/mid/low thresholds
 * @th/@tm/@tl.  The BKE is disabled first, as the spec requires for
 * reprogramming; it is NOT re-enabled here -- the caller (via
 * msm_bus_bimc_set_qos_mode()) restores the enable bit.
 * NOTE(review): tm and tl are narrowed through int16_t val2 before the
 * masked write; presumably the THM/THL fields are 16-bit signed --
 * confirm the truncation is intended for large thresholds.
 */
static void set_qos_bw_regs(void __iomem *baddr, uint32_t mas_index,
	int32_t th, int32_t tm, int32_t tl, uint32_t gp,
	uint32_t gc)
{
	int32_t reg_val, val;
	int32_t bke_reg_val;
	int16_t val2;

	/* Disable BKE before writing to registers as per spec */
	bke_reg_val = readl_relaxed(M_BKE_EN_ADDR(baddr, mas_index));
	writel_relaxed((bke_reg_val & ~(M_BKE_EN_EN_BMSK)),
		M_BKE_EN_ADDR(baddr, mas_index));

	/* Write values of registers calculated */
	reg_val = readl_relaxed(M_BKE_GP_ADDR(baddr, mas_index))
		& M_BKE_GP_RMSK;
	val = gp << M_BKE_GP_GP_SHFT;
	writel_relaxed(((reg_val & ~(M_BKE_GP_GP_BMSK)) | (val &
		M_BKE_GP_GP_BMSK)), M_BKE_GP_ADDR(baddr, mas_index));

	reg_val = readl_relaxed(M_BKE_GC_ADDR(baddr, mas_index)) &
		M_BKE_GC_RMSK;
	val = gc << M_BKE_GC_GC_SHFT;
	writel_relaxed(((reg_val & ~(M_BKE_GC_GC_BMSK)) | (val &
		M_BKE_GC_GC_BMSK)), M_BKE_GC_ADDR(baddr, mas_index));

	reg_val = readl_relaxed(M_BKE_THH_ADDR(baddr, mas_index)) &
		M_BKE_THH_RMSK;
	val = th << M_BKE_THH_THRESH_SHFT;
	writel_relaxed(((reg_val & ~(M_BKE_THH_THRESH_BMSK)) | (val &
		M_BKE_THH_THRESH_BMSK)), M_BKE_THH_ADDR(baddr, mas_index));

	reg_val = readl_relaxed(M_BKE_THM_ADDR(baddr, mas_index)) &
		M_BKE_THM_RMSK;
	val2 = tm << M_BKE_THM_THRESH_SHFT;
	writel_relaxed(((reg_val & ~(M_BKE_THM_THRESH_BMSK)) | (val2 &
		M_BKE_THM_THRESH_BMSK)), M_BKE_THM_ADDR(baddr, mas_index));

	reg_val = readl_relaxed(M_BKE_THL_ADDR(baddr, mas_index)) &
		M_BKE_THL_RMSK;
	val2 = tl << M_BKE_THL_THRESH_SHFT;
	writel_relaxed(((reg_val & ~(M_BKE_THL_THRESH_BMSK)) |
		(val2 & M_BKE_THL_THRESH_BMSK)), M_BKE_THL_ADDR(baddr,
		mas_index));

	/* Ensure that all bandwidth register writes have completed
	 * before returning
	 */
	wmb();
}
+
/*
 * Translate a bandwidth request (qbw->bw, bytes/s) and window size (qbw->ws,
 * ns) into BKE grant period / grant count / threshold register values and
 * program them for @mas_index.  A zero bw or window clears the registers
 * instead.  All 64-bit divisions go through bimc_div() so this also builds
 * on 32-bit kernels.
 */
static void msm_bus_bimc_set_qos_bw(void __iomem *base, uint32_t qos_freq,
	uint32_t mas_index, struct msm_bus_bimc_qos_bw *qbw)
{
	uint32_t bke_en;

	/* Validate QOS Frequency */
	if (qos_freq == 0) {
		MSM_BUS_DBG("Zero frequency\n");
		return;
	}

	/* Get enable bit for BKE before programming the period */
	bke_en = (readl_relaxed(M_BKE_EN_ADDR(base, mas_index)) &
		M_BKE_EN_EN_BMSK) >> M_BKE_EN_EN_SHFT;

	/* Only calculate if there's a requested bandwidth and window */
	if (qbw->bw && qbw->ws) {
		int64_t th, tm, tl;
		uint32_t gp, gc;
		int64_t gp_nominal, gp_required, gp_calc, data, temp;
		int64_t win = qbw->ws * qos_freq;
		temp = win;
		/*
		 * Calculate nominal grant period defined by requested
		 * window size.
		 * Ceil this value to max grant period.
		 */
		bimc_div(&temp, 1000000);
		gp_nominal = min_t(uint64_t, MAX_GRANT_PERIOD, temp);
		/*
		 * Calculate max window size, defined by bw request.
		 * Units: (KHz, MB/s)
		 */
		gp_calc = MAX_GC * qos_freq * 1000;
		gp_required = gp_calc;
		bimc_div(&gp_required, qbw->bw);

		/* Use min of the two grant periods */
		gp = min_t(int64_t, gp_nominal, gp_required);

		/* Calculate bandwith in grants and ceil. */
		temp = qbw->bw * gp;
		data = qos_freq * 1000;
		bimc_div(&temp, data);
		gc = min_t(int64_t, MAX_GC, temp);

		/* Calculate thresholds relative to the requested bw */
		th = qbw->bw - qbw->thh;
		tm = qbw->bw - qbw->thm;
		tl = qbw->bw - qbw->thl;

		/* Scale thresholds from bytes/s into grant units */
		th = th * gp;
		bimc_div(&th, data);
		tm = tm * gp;
		bimc_div(&tm, data);
		tl = tl * gp;
		bimc_div(&tl, data);

		MSM_BUS_DBG("BIMC: BW: mas_index: %d, th: %llu tm: %llu\n",
			mas_index, th, tm);
		MSM_BUS_DBG("BIMC: tl: %llu gp:%u gc: %u bke_en: %u\n",
			tl, gp, gc, bke_en);
		set_qos_bw_regs(base, mas_index, th, tm, tl, gp, gc);
	} else
		/* Clear bandwidth registers */
		set_qos_bw_regs(base, mas_index, 0, 0, 0, 0, 0);
}
+
+static int msm_bus_bimc_allocate_commit_data(struct msm_bus_fabric_registration
+ *fab_pdata, void **cdata, int ctx)
+{
+ struct msm_bus_bimc_commit **cd = (struct msm_bus_bimc_commit **)cdata;
+ struct msm_bus_bimc_info *binfo =
+ (struct msm_bus_bimc_info *)fab_pdata->hw_data;
+
+ MSM_BUS_DBG("Allocating BIMC commit data\n");
+ *cd = kzalloc(sizeof(struct msm_bus_bimc_commit), GFP_KERNEL);
+ if (!*cd) {
+ MSM_BUS_DBG("Couldn't alloc mem for cdata\n");
+ return -ENOMEM;
+ }
+
+ (*cd)->mas = binfo->cdata[ctx].mas;
+ (*cd)->slv = binfo->cdata[ctx].slv;
+
+ return 0;
+}
+
+static void *msm_bus_bimc_allocate_bimc_data(struct platform_device *pdev,
+ struct msm_bus_fabric_registration *fab_pdata)
+{
+ struct resource *bimc_mem;
+ struct resource *bimc_io;
+ struct msm_bus_bimc_info *binfo;
+ int i;
+
+ MSM_BUS_DBG("Allocating BIMC data\n");
+ binfo = kzalloc(sizeof(struct msm_bus_bimc_info), GFP_KERNEL);
+ if (!binfo) {
+ WARN(!binfo, "Couldn't alloc mem for bimc_info\n");
+ return NULL;
+ }
+
+ binfo->qos_freq = fab_pdata->qos_freq;
+
+ binfo->params.nmasters = fab_pdata->nmasters;
+ binfo->params.nslaves = fab_pdata->nslaves;
+ binfo->params.bus_id = fab_pdata->id;
+
+ for (i = 0; i < NUM_CTX; i++) {
+ binfo->cdata[i].mas = kzalloc(sizeof(struct
+ msm_bus_node_hw_info) * fab_pdata->nmasters * 2,
+ GFP_KERNEL);
+ if (!binfo->cdata[i].mas) {
+ MSM_BUS_ERR("Couldn't alloc mem for bimc master hw\n");
+ kfree(binfo);
+ return NULL;
+ }
+
+ binfo->cdata[i].slv = kzalloc(sizeof(struct
+ msm_bus_node_hw_info) * fab_pdata->nslaves * 2,
+ GFP_KERNEL);
+ if (!binfo->cdata[i].slv) {
+ MSM_BUS_DBG("Couldn't alloc mem for bimc slave hw\n");
+ kfree(binfo->cdata[i].mas);
+ kfree(binfo);
+ return NULL;
+ }
+ }
+
+ if (fab_pdata->virt) {
+ MSM_BUS_DBG("Don't get memory regions for virtual fabric\n");
+ goto skip_mem;
+ }
+
+ bimc_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!bimc_mem) {
+ MSM_BUS_ERR("Cannot get BIMC Base address\n");
+ kfree(binfo);
+ return NULL;
+ }
+
+ bimc_io = request_mem_region(bimc_mem->start,
+ resource_size(bimc_mem), pdev->name);
+ if (!bimc_io) {
+ MSM_BUS_ERR("BIMC memory unavailable\n");
+ kfree(binfo);
+ return NULL;
+ }
+
+ binfo->base = ioremap(bimc_mem->start, resource_size(bimc_mem));
+ if (!binfo->base) {
+ MSM_BUS_ERR("IOremap failed for BIMC!\n");
+ release_mem_region(bimc_mem->start, resource_size(bimc_mem));
+ kfree(binfo);
+ return NULL;
+ }
+
+skip_mem:
+ fab_pdata->hw_data = (void *)binfo;
+ return (void *)binfo;
+}
+
+static void free_commit_data(void *cdata)
+{
+ struct msm_bus_bimc_commit *cd = (struct msm_bus_bimc_commit *)cdata;
+
+ kfree(cd->mas);
+ kfree(cd->slv);
+ kfree(cd);
+}
+
/*
 * Turn the BKE (bus kill engine) on or off for one master port.
 * No-op if the enable bit already matches @req.  The ordering here is
 * deliberate: fixed-mode priorities are written before disabling the BKE,
 * the enable write is fenced with wmb(), and only then are priorities
 * cleared when enabling — do not reorder.
 */
static void bke_switch(
	void __iomem *baddr, uint32_t mas_index, bool req, int mode)
{
	uint32_t reg_val, val, cur_val;

	val = req << M_BKE_EN_EN_SHFT;
	reg_val = readl_relaxed(M_BKE_EN_ADDR(baddr, mas_index));
	cur_val = reg_val & M_BKE_EN_RMSK;
	if (val == cur_val)
		return;

	/* Restore default fixed priorities before dropping out of BKE */
	if (!req && mode == BIMC_QOS_MODE_FIXED)
		set_qos_mode(baddr, mas_index, 1, 1, 1);

	writel_relaxed(((reg_val & ~(M_BKE_EN_EN_BMSK)) | (val &
		M_BKE_EN_EN_BMSK)), M_BKE_EN_ADDR(baddr, mas_index));
	/* Make sure BKE on/off goes through before changing priorities */
	wmb();

	if (req)
		set_qos_mode(baddr, mas_index, 0, 0, 0);
}
+
/*
 * Program static (non-window-derived) BKE bandwidth limits for one master
 * port.  Unlike msm_bus_bimc_set_qos_bw(), the grant period comes directly
 * from the request (qbw->gp, ns) and the medium threshold from a percentage
 * (qbw->thmp).  Requires a non-zero QoS clock and a non-zero bw/gp pair.
 */
static void bimc_set_static_qos_bw(void __iomem *base, unsigned int qos_freq,
	int mport, struct msm_bus_bimc_qos_bw *qbw)
{
	int32_t bw_mbps, thh = 0, thm, thl, gc;
	int32_t gp;
	u64 temp;

	if (qos_freq == 0) {
		MSM_BUS_DBG("No QoS Frequency.\n");
		return;
	}

	if (!(qbw->bw && qbw->gp)) {
		MSM_BUS_DBG("No QoS Bandwidth or Window size\n");
		return;
	}

	/* Convert bandwidth to MBPS */
	temp = qbw->bw;
	bimc_div(&temp, 1000000);
	bw_mbps = temp;

	/* Grant period in clock cycles
	 * Grant period from bandwidth structure
	 * is in nano seconds, QoS freq is in KHz.
	 * Divide by 1000 to get clock cycles.
	 */
	gp = (qos_freq * qbw->gp) / (1000 * NSEC_PER_USEC);

	/* Grant count = BW in MBps * Grant period
	 * in micro seconds
	 */
	gc = bw_mbps * (qbw->gp / NSEC_PER_USEC);
	gc = min(gc, MAX_GC);

	/* Medium threshold = -((Medium Threshold percentage *
	 * Grant count) / 100)
	 */
	thm = -((qbw->thmp * gc) / 100);
	qbw->thm = thm;

	/* Low threshold = -(Grant count) */
	thl = -gc;
	qbw->thl = thl;

	MSM_BUS_DBG("%s: BKE parameters: gp %d, gc %d, thm %d thl %d thh %d",
			__func__, gp, gc, thm, thl, thh);

	/*
	 * NOTE(review): thl is passed twice and thh is never traced here —
	 * looks like the last argument should be thh; confirm against the
	 * trace_bus_bke_params() event definition before changing.
	 */
	trace_bus_bke_params(gc, gp, thl, thm, thl);
	set_qos_bw_regs(base, mport, thh, thm, thl, gp, gc);
}
+
+static void msm_bus_bimc_config_master(
+ struct msm_bus_fabric_registration *fab_pdata,
+ struct msm_bus_inode_info *info,
+ uint64_t req_clk, uint64_t req_bw)
+{
+ int mode, i, ports;
+ struct msm_bus_bimc_info *binfo;
+ uint64_t bw = 0;
+
+ binfo = (struct msm_bus_bimc_info *)fab_pdata->hw_data;
+ ports = info->node_info->num_mports;
+
+ /**
+ * Here check the details of dual configuration.
+ * Take actions based on different modes.
+ * Check for threshold if limiter mode, etc.
+ */
+
+ if (req_clk <= info->node_info->th[0]) {
+ mode = info->node_info->mode;
+ bw = info->node_info->bimc_bw[0];
+ } else if ((info->node_info->num_thresh > 1) &&
+ (req_clk <= info->node_info->th[1])) {
+ mode = info->node_info->mode;
+ bw = info->node_info->bimc_bw[1];
+ } else
+ mode = info->node_info->mode_thresh;
+
+ switch (mode) {
+ case BIMC_QOS_MODE_BYPASS:
+ case BIMC_QOS_MODE_FIXED:
+ for (i = 0; i < ports; i++)
+ bke_switch(binfo->base, info->node_info->qport[i],
+ BKE_OFF, mode);
+ break;
+ case BIMC_QOS_MODE_REGULATOR:
+ case BIMC_QOS_MODE_LIMITER:
+ for (i = 0; i < ports; i++) {
+ /* If not in fixed mode, update bandwidth */
+ if ((info->node_info->cur_lim_bw != bw)
+ && (mode != BIMC_QOS_MODE_FIXED)) {
+ struct msm_bus_bimc_qos_bw qbw;
+ qbw.ws = info->node_info->ws;
+ qbw.bw = bw;
+ qbw.gp = info->node_info->bimc_gp;
+ qbw.thmp = info->node_info->bimc_thmp;
+ bimc_set_static_qos_bw(binfo->base,
+ binfo->qos_freq,
+ info->node_info->qport[i], &qbw);
+ info->node_info->cur_lim_bw = bw;
+ MSM_BUS_DBG("%s: Qos is %d reqclk %llu bw %llu",
+ __func__, mode, req_clk, bw);
+ }
+ bke_switch(binfo->base, info->node_info->qport[i],
+ BKE_ON, mode);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void msm_bus_bimc_update_bw(struct msm_bus_inode_info *hop,
+ struct msm_bus_inode_info *info,
+ struct msm_bus_fabric_registration *fab_pdata,
+ void *sel_cdata, int *master_tiers,
+ int64_t add_bw)
+{
+ struct msm_bus_bimc_info *binfo;
+ struct msm_bus_bimc_qos_bw qbw;
+ int i;
+ int64_t bw;
+ int ports = info->node_info->num_mports;
+ struct msm_bus_bimc_commit *sel_cd =
+ (struct msm_bus_bimc_commit *)sel_cdata;
+
+ MSM_BUS_DBG("BIMC: Update bw for ID %d, with IID: %d: %lld\n",
+ info->node_info->id, info->node_info->priv_id, add_bw);
+
+ binfo = (struct msm_bus_bimc_info *)fab_pdata->hw_data;
+
+ if (info->node_info->num_mports == 0) {
+ MSM_BUS_DBG("BIMC: Skip Master BW\n");
+ goto skip_mas_bw;
+ }
+
+ ports = info->node_info->num_mports;
+ bw = INTERLEAVED_BW(fab_pdata, add_bw, ports);
+
+ for (i = 0; i < ports; i++) {
+ sel_cd->mas[info->node_info->masterp[i]].bw += bw;
+ sel_cd->mas[info->node_info->masterp[i]].hw_id =
+ info->node_info->mas_hw_id;
+ MSM_BUS_DBG("BIMC: Update mas_bw for ID: %d -> %llu\n",
+ info->node_info->priv_id,
+ sel_cd->mas[info->node_info->masterp[i]].bw);
+ if (info->node_info->hw_sel == MSM_BUS_RPM)
+ sel_cd->mas[info->node_info->masterp[i]].dirty = 1;
+ else {
+ if (!info->node_info->qport) {
+ MSM_BUS_DBG("No qos ports to update!\n");
+ break;
+ }
+ if (!(info->node_info->mode == BIMC_QOS_MODE_REGULATOR)
+ || (info->node_info->mode ==
+ BIMC_QOS_MODE_LIMITER)) {
+ MSM_BUS_DBG("Skip QoS reg programming\n");
+ break;
+ }
+
+ MSM_BUS_DBG("qport: %d\n", info->node_info->qport[i]);
+ qbw.bw = sel_cd->mas[info->node_info->masterp[i]].bw;
+ qbw.ws = info->node_info->ws;
+ /* Threshold low = 90% of bw */
+ qbw.thl = div_s64((90 * bw), 100);
+ /* Threshold medium = bw */
+ qbw.thm = bw;
+ /* Threshold high = 10% more than bw */
+ qbw.thh = div_s64((110 * bw), 100);
+ /* Check if info is a shared master.
+ * If it is, mark it dirty
+ * If it isn't, then set QOS Bandwidth.
+ * Also if dual-conf is set, don't program bw regs.
+ **/
+ if (!info->node_info->dual_conf &&
+ ((info->node_info->mode == BIMC_QOS_MODE_LIMITER) ||
+ (info->node_info->mode == BIMC_QOS_MODE_REGULATOR)))
+ msm_bus_bimc_set_qos_bw(binfo->base,
+ binfo->qos_freq,
+ info->node_info->qport[i], &qbw);
+ }
+ }
+
+skip_mas_bw:
+ ports = hop->node_info->num_sports;
+ MSM_BUS_DBG("BIMC: ID: %d, Sports: %d\n", hop->node_info->priv_id,
+ ports);
+
+ for (i = 0; i < ports; i++) {
+ sel_cd->slv[hop->node_info->slavep[i]].bw += add_bw;
+ sel_cd->slv[hop->node_info->slavep[i]].hw_id =
+ hop->node_info->slv_hw_id;
+ MSM_BUS_DBG("BIMC: Update slave_bw: ID: %d -> %llu\n",
+ hop->node_info->priv_id,
+ sel_cd->slv[hop->node_info->slavep[i]].bw);
+ MSM_BUS_DBG("BIMC: Update slave_bw: index: %d\n",
+ hop->node_info->slavep[i]);
+ /* Check if hop is a shared slave.
+ * If it is, mark it dirty
+ * If it isn't, then nothing to be done as the
+ * slaves are in bypass mode.
+ **/
+ if (hop->node_info->hw_sel == MSM_BUS_RPM) {
+ MSM_BUS_DBG("Slave dirty: %d, slavep: %d\n",
+ hop->node_info->priv_id,
+ hop->node_info->slavep[i]);
+ sel_cd->slv[hop->node_info->slavep[i]].dirty = 1;
+ }
+ }
+}
+
/*
 * Commit the accumulated BIMC bandwidth votes.  BIMC arbitration is owned
 * by the RPM, so this simply forwards the commit data to the remote-commit
 * path.  Always returns 0.
 */
static int msm_bus_bimc_commit(struct msm_bus_fabric_registration
	*fab_pdata, void *hw_data, void **cdata)
{
	MSM_BUS_DBG("\nReached BIMC Commit\n");
	msm_bus_remote_hw_commit(fab_pdata, hw_data, cdata);
	return 0;
}
+
/*
 * Apply (or remove) BKE throttling for a master node.  A non-zero
 * cur_lim_bw forces limiter mode; otherwise the node's configured mode is
 * used.  Bypass/fixed disable the BKE; regulator/limiter reprogram the
 * static bandwidth and enable the BKE on every QoS port, but only when the
 * requested limit differs from what is already programmed (cur_prg_bw).
 */
static void msm_bus_bimc_config_limiter(
	struct msm_bus_fabric_registration *fab_pdata,
	struct msm_bus_inode_info *info)
{
	struct msm_bus_bimc_info *binfo;
	int mode, i, ports;

	binfo = (struct msm_bus_bimc_info *)fab_pdata->hw_data;
	ports = info->node_info->num_mports;

	if (!info->node_info->qport) {
		MSM_BUS_DBG("No QoS Ports to init\n");
		return;
	}

	/* An active limit overrides the node's configured mode */
	if (info->cur_lim_bw)
		mode = BIMC_QOS_MODE_LIMITER;
	else
		mode = info->node_info->mode;

	switch (mode) {
	case BIMC_QOS_MODE_BYPASS:
	case BIMC_QOS_MODE_FIXED:
		for (i = 0; i < ports; i++)
			bke_switch(binfo->base, info->node_info->qport[i],
				BKE_OFF, mode);
		break;
	case BIMC_QOS_MODE_REGULATOR:
	case BIMC_QOS_MODE_LIMITER:
		/* Skip hardware writes if the limit is already programmed */
		if (info->cur_lim_bw != info->cur_prg_bw) {
			MSM_BUS_DBG("Enabled BKE throttling node %d to %llu\n",
				info->node_info->id, info->cur_lim_bw);
			trace_bus_bimc_config_limiter(info->node_info->id,
				info->cur_lim_bw);
			for (i = 0; i < ports; i++) {
				/* If not in fixed mode, update bandwidth */
				struct msm_bus_bimc_qos_bw qbw;

				qbw.ws = info->node_info->ws;
				qbw.bw = info->cur_lim_bw;
				qbw.gp = info->node_info->bimc_gp;
				qbw.thmp = info->node_info->bimc_thmp;
				bimc_set_static_qos_bw(binfo->base,
					binfo->qos_freq,
					info->node_info->qport[i], &qbw);
				bke_switch(binfo->base,
					info->node_info->qport[i],
					BKE_ON, mode);
				info->cur_prg_bw = qbw.bw;
			}
		}
		break;
	default:
		break;
	}
}
+
/*
 * One-time master register setup for @mode: fill in the relevant part of
 * @qmode (fixed priorities or limiter health levels), then program
 * priority, bandwidth and mode registers on every QoS port of the node.
 */
static void bimc_init_mas_reg(struct msm_bus_bimc_info *binfo,
	struct msm_bus_inode_info *info,
	struct msm_bus_bimc_qos_mode *qmode, int mode)
{
	int i;

	switch (mode) {
	case BIMC_QOS_MODE_FIXED:
		qmode->fixed.prio_level = info->node_info->prio_lvl;
		qmode->fixed.areq_prio_rd = info->node_info->prio_rd;
		qmode->fixed.areq_prio_wr = info->node_info->prio_wr;
		break;
	case BIMC_QOS_MODE_LIMITER:
		/* Only the lowest health level limits commands */
		qmode->rl.qhealth[0].limit_commands = 1;
		qmode->rl.qhealth[1].limit_commands = 0;
		qmode->rl.qhealth[2].limit_commands = 0;
		qmode->rl.qhealth[3].limit_commands = 0;
		break;
	default:
		break;
	}

	if (!info->node_info->qport) {
		MSM_BUS_DBG("No QoS Ports to init\n");
		return;
	}

	for (i = 0; i < info->node_info->num_mports; i++) {
		/* If not in bypass mode, update priority */
		if (mode != BIMC_QOS_MODE_BYPASS) {
			msm_bus_bimc_set_qos_prio(binfo->base,
				info->node_info->
				qport[i], mode, qmode);

			/* If not in fixed mode, update bandwidth */
			if (mode != BIMC_QOS_MODE_FIXED) {
				struct msm_bus_bimc_qos_bw qbw;
				qbw.ws = info->node_info->ws;
				qbw.bw = info->node_info->bimc_bw[0];
				qbw.gp = info->node_info->bimc_gp;
				qbw.thmp = info->node_info->bimc_thmp;
				bimc_set_static_qos_bw(binfo->base,
					binfo->qos_freq,
					info->node_info->qport[i], &qbw);
			}
		}

		/* set mode */
		msm_bus_bimc_set_qos_mode(binfo->base,
					info->node_info->qport[i],
					mode);
	}
}
+
+static void init_health_regs(struct msm_bus_bimc_info *binfo,
+ struct msm_bus_inode_info *info,
+ struct msm_bus_bimc_qos_mode *qmode,
+ int mode)
+{
+ int i;
+
+ if (mode == BIMC_QOS_MODE_LIMITER) {
+ qmode->rl.qhealth[0].limit_commands = 1;
+ qmode->rl.qhealth[1].limit_commands = 0;
+ qmode->rl.qhealth[2].limit_commands = 0;
+ qmode->rl.qhealth[3].limit_commands = 0;
+
+ if (!info->node_info->qport) {
+ MSM_BUS_DBG("No QoS Ports to init\n");
+ return;
+ }
+
+ for (i = 0; i < info->node_info->num_mports; i++) {
+ /* If not in bypass mode, update priority */
+ if (mode != BIMC_QOS_MODE_BYPASS)
+ msm_bus_bimc_set_qos_prio(binfo->base,
+ info->node_info->qport[i], mode, qmode);
+ }
+ }
+}
+
+
+static int msm_bus_bimc_mas_init(struct msm_bus_bimc_info *binfo,
+ struct msm_bus_inode_info *info)
+{
+ struct msm_bus_bimc_qos_mode *qmode;
+ qmode = kzalloc(sizeof(struct msm_bus_bimc_qos_mode),
+ GFP_KERNEL);
+ if (!qmode) {
+ MSM_BUS_WARN("Couldn't alloc prio data for node: %d\n",
+ info->node_info->id);
+ return -ENOMEM;
+ }
+
+ info->hw_data = (void *)qmode;
+
+ /**
+ * If the master supports dual configuration,
+ * configure registers for both modes
+ */
+ if (info->node_info->dual_conf)
+ bimc_init_mas_reg(binfo, info, qmode,
+ info->node_info->mode_thresh);
+ else if (info->node_info->nr_lim)
+ init_health_regs(binfo, info, qmode, BIMC_QOS_MODE_LIMITER);
+
+ bimc_init_mas_reg(binfo, info, qmode, info->node_info->mode);
+ return 0;
+}
+
+static void msm_bus_bimc_node_init(void *hw_data,
+ struct msm_bus_inode_info *info)
+{
+ struct msm_bus_bimc_info *binfo =
+ (struct msm_bus_bimc_info *)hw_data;
+
+ if (!IS_SLAVE(info->node_info->priv_id) &&
+ (info->node_info->hw_sel != MSM_BUS_RPM))
+ msm_bus_bimc_mas_init(binfo, info);
+}
+
/* Port halt is not needed on BIMC; stub kept to satisfy the hw_algo ops. */
static int msm_bus_bimc_port_halt(uint32_t haltid, uint8_t mport)
{
	return 0;
}
+
/* Port unhalt is not needed on BIMC; stub kept to satisfy the hw_algo ops. */
static int msm_bus_bimc_port_unhalt(uint32_t haltid, uint8_t mport)
{
	return 0;
}
+
/*
 * Enable or disable a bandwidth limit (@lim_bw, bytes/s) on all QoS ports
 * of @info.  When enabling, health registers are initialized only on the
 * first transition into a limit (lim_bw previously 0), bandwidth registers
 * are reprogrammed only when the limit changed, and the BKE is switched on.
 * When disabling, the BKE is switched off and the node reverts to its
 * configured mode.  Always returns 0.
 */
static int msm_bus_bimc_limit_mport(struct msm_bus_node_device_type *info,
				void __iomem *qos_base, uint32_t qos_off,
				uint32_t qos_delta, uint32_t qos_freq,
				bool enable_lim, u64 lim_bw)
{
	int mode;
	int i;

	if (ZERO_OR_NULL_PTR(info->node_info->qport)) {
		MSM_BUS_DBG("No QoS Ports to limit\n");
		return 0;
	}

	if (enable_lim && lim_bw) {
		mode = BIMC_QOS_MODE_LIMITER;

		/* First time entering a limit: set up health levels */
		if (!info->node_info->lim_bw) {
			struct msm_bus_bimc_qos_mode qmode;
			qmode.rl.qhealth[0].limit_commands = 1;
			qmode.rl.qhealth[1].limit_commands = 0;
			qmode.rl.qhealth[2].limit_commands = 0;
			qmode.rl.qhealth[3].limit_commands = 0;

			for (i = 0; i < info->node_info->num_qports; i++) {
				/*
				 * NOTE(review): this bypass check is always
				 * true here (mode was just set to LIMITER);
				 * kept for byte-identical behavior.
				 */
				if (mode != BIMC_QOS_MODE_BYPASS)
					msm_bus_bimc_set_qos_prio(qos_base,
						info->node_info->qport[i], mode,
						&qmode);
			}
		}

		for (i = 0; i < info->node_info->num_qports; i++) {
			struct msm_bus_bimc_qos_bw qbw;
			/* Reprogram bandwidth only when the limit changed */
			if ((info->node_info->lim_bw != lim_bw)) {
				qbw.ws = info->node_info->qos_params.ws;
				qbw.bw = lim_bw;
				qbw.gp = info->node_info->qos_params.gp;
				qbw.thmp = info->node_info->qos_params.thmp;
				bimc_set_static_qos_bw(qos_base, qos_freq,
					info->node_info->qport[i], &qbw);
			}
			bke_switch(qos_base, info->node_info->qport[i],
				BKE_ON, mode);
		}
		info->node_info->lim_bw = lim_bw;
	} else {
		/* Drop the limit: BKE off, revert to configured mode */
		mode = info->node_info->qos_params.mode;
		for (i = 0; i < info->node_info->num_qports; i++) {
			bke_switch(qos_base, info->node_info->qport[i],
				BKE_OFF, mode);
		}
	}
	info->node_info->qos_params.cur_mode = mode;
	return 0;
}
+
+static bool msm_bus_bimc_update_bw_reg(int mode)
+{
+ bool ret = false;
+
+ if ((mode == BIMC_QOS_MODE_LIMITER)
+ || (mode == BIMC_QOS_MODE_REGULATOR))
+ ret = true;
+
+ return ret;
+}
+
+static int msm_bus_bimc_qos_init(struct msm_bus_node_device_type *info,
+ void __iomem *qos_base,
+ uint32_t qos_off, uint32_t qos_delta,
+ uint32_t qos_freq)
+{
+ int i;
+ struct msm_bus_bimc_qos_mode qmode;
+
+ switch (info->node_info->qos_params.mode) {
+ case BIMC_QOS_MODE_FIXED:
+ qmode.fixed.prio_level = info->node_info->qos_params.prio_lvl;
+ qmode.fixed.areq_prio_rd = info->node_info->qos_params.prio_rd;
+ qmode.fixed.areq_prio_wr = info->node_info->qos_params.prio_wr;
+ break;
+ case BIMC_QOS_MODE_LIMITER:
+ qmode.rl.qhealth[0].limit_commands = 1;
+ qmode.rl.qhealth[1].limit_commands = 0;
+ qmode.rl.qhealth[2].limit_commands = 0;
+ qmode.rl.qhealth[3].limit_commands = 0;
+ break;
+ default:
+ break;
+ }
+
+ if (ZERO_OR_NULL_PTR(info->node_info->qport)) {
+ MSM_BUS_DBG("No QoS Ports to init\n");
+ return 0;
+ }
+
+ for (i = 0; i < info->node_info->num_qports; i++) {
+ /* If not in bypass mode, update priority */
+ if (info->node_info->qos_params.mode != BIMC_QOS_MODE_BYPASS)
+ msm_bus_bimc_set_qos_prio(qos_base, info->node_info->
+ qport[i], info->node_info->qos_params.mode,
+ &qmode);
+
+ /* set mode */
+ if (info->node_info->qos_params.mode == BIMC_QOS_MODE_LIMITER)
+ bke_switch(qos_base, info->node_info->qport[i],
+ BKE_OFF, BIMC_QOS_MODE_FIXED);
+ else
+ msm_bus_bimc_set_qos_mode(qos_base,
+ info->node_info->qport[i],
+ info->node_info->qos_params.mode);
+ }
+
+ return 0;
+}
+
/*
 * Apply the node's aggregated DUAL-context AB vote to its QoS ports when it
 * is in limiter/regulator mode.  The vote is divided across ports; a
 * non-zero share programs the static bandwidth and turns the BKE on, a zero
 * share reverts the port to fixed mode with the BKE off.  Always returns 0.
 */
static int msm_bus_bimc_set_bw(struct msm_bus_node_device_type *dev,
			void __iomem *qos_base, uint32_t qos_off,
			uint32_t qos_delta, uint32_t qos_freq)
{
	struct msm_bus_bimc_qos_bw qbw;
	int i;
	int64_t bw = 0;
	int ret = 0;
	struct msm_bus_node_info_type *info = dev->node_info;

	if (info && info->num_qports &&
		((info->qos_params.mode == BIMC_QOS_MODE_LIMITER) ||
		(info->qos_params.mode == BIMC_QOS_MODE_REGULATOR))) {
		/* Per-port share of the dual-context AB vote */
		bw = msm_bus_div64(info->num_qports,
				dev->node_ab.ab[DUAL_CTX]);

		for (i = 0; i < info->num_qports; i++) {
			MSM_BUS_DBG("BIMC: Update mas_bw for ID: %d -> %llu\n",
				info->id, bw);

			if (!info->qport) {
				MSM_BUS_DBG("No qos ports to update!\n");
				break;
			}

			/*
			 * NOTE(review): qbw.ws is never set here; harmless
			 * today because bimc_set_static_qos_bw() derives the
			 * window from qbw.gp instead — confirm if ws gains a
			 * consumer.
			 */
			qbw.bw = bw + info->qos_params.bw_buffer;
			trace_bus_bimc_config_limiter(info->id, bw);

			/* Default to gp of 5us */
			qbw.gp = (info->qos_params.gp ?
					info->qos_params.gp : 5000);
			/* Default to thmp of 50% */
			qbw.thmp = (info->qos_params.thmp ?
					info->qos_params.thmp : 50);
			/*
			 * If the BW vote is 0 then set the QoS mode to
			 * Fixed.
			 */
			if (bw) {
				bimc_set_static_qos_bw(qos_base, qos_freq,
					info->qport[i], &qbw);
				bke_switch(qos_base, info->qport[i],
					BKE_ON, info->qos_params.mode);
			} else {
				bke_switch(qos_base, info->qport[i],
					BKE_OFF, BIMC_QOS_MODE_FIXED);
			}
		}
	}
	return ret;
}
+
/*
 * Register the BIMC implementations of the fabric hardware-algorithm ops
 * and set fabric-wide defaults (interleaving on; RPM commits for non-AHB
 * fabrics, since BIMC slave registers are owned by the RPM).
 * Always returns 0.
 */
int msm_bus_bimc_hw_init(struct msm_bus_fabric_registration *pdata,
	struct msm_bus_hw_algorithm *hw_algo)
{
	/* Set interleaving to true by default */
	MSM_BUS_DBG("\nInitializing BIMC...\n");
	pdata->il_flag = true;
	hw_algo->allocate_commit_data = msm_bus_bimc_allocate_commit_data;
	hw_algo->allocate_hw_data = msm_bus_bimc_allocate_bimc_data;
	hw_algo->node_init = msm_bus_bimc_node_init;
	hw_algo->free_commit_data = free_commit_data;
	hw_algo->update_bw = msm_bus_bimc_update_bw;
	hw_algo->commit = msm_bus_bimc_commit;
	hw_algo->port_halt = msm_bus_bimc_port_halt;
	hw_algo->port_unhalt = msm_bus_bimc_port_unhalt;
	hw_algo->config_master = msm_bus_bimc_config_master;
	hw_algo->config_limiter = msm_bus_bimc_config_limiter;
	hw_algo->update_bw_reg = msm_bus_bimc_update_bw_reg;
	/* BIMC slaves are shared. Slave registers are set through RPM */
	if (!pdata->ahb)
		pdata->rpm_enabled = 1;
	return 0;
}
+
+int msm_bus_bimc_set_ops(struct msm_bus_node_device_type *bus_dev)
+{
+ if (!bus_dev)
+ return -ENODEV;
+ else {
+ bus_dev->fabdev->noc_ops.qos_init = msm_bus_bimc_qos_init;
+ bus_dev->fabdev->noc_ops.set_bw = msm_bus_bimc_set_bw;
+ bus_dev->fabdev->noc_ops.limit_mport = msm_bus_bimc_limit_mport;
+ bus_dev->fabdev->noc_ops.update_bw_reg =
+ msm_bus_bimc_update_bw_reg;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(msm_bus_bimc_set_ops);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_bimc.h b/drivers/soc/qcom/msm_bus/msm_bus_bimc.h
new file mode 100644
index 0000000000000..12c8325e98630
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_bimc.h
@@ -0,0 +1,127 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_BIMC_H
+#define _ARCH_ARM_MACH_MSM_BUS_BIMC_H
+
/* Static topology parameters of one BIMC fabric */
struct msm_bus_bimc_params {
	uint32_t bus_id;
	uint32_t addr_width;
	uint32_t data_width;
	uint32_t nmasters;
	uint32_t nslaves;
};

/* Per-context master/slave vote arrays handed to the commit path */
struct msm_bus_bimc_commit {
	struct msm_bus_node_hw_info *mas;
	struct msm_bus_node_hw_info *slv;
};

/* Runtime state for a BIMC fabric (stored in fab_pdata->hw_data) */
struct msm_bus_bimc_info {
	void __iomem *base;		/* mapped register region */
	uint32_t base_addr;
	uint32_t qos_freq;		/* QoS clock, KHz */
	struct msm_bus_bimc_params params;
	struct msm_bus_bimc_commit cdata[NUM_CTX];
};

struct msm_bus_bimc_node {
	uint32_t conn_mask;
	uint32_t data_width;
	uint8_t slv_arb_mode;
};

/* Slave arbitration schemes */
enum msm_bus_bimc_arb_mode {
	BIMC_ARB_MODE_RR = 0,
	BIMC_ARB_MODE_PRIORITY_RR,
	BIMC_ARB_MODE_TIERED_RR,
};


enum msm_bus_bimc_interleave {
	BIMC_INTERLEAVE_NONE = 0,
	BIMC_INTERLEAVE_ODD,
	BIMC_INTERLEAVE_EVEN,
};

/* One address segment decoded by a slave */
struct msm_bus_bimc_slave_seg {
	bool enable;
	uint64_t start_addr;
	uint64_t seg_size;
	uint8_t interleave;	/* enum msm_bus_bimc_interleave */
};

/* QoS operating modes for a master port */
enum msm_bus_bimc_qos_mode_type {
	BIMC_QOS_MODE_FIXED = 0,
	BIMC_QOS_MODE_LIMITER,
	BIMC_QOS_MODE_BYPASS,
	BIMC_QOS_MODE_REGULATOR,
};
+
/* One BKE health level (priority + whether it limits commands) */
struct msm_bus_bimc_qos_health {
	bool limit_commands;
	uint32_t areq_prio;
	uint32_t prio_level;
};

/* Register fields for fixed-priority mode */
struct msm_bus_bimc_mode_fixed {
	uint32_t prio_level;
	uint32_t areq_prio_rd;
	uint32_t areq_prio_wr;
};

/* Register fields for rate-limited (limiter/regulator) modes */
struct msm_bus_bimc_mode_rl {
	uint8_t qhealthnum;
	struct msm_bus_bimc_qos_health qhealth[4];
};

/* Union-like container for one master's QoS configuration */
struct msm_bus_bimc_qos_mode {
	uint8_t mode;	/* enum msm_bus_bimc_qos_mode_type */
	struct msm_bus_bimc_mode_fixed fixed;
	struct msm_bus_bimc_mode_rl rl;
};

struct msm_bus_bimc_qos_bw {
	uint64_t bw; /* bw is in Bytes/sec */
	uint32_t ws; /* Window size in nano seconds*/
	int64_t thh; /* Threshold high, bytes per second */
	int64_t thm; /* Threshold medium, bytes per second */
	int64_t thl; /* Threshold low, bytes per second */
	u32 gp; /* Grant Period in micro seconds */
	u32 thmp; /* Threshold medium in percentage */
};

struct msm_bus_bimc_clk_gate {
	bool core_clk_gate_en;
	bool arb_clk_gate_en; /* For arbiter */
	bool port_clk_gate_en; /* For regs on BIMC core clock */
};
+
/* Slave address-segment and clock-gating configuration */
void msm_bus_bimc_set_slave_seg(struct msm_bus_bimc_info *binfo,
	uint32_t slv_index, uint32_t seg_index,
	struct msm_bus_bimc_slave_seg *bsseg);
void msm_bus_bimc_set_slave_clk_gate(struct msm_bus_bimc_info *binfo,
	uint32_t slv_index, struct msm_bus_bimc_clk_gate *bgate);
void msm_bus_bimc_set_mas_clk_gate(struct msm_bus_bimc_info *binfo,
	uint32_t mas_index, struct msm_bus_bimc_clk_gate *bgate);
/* Slave arbitration enable/query */
void msm_bus_bimc_arb_en(struct msm_bus_bimc_info *binfo,
	uint32_t slv_index, bool en);
/* Fabric/master/slave parameter readback */
void msm_bus_bimc_get_params(struct msm_bus_bimc_info *binfo,
	struct msm_bus_bimc_params *params);
void msm_bus_bimc_get_mas_params(struct msm_bus_bimc_info *binfo,
	uint32_t mas_index, struct msm_bus_bimc_node *mparams);
void msm_bus_bimc_get_slv_params(struct msm_bus_bimc_info *binfo,
	uint32_t slv_index, struct msm_bus_bimc_node *sparams);
bool msm_bus_bimc_get_arb_en(struct msm_bus_bimc_info *binfo,
	uint32_t slv_index);
+
+#endif /*_ARCH_ARM_MACH_MSM_BUS_BIMC_H*/
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_board_8974.c b/drivers/soc/qcom/msm_bus/msm_bus_board_8974.c
new file mode 100644
index 0000000000000..1fec726de87ed
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_board_8974.c
@@ -0,0 +1,2021 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include "msm_bus_core.h"
+#include "msm_bus_noc.h"
+#include "msm_bus_bimc.h"
+
/* Table sizes for the 8974 board topology */
#define NMASTERS 120
#define NSLAVES 150
#define NFAB_8974 7

/*
 * Physical master port numbers per fabric.  Numbering restarts at 0 for
 * each NOC, so the same value can appear under different fabrics.
 */
enum msm_bus_8974_master_ports_type {
	/* System NOC Masters */
	MASTER_PORT_LPASS_AHB = 0,
	MASTER_PORT_QDSS_BAM,
	MASTER_PORT_SNOC_CFG,
	MASTER_PORT_GW_BIMC_SNOC,
	MASTER_PORT_GW_CNOC_SNOC,
	MASTER_PORT_CRYPTO_CORE0,
	MASTER_PORT_CRYPTO_CORE1,
	MASTER_PORT_LPASS_PROC,
	MASTER_PORT_MSS,
	MASTER_PORT_MSS_NAV,
	MASTER_PORT_OCMEM_DMA,
	MASTER_PORT_GW_PNOC_SNOC,
	MASTER_PORT_WCSS,
	MASTER_PORT_QDSS_ETR,
	MASTER_PORT_USB3,

	/* MMSS NOC Masters */
	MASTER_PORT_GW_CNOC_MNOC_MMSS_CFG = 0,
	MASTER_PORT_GW_CNOC_MNOC_CFG,
	MASTER_PORT_GFX3D_PORT0,
	MASTER_PORT_GFX3D_PORT1,
	MASTER_PORT_JPEG,
	MASTER_PORT_MDP,
	/* Venus video core */
	MASTER_PORT_VIDEO_PORT0,
	MASTER_PORT_VIDEO_PORT1,
	MASTER_PORT_VFE = 16,

	/* BIMC Masters */
	MASTER_PORT_KMPSS_M0 = 0,
	MASTER_PORT_KMPSS_M1,
	MASTER_PORT_MSS_PROC,
	MASTER_PORT_GW_MNOC_BIMC_0,
	MASTER_PORT_GW_MNOC_BIMC_1,
	MASTER_PORT_GW_SNOC_BIMC_0,
	MASTER_PORT_GW_SNOC_BIMC_1,

	/* OCMEM NOC Masters */
	MASTER_PORT_CNOC_ONOC_CFG = 0,
	MASTER_PORT_JPEG_OCMEM,
	MASTER_PORT_MDP_OCMEM,
	MASTER_PORT_VIDEO_P0_OCMEM,
	MASTER_PORT_VIDEO_P1_OCMEM,
	MASTER_PORT_VFE_OCMEM,

	/* Peripheral NOC Masters */
	MASTER_PORT_SDCC_1 = 0,
	MASTER_PORT_SDCC_3,
	MASTER_PORT_SDCC_2,
	MASTER_PORT_SDCC_4,
	MASTER_PORT_TSIF,
	MASTER_PORT_BAM_DMA,
	MASTER_PORT_BLSP_2,
	MASTER_PORT_USB_HSIC,
	MASTER_PORT_BLSP_1,
	MASTER_PORT_USB_HS,
	MASTER_PORT_PNOC_CFG,
	MASTER_PORT_GW_SNOC_PNOC,

	/* Config NOC Masters */
	MASTER_PORT_RPM_INST = 0,
	MASTER_PORT_RPM_DATA,
	MASTER_PORT_RPM_SYS,
	MASTER_PORT_DEHR,
	MASTER_PORT_QDSS_DAP,
	MASTER_PORT_SPDM,
	MASTER_PORT_TIC,
	MASTER_PORT_GW_SNOC_CNOC,
};
+
/*
 * Physical slave port numbers per fabric; like the master ports, numbering
 * restarts at 0 for each NOC.  Explicit initializers mark gaps in the
 * hardware port map.
 */
enum msm_bus_8974_slave_ports_type {
	/* System NOC Slaves */
	SLAVE_PORT_KMPSS = 1,
	SLAVE_PORT_LPASS,
	SLAVE_PORT_USB3 = 4,
	SLAVE_PORT_WCSS = 6,
	SLAVE_PORT_GW_SNOC_BIMC_P0,
	SLAVE_PORT_GW_SNOC_BIMC_P1,
	SLAVE_PORT_GW_SNOC_CNOC,
	SLAVE_PORT_OCIMEM,
	SLAVE_PORT_SNOC_OCMEM,
	SLAVE_PORT_GW_SNOC_PNOC,
	SLAVE_PORT_SERVICE_SNOC,
	SLAVE_PORT_QDSS_STM,

	/* MMSS NOC Slaves */
	SLAVE_PORT_CAMERA_CFG = 0,
	SLAVE_PORT_DISPLAY_CFG,
	SLAVE_PORT_OCMEM_CFG,
	SLAVE_PORT_CPR_CFG,
	SLAVE_PORT_CPR_XPU_CFG,
	SLAVE_PORT_MISC_CFG = 6,
	SLAVE_PORT_MISC_XPU_CFG,
	SLAVE_PORT_VENUS_CFG,
	SLAVE_PORT_GFX3D_CFG,
	SLAVE_PORT_MMSS_CLK_CFG = 11,
	SLAVE_PORT_MMSS_CLK_XPU_CFG,
	SLAVE_PORT_MNOC_MPU_CFG,
	SLAVE_PORT_ONOC_MPU_CFG,
	SLAVE_PORT_GW_MMSS_BIMC_P0 = 16,
	SLAVE_PORT_GW_MMSS_BIMC_P1,
	SLAVE_PORT_SERVICE_MNOC,

	/* BIMC Slaves */
	SLAVE_PORT_EBI1_CH0 = 0,
	SLAVE_PORT_EBI1_CH1,
	SLAVE_PORT_KMPSS_L2,
	SLAVE_PORT_GW_BIMC_SNOC,

	/* OCMEM NOC Slaves */
	SLAVE_PORT_OCMEM_P0 = 0,
	SLAVE_PORT_OCMEM_P1,
	SLAVE_PORT_SERVICE_ONOC,

	/*Peripheral NOC Slaves */
	SLAVE_PORT_SDCC_1 = 0,
	SLAVE_PORT_SDCC_3,
	SLAVE_PORT_SDCC_2,
	SLAVE_PORT_SDCC_4,
	SLAVE_PORT_TSIF,
	SLAVE_PORT_BAM_DMA,
	SLAVE_PORT_BLSP_2,
	SLAVE_PORT_USB_HSIC,
	SLAVE_PORT_BLSP_1,
	SLAVE_PORT_USB_HS,
	SLAVE_PORT_PDM,
	SLAVE_PORT_PERIPH_APU_CFG,
	SLAVE_PORT_PNOC_MPU_CFG,
	SLAVE_PORT_PRNG,
	SLAVE_PORT_GW_PNOC_SNOC,
	SLAVE_PORT_SERVICE_PNOC,

	/* Config NOC slaves */
	SLAVE_PORT_CLK_CTL = 1,
	SLAVE_PORT_CNOC_MSS,
	SLAVE_PORT_SECURITY,
	SLAVE_PORT_TCSR,
	SLAVE_PORT_TLMM,
	SLAVE_PORT_CRYPTO_0_CFG,
	SLAVE_PORT_CRYPTO_1_CFG,
	SLAVE_PORT_IMEM_CFG,
	SLAVE_PORT_MESSAGE_RAM,
	SLAVE_PORT_BIMC_CFG,
	SLAVE_PORT_BOOT_ROM,
	SLAVE_PORT_CNOC_MNOC_MMSS_CFG,
	SLAVE_PORT_PMIC_ARB,
	SLAVE_PORT_SPDM_WRAPPER,
	SLAVE_PORT_DEHR_CFG,
	SLAVE_PORT_MPM,
	SLAVE_PORT_QDSS_CFG,
	SLAVE_PORT_RBCPR_CFG,
	SLAVE_PORT_RBCPR_QDSS_APU_CFG,
	SLAVE_PORT_CNOC_MNOC_CFG,
	SLAVE_PORT_SNOC_MPU_CFG,
	SLAVE_PORT_CNOC_ONOC_CFG,
	SLAVE_PORT_PNOC_CFG,
	SLAVE_PORT_SNOC_CFG,
	SLAVE_PORT_EBI1_DLL_CFG,
	SLAVE_PORT_PHY_APU_CFG,
	SLAVE_PORT_EBI1_PHY_CFG,
	SLAVE_PORT_RPM,
	SLAVE_PORT_GW_CNOC_SNOC,
	SLAVE_PORT_SERVICE_CNOC,
};
+
+/* Hardware IDs for RPM */
/* Master hardware IDs as understood by the RPM firmware */
enum msm_bus_8974_mas_hw_id {
	MAS_APPSS_PROC = 0,
	MAS_AMSS_PROC,
	MAS_MNOC_BIMC,
	MAS_SNOC_BIMC,
	MAS_CNOC_MNOC_MMSS_CFG,
	MAS_CNOC_MNOC_CFG,
	MAS_GFX3D,
	MAS_JPEG,
	MAS_MDP,
	MAS_VIDEO_P0,
	MAS_VIDEO_P1,
	MAS_VFE,
	MAS_CNOC_ONOC_CFG,
	MAS_JPEG_OCMEM,
	MAS_MDP_OCMEM,
	MAS_VIDEO_P0_OCMEM,
	MAS_VIDEO_P1_OCMEM,
	MAS_VFE_OCMEM,
	MAS_LPASS_AHB,
	MAS_QDSS_BAM,
	MAS_SNOC_CFG,
	MAS_BIMC_SNOC,
	MAS_CNOC_SNOC,
	MAS_CRYPTO_CORE0,
	MAS_CRYPTO_CORE1,
	MAS_LPASS_PROC,
	MAS_MSS,
	MAS_MSS_NAV,
	MAS_OCMEM_DMA,
	MAS_PNOC_SNOC,
	MAS_WCSS,
	MAS_QDSS_ETR,
	MAS_USB3,
	MAS_SDCC_1,
	MAS_SDCC_3,
	MAS_SDCC_2,
	MAS_SDCC_4,
	MAS_TSIF,
	MAS_BAM_DMA,
	MAS_BLSP_2,
	MAS_USB_HSIC,
	MAS_BLSP_1,
	MAS_USB_HS,
	MAS_PNOC_CFG,
	MAS_SNOC_PNOC,
	MAS_RPM_INST,
	MAS_RPM_DATA,
	MAS_RPM_SYS,
	MAS_DEHR,
	MAS_QDSS_DAP,
	MAS_SPDM,
	MAS_TIC,
	MAS_SNOC_CNOC,
	MAS_OVNOC_SNOC,
	MAS_OVNOC_ONOC,
	MAS_V_OCMEM_GFX3D,
	MAS_ONOC_OVNOC,
	MAS_SNOC_OVNOC,
};
+
/*
 * Slave-node hardware IDs for the RPM; referenced by the .slv_hw_id
 * field of the msm_bus_node_info tables below.  Values are assigned
 * sequentially from 0, so the ordering appears to be part of the
 * interface shared with the RPM firmware — do not reorder without
 * confirming against the RPM side.
 */
enum msm_bus_8974_slv_hw_id {
	SLV_EBI = 0,
	SLV_APSS_L2,
	SLV_BIMC_SNOC,
	SLV_CAMERA_CFG,
	SLV_DISPLAY_CFG,
	SLV_OCMEM_CFG,
	SLV_CPR_CFG,
	SLV_CPR_XPU_CFG,
	SLV_MISC_CFG,
	SLV_MISC_XPU_CFG,
	SLV_VENUS_CFG,
	SLV_GFX3D_CFG,
	SLV_MMSS_CLK_CFG,
	SLV_MMSS_CLK_XPU_CFG,
	SLV_MNOC_MPU_CFG,
	SLV_ONOC_MPU_CFG,
	SLV_MMSS_BIMC,
	SLV_SERVICE_MNOC,
	SLV_OCMEM,
	SLV_SERVICE_ONOC,
	SLV_APPSS,
	SLV_LPASS,
	SLV_USB3,
	SLV_WCSS,
	SLV_SNOC_BIMC,
	SLV_SNOC_CNOC,
	SLV_OCIMEM,
	SLV_SNOC_OCMEM,
	SLV_SNOC_PNOC,
	SLV_SERVICE_SNOC,
	SLV_QDSS_STM,
	SLV_SDCC_1,
	SLV_SDCC_3,
	SLV_SDCC_2,
	SLV_SDCC_4,
	SLV_TSIF,
	SLV_BAM_DMA,
	SLV_BLSP_2,
	SLV_USB_HSIC,
	SLV_BLSP_1,
	SLV_USB_HS,
	SLV_PDM,
	SLV_PERIPH_APU_CFG,
	SLV_MPU_CFG,
	SLV_PRNG,
	SLV_PNOC_SNOC,
	SLV_SERVICE_PNOC,
	SLV_CLK_CTL,
	SLV_CNOC_MSS,
	SLV_SECURITY,
	SLV_TCSR,
	SLV_TLMM,
	SLV_CRYPTO_0_CFG,
	SLV_CRYPTO_1_CFG,
	SLV_IMEM_CFG,
	SLV_MESSAGE_RAM,
	SLV_BIMC_CFG,
	SLV_BOOT_ROM,
	SLV_CNOC_MNOC_MMSS_CFG,
	SLV_PMIC_ARB,
	SLV_SPDM_WRAPPER,
	SLV_DEHR_CFG,
	SLV_MPM,
	SLV_QDSS_CFG,
	SLV_RBCPR_CFG,
	SLV_RBCPR_QDSS_APU_CFG,
	SLV_CNOC_MNOC_CFG,
	SLV_SNOC_MPU_CFG,
	SLV_CNOC_ONOC_CFG,
	SLV_PNOC_CFG,
	SLV_SNOC_CFG,
	SLV_EBI1_DLL_CFG,
	SLV_PHY_APU_CFG,
	SLV_EBI1_PHY_CFG,
	SLV_RPM,
	SLV_CNOC_SNOC,
	SLV_SERVICE_CNOC,
	SLV_SNOC_OVNOC,
	SLV_ONOC_OVNOC,
	SLV_OVNOC_ONOC,
	SLV_OVNOC_SNOC,
};
+
/*
 * Per-node interconnect IDs, indexed by master/slave number.
 * NMASTERS/NSLAVES are defined elsewhere in this file — presumably the
 * totals of the enums above; confirm when touching those enums.
 */
static uint32_t master_iids[NMASTERS];
static uint32_t slave_iids[NSLAVES];
+
+/* System NOC nodes */
+static int mport_lpass_ahb[] = {MASTER_PORT_LPASS_AHB,};
+static int mport_qdss_bam[] = {MASTER_PORT_QDSS_BAM,};
+static int mport_snoc_cfg[] = {MASTER_PORT_SNOC_CFG,};
+static int mport_gw_bimc_snoc[] = {MASTER_PORT_GW_BIMC_SNOC,};
+static int mport_gw_cnoc_snoc[] = {MASTER_PORT_GW_CNOC_SNOC,};
+static int mport_crypto_core0[] = {MASTER_PORT_CRYPTO_CORE0,};
+static int mport_crypto_core1[] = {MASTER_PORT_CRYPTO_CORE1};
+static int mport_lpass_proc[] = {MASTER_PORT_LPASS_PROC};
+static int mport_mss[] = {MASTER_PORT_MSS};
+static int mport_mss_nav[] = {MASTER_PORT_MSS_NAV};
+static int mport_ocmem_dma[] = {MASTER_PORT_OCMEM_DMA};
+static int mport_gw_pnoc_snoc[] = {MASTER_PORT_GW_PNOC_SNOC};
+static int mport_wcss[] = {MASTER_PORT_WCSS};
+static int mport_qdss_etr[] = {MASTER_PORT_QDSS_ETR};
+static int mport_usb3[] = {MASTER_PORT_USB3};
+
+static int sport_kmpss[] = {SLAVE_PORT_KMPSS};
+static int sport_lpass[] = {SLAVE_PORT_LPASS};
+static int sport_usb3[] = {SLAVE_PORT_USB3};
+static int sport_wcss[] = {SLAVE_PORT_WCSS};
+static int sport_gw_snoc_bimc[] = {
+ SLAVE_PORT_GW_SNOC_BIMC_P0,
+ SLAVE_PORT_GW_SNOC_BIMC_P1,
+ };
+static int sport_gw_snoc_cnoc[] = {SLAVE_PORT_GW_SNOC_CNOC};
+static int sport_ocimem[] = {SLAVE_PORT_OCIMEM};
+static int sport_snoc_ocmem[] = {SLAVE_PORT_SNOC_OCMEM};
+static int sport_gw_snoc_pnoc[] = {SLAVE_PORT_GW_SNOC_PNOC};
+static int sport_service_snoc[] = {SLAVE_PORT_SERVICE_SNOC};
+static int sport_qdss_stm[] = {SLAVE_PORT_QDSS_STM};
+
+
+/* MMSS NOC nodes */
+static int mport_gw_cnoc_mnoc_cfg[] = {
+ MASTER_PORT_GW_CNOC_MNOC_MMSS_CFG,
+ MASTER_PORT_GW_CNOC_MNOC_CFG,
+};
+static int mport_gfx3d[] = {
+ MASTER_PORT_GFX3D_PORT0,
+ MASTER_PORT_GFX3D_PORT1,
+};
+static int mport_jpeg[] = {MASTER_PORT_JPEG};
+static int mport_mdp[] = {MASTER_PORT_MDP};
+static int mport_video_port0[] = {MASTER_PORT_VIDEO_PORT0};
+static int mport_video_port1[] = {MASTER_PORT_VIDEO_PORT1};
+static int mport_vfe[] = {MASTER_PORT_VFE};
+
+static int sport_camera_cfg[] = {SLAVE_PORT_CAMERA_CFG};
+static int sport_display_cfg[] = {SLAVE_PORT_DISPLAY_CFG};
+static int sport_ocmem_cfg[] = {SLAVE_PORT_OCMEM_CFG};
+static int sport_cpr_cfg[] = {SLAVE_PORT_CPR_CFG};
+static int sport_cpr_xpu_cfg[] = {SLAVE_PORT_CPR_XPU_CFG,};
+static int sport_misc_cfg[] = {SLAVE_PORT_MISC_CFG};
+static int sport_misc_xpu_cfg[] = {SLAVE_PORT_MISC_XPU_CFG};
+static int sport_venus_cfg[] = {SLAVE_PORT_VENUS_CFG};
+static int sport_gfx3d_cfg[] = {SLAVE_PORT_GFX3D_CFG};
+static int sport_mmss_clk_cfg[] = {SLAVE_PORT_MMSS_CLK_CFG};
+static int sport_mmss_clk_xpu_cfg[] = {
+ SLAVE_PORT_MMSS_CLK_XPU_CFG
+};
+static int sport_mnoc_mpu_cfg[] = {SLAVE_PORT_MNOC_MPU_CFG};
+static int sport_onoc_mpu_cfg[] = {SLAVE_PORT_ONOC_MPU_CFG};
+static int sport_gw_mmss_bimc[] = {
+ SLAVE_PORT_GW_MMSS_BIMC_P0,
+ SLAVE_PORT_GW_MMSS_BIMC_P1,
+};
+static int sport_service_mnoc[] = {SLAVE_PORT_SERVICE_MNOC};
+
+/* BIMC Nodes */
+
+static int mport_kmpss_m0[] = {MASTER_PORT_KMPSS_M0,};
+static int mport_kmpss_m1[] = {MASTER_PORT_KMPSS_M1};
+static int mport_mss_proc[] = {MASTER_PORT_MSS_PROC};
+static int mport_gw_mnoc_bimc[] = {
+ MASTER_PORT_GW_MNOC_BIMC_0,
+ MASTER_PORT_GW_MNOC_BIMC_1,
+};
+static int mport_gw_snoc_bimc[] = {
+ MASTER_PORT_GW_SNOC_BIMC_0,
+ MASTER_PORT_GW_SNOC_BIMC_1,
+};
+
+static int sport_ebi1[] = {
+ SLAVE_PORT_EBI1_CH0,
+ SLAVE_PORT_EBI1_CH1,
+};
+static int sport_kmpss_l2[] = {SLAVE_PORT_KMPSS_L2,};
+static int sport_gw_bimc_snoc[] = {SLAVE_PORT_GW_BIMC_SNOC,};
+
+/* OCMEM NOC Nodes */
+static int mport_cnoc_onoc_cfg[] = {
+ MASTER_PORT_CNOC_ONOC_CFG,
+};
+static int mport_jpeg_ocmem[] = {MASTER_PORT_JPEG_OCMEM,};
+static int mport_mdp_ocmem[] = {MASTER_PORT_MDP_OCMEM,};
+static int mport_video_p0_ocmem[] = {
+ MASTER_PORT_VIDEO_P0_OCMEM,
+};
+static int mport_video_p1_ocmem[] = {
+ MASTER_PORT_VIDEO_P1_OCMEM,
+};
+static int mport_vfe_ocmem[] = {MASTER_PORT_VFE_OCMEM,};
+static int sport_ocmem[] = {
+ SLAVE_PORT_OCMEM_P0,
+ SLAVE_PORT_OCMEM_P1,
+};
+
+static int sport_service_onoc[] = {SLAVE_PORT_SERVICE_ONOC,};
+
+/* Peripheral NOC Nodes */
+static int mport_sdcc_1[] = {MASTER_PORT_SDCC_1,};
+static int mport_sdcc_3[] = {MASTER_PORT_SDCC_3,};
+static int mport_sdcc_2[] = {MASTER_PORT_SDCC_2,};
+static int mport_sdcc_4[] = {MASTER_PORT_SDCC_4,};
+static int mport_tsif[] = {MASTER_PORT_TSIF,};
+static int mport_bam_dma[] = {MASTER_PORT_BAM_DMA,};
+static int mport_blsp_2[] = {MASTER_PORT_BLSP_2,};
+static int mport_usb_hsic[] = {MASTER_PORT_USB_HSIC,};
+static int mport_blsp_1[] = {MASTER_PORT_BLSP_1,};
+static int mport_usb_hs[] = {MASTER_PORT_USB_HS,};
+static int mport_pnoc_cfg[] = {MASTER_PORT_PNOC_CFG,};
+static int mport_gw_snoc_pnoc[] = {MASTER_PORT_GW_SNOC_PNOC,};
+
+static int sport_sdcc_1[] = {SLAVE_PORT_SDCC_1,};
+static int sport_sdcc_3[] = {SLAVE_PORT_SDCC_3,};
+static int sport_sdcc_2[] = {SLAVE_PORT_SDCC_2,};
+static int sport_sdcc_4[] = {SLAVE_PORT_SDCC_4,};
+static int sport_tsif[] = {SLAVE_PORT_TSIF,};
+static int sport_bam_dma[] = {SLAVE_PORT_BAM_DMA,};
+static int sport_blsp_2[] = {SLAVE_PORT_BLSP_2,};
+static int sport_usb_hsic[] = {SLAVE_PORT_USB_HSIC,};
+static int sport_blsp_1[] = {SLAVE_PORT_BLSP_1,};
+static int sport_usb_hs[] = {SLAVE_PORT_USB_HS,};
+static int sport_pdm[] = {SLAVE_PORT_PDM,};
+static int sport_periph_apu_cfg[] = {
+ SLAVE_PORT_PERIPH_APU_CFG,
+};
+static int sport_pnoc_mpu_cfg[] = {SLAVE_PORT_PNOC_MPU_CFG,};
+static int sport_prng[] = {SLAVE_PORT_PRNG,};
+static int sport_gw_pnoc_snoc[] = {SLAVE_PORT_GW_PNOC_SNOC,};
+static int sport_service_pnoc[] = {SLAVE_PORT_SERVICE_PNOC,};
+
+/* Config NOC Nodes */
+static int mport_rpm_inst[] = {MASTER_PORT_RPM_INST,};
+static int mport_rpm_data[] = {MASTER_PORT_RPM_DATA,};
+static int mport_rpm_sys[] = {MASTER_PORT_RPM_SYS,};
+static int mport_dehr[] = {MASTER_PORT_DEHR,};
+static int mport_qdss_dap[] = {MASTER_PORT_QDSS_DAP,};
+static int mport_spdm[] = {MASTER_PORT_SPDM,};
+static int mport_tic[] = {MASTER_PORT_TIC,};
+static int mport_gw_snoc_cnoc[] = {MASTER_PORT_GW_SNOC_CNOC,};
+
+static int sport_clk_ctl[] = {SLAVE_PORT_CLK_CTL,};
+static int sport_cnoc_mss[] = {SLAVE_PORT_CNOC_MSS,};
+static int sport_security[] = {SLAVE_PORT_SECURITY,};
+static int sport_tcsr[] = {SLAVE_PORT_TCSR,};
+static int sport_tlmm[] = {SLAVE_PORT_TLMM,};
+static int sport_crypto_0_cfg[] = {SLAVE_PORT_CRYPTO_0_CFG,};
+static int sport_crypto_1_cfg[] = {SLAVE_PORT_CRYPTO_1_CFG,};
+static int sport_imem_cfg[] = {SLAVE_PORT_IMEM_CFG,};
+static int sport_message_ram[] = {SLAVE_PORT_MESSAGE_RAM,};
+static int sport_bimc_cfg[] = {SLAVE_PORT_BIMC_CFG,};
+static int sport_boot_rom[] = {SLAVE_PORT_BOOT_ROM,};
+static int sport_cnoc_mnoc_mmss_cfg[] = {SLAVE_PORT_CNOC_MNOC_MMSS_CFG,};
+static int sport_cnoc_mnoc_cfg[] = {SLAVE_PORT_CNOC_MNOC_CFG,};
+static int sport_pmic_arb[] = {SLAVE_PORT_PMIC_ARB,};
+static int sport_spdm_wrapper[] = {SLAVE_PORT_SPDM_WRAPPER,};
+static int sport_dehr_cfg[] = {SLAVE_PORT_DEHR_CFG,};
+static int sport_mpm[] = {SLAVE_PORT_MPM,};
+static int sport_qdss_cfg[] = {SLAVE_PORT_QDSS_CFG,};
+static int sport_rbcpr_cfg[] = {SLAVE_PORT_RBCPR_CFG,};
+static int sport_rbcpr_qdss_apu_cfg[] = {SLAVE_PORT_RBCPR_QDSS_APU_CFG,};
+static int sport_snoc_mpu_cfg[] = {SLAVE_PORT_SNOC_MPU_CFG,};
+static int sport_cnoc_onoc_cfg[] = {SLAVE_PORT_CNOC_ONOC_CFG,};
+static int sport_pnoc_cfg[] = {SLAVE_PORT_PNOC_CFG,};
+static int sport_snoc_cfg[] = {SLAVE_PORT_SNOC_CFG,};
+static int sport_ebi1_dll_cfg[] = {SLAVE_PORT_EBI1_DLL_CFG,};
+static int sport_phy_apu_cfg[] = {SLAVE_PORT_PHY_APU_CFG,};
+static int sport_ebi1_phy_cfg[] = {SLAVE_PORT_EBI1_PHY_CFG,};
+static int sport_rpm[] = {SLAVE_PORT_RPM,};
+static int sport_gw_cnoc_snoc[] = {SLAVE_PORT_GW_CNOC_SNOC,};
+static int sport_service_cnoc[] = {SLAVE_PORT_SERVICE_CNOC,};
+
/* Arbitration tier list shared by the node tables below (all tier 2). */
static int tier2[] = {MSM_BUS_BW_TIER2,};
+
+/*
+ * QOS Ports defined only when qos ports are different than
+ * master ports
+ **/
static int qports_gemini[] = { 0 };
static int qports_mdp[] = { 1 };
static int qports_venus_p0[] = { 4 };
static int qports_venus_p1[] = { 5 };
static int qports_vfe[] = { 6 };
static int qports_gemini_ocmem[] = { 0 };
static int qports_venus_p0_ocmem[] = { 2 };
static int qports_venus_p1_ocmem[] = { 3 };
static int qports_vfe_ocmem[] = { 4 };
static int qports_crypto_c0[] = { 2 };
static int qports_crypto_c1[] = { 3 };
static int qports_lpass_proc[] = { 4 };
static int qports_ocmem_dma[] = { 7 };
static int qports_gw_snoc_bimc[] = { 5, 6 };
static int qports_kmpss[] = { 0, 1 };
static int qports_lpass_ahb[] = { 0 };
static int qports_qdss_bam[] = { 1 };
static int qports_gw_pnoc_snoc[] = { 8 };
static int qports_qdss_etr[] = { 10 };
static int qports_usb3[] = { 11 };
static int qports_oxili[] = { 2, 3 };
static int qports_gw_mnoc_bimc[] = { 3, 4 };
+
/*
 * System NOC (SNOC) topology table.
 *
 * One entry per master, slave or inter-fabric gateway node.
 * .masterp/.slavep point at the port-index arrays above; .mas_hw_id /
 * .slv_hw_id are the IDs reported to the RPM; the QoS fields (.mode,
 * .qport, .prio_rd/.prio_wr, .ws) program the QoS hardware selected by
 * .hw_sel.  Entries with .gateway = 1 link the SNOC to another fabric.
 * The numeric values are board-specific hardware configuration.
 */
static struct msm_bus_node_info sys_noc_info[] = {
	{
		.id = MSM_BUS_MASTER_LPASS_AHB,
		.masterp = mport_lpass_ahb,
		.num_mports = ARRAY_SIZE(mport_lpass_ahb),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.qport = qports_lpass_ahb,
		.mas_hw_id = MAS_LPASS_AHB,
		.mode = NOC_QOS_MODE_FIXED,
		.prio_rd = 2,
		.prio_wr = 2,
	},
	{
		.id = MSM_BUS_MASTER_QDSS_BAM,
		.masterp = mport_qdss_bam,
		.num_mports = ARRAY_SIZE(mport_qdss_bam),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.mode = NOC_QOS_MODE_FIXED,
		.qport = qports_qdss_bam,
		.mas_hw_id = MAS_QDSS_BAM,
	},
	{
		.id = MSM_BUS_MASTER_SNOC_CFG,
		.masterp = mport_snoc_cfg,
		.num_mports = ARRAY_SIZE(mport_snoc_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.mas_hw_id = MAS_SNOC_CFG,
	},
	/* Gateway: SNOC <-> BIMC */
	{
		.id = MSM_BUS_FAB_BIMC,
		.gateway = 1,
		.slavep = sport_gw_snoc_bimc,
		.num_sports = ARRAY_SIZE(sport_gw_snoc_bimc),
		.masterp = mport_gw_bimc_snoc,
		.num_mports = ARRAY_SIZE(mport_gw_bimc_snoc),
		.buswidth = 8,
		.mas_hw_id = MAS_BIMC_SNOC,
		.slv_hw_id = SLV_SNOC_BIMC,
	},
	/* Gateway: SNOC <-> config NOC */
	{
		.id = MSM_BUS_FAB_CONFIG_NOC,
		.gateway = 1,
		.slavep = sport_gw_snoc_cnoc,
		.num_sports = ARRAY_SIZE(sport_gw_snoc_cnoc),
		.masterp = mport_gw_cnoc_snoc,
		.num_mports = ARRAY_SIZE(mport_gw_cnoc_snoc),
		.buswidth = 8,
		.mas_hw_id = MAS_CNOC_SNOC,
		.slv_hw_id = SLV_SNOC_CNOC,
	},
	/* Gateway: SNOC <-> peripheral NOC */
	{
		.id = MSM_BUS_FAB_PERIPH_NOC,
		.gateway = 1,
		.slavep = sport_gw_snoc_pnoc,
		.num_sports = ARRAY_SIZE(sport_gw_snoc_pnoc),
		.masterp = mport_gw_pnoc_snoc,
		.num_mports = ARRAY_SIZE(mport_gw_pnoc_snoc),
		.buswidth = 8,
		.qport = qports_gw_pnoc_snoc,
		.mas_hw_id = MAS_PNOC_SNOC,
		.slv_hw_id = SLV_SNOC_PNOC,
		.mode = NOC_QOS_MODE_FIXED,
		.prio_rd = 2,
		.prio_wr = 2,
	},
	/*
	 * Gateway: SNOC <-> OCMEM virtual NOC.  No local port arrays are
	 * listed — presumably a virtual link handled via hw_ids only.
	 */
	{
		.id = MSM_BUS_FAB_OCMEM_VNOC,
		.gateway = 1,
		.buswidth = 8,
		.mas_hw_id = MAS_OVNOC_SNOC,
		.slv_hw_id = SLV_SNOC_OVNOC,
	},
	{
		.id = MSM_BUS_MASTER_CRYPTO_CORE0,
		.masterp = mport_crypto_core0,
		.num_mports = ARRAY_SIZE(mport_crypto_core0),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.mode = NOC_QOS_MODE_FIXED,
		.qport = qports_crypto_c0,
		.mas_hw_id = MAS_CRYPTO_CORE0,
		.hw_sel = MSM_BUS_NOC,
		.prio_rd = 1,
		.prio_wr = 1,
	},
	{
		.id = MSM_BUS_MASTER_CRYPTO_CORE1,
		.masterp = mport_crypto_core1,
		.num_mports = ARRAY_SIZE(mport_crypto_core1),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.mode = NOC_QOS_MODE_FIXED,
		.qport = qports_crypto_c1,
		.mas_hw_id = MAS_CRYPTO_CORE1,
		.hw_sel = MSM_BUS_NOC,
		.prio_rd = 1,
		.prio_wr = 1,
	},
	{
		.id = MSM_BUS_MASTER_LPASS_PROC,
		.masterp = mport_lpass_proc,
		.num_mports = ARRAY_SIZE(mport_lpass_proc),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.qport = qports_lpass_proc,
		.mas_hw_id = MAS_LPASS_PROC,
		.mode = NOC_QOS_MODE_FIXED,
		.prio_rd = 2,
		.prio_wr = 2,
	},
	{
		.id = MSM_BUS_MASTER_MSS,
		.masterp = mport_mss,
		.num_mports = ARRAY_SIZE(mport_mss),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.mas_hw_id = MAS_MSS,
	},
	{
		.id = MSM_BUS_MASTER_MSS_NAV,
		.masterp = mport_mss_nav,
		.num_mports = ARRAY_SIZE(mport_mss_nav),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.mas_hw_id = MAS_MSS_NAV,
	},
	{
		.id = MSM_BUS_MASTER_OCMEM_DMA,
		.masterp = mport_ocmem_dma,
		.num_mports = ARRAY_SIZE(mport_ocmem_dma),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.mode = NOC_QOS_MODE_FIXED,
		.qport = qports_ocmem_dma,
		.mas_hw_id = MAS_OCMEM_DMA,
	},
	{
		.id = MSM_BUS_MASTER_WCSS,
		.masterp = mport_wcss,
		.num_mports = ARRAY_SIZE(mport_wcss),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.mas_hw_id = MAS_WCSS,
	},
	{
		.id = MSM_BUS_MASTER_QDSS_ETR,
		.masterp = mport_qdss_etr,
		.num_mports = ARRAY_SIZE(mport_qdss_etr),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.qport = qports_qdss_etr,
		.mode = NOC_QOS_MODE_FIXED,
		.mas_hw_id = MAS_QDSS_ETR,
	},
	{
		.id = MSM_BUS_MASTER_USB3,
		.masterp = mport_usb3,
		.num_mports = ARRAY_SIZE(mport_usb3),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.mode = NOC_QOS_MODE_FIXED,
		.qport = qports_usb3,
		.mas_hw_id = MAS_USB3,
		.prio_rd = 2,
		.prio_wr = 2,
		.hw_sel = MSM_BUS_NOC,
		.iface_clk_node = "msm_usb3",
	},
	/* Slave nodes */
	{
		.id = MSM_BUS_SLAVE_AMPSS,
		.slavep = sport_kmpss,
		.num_sports = ARRAY_SIZE(sport_kmpss),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_APPSS,
	},
	{
		.id = MSM_BUS_SLAVE_LPASS,
		.slavep = sport_lpass,
		.num_sports = ARRAY_SIZE(sport_lpass),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_LPASS,
	},
	{
		.id = MSM_BUS_SLAVE_USB3,
		.slavep = sport_usb3,
		.num_sports = ARRAY_SIZE(sport_usb3),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_USB3,
	},
	{
		.id = MSM_BUS_SLAVE_WCSS,
		.slavep = sport_wcss,
		.num_sports = ARRAY_SIZE(sport_wcss),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_WCSS,
	},
	{
		.id = MSM_BUS_SLAVE_OCIMEM,
		.slavep = sport_ocimem,
		.num_sports = ARRAY_SIZE(sport_ocimem),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_OCIMEM,
	},
	{
		.id = MSM_BUS_SLAVE_SNOC_OCMEM,
		.slavep = sport_snoc_ocmem,
		.num_sports = ARRAY_SIZE(sport_snoc_ocmem),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_SNOC_OCMEM,
	},
	{
		.id = MSM_BUS_SLAVE_SERVICE_SNOC,
		.slavep = sport_service_snoc,
		.num_sports = ARRAY_SIZE(sport_service_snoc),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_SERVICE_SNOC,
	},
	{
		.id = MSM_BUS_SLAVE_QDSS_STM,
		.slavep = sport_qdss_stm,
		.num_sports = ARRAY_SIZE(sport_qdss_stm),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_QDSS_STM,
	},
};
+
+
/*
 * Multimedia subsystem NOC (MNOC) topology table.
 *
 * Multimedia masters run in NOC bypass QoS mode with a 10000 window
 * (.ws); config slaves are 16-byte-wide NOC-managed nodes.  Gateway
 * entries link the MNOC to the config NOC and to BIMC.
 */
static struct msm_bus_node_info mmss_noc_info[] = {
	{
		.id = MSM_BUS_MASTER_GRAPHICS_3D,
		.masterp = mport_gfx3d,
		.num_mports = ARRAY_SIZE(mport_gfx3d),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.hw_sel = MSM_BUS_NOC,
		.perm_mode = NOC_QOS_PERM_MODE_BYPASS,
		.mode = NOC_QOS_MODE_BYPASS,
		.ws = 10000,
		.qport = qports_oxili,
		.mas_hw_id = MAS_GFX3D,
	},
	{
		.id = MSM_BUS_MASTER_JPEG,
		.masterp = mport_jpeg,
		.num_mports = ARRAY_SIZE(mport_jpeg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.hw_sel = MSM_BUS_NOC,
		.perm_mode = NOC_QOS_PERM_MODE_BYPASS,
		.mode = NOC_QOS_MODE_BYPASS,
		.qport = qports_gemini,
		.ws = 10000,
		.mas_hw_id = MAS_JPEG,
	},
	{
		.id = MSM_BUS_MASTER_MDP_PORT0,
		.masterp = mport_mdp,
		.num_mports = ARRAY_SIZE(mport_mdp),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.hw_sel = MSM_BUS_NOC,
		.perm_mode = NOC_QOS_PERM_MODE_BYPASS,
		.mode = NOC_QOS_MODE_BYPASS,
		.qport = qports_mdp,
		.ws = 10000,
		.mas_hw_id = MAS_MDP,
	},
	{
		.id = MSM_BUS_MASTER_VIDEO_P0,
		.masterp = mport_video_port0,
		.num_mports = ARRAY_SIZE(mport_video_port0),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.hw_sel = MSM_BUS_NOC,
		.perm_mode = NOC_QOS_PERM_MODE_BYPASS,
		.mode = NOC_QOS_MODE_BYPASS,
		.ws = 10000,
		.qport = qports_venus_p0,
		.mas_hw_id = MAS_VIDEO_P0,
	},
	{
		.id = MSM_BUS_MASTER_VIDEO_P1,
		.masterp = mport_video_port1,
		.num_mports = ARRAY_SIZE(mport_video_port1),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.hw_sel = MSM_BUS_NOC,
		.perm_mode = NOC_QOS_PERM_MODE_BYPASS,
		.mode = NOC_QOS_MODE_BYPASS,
		.ws = 10000,
		.qport = qports_venus_p1,
		.mas_hw_id = MAS_VIDEO_P1,
	},
	{
		.id = MSM_BUS_MASTER_VFE,
		.masterp = mport_vfe,
		.num_mports = ARRAY_SIZE(mport_vfe),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.hw_sel = MSM_BUS_NOC,
		.perm_mode = NOC_QOS_PERM_MODE_BYPASS,
		.mode = NOC_QOS_MODE_BYPASS,
		.ws = 10000,
		.qport = qports_vfe,
		.mas_hw_id = MAS_VFE,
	},
	/* Gateway: config NOC -> MNOC (configuration path, master only) */
	{
		.id = MSM_BUS_FAB_CONFIG_NOC,
		.gateway = 1,
		.masterp = mport_gw_cnoc_mnoc_cfg,
		.num_mports = ARRAY_SIZE(mport_gw_cnoc_mnoc_cfg),
		.buswidth = 16,
		.hw_sel = MSM_BUS_RPM,
		.mas_hw_id = MAS_CNOC_MNOC_MMSS_CFG,
	},
	/* Gateway: MNOC -> BIMC (slave side only) */
	{
		.id = MSM_BUS_FAB_BIMC,
		.gateway = 1,
		.slavep = sport_gw_mmss_bimc,
		.num_sports = ARRAY_SIZE(sport_gw_mmss_bimc),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_MMSS_BIMC,
	},
	{
		.id = MSM_BUS_SLAVE_CAMERA_CFG,
		.slavep = sport_camera_cfg,
		.num_sports = ARRAY_SIZE(sport_camera_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_CAMERA_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_DISPLAY_CFG,
		.slavep = sport_display_cfg,
		.num_sports = ARRAY_SIZE(sport_display_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_DISPLAY_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_OCMEM_CFG,
		.slavep = sport_ocmem_cfg,
		.num_sports = ARRAY_SIZE(sport_ocmem_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_OCMEM_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_CPR_CFG,
		.slavep = sport_cpr_cfg,
		.num_sports = ARRAY_SIZE(sport_cpr_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_CPR_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_CPR_XPU_CFG,
		.slavep = sport_cpr_xpu_cfg,
		.num_sports = ARRAY_SIZE(sport_cpr_xpu_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_CPR_XPU_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_MISC_CFG,
		.slavep = sport_misc_cfg,
		.num_sports = ARRAY_SIZE(sport_misc_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_MISC_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_MISC_XPU_CFG,
		.slavep = sport_misc_xpu_cfg,
		.num_sports = ARRAY_SIZE(sport_misc_xpu_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_MISC_XPU_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_VENUS_CFG,
		.slavep = sport_venus_cfg,
		.num_sports = ARRAY_SIZE(sport_venus_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_VENUS_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_GRAPHICS_3D_CFG,
		.slavep = sport_gfx3d_cfg,
		.num_sports = ARRAY_SIZE(sport_gfx3d_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_GFX3D_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_MMSS_CLK_CFG,
		.slavep = sport_mmss_clk_cfg,
		.num_sports = ARRAY_SIZE(sport_mmss_clk_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_MMSS_CLK_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG,
		.slavep = sport_mmss_clk_xpu_cfg,
		.num_sports = ARRAY_SIZE(sport_mmss_clk_xpu_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_MMSS_CLK_XPU_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_MNOC_MPU_CFG,
		.slavep = sport_mnoc_mpu_cfg,
		.num_sports = ARRAY_SIZE(sport_mnoc_mpu_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_MNOC_MPU_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_ONOC_MPU_CFG,
		.slavep = sport_onoc_mpu_cfg,
		.num_sports = ARRAY_SIZE(sport_onoc_mpu_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_ONOC_MPU_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_SERVICE_MNOC,
		.slavep = sport_service_mnoc,
		.num_sports = ARRAY_SIZE(sport_service_mnoc),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.hw_sel = MSM_BUS_NOC,
		.slv_hw_id = SLV_SERVICE_MNOC,
	},
};
+
/*
 * BIMC (memory interconnect) topology table: CPU and modem masters,
 * gateways to the MMSS NOC and system NOC, and the EBI1 (DDR) and
 * CPU L2 slaves.  QoS for the CPU masters is handled by the BIMC
 * hardware (.hw_sel = MSM_BUS_BIMC).
 */
static struct msm_bus_node_info bimc_info[] = {
	{
		.id = MSM_BUS_MASTER_AMPSS_M0,
		.masterp = mport_kmpss_m0,
		.num_mports = ARRAY_SIZE(mport_kmpss_m0),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.hw_sel = MSM_BUS_BIMC,
		.mode = NOC_QOS_MODE_FIXED,
		.qport = qports_kmpss,
		.ws = 10000,
		.mas_hw_id = MAS_APPSS_PROC,
		.prio_rd = 1,
		.prio_wr = 1,
	},
	/* Second CPU master shares the same RPM hw_id and QoS ports. */
	{
		.id = MSM_BUS_MASTER_AMPSS_M1,
		.masterp = mport_kmpss_m1,
		.num_mports = ARRAY_SIZE(mport_kmpss_m1),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.hw_sel = MSM_BUS_BIMC,
		.mode = NOC_QOS_MODE_FIXED,
		.qport = qports_kmpss,
		.ws = 10000,
		.mas_hw_id = MAS_APPSS_PROC,
		.prio_rd = 1,
		.prio_wr = 1,
	},
	{
		.id = MSM_BUS_MASTER_MSS_PROC,
		.masterp = mport_mss_proc,
		.num_mports = ARRAY_SIZE(mport_mss_proc),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.hw_sel = MSM_BUS_RPM,
		.mas_hw_id = MAS_AMSS_PROC,
	},
	/* Gateway: MMSS NOC -> BIMC (master side only here) */
	{
		.id = MSM_BUS_FAB_MMSS_NOC,
		.gateway = 1,
		.masterp = mport_gw_mnoc_bimc,
		.num_mports = ARRAY_SIZE(mport_gw_mnoc_bimc),
		.qport = qports_gw_mnoc_bimc,
		.buswidth = 8,
		.ws = 10000,
		.mas_hw_id = MAS_MNOC_BIMC,
		.hw_sel = MSM_BUS_BIMC,
		.mode = NOC_QOS_MODE_BYPASS,
	},
	/* Gateway: system NOC <-> BIMC */
	{
		.id = MSM_BUS_FAB_SYS_NOC,
		.gateway = 1,
		.slavep = sport_gw_bimc_snoc,
		.num_sports = ARRAY_SIZE(sport_gw_bimc_snoc),
		.masterp = mport_gw_snoc_bimc,
		.num_mports = ARRAY_SIZE(mport_gw_snoc_bimc),
		.qport = qports_gw_snoc_bimc,
		.buswidth = 8,
		.ws = 10000,
		.mas_hw_id = MAS_SNOC_BIMC,
		.slv_hw_id = SLV_BIMC_SNOC,
	},
	{
		.id = MSM_BUS_SLAVE_EBI_CH0,
		.slavep = sport_ebi1,
		.num_sports = ARRAY_SIZE(sport_ebi1),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_EBI,
		.mode = NOC_QOS_MODE_BYPASS,
	},
	{
		.id = MSM_BUS_SLAVE_AMPSS_L2,
		.slavep = sport_kmpss_l2,
		.num_sports = ARRAY_SIZE(sport_kmpss_l2),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_APSS_L2,
	},
};
+
/*
 * OCMEM NOC topology table: multimedia masters with fixed NOC QoS,
 * the config-NOC configuration master, the OCMEM virtual-NOC gateway
 * and the ONOC service slave.
 */
static struct msm_bus_node_info ocmem_noc_info[] = {
	/* Gateway to the OCMEM virtual NOC (hw_ids only, no local ports). */
	{
		.id = MSM_BUS_FAB_OCMEM_VNOC,
		.gateway = 1,
		.buswidth = 16,
		.mas_hw_id = MAS_OVNOC_ONOC,
		.slv_hw_id = SLV_ONOC_OVNOC,
	},
	{
		.id = MSM_BUS_MASTER_JPEG_OCMEM,
		.masterp = mport_jpeg_ocmem,
		.num_mports = ARRAY_SIZE(mport_jpeg_ocmem),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.perm_mode = NOC_QOS_PERM_MODE_FIXED,
		.mode = NOC_QOS_MODE_FIXED,
		.qport = qports_gemini_ocmem,
		.mas_hw_id = MAS_JPEG_OCMEM,
		.hw_sel = MSM_BUS_NOC,
	},
	/*
	 * NOTE(review): unlike the sibling OCMEM masters, this entry sets
	 * no .qport — confirm the MDP OCMEM master really uses its master
	 * port index as its QoS port.
	 */
	{
		.id = MSM_BUS_MASTER_MDP_OCMEM,
		.masterp = mport_mdp_ocmem,
		.num_mports = ARRAY_SIZE(mport_mdp_ocmem),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.perm_mode = NOC_QOS_PERM_MODE_FIXED,
		.mode = NOC_QOS_MODE_FIXED,
		.mas_hw_id = MAS_MDP_OCMEM,
		.hw_sel = MSM_BUS_NOC,
	},
	{
		.id = MSM_BUS_MASTER_VIDEO_P0_OCMEM,
		.masterp = mport_video_p0_ocmem,
		.num_mports = ARRAY_SIZE(mport_video_p0_ocmem),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.perm_mode = NOC_QOS_PERM_MODE_FIXED,
		.mode = NOC_QOS_MODE_FIXED,
		.qport = qports_venus_p0_ocmem,
		.mas_hw_id = MAS_VIDEO_P0_OCMEM,
		.hw_sel = MSM_BUS_NOC,
	},
	{
		.id = MSM_BUS_MASTER_VIDEO_P1_OCMEM,
		.masterp = mport_video_p1_ocmem,
		.num_mports = ARRAY_SIZE(mport_video_p1_ocmem),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.perm_mode = NOC_QOS_PERM_MODE_FIXED,
		.mode = NOC_QOS_MODE_FIXED,
		.qport = qports_venus_p1_ocmem,
		.mas_hw_id = MAS_VIDEO_P1_OCMEM,
		.hw_sel = MSM_BUS_NOC,
	},
	{
		.id = MSM_BUS_MASTER_VFE_OCMEM,
		.masterp = mport_vfe_ocmem,
		.num_mports = ARRAY_SIZE(mport_vfe_ocmem),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.perm_mode = NOC_QOS_PERM_MODE_FIXED,
		.mode = NOC_QOS_MODE_FIXED,
		.qport = qports_vfe_ocmem,
		.mas_hw_id = MAS_VFE_OCMEM,
		.hw_sel = MSM_BUS_NOC,
		.prio_rd = 1,
		.prio_wr = 1,
	},
	{
		.id = MSM_BUS_MASTER_CNOC_ONOC_CFG,
		.masterp = mport_cnoc_onoc_cfg,
		.num_mports = ARRAY_SIZE(mport_cnoc_onoc_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.mas_hw_id = MAS_CNOC_ONOC_CFG,
		.hw_sel = MSM_BUS_NOC,
	},
	{
		.id = MSM_BUS_SLAVE_SERVICE_ONOC,
		.slavep = sport_service_onoc,
		.num_sports = ARRAY_SIZE(sport_service_onoc),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 16,
		.slv_hw_id = SLV_SERVICE_ONOC,
	},
};
+
/*
 * Peripheral NOC (PNOC) topology table: SDCC/BLSP/USB/TSIF/BAM
 * peripherals as both masters and slaves, plus the gateway to the
 * system NOC.  All nodes are 8 bytes wide and use the shared tier2
 * arbitration tier.
 */
static struct msm_bus_node_info periph_noc_info[] = {
	{
		.id = MSM_BUS_MASTER_PNOC_CFG,
		.masterp = mport_pnoc_cfg,
		.num_mports = ARRAY_SIZE(mport_pnoc_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_PNOC_CFG,
	},
	{
		.id = MSM_BUS_MASTER_SDCC_1,
		.masterp = mport_sdcc_1,
		.num_mports = ARRAY_SIZE(mport_sdcc_1),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_SDCC_1,
	},
	{
		.id = MSM_BUS_MASTER_SDCC_3,
		.masterp = mport_sdcc_3,
		.num_mports = ARRAY_SIZE(mport_sdcc_3),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_SDCC_3,
	},
	{
		.id = MSM_BUS_MASTER_SDCC_4,
		.masterp = mport_sdcc_4,
		.num_mports = ARRAY_SIZE(mport_sdcc_4),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_SDCC_4,
	},
	{
		.id = MSM_BUS_MASTER_SDCC_2,
		.masterp = mport_sdcc_2,
		.num_mports = ARRAY_SIZE(mport_sdcc_2),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_SDCC_2,
	},
	{
		.id = MSM_BUS_MASTER_TSIF,
		.masterp = mport_tsif,
		.num_mports = ARRAY_SIZE(mport_tsif),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_TSIF,
	},
	{
		.id = MSM_BUS_MASTER_BAM_DMA,
		.masterp = mport_bam_dma,
		.num_mports = ARRAY_SIZE(mport_bam_dma),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_BAM_DMA,
	},
	{
		.id = MSM_BUS_MASTER_BLSP_2,
		.masterp = mport_blsp_2,
		.num_mports = ARRAY_SIZE(mport_blsp_2),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_BLSP_2,
	},
	{
		.id = MSM_BUS_MASTER_USB_HSIC,
		.masterp = mport_usb_hsic,
		.num_mports = ARRAY_SIZE(mport_usb_hsic),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_USB_HSIC,
	},
	{
		.id = MSM_BUS_MASTER_BLSP_1,
		.masterp = mport_blsp_1,
		.num_mports = ARRAY_SIZE(mport_blsp_1),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_BLSP_1,
	},
	{
		.id = MSM_BUS_MASTER_USB_HS,
		.masterp = mport_usb_hs,
		.num_mports = ARRAY_SIZE(mport_usb_hs),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.mas_hw_id = MAS_USB_HS,
	},
	/* Gateway: PNOC <-> system NOC */
	{
		.id = MSM_BUS_FAB_SYS_NOC,
		.gateway = 1,
		.slavep = sport_gw_pnoc_snoc,
		.num_sports = ARRAY_SIZE(sport_gw_pnoc_snoc),
		.masterp = mport_gw_snoc_pnoc,
		.num_mports = ARRAY_SIZE(mport_gw_snoc_pnoc),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_PNOC_SNOC,
		.mas_hw_id = MAS_SNOC_PNOC,
	},
	/* Slave nodes */
	{
		.id = MSM_BUS_SLAVE_SDCC_1,
		.slavep = sport_sdcc_1,
		.num_sports = ARRAY_SIZE(sport_sdcc_1),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_SDCC_1,
	},
	{
		.id = MSM_BUS_SLAVE_SDCC_3,
		.slavep = sport_sdcc_3,
		.num_sports = ARRAY_SIZE(sport_sdcc_3),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_SDCC_3,
	},
	{
		.id = MSM_BUS_SLAVE_SDCC_2,
		.slavep = sport_sdcc_2,
		.num_sports = ARRAY_SIZE(sport_sdcc_2),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_SDCC_2,
	},
	{
		.id = MSM_BUS_SLAVE_SDCC_4,
		.slavep = sport_sdcc_4,
		.num_sports = ARRAY_SIZE(sport_sdcc_4),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_SDCC_4,
	},
	{
		.id = MSM_BUS_SLAVE_TSIF,
		.slavep = sport_tsif,
		.num_sports = ARRAY_SIZE(sport_tsif),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_TSIF,
	},
	{
		.id = MSM_BUS_SLAVE_BAM_DMA,
		.slavep = sport_bam_dma,
		.num_sports = ARRAY_SIZE(sport_bam_dma),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_BAM_DMA,
	},
	{
		.id = MSM_BUS_SLAVE_BLSP_2,
		.slavep = sport_blsp_2,
		.num_sports = ARRAY_SIZE(sport_blsp_2),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_BLSP_2,
	},
	{
		.id = MSM_BUS_SLAVE_USB_HSIC,
		.slavep = sport_usb_hsic,
		.num_sports = ARRAY_SIZE(sport_usb_hsic),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_USB_HSIC,
	},
	{
		.id = MSM_BUS_SLAVE_BLSP_1,
		.slavep = sport_blsp_1,
		.num_sports = ARRAY_SIZE(sport_blsp_1),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_BLSP_1,
	},
	{
		.id = MSM_BUS_SLAVE_USB_HS,
		.slavep = sport_usb_hs,
		.num_sports = ARRAY_SIZE(sport_usb_hs),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_USB_HS,
	},
	{
		.id = MSM_BUS_SLAVE_PDM,
		.slavep = sport_pdm,
		.num_sports = ARRAY_SIZE(sport_pdm),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_PDM,
	},
	{
		.id = MSM_BUS_SLAVE_PERIPH_APU_CFG,
		.slavep = sport_periph_apu_cfg,
		.num_sports = ARRAY_SIZE(sport_periph_apu_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_PERIPH_APU_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_PNOC_MPU_CFG,
		.slavep = sport_pnoc_mpu_cfg,
		.num_sports = ARRAY_SIZE(sport_pnoc_mpu_cfg),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_MPU_CFG,
	},
	{
		.id = MSM_BUS_SLAVE_PRNG,
		.slavep = sport_prng,
		.num_sports = ARRAY_SIZE(sport_prng),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_PRNG,
	},
	{
		.id = MSM_BUS_SLAVE_SERVICE_PNOC,
		.slavep = sport_service_pnoc,
		.num_sports = ARRAY_SIZE(sport_service_pnoc),
		.tier = tier2,
		.num_tiers = ARRAY_SIZE(tier2),
		.buswidth = 8,
		.slv_hw_id = SLV_SERVICE_PNOC,
	},
};
+
+static struct msm_bus_node_info config_noc_info[] = {
+ {
+ .id = MSM_BUS_MASTER_RPM_INST,
+ .masterp = mport_rpm_inst,
+ .num_mports = ARRAY_SIZE(mport_rpm_inst),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_RPM_INST,
+ },
+ {
+ .id = MSM_BUS_MASTER_RPM_DATA,
+ .masterp = mport_rpm_data,
+ .num_mports = ARRAY_SIZE(mport_rpm_data),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_RPM_DATA,
+ },
+ {
+ .id = MSM_BUS_MASTER_RPM_SYS,
+ .masterp = mport_rpm_sys,
+ .num_mports = ARRAY_SIZE(mport_rpm_sys),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_RPM_SYS,
+ },
+ {
+ .id = MSM_BUS_MASTER_DEHR,
+ .masterp = mport_dehr,
+ .num_mports = ARRAY_SIZE(mport_dehr),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_DEHR,
+ },
+ {
+ .id = MSM_BUS_MASTER_QDSS_DAP,
+ .masterp = mport_qdss_dap,
+ .num_mports = ARRAY_SIZE(mport_qdss_dap),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_QDSS_DAP,
+ },
+ {
+ .id = MSM_BUS_MASTER_SPDM,
+ .masterp = mport_spdm,
+ .num_mports = ARRAY_SIZE(mport_spdm),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_SPDM,
+ },
+ {
+ .id = MSM_BUS_MASTER_TIC,
+ .masterp = mport_tic,
+ .num_mports = ARRAY_SIZE(mport_tic),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_TIC,
+ },
+ {
+ .id = MSM_BUS_SLAVE_CLK_CTL,
+ .slavep = sport_clk_ctl,
+ .num_sports = ARRAY_SIZE(sport_clk_ctl),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_CLK_CTL,
+ },
+ {
+ .id = MSM_BUS_SLAVE_CNOC_MSS,
+ .slavep = sport_cnoc_mss,
+ .num_sports = ARRAY_SIZE(sport_cnoc_mss),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_CNOC_MSS,
+ },
+ {
+ .id = MSM_BUS_SLAVE_SECURITY,
+ .slavep = sport_security,
+ .num_sports = ARRAY_SIZE(sport_security),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_SECURITY,
+ },
+ {
+ .id = MSM_BUS_SLAVE_TCSR,
+ .slavep = sport_tcsr,
+ .num_sports = ARRAY_SIZE(sport_tcsr),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_TCSR,
+ },
+ {
+ .id = MSM_BUS_SLAVE_TLMM,
+ .slavep = sport_tlmm,
+ .num_sports = ARRAY_SIZE(sport_tlmm),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_TLMM,
+ },
+ {
+ .id = MSM_BUS_SLAVE_CRYPTO_0_CFG,
+ .slavep = sport_crypto_0_cfg,
+ .num_sports = ARRAY_SIZE(sport_crypto_0_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_CRYPTO_0_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_CRYPTO_1_CFG,
+ .slavep = sport_crypto_1_cfg,
+ .num_sports = ARRAY_SIZE(sport_crypto_1_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_CRYPTO_1_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_IMEM_CFG,
+ .slavep = sport_imem_cfg,
+ .num_sports = ARRAY_SIZE(sport_imem_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_IMEM_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_MESSAGE_RAM,
+ .slavep = sport_message_ram,
+ .num_sports = ARRAY_SIZE(sport_message_ram),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_MESSAGE_RAM,
+ },
+ {
+ .id = MSM_BUS_SLAVE_BIMC_CFG,
+ .slavep = sport_bimc_cfg,
+ .num_sports = ARRAY_SIZE(sport_bimc_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_BIMC_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_BOOT_ROM,
+ .slavep = sport_boot_rom,
+ .num_sports = ARRAY_SIZE(sport_boot_rom),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_BOOT_ROM,
+ },
+ {
+ .id = MSM_BUS_SLAVE_PMIC_ARB,
+ .slavep = sport_pmic_arb,
+ .num_sports = ARRAY_SIZE(sport_pmic_arb),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_PMIC_ARB,
+ },
+ {
+ .id = MSM_BUS_SLAVE_SPDM_WRAPPER,
+ .slavep = sport_spdm_wrapper,
+ .num_sports = ARRAY_SIZE(sport_spdm_wrapper),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_SPDM_WRAPPER,
+ },
+ {
+ .id = MSM_BUS_SLAVE_DEHR_CFG,
+ .slavep = sport_dehr_cfg,
+ .num_sports = ARRAY_SIZE(sport_dehr_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_DEHR_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_MPM,
+ .slavep = sport_mpm,
+ .num_sports = ARRAY_SIZE(sport_mpm),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_MPM,
+ },
+ {
+ .id = MSM_BUS_SLAVE_QDSS_CFG,
+ .slavep = sport_qdss_cfg,
+ .num_sports = ARRAY_SIZE(sport_qdss_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_QDSS_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_RBCPR_CFG,
+ .slavep = sport_rbcpr_cfg,
+ .num_sports = ARRAY_SIZE(sport_rbcpr_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_RBCPR_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG,
+ .slavep = sport_rbcpr_qdss_apu_cfg,
+ .num_sports = ARRAY_SIZE(sport_rbcpr_qdss_apu_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_RBCPR_QDSS_APU_CFG,
+ },
+ {
+ .id = MSM_BUS_FAB_SYS_NOC,
+ .gateway = 1,
+ .slavep = sport_gw_cnoc_snoc,
+ .num_sports = ARRAY_SIZE(sport_gw_cnoc_snoc),
+ .masterp = mport_gw_snoc_cnoc,
+ .num_mports = ARRAY_SIZE(mport_gw_snoc_cnoc),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_SNOC_CNOC,
+ .slv_hw_id = SLV_CNOC_SNOC,
+ },
+ {
+ .id = MSM_BUS_SLAVE_CNOC_ONOC_CFG,
+ .slavep = sport_cnoc_onoc_cfg,
+ .num_sports = ARRAY_SIZE(sport_cnoc_onoc_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_CNOC_ONOC_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG,
+ .slavep = sport_cnoc_mnoc_mmss_cfg,
+ .num_sports = ARRAY_SIZE(sport_cnoc_mnoc_mmss_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_CNOC_MNOC_MMSS_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_CNOC_MNOC_CFG,
+ .slavep = sport_cnoc_mnoc_cfg,
+ .num_sports = ARRAY_SIZE(sport_cnoc_mnoc_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_CNOC_MNOC_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_PNOC_CFG,
+ .slavep = sport_pnoc_cfg,
+ .num_sports = ARRAY_SIZE(sport_pnoc_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_PNOC_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_SNOC_MPU_CFG,
+ .slavep = sport_snoc_mpu_cfg,
+ .num_sports = ARRAY_SIZE(sport_snoc_mpu_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_SNOC_MPU_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_SNOC_CFG,
+ .slavep = sport_snoc_cfg,
+ .num_sports = ARRAY_SIZE(sport_snoc_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_SNOC_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_EBI1_DLL_CFG,
+ .slavep = sport_ebi1_dll_cfg,
+ .num_sports = ARRAY_SIZE(sport_ebi1_dll_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_EBI1_DLL_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_PHY_APU_CFG,
+ .slavep = sport_phy_apu_cfg,
+ .num_sports = ARRAY_SIZE(sport_phy_apu_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_PHY_APU_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_EBI1_PHY_CFG,
+ .slavep = sport_ebi1_phy_cfg,
+ .num_sports = ARRAY_SIZE(sport_ebi1_phy_cfg),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_EBI1_PHY_CFG,
+ },
+ {
+ .id = MSM_BUS_SLAVE_RPM,
+ .slavep = sport_rpm,
+ .num_sports = ARRAY_SIZE(sport_rpm),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_RPM,
+ },
+ {
+ .id = MSM_BUS_SLAVE_SERVICE_CNOC,
+ .slavep = sport_service_cnoc,
+ .num_sports = ARRAY_SIZE(sport_service_cnoc),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .slv_hw_id = SLV_SERVICE_CNOC,
+ },
+};
+
+/* A virtual NoC is needed for connection to OCMEM */
+static struct msm_bus_node_info ocmem_vnoc_info[] = {
+ {
+ .id = MSM_BUS_MASTER_V_OCMEM_GFX3D,
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 8,
+ .mas_hw_id = MAS_V_OCMEM_GFX3D,
+ },
+ {
+ .id = MSM_BUS_SLAVE_OCMEM,
+ .slavep = sport_ocmem,
+ .num_sports = ARRAY_SIZE(sport_ocmem),
+ .tier = tier2,
+ .num_tiers = ARRAY_SIZE(tier2),
+ .buswidth = 16,
+ .slv_hw_id = SLV_OCMEM,
+ /* duplicate ".tier = tier2" initializer removed (-Woverride-init) */
+ .slaveclk[DUAL_CTX] = "ocmem_clk",
+ .slaveclk[ACTIVE_CTX] = "ocmem_a_clk",
+ },
+ {
+ .id = MSM_BUS_FAB_SYS_NOC,
+ .gateway = 1,
+ .buswidth = 8,
+ .ws = 10000,
+ .mas_hw_id = MAS_SNOC_OVNOC,
+ .slv_hw_id = SLV_OVNOC_SNOC,
+ },
+ {
+ .id = MSM_BUS_FAB_OCMEM_NOC,
+ .gateway = 1,
+ .buswidth = 16,
+ .ws = 10000,
+ .mas_hw_id = MAS_ONOC_OVNOC,
+ .slv_hw_id = SLV_OVNOC_ONOC,
+ },
+};
+
+/*
+ * Assign fabric-private ids (priv_id = fabid + id) to every non-gateway
+ * node and record the mapping in the master_iids[]/slave_iids[] tables.
+ * Gateways keep their global id unchanged.
+ *
+ * Fix: the original WARN()ed on an out-of-range id but still performed
+ * the out-of-bounds array store; now the store is skipped on WARN.
+ */
+static void msm_bus_board_assign_iids(struct msm_bus_fabric_registration
+ *fabreg, int fabid)
+{
+ int i;
+
+ for (i = 0; i < fabreg->len; i++) {
+ if (fabreg->info[i].gateway) {
+ /* Gateway nodes keep their global id */
+ fabreg->info[i].priv_id = fabreg->info[i].id;
+ continue;
+ }
+ fabreg->info[i].priv_id = fabid + fabreg->info[i].id;
+ if (fabreg->info[i].id < SLAVE_ID_KEY) {
+ if (WARN(fabreg->info[i].id >= NMASTERS,
+ "id %d exceeds array size!\n",
+ fabreg->info[i].id))
+ continue; /* don't write past master_iids[] */
+ master_iids[fabreg->info[i].id] =
+ fabreg->info[i].priv_id;
+ } else {
+ if (WARN((fabreg->info[i].id - SLAVE_ID_KEY) >=
+ NSLAVES, "id %d exceeds array size!\n",
+ fabreg->info[i].id))
+ continue; /* don't write past slave_iids[] */
+ slave_iids[fabreg->info[i].id - (SLAVE_ID_KEY)]
+ = fabreg->info[i].priv_id;
+ }
+ }
+}
+
+/*
+ * Map a global master/slave id to the fabric-internal id recorded by
+ * msm_bus_board_assign_iids(). Returns -EINVAL for ids outside the
+ * master range [0, NMASTERS) and slave range
+ * [SLAVE_ID_KEY, SLAVE_ID_KEY + NSLAVES).
+ */
+static int msm_bus_board_8974_get_iid(int id)
+{
+ if ((id < SLAVE_ID_KEY && id >= NMASTERS) ||
+ id >= (SLAVE_ID_KEY + NSLAVES)) {
+ MSM_BUS_ERR("Cannot get iid. Invalid id %d passed\n", id);
+ return -EINVAL;
+ }
+
+ /* Ids below SLAVE_ID_KEY index the master table, the rest the slaves */
+ return CHECK_ID(((id < SLAVE_ID_KEY) ? master_iids[id] :
+ slave_iids[id - SLAVE_ID_KEY]), id);
+}
+
+/*
+ * Stub: this board exposes no "il" id table to the RPM, so the lookup
+ * always reports -ENXIO. (Exact meaning of "il" not visible here —
+ * presumably interleaved ids; confirm against callers.)
+ */
+int msm_bus_board_rpm_get_il_ids(uint16_t *id)
+{
+ return -ENXIO;
+}
+
+static struct msm_bus_board_algorithm msm_bus_board_algo = {
+ .board_nfab = NFAB_8974,
+ .get_iid = msm_bus_board_8974_get_iid,
+ .assign_iids = msm_bus_board_assign_iids,
+};
+
+struct msm_bus_fabric_registration msm_bus_8974_sys_noc_pdata = {
+ .id = MSM_BUS_FAB_SYS_NOC,
+ .name = "msm_sys_noc",
+ .info = sys_noc_info,
+ .len = ARRAY_SIZE(sys_noc_info),
+ .ahb = 0,
+ .fabclk[DUAL_CTX] = "bus_clk",
+ .fabclk[ACTIVE_CTX] = "bus_a_clk",
+ .nmasters = 15,
+ .nslaves = 12,
+ .ntieredslaves = 0,
+ .board_algo = &msm_bus_board_algo,
+ .qos_freq = 4800,
+ .hw_sel = MSM_BUS_NOC,
+ .rpm_enabled = 1,
+};
+
+struct msm_bus_fabric_registration msm_bus_8974_mmss_noc_pdata = {
+ .id = MSM_BUS_FAB_MMSS_NOC,
+ .name = "msm_mmss_noc",
+ .info = mmss_noc_info,
+ .len = ARRAY_SIZE(mmss_noc_info),
+ .ahb = 0,
+ .fabclk[DUAL_CTX] = "bus_clk",
+ .fabclk[ACTIVE_CTX] = "bus_a_clk",
+ .nmasters = 9,
+ .nslaves = 16,
+ .ntieredslaves = 0,
+ .board_algo = &msm_bus_board_algo,
+ .qos_freq = 4800,
+ .hw_sel = MSM_BUS_NOC,
+ .rpm_enabled = 1,
+};
+
+struct msm_bus_fabric_registration msm_bus_8974_bimc_pdata = {
+ .id = MSM_BUS_FAB_BIMC,
+ .name = "msm_bimc",
+ .info = bimc_info,
+ .len = ARRAY_SIZE(bimc_info),
+ .ahb = 0,
+ .fabclk[DUAL_CTX] = "mem_clk",
+ .fabclk[ACTIVE_CTX] = "mem_a_clk",
+ .nmasters = 7,
+ .nslaves = 4,
+ .ntieredslaves = 0,
+ .board_algo = &msm_bus_board_algo,
+ .qos_freq = 4800,
+ .hw_sel = MSM_BUS_BIMC,
+ .rpm_enabled = 1,
+};
+
+struct msm_bus_fabric_registration msm_bus_8974_ocmem_noc_pdata = {
+ .id = MSM_BUS_FAB_OCMEM_NOC,
+ .name = "msm_ocmem_noc",
+ .info = ocmem_noc_info,
+ .len = ARRAY_SIZE(ocmem_noc_info),
+ .ahb = 0,
+ .fabclk[DUAL_CTX] = "bus_clk",
+ .fabclk[ACTIVE_CTX] = "bus_a_clk",
+ .nmasters = 6,
+ .nslaves = 3,
+ .ntieredslaves = 0,
+ .board_algo = &msm_bus_board_algo,
+ .qos_freq = 4800,
+ .hw_sel = MSM_BUS_NOC,
+ .rpm_enabled = 1,
+};
+
+struct msm_bus_fabric_registration msm_bus_8974_periph_noc_pdata = {
+ .id = MSM_BUS_FAB_PERIPH_NOC,
+ .name = "msm_periph_noc",
+ .info = periph_noc_info,
+ .len = ARRAY_SIZE(periph_noc_info),
+ .ahb = 0,
+ .fabclk[DUAL_CTX] = "bus_clk",
+ .fabclk[ACTIVE_CTX] = "bus_a_clk",
+ .nmasters = 12,
+ .nslaves = 16,
+ .ntieredslaves = 0,
+ .board_algo = &msm_bus_board_algo,
+ .hw_sel = MSM_BUS_NOC,
+ .rpm_enabled = 1,
+};
+
+struct msm_bus_fabric_registration msm_bus_8974_config_noc_pdata = {
+ .id = MSM_BUS_FAB_CONFIG_NOC,
+ .name = "msm_config_noc",
+ .info = config_noc_info,
+ .len = ARRAY_SIZE(config_noc_info),
+ .ahb = 0,
+ .fabclk[DUAL_CTX] = "bus_clk",
+ .fabclk[ACTIVE_CTX] = "bus_a_clk",
+ .nmasters = 8,
+ .nslaves = 30,
+ .ntieredslaves = 0,
+ .board_algo = &msm_bus_board_algo,
+ .hw_sel = MSM_BUS_NOC,
+ .rpm_enabled = 1,
+};
+
+struct msm_bus_fabric_registration msm_bus_8974_ocmem_vnoc_pdata = {
+ .id = MSM_BUS_FAB_OCMEM_VNOC,
+ .name = "msm_ocmem_vnoc",
+ .info = ocmem_vnoc_info,
+ .len = ARRAY_SIZE(ocmem_vnoc_info),
+ .ahb = 0,
+ .nmasters = 5,
+ .nslaves = 4,
+ .ntieredslaves = 0,
+ .board_algo = &msm_bus_board_algo,
+ .hw_sel = MSM_BUS_NOC,
+ .virt = 1,
+ .rpm_enabled = 1,
+};
+
+/* Attach the shared 8974 board algorithm (iid assignment/lookup) to @pdata. */
+void msm_bus_board_init(struct msm_bus_fabric_registration *pdata)
+{
+ pdata->board_algo = &msm_bus_board_algo;
+}
+
+/*
+ * Override the fabric count used by the shared board algorithm.
+ * Non-positive values are ignored. Note: @pdata is accepted but unused
+ * here — the setting is global to msm_bus_board_algo.
+ */
+void msm_bus_board_set_nfab(struct msm_bus_fabric_registration *pdata,
+ int nfab)
+{
+ if (nfab <= 0)
+ return;
+
+ msm_bus_board_algo.board_nfab = nfab;
+}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_client_api.c b/drivers/soc/qcom/msm_bus/msm_bus_client_api.c
new file mode 100644
index 0000000000000..5b386794fedf2
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_client_api.c
@@ -0,0 +1,142 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/radix-tree.h>
+#include <linux/clk.h>
+#include <linux/msm-bus.h>
+#include "msm_bus_core.h"
+
+struct msm_bus_arb_ops arb_ops;
+
+/**
+ * msm_bus_scale_register_client() - Register the clients with the msm bus
+ * driver
+ * @pdata: Platform data of the client, containing src, dest, ab, ib.
+ *
+ * Client data contains the vectors specifying arbitrated bandwidth (ab)
+ * and instantaneous bandwidth (ib) requested between a particular
+ * src and dest.
+ *
+ * Returns a non-zero client handle on success, 0 when the bus driver
+ * has not installed its arbitration ops yet.
+ */
+uint32_t msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata)
+{
+ if (arb_ops.register_client)
+ return arb_ops.register_client(pdata);
+
+ /* Kernel log messages are newline terminated */
+ pr_err("%s: Bus driver not ready.\n", __func__);
+ return 0;
+}
+EXPORT_SYMBOL(msm_bus_scale_register_client);
+
+/**
+ * msm_bus_scale_client_update_request() - Update the request for bandwidth
+ * from a particular client
+ * @cl: Handle to the client
+ * @index: Index into the vector, to which the bw and clock values need to
+ * be updated
+ *
+ * Returns the arbitration op's result, or -EPROBE_DEFER while the bus
+ * driver has not installed its ops yet.
+ */
+int msm_bus_scale_client_update_request(uint32_t cl, unsigned int index)
+{
+ if (arb_ops.update_request)
+ return arb_ops.update_request(cl, index);
+
+ /* Kernel log messages are newline terminated */
+ pr_err("%s: Bus driver not ready.\n", __func__);
+ return -EPROBE_DEFER;
+}
+EXPORT_SYMBOL(msm_bus_scale_client_update_request);
+
+/**
+ * msm_bus_scale_unregister_client() - Unregister the client from the bus driver
+ * @cl: Handle to the client
+ */
+void msm_bus_scale_unregister_client(uint32_t cl)
+{
+ if (arb_ops.unregister_client)
+ arb_ops.unregister_client(cl);
+ else
+ /* Kernel log messages are newline terminated */
+ pr_err("%s: Bus driver not ready.\n", __func__);
+}
+EXPORT_SYMBOL(msm_bus_scale_unregister_client);
+
+/**
+ * msm_bus_scale_register() - Register a bus client for a master/slave pair
+ * @mas: Master port id
+ * @slv: Slave port id
+ * @name: Client name, used for debug/accounting
+ * @active_only: True if requests apply to the active context only
+ *
+ * Returns a client handle on success, ERR_PTR(-EPROBE_DEFER) while the
+ * bus driver has not installed its arbitration ops yet.
+ * (Kernel-doc fixed: the old comment described a nonexistent @pdata.)
+ */
+struct msm_bus_client_handle*
+msm_bus_scale_register(uint32_t mas, uint32_t slv, char *name, bool active_only)
+{
+ if (arb_ops.register_cl)
+ return arb_ops.register_cl(mas, slv, name, active_only);
+
+ /* Kernel log messages are newline terminated */
+ pr_err("%s: Bus driver not ready.\n", __func__);
+ return ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL(msm_bus_scale_register);
+
+/**
+ * msm_bus_scale_update_bw() - Update the bandwidth request for a client
+ * @cl: Handle returned by msm_bus_scale_register()
+ * @ab: Arbitrated bandwidth
+ * @ib: Instantaneous bandwidth
+ *
+ * Returns the arbitration op's result, or -EPROBE_DEFER while the bus
+ * driver has not installed its ops yet.
+ */
+int msm_bus_scale_update_bw(struct msm_bus_client_handle *cl, u64 ab, u64 ib)
+{
+ /*
+ * Bug fix: test the op we are about to call. The original checked
+ * arb_ops.update_request but invoked arb_ops.update_bw, which could
+ * dereference a NULL pointer if only update_request was populated.
+ */
+ if (arb_ops.update_bw)
+ return arb_ops.update_bw(cl, ab, ib);
+
+ pr_err("%s: Bus driver not ready.\n", __func__);
+ return -EPROBE_DEFER;
+}
+EXPORT_SYMBOL(msm_bus_scale_update_bw);
+
+/**
+ * msm_bus_scale_unregister() - Release a handle obtained from
+ * msm_bus_scale_register()
+ * @cl: Handle to the client
+ *
+ * (Kernel-doc fixed: the old comment was a copy-paste of the
+ * update-request description.)
+ */
+void msm_bus_scale_unregister(struct msm_bus_client_handle *cl)
+{
+ if (arb_ops.unregister)
+ arb_ops.unregister(cl);
+ else
+ /* Kernel log messages are newline terminated */
+ pr_err("%s: Bus driver not ready.\n", __func__);
+}
+EXPORT_SYMBOL(msm_bus_scale_unregister);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_config.c b/drivers/soc/qcom/msm_bus/msm_bus_config.c
new file mode 100644
index 0000000000000..ea7fe7f2e1dd8
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_config.c
@@ -0,0 +1,78 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/radix-tree.h>
+#include <linux/clk.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+#include "msm_bus_core.h"
+
+static DEFINE_MUTEX(msm_bus_config_lock);
+
+/**
+ * msm_bus_axi_porthalt() - Halt the given axi master port
+ * @master_port: AXI Master port to be halted
+ *
+ * Returns -ENODEV when no fabric owns @master_port, otherwise the
+ * fabric's port_halt() result.
+ */
+int msm_bus_axi_porthalt(int master_port)
+{
+ int ret = 0;
+ int priv_id;
+ struct msm_bus_fabric_device *fabdev;
+
+ priv_id = msm_bus_board_get_iid(master_port);
+ /* Format made consistent with msm_bus_axi_portunhalt() ("fabid: %d") */
+ MSM_BUS_DBG("master_port: %d iid: %d fabid: %d\n",
+ master_port, priv_id, GET_FABID(priv_id));
+ fabdev = msm_bus_get_fabric_device(GET_FABID(priv_id));
+ if (IS_ERR_OR_NULL(fabdev)) {
+ MSM_BUS_ERR("Fabric device not found for mport: %d\n",
+ master_port);
+ return -ENODEV;
+ }
+ /* Serialize halt/unhalt against other configuration operations */
+ mutex_lock(&msm_bus_config_lock);
+ ret = fabdev->algo->port_halt(fabdev, priv_id);
+ mutex_unlock(&msm_bus_config_lock);
+ return ret;
+}
+EXPORT_SYMBOL(msm_bus_axi_porthalt);
+
+/**
+ * msm_bus_axi_portunhalt() - Unhalt the given axi master port
+ * @master_port: AXI Master port to be unhalted
+ */
+int msm_bus_axi_portunhalt(int master_port)
+{
+ int ret = 0;
+ int priv_id;
+ struct msm_bus_fabric_device *fabdev;
+
+ /* Translate the global master id into the fabric-private id */
+ priv_id = msm_bus_board_get_iid(master_port);
+ MSM_BUS_DBG("master_port: %d iid: %d fabid: %d\n",
+ master_port, priv_id, GET_FABID(priv_id));
+ fabdev = msm_bus_get_fabric_device(GET_FABID(priv_id));
+ if (IS_ERR_OR_NULL(fabdev)) {
+ MSM_BUS_ERR("Fabric device not found for mport: %d\n",
+ master_port);
+ return -ENODEV;
+ }
+ /* Serialize halt/unhalt against other configuration operations */
+ mutex_lock(&msm_bus_config_lock);
+ ret = fabdev->algo->port_unhalt(fabdev, priv_id);
+ mutex_unlock(&msm_bus_config_lock);
+ return ret;
+}
+EXPORT_SYMBOL(msm_bus_axi_portunhalt);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_core.c b/drivers/soc/qcom/msm_bus/msm_bus_core.c
new file mode 100644
index 0000000000000..93c8f4f9ee343
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_core.c
@@ -0,0 +1,119 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/radix-tree.h>
+#include <linux/clk.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+#include "msm_bus_core.h"
+
+static atomic_t num_fab = ATOMIC_INIT(0);
+
+/* Number of fabric devices currently registered on msm_bus_type. */
+int msm_bus_get_num_fab(void)
+{
+ return atomic_read(&num_fab);
+}
+
+/*
+ * bus_find_device() match callback: true when @dev's fabric id equals
+ * *(int *)id.
+ * NOTE(review): to_msm_bus_fabric_device() is container_of(), so with a
+ * non-NULL @dev the result cannot be NULL — the check below looks like
+ * dead code; confirm before removing.
+ */
+int msm_bus_device_match(struct device *dev, void *id)
+{
+ struct msm_bus_fabric_device *fabdev = to_msm_bus_fabric_device(dev);
+
+ if (!fabdev) {
+ MSM_BUS_WARN("Fabric %p returning 0\n", fabdev);
+ return 0;
+ }
+ return fabdev->id == *(int *)id;
+}
+
+struct bus_type msm_bus_type = {
+ .name = "msm-bus-type",
+};
+EXPORT_SYMBOL(msm_bus_type);
+
+/**
+ * msm_bus_get_fabric_device() - This function is used to search for
+ * the fabric device on the bus
+ * @fabid: Fabric id
+ * Function returns: Pointer to the fabric device, or NULL when no
+ * registered fabric has a matching id
+ */
+struct msm_bus_fabric_device *msm_bus_get_fabric_device(int fabid)
+{
+ struct device *dev;
+ struct msm_bus_fabric_device *fabric;
+ /* Walk msm_bus_type comparing ids via msm_bus_device_match() */
+ dev = bus_find_device(&msm_bus_type, NULL, (void *)&fabid,
+ msm_bus_device_match);
+ if (!dev)
+ return NULL;
+ fabric = to_msm_bus_fabric_device(dev);
+ return fabric;
+}
+
+/**
+ * msm_bus_fabric_device_register() - Registers a fabric on msm bus
+ * @fabdev: Fabric device to be registered
+ *
+ * Returns 0 on success, a negative errno otherwise.
+ */
+int msm_bus_fabric_device_register(struct msm_bus_fabric_device *fabdev)
+{
+ int ret = 0;
+
+ fabdev->dev.bus = &msm_bus_type;
+ /*
+ * Security fix: never pass a non-literal as the format string
+ * (a '%' in fabdev->name would be interpreted by dev_set_name()).
+ */
+ ret = dev_set_name(&fabdev->dev, "%s", fabdev->name);
+ if (ret) {
+ MSM_BUS_ERR("error setting dev name\n");
+ goto err;
+ }
+ ret = device_register(&fabdev->dev);
+ if (ret < 0) {
+ MSM_BUS_ERR("error registering device%d %s\n",
+ ret, fabdev->name);
+ /*
+ * NOTE(review): device_register() failure normally requires
+ * put_device() rather than a plain return — confirm ownership
+ * of fabdev->dev before changing.
+ */
+ goto err;
+ }
+ atomic_inc(&num_fab);
+err:
+ return ret;
+}
+
+/**
+ * msm_bus_fabric_device_unregister() - Unregisters the fabric
+ * devices from the msm bus
+ */
+void msm_bus_fabric_device_unregister(struct msm_bus_fabric_device *fabdev)
+{
+ device_unregister(&fabdev->dev);
+ atomic_dec(&num_fab);
+}
+
+static void __exit msm_bus_exit(void)
+{
+ bus_unregister(&msm_bus_type);
+}
+
+static int __init msm_bus_init(void)
+{
+ int retval = 0;
+ retval = bus_register(&msm_bus_type);
+ if (retval)
+ MSM_BUS_ERR("bus_register error! %d\n",
+ retval);
+ return retval;
+}
+postcore_initcall(msm_bus_init);
+module_exit(msm_bus_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION("0.2");
+MODULE_ALIAS("platform:msm_bus");
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_core.h b/drivers/soc/qcom/msm_bus/msm_bus_core.h
new file mode 100644
index 0000000000000..c35a45caf2364
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_core.h
@@ -0,0 +1,403 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_CORE_H
+#define _ARCH_ARM_MACH_MSM_BUS_CORE_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/radix-tree.h>
+#include <linux/platform_device.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+
+#define MSM_BUS_DBG(msg, ...) \
+ pr_debug(msg, ## __VA_ARGS__)
+#define MSM_BUS_ERR(msg, ...) \
+ pr_err(msg, ## __VA_ARGS__)
+#define MSM_BUS_WARN(msg, ...) \
+ pr_warn(msg, ## __VA_ARGS__)
+#define MSM_FAB_ERR(msg, ...) \
+ dev_err(&fabric->fabdev.dev, msg, ## __VA_ARGS__)
+
+#define IS_MASTER_VALID(mas) \
+ (((mas >= MSM_BUS_MASTER_FIRST) && (mas <= MSM_BUS_MASTER_LAST)) \
+ ? 1 : 0)
+#define IS_SLAVE_VALID(slv) \
+ (((slv >= MSM_BUS_SLAVE_FIRST) && (slv <= MSM_BUS_SLAVE_LAST)) ? 1 : 0)
+
+#define INTERLEAVED_BW(fab_pdata, bw, ports) \
+ ((fab_pdata->il_flag) ? ((bw < 0) \
+ ? -msm_bus_div64((ports), (-bw)) : msm_bus_div64((ports), (bw))) : (bw))
+#define INTERLEAVED_VAL(fab_pdata, n) \
+ ((fab_pdata->il_flag) ? (n) : 1)
+#define KBTOB(a) (a * 1000ULL)
+
+enum msm_bus_dbg_op_type {
+ MSM_BUS_DBG_UNREGISTER = -2,
+ MSM_BUS_DBG_REGISTER,
+ MSM_BUS_DBG_OP = 1,
+};
+
+enum msm_bus_hw_sel {
+ MSM_BUS_RPM = 0,
+ MSM_BUS_NOC,
+ MSM_BUS_BIMC,
+};
+
+struct msm_bus_arb_ops {
+ uint32_t (*register_client)(struct msm_bus_scale_pdata *pdata);
+ int (*update_request)(uint32_t cl, unsigned int index);
+ void (*unregister_client)(uint32_t cl);
+ struct msm_bus_client_handle*
+ (*register_cl)(uint32_t mas, uint32_t slv, char *name,
+ bool active_only);
+ int (*update_bw)(struct msm_bus_client_handle *cl, u64 ab, u64 ib);
+ void (*unregister)(struct msm_bus_client_handle *cl);
+};
+
+enum {
+ SLAVE_NODE,
+ MASTER_NODE,
+ CLK_NODE,
+ NR_LIM_NODE,
+};
+
+
+extern struct bus_type msm_bus_type;
+extern struct msm_bus_arb_ops arb_ops;
+extern void msm_bus_arb_setops_legacy(struct msm_bus_arb_ops *arb_ops);
+
+struct msm_bus_node_info {
+ unsigned int id;
+ unsigned int priv_id;
+ unsigned int mas_hw_id;
+ unsigned int slv_hw_id;
+ int gateway;
+ int *masterp;
+ int *qport;
+ int num_mports;
+ int *slavep;
+ int num_sports;
+ int *tier;
+ int num_tiers;
+ int ahb;
+ int hw_sel;
+ const char *slaveclk[NUM_CTX];
+ const char *memclk[NUM_CTX];
+ const char *iface_clk_node;
+ unsigned int buswidth;
+ unsigned int ws;
+ unsigned int mode;
+ unsigned int perm_mode;
+ unsigned int prio_lvl;
+ unsigned int prio_rd;
+ unsigned int prio_wr;
+ unsigned int prio1;
+ unsigned int prio0;
+ unsigned int num_thresh;
+ u64 *th;
+ u64 cur_lim_bw;
+ unsigned int mode_thresh;
+ bool dual_conf;
+ u64 *bimc_bw;
+ bool nr_lim;
+ u32 ff;
+ bool rt_mas;
+ u32 bimc_gp;
+ u32 bimc_thmp;
+ u64 floor_bw;
+ const char *name;
+};
+
+struct path_node {
+ uint64_t clk[NUM_CTX];
+ uint64_t bw[NUM_CTX];
+ uint64_t *sel_clk;
+ uint64_t *sel_bw;
+ int next;
+};
+
+struct msm_bus_link_info {
+ uint64_t clk[NUM_CTX];
+ uint64_t *sel_clk;
+ uint64_t memclk;
+ int64_t bw[NUM_CTX];
+ int64_t *sel_bw;
+ int *tier;
+ int num_tiers;
+};
+
+struct nodeclk {
+ struct clk *clk;
+ uint64_t rate;
+ bool dirty;
+ bool enable;
+};
+
+struct msm_bus_inode_info {
+ struct msm_bus_node_info *node_info;
+ uint64_t max_bw;
+ uint64_t max_clk;
+ uint64_t cur_lim_bw;
+ uint64_t cur_prg_bw;
+ struct msm_bus_link_info link_info;
+ int num_pnodes;
+ struct path_node *pnode;
+ int commit_index;
+ struct nodeclk nodeclk[NUM_CTX];
+ struct nodeclk memclk[NUM_CTX];
+ struct nodeclk iface_clk;
+ void *hw_data;
+};
+
+struct msm_bus_node_hw_info {
+ bool dirty;
+ unsigned int hw_id;
+ uint64_t bw;
+};
+
+struct msm_bus_hw_algorithm {
+ int (*allocate_commit_data)(struct msm_bus_fabric_registration
+ *fab_pdata, void **cdata, int ctx);
+ void *(*allocate_hw_data)(struct platform_device *pdev,
+ struct msm_bus_fabric_registration *fab_pdata);
+ void (*node_init)(void *hw_data, struct msm_bus_inode_info *info);
+ void (*free_commit_data)(void *cdata);
+ void (*update_bw)(struct msm_bus_inode_info *hop,
+ struct msm_bus_inode_info *info,
+ struct msm_bus_fabric_registration *fab_pdata,
+ void *sel_cdata, int *master_tiers,
+ int64_t add_bw);
+ void (*fill_cdata_buffer)(int *curr, char *buf, const int max_size,
+ void *cdata, int nmasters, int nslaves, int ntslaves);
+ int (*commit)(struct msm_bus_fabric_registration
+ *fab_pdata, void *hw_data, void **cdata);
+ int (*port_unhalt)(uint32_t haltid, uint8_t mport);
+ int (*port_halt)(uint32_t haltid, uint8_t mport);
+ void (*config_master)(struct msm_bus_fabric_registration *fab_pdata,
+ struct msm_bus_inode_info *info,
+ uint64_t req_clk, uint64_t req_bw);
+ void (*config_limiter)(struct msm_bus_fabric_registration *fab_pdata,
+ struct msm_bus_inode_info *info);
+ bool (*update_bw_reg)(int mode);
+};
+
+struct msm_bus_fabric_device {
+ int id;
+ const char *name;
+ struct device dev;
+ const struct msm_bus_fab_algorithm *algo;
+ const struct msm_bus_board_algorithm *board_algo;
+ struct msm_bus_hw_algorithm hw_algo;
+ int visited;
+ int num_nr_lim;
+ u64 nr_lim_thresh;
+ u32 eff_fact;
+};
+/*
+ * Fix: container_of() needs the member name ("dev"), not the macro
+ * parameter; the old third argument "d" only expanded correctly when
+ * the caller's variable happened to be named "dev".
+ */
+#define to_msm_bus_fabric_device(d) container_of(d, \
+ struct msm_bus_fabric_device, dev)
+
+struct msm_bus_fabric {
+ struct msm_bus_fabric_device fabdev;
+ int ahb;
+ void *cdata[NUM_CTX];
+ bool arb_dirty;
+ bool clk_dirty;
+ struct radix_tree_root fab_tree;
+ int num_nodes;
+ struct list_head gateways;
+ struct msm_bus_inode_info info;
+ struct msm_bus_fabric_registration *pdata;
+ void *hw_data;
+};
+/*
+ * Fix: the embedded device member is "fabdev" — container_of() must use
+ * the real member name, not the macro parameter "d".
+ */
+#define to_msm_bus_fabric(d) container_of(d, \
+ struct msm_bus_fabric, fabdev)
+
+
+struct msm_bus_fab_algorithm {
+ int (*update_clks)(struct msm_bus_fabric_device *fabdev,
+ struct msm_bus_inode_info *pme, int index,
+ uint64_t curr_clk, uint64_t req_clk,
+ uint64_t bwsum, int flag, int ctx,
+ unsigned int cl_active_flag);
+ int (*port_halt)(struct msm_bus_fabric_device *fabdev, int portid);
+ int (*port_unhalt)(struct msm_bus_fabric_device *fabdev, int portid);
+ int (*commit)(struct msm_bus_fabric_device *fabdev);
+ struct msm_bus_inode_info *(*find_node)(struct msm_bus_fabric_device
+ *fabdev, int id);
+ struct msm_bus_inode_info *(*find_gw_node)(struct msm_bus_fabric_device
+ *fabdev, int id);
+ struct list_head *(*get_gw_list)(struct msm_bus_fabric_device *fabdev);
+ void (*update_bw)(struct msm_bus_fabric_device *fabdev, struct
+ msm_bus_inode_info * hop, struct msm_bus_inode_info *info,
+ int64_t add_bw, int *master_tiers, int ctx);
+ void (*config_master)(struct msm_bus_fabric_device *fabdev,
+ struct msm_bus_inode_info *info, uint64_t req_clk,
+ uint64_t req_bw);
+ void (*config_limiter)(struct msm_bus_fabric_device *fabdev,
+ struct msm_bus_inode_info *info);
+};
+
+struct msm_bus_board_algorithm {
+ int board_nfab;
+ void (*assign_iids)(struct msm_bus_fabric_registration *fabreg,
+ int fabid);
+ int (*get_iid)(int id);
+};
+
+/**
+ * Used to store the list of fabrics and other info to be
+ * maintained outside the fabric structure.
+ * Used while calculating path, and to find fabric ptrs
+ */
+struct msm_bus_fabnodeinfo {
+ struct list_head list;
+ struct msm_bus_inode_info *info;
+};
+
+struct msm_bus_client {
+ int id;
+ struct msm_bus_scale_pdata *pdata;
+ int *src_pnode;
+ int curr;
+};
+
+uint64_t msm_bus_div64(unsigned int width, uint64_t bw);
+int msm_bus_fabric_device_register(struct msm_bus_fabric_device *fabric);
+void msm_bus_fabric_device_unregister(struct msm_bus_fabric_device *fabric);
+struct msm_bus_fabric_device *msm_bus_get_fabric_device(int fabid);
+int msm_bus_get_num_fab(void);
+
+
+int msm_bus_hw_fab_init(struct msm_bus_fabric_registration *pdata,
+ struct msm_bus_hw_algorithm *hw_algo);
+void msm_bus_board_init(struct msm_bus_fabric_registration *pdata);
+void msm_bus_board_set_nfab(struct msm_bus_fabric_registration *pdata,
+ int nfab);
+#if defined(CONFIG_QCOM_SMD_RPM)
+int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata,
+ struct msm_bus_hw_algorithm *hw_algo);
+int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
+ *fab_pdata, void *hw_data, void **cdata);
+void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf, const int max_size,
+ void *cdata, int nmasters, int nslaves, int ntslaves);
+#else
+static inline int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata,
+ struct msm_bus_hw_algorithm *hw_algo)
+{
+ return 0;
+}
+static inline int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
+ *fab_pdata, void *hw_data, void **cdata)
+{
+ return 0;
+}
+static inline void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf,
+ const int max_size, void *cdata, int nmasters, int nslaves,
+ int ntslaves)
+{
+}
+#endif
+
+int msm_bus_noc_hw_init(struct msm_bus_fabric_registration *pdata,
+ struct msm_bus_hw_algorithm *hw_algo);
+int msm_bus_bimc_hw_init(struct msm_bus_fabric_registration *pdata,
+ struct msm_bus_hw_algorithm *hw_algo);
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_MSM_BUS_SCALING)
+void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata, int index,
+ uint32_t cl);
+void msm_bus_dbg_commit_data(const char *fabname, void *cdata,
+ int nmasters, int nslaves, int ntslaves, int op);
+int msm_bus_dbg_add_client(const struct msm_bus_client_handle *pdata);
+int msm_bus_dbg_rec_transaction(const struct msm_bus_client_handle *pdata,
+ u64 ab, u64 ib);
+void msm_bus_dbg_remove_client(const struct msm_bus_client_handle *pdata);
+
+#else
+static inline void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata,
+ int index, uint32_t cl)
+{
+}
+static inline void msm_bus_dbg_commit_data(const char *fabname,
+ void *cdata, int nmasters, int nslaves, int ntslaves,
+ int op)
+{
+}
+/* No-op stub when debugfs/bus-scaling support is compiled out. */
+static inline void msm_bus_dbg_remove_client
+	(const struct msm_bus_client_handle *pdata)
+{
+}
+
+static inline int
+msm_bus_dbg_rec_transaction(const struct msm_bus_client_handle *pdata,
+ u64 ab, u64 ib)
+{
+ return 0;
+}
+
+static inline int
+msm_bus_dbg_add_client(const struct msm_bus_client_handle *pdata)
+{
+ return 0;
+}
+#endif
+
+#if 0
+//#ifdef CONFIG_CORESIGHT
+int msmbus_coresight_init(struct platform_device *pdev);
+void msmbus_coresight_remove(struct platform_device *pdev);
+int msmbus_coresight_init_adhoc(struct platform_device *pdev,
+ struct device_node *of_node);
+void msmbus_coresight_remove_adhoc(struct platform_device *pdev);
+#else
+static inline int msmbus_coresight_init(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static inline void msmbus_coresight_remove(struct platform_device *pdev)
+{
+}
+
+static inline int msmbus_coresight_init_adhoc(struct platform_device *pdev,
+ struct device_node *of_node)
+{
+ return 0;
+}
+
+static inline void msmbus_coresight_remove_adhoc(struct platform_device *pdev)
+{
+}
+#endif
+
+
+#ifdef CONFIG_OF
+void msm_bus_of_get_nfab(struct platform_device *pdev,
+ struct msm_bus_fabric_registration *pdata);
+struct msm_bus_fabric_registration
+ *msm_bus_of_get_fab_data(struct platform_device *pdev);
+#else
+static inline void msm_bus_of_get_nfab(struct platform_device *pdev,
+ struct msm_bus_fabric_registration *pdata)
+{
+ return;
+}
+
+static inline struct msm_bus_fabric_registration
+ *msm_bus_of_get_fab_data(struct platform_device *pdev)
+{
+ return NULL;
+}
+#endif
+
+#endif /*_ARCH_ARM_MACH_MSM_BUS_CORE_H*/
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_dbg.c b/drivers/soc/qcom/msm_bus/msm_bus_dbg.c
new file mode 100644
index 0000000000000..d4c2068d9e3ae
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_dbg.c
@@ -0,0 +1,903 @@
+/* Copyright (c) 2010-2012, 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/hrtimer.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+#include <linux/msm_bus_rules.h>
+#include "msm_bus_core.h"
+#include "msm_bus_adhoc.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/trace_msm_bus.h>
+
+#define MAX_BUFF_SIZE 4096
+#define FILL_LIMIT 128
+
+static struct dentry *clients;
+static struct dentry *dir;
+static DEFINE_MUTEX(msm_bus_dbg_fablist_lock);
+struct msm_bus_dbg_state {
+ uint32_t cl;
+ uint8_t enable;
+ uint8_t current_index;
+} clstate;
+
+struct msm_bus_cldata {
+ const struct msm_bus_scale_pdata *pdata;
+ const struct msm_bus_client_handle *handle;
+ int index;
+ uint32_t clid;
+ int size;
+ struct dentry *file;
+ struct list_head list;
+ char buffer[MAX_BUFF_SIZE];
+};
+
+struct msm_bus_fab_list {
+ const char *name;
+ int size;
+ struct dentry *file;
+ struct list_head list;
+ char buffer[MAX_BUFF_SIZE];
+};
+
+static char *rules_buf;
+
+LIST_HEAD(fabdata_list);
+LIST_HEAD(cl_list);
+
+/**
+ * The following structures and functions are used for
+ * the test-client which can be created at run-time.
+ */
+
+static struct msm_bus_vectors init_vectors[1];
+static struct msm_bus_vectors current_vectors[1];
+static struct msm_bus_vectors requested_vectors[1];
+
+static struct msm_bus_paths shell_client_usecases[] = {
+ {
+ .num_paths = ARRAY_SIZE(init_vectors),
+ .vectors = init_vectors,
+ },
+ {
+ .num_paths = ARRAY_SIZE(current_vectors),
+ .vectors = current_vectors,
+ },
+ {
+ .num_paths = ARRAY_SIZE(requested_vectors),
+ .vectors = requested_vectors,
+ },
+};
+
+static struct msm_bus_scale_pdata shell_client = {
+ .usecase = shell_client_usecases,
+ .num_usecases = ARRAY_SIZE(shell_client_usecases),
+ .name = "test-client",
+};
+
+/*
+ * Reset the shell test-client state: clear all three vector slots
+ * (invalid -1 master/slave, zero bandwidth) and disable the client.
+ */
+static void msm_bus_dbg_init_vectors(void)
+{
+	struct msm_bus_vectors *vecs[] = {
+		init_vectors, current_vectors, requested_vectors,
+	};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(vecs); i++) {
+		vecs[i][0].src = -1;
+		vecs[i][0].dst = -1;
+		vecs[i][0].ab = 0;
+		vecs[i][0].ib = 0;
+	}
+	clstate.enable = 0;
+	clstate.current_index = 0;
+}
+
+static int msm_bus_dbg_update_cl_request(uint32_t cl)
+{
+ int ret = 0;
+
+ if (clstate.current_index < 2)
+ clstate.current_index = 2;
+ else {
+ clstate.current_index = 1;
+ current_vectors[0].ab = requested_vectors[0].ab;
+ current_vectors[0].ib = requested_vectors[0].ib;
+ }
+
+ if (clstate.enable) {
+ MSM_BUS_DBG("Updating request for shell client, index: %d\n",
+ clstate.current_index);
+ ret = msm_bus_scale_client_update_request(clstate.cl,
+ clstate.current_index);
+ } else
+ MSM_BUS_DBG("Enable bit not set. Skipping update request\n");
+
+ return ret;
+}
+
+static void msm_bus_dbg_unregister_client(uint32_t cl)
+{
+ MSM_BUS_DBG("Unregistering shell client\n");
+ msm_bus_scale_unregister_client(clstate.cl);
+ clstate.cl = 0;
+}
+
+static uint32_t msm_bus_dbg_register_client(void)
+{
+ int ret = 0;
+
+ if (init_vectors[0].src != requested_vectors[0].src) {
+ MSM_BUS_DBG("Shell client master changed. Unregistering\n");
+ msm_bus_dbg_unregister_client(clstate.cl);
+ }
+ if (init_vectors[0].dst != requested_vectors[0].dst) {
+ MSM_BUS_DBG("Shell client slave changed. Unregistering\n");
+ msm_bus_dbg_unregister_client(clstate.cl);
+ }
+
+ current_vectors[0].src = init_vectors[0].src;
+ requested_vectors[0].src = init_vectors[0].src;
+ current_vectors[0].dst = init_vectors[0].dst;
+ requested_vectors[0].dst = init_vectors[0].dst;
+
+ if (!clstate.enable) {
+ MSM_BUS_DBG("Enable bit not set, skipping registration: cl "
+ "%d\n", clstate.cl);
+ return 0;
+ }
+
+ if (clstate.cl) {
+ MSM_BUS_DBG("Client registered, skipping registration\n");
+ return clstate.cl;
+ }
+
+ MSM_BUS_DBG("Registering shell client\n");
+ ret = msm_bus_scale_register_client(&shell_client);
+ return ret;
+}
+
+static int msm_bus_dbg_mas_get(void *data, u64 *val)
+{
+ *val = init_vectors[0].src;
+ MSM_BUS_DBG("Get master: %llu\n", *val);
+ return 0;
+}
+
+static int msm_bus_dbg_mas_set(void *data, u64 val)
+{
+ init_vectors[0].src = val;
+ MSM_BUS_DBG("Set master: %llu\n", val);
+ clstate.cl = msm_bus_dbg_register_client();
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(shell_client_mas_fops, msm_bus_dbg_mas_get,
+ msm_bus_dbg_mas_set, "%llu\n");
+
+static int msm_bus_dbg_slv_get(void *data, u64 *val)
+{
+ *val = init_vectors[0].dst;
+ MSM_BUS_DBG("Get slave: %llu\n", *val);
+ return 0;
+}
+
+static int msm_bus_dbg_slv_set(void *data, u64 val)
+{
+ init_vectors[0].dst = val;
+ MSM_BUS_DBG("Set slave: %llu\n", val);
+ clstate.cl = msm_bus_dbg_register_client();
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(shell_client_slv_fops, msm_bus_dbg_slv_get,
+ msm_bus_dbg_slv_set, "%llu\n");
+
+static int msm_bus_dbg_ab_get(void *data, u64 *val)
+{
+ *val = requested_vectors[0].ab;
+ MSM_BUS_DBG("Get ab: %llu\n", *val);
+ return 0;
+}
+
+static int msm_bus_dbg_ab_set(void *data, u64 val)
+{
+ requested_vectors[0].ab = val;
+ MSM_BUS_DBG("Set ab: %llu\n", val);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(shell_client_ab_fops, msm_bus_dbg_ab_get,
+ msm_bus_dbg_ab_set, "%llu\n");
+
+static int msm_bus_dbg_ib_get(void *data, u64 *val)
+{
+ *val = requested_vectors[0].ib;
+ MSM_BUS_DBG("Get ib: %llu\n", *val);
+ return 0;
+}
+
+static int msm_bus_dbg_ib_set(void *data, u64 val)
+{
+ requested_vectors[0].ib = val;
+ MSM_BUS_DBG("Set ib: %llu\n", val);
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(shell_client_ib_fops, msm_bus_dbg_ib_get,
+ msm_bus_dbg_ib_set, "%llu\n");
+
+static int msm_bus_dbg_en_get(void *data, u64 *val)
+{
+ *val = clstate.enable;
+ MSM_BUS_DBG("Get enable: %llu\n", *val);
+ return 0;
+}
+
+static int msm_bus_dbg_en_set(void *data, u64 val)
+{
+ int ret = 0;
+
+ clstate.enable = val;
+ if (clstate.enable) {
+ if (!clstate.cl) {
+ MSM_BUS_DBG("client: %u\n", clstate.cl);
+ clstate.cl = msm_bus_dbg_register_client();
+ if (clstate.cl)
+ ret = msm_bus_dbg_update_cl_request(clstate.cl);
+ } else {
+ MSM_BUS_DBG("update request for cl: %u\n", clstate.cl);
+ ret = msm_bus_dbg_update_cl_request(clstate.cl);
+ }
+ }
+
+ MSM_BUS_DBG("Set enable: %llu\n", val);
+ return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(shell_client_en_fops, msm_bus_dbg_en_get,
+ msm_bus_dbg_en_set, "%llu\n");
+
+/**
+ * The following functions are used for viewing the client data
+ * and changing the client request at run-time
+ */
+
+/*
+ * Read handler for per-client debugfs files.
+ *
+ * file->private_data is overloaded: files created via msm_bus_dbg_create()
+ * store a clid cast to a pointer, while files created for adhoc clients
+ * store a struct msm_bus_client_handle pointer.  Both interpretations are
+ * tried against each list entry; whichever matches identifies the client.
+ */
+static ssize_t client_data_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int bsize = 0;
+ /* Interpretation 1: private_data holds a legacy client id. */
+ uint32_t cl = (uint32_t)(uintptr_t)file->private_data;
+ struct msm_bus_cldata *cldata = NULL;
+ /* Interpretation 2: private_data holds an adhoc client handle. */
+ const struct msm_bus_client_handle *handle = file->private_data;
+ int found = 0;
+
+ /* NOTE(review): cl_list is walked without a lock here — presumably
+  * relies on clients not unregistering concurrently; verify. */
+ list_for_each_entry(cldata, &cl_list, list) {
+ if ((cldata->clid == cl) ||
+ (cldata->handle && (cldata->handle == handle))) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ return 0;
+
+ bsize = cldata->size;
+ return simple_read_from_buffer(buf, count, ppos,
+ cldata->buffer, bsize);
+}
+
+static int client_data_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static const struct file_operations client_data_fops = {
+ .open = client_data_open,
+ .read = client_data_read,
+};
+
+struct dentry *msm_bus_dbg_create(const char *name, mode_t mode,
+ struct dentry *dent, uint32_t clid)
+{
+ if (dent == NULL) {
+ MSM_BUS_DBG("debugfs not ready yet\n");
+ return NULL;
+ }
+ return debugfs_create_file(name, mode, dent, (void *)(uintptr_t)clid,
+ &client_data_fops);
+}
+
+int msm_bus_dbg_add_client(const struct msm_bus_client_handle *pdata)
+
+{
+ struct msm_bus_cldata *cldata;
+
+ cldata = kzalloc(sizeof(struct msm_bus_cldata), GFP_KERNEL);
+ if (!cldata) {
+ MSM_BUS_DBG("Failed to allocate memory for client data\n");
+ return -ENOMEM;
+ }
+ cldata->handle = pdata;
+ list_add_tail(&cldata->list, &cl_list);
+ return 0;
+}
+
+/**
+ * msm_bus_dbg_rec_transaction() - Record an adhoc client's ab/ib vote
+ * @pdata: Client handle registered via msm_bus_dbg_add_client()
+ * @ab: Arbitrated bandwidth being requested
+ * @ib: Instantaneous bandwidth being requested
+ *
+ * Appends a timestamped record to the client's debugfs buffer (creating
+ * the debugfs file lazily on first use) and emits a trace event.
+ * Returns the buffer fill position, -ENOENT if the client is unknown,
+ * or -EINVAL if the handle has no name.
+ */
+int msm_bus_dbg_rec_transaction(const struct msm_bus_client_handle *pdata,
+	u64 ab, u64 ib)
+{
+	struct msm_bus_cldata *cldata;
+	int i;
+	struct timespec ts;
+	bool found = false;
+	char *buf = NULL;
+
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (cldata->handle == pdata) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return -ENOENT;
+
+	if (cldata->file == NULL) {
+		if (pdata->name == NULL) {
+			MSM_BUS_DBG("Client doesn't have a name\n");
+			return -EINVAL;
+		}
+		/* Was pr_err(): debug-level message, not an error condition. */
+		MSM_BUS_DBG("%s setting up debugfs %s\n", __func__,
+				pdata->name);
+		cldata->file = debugfs_create_file(pdata->name, S_IRUGO,
+				clients, (void *)pdata, &client_data_fops);
+	}
+
+	/* Wrap the record buffer once fewer than FILL_LIMIT bytes remain. */
+	if (cldata->size < (MAX_BUFF_SIZE - FILL_LIMIT))
+		i = cldata->size;
+	else {
+		i = 0;
+		cldata->size = 0;
+	}
+	buf = cldata->buffer;
+	ts = ktime_to_timespec(ktime_get());
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%d.%d\n",
+		(int)ts.tv_sec, (int)ts.tv_nsec);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "master: ");
+
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d ", pdata->mas);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nslave : ");
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d ", pdata->slv);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nab : ");
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu ", ab);
+
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nib : ");
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu ", ib);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n");
+	cldata->size = i;
+
+	trace_bus_update_request((int)ts.tv_sec, (int)ts.tv_nsec,
+		pdata->name, pdata->mas, pdata->slv, ab, ib);
+
+	return i;
+}
+
+void msm_bus_dbg_remove_client(const struct msm_bus_client_handle *pdata)
+{
+ struct msm_bus_cldata *cldata = NULL;
+
+ list_for_each_entry(cldata, &cl_list, list) {
+ if (cldata->handle == pdata) {
+ debugfs_remove(cldata->file);
+ list_del(&cldata->list);
+ kfree(cldata);
+ break;
+ }
+ }
+}
+
+static int msm_bus_dbg_record_client(const struct msm_bus_scale_pdata *pdata,
+ int index, uint32_t clid, struct dentry *file)
+{
+ struct msm_bus_cldata *cldata;
+
+ cldata = kmalloc(sizeof(struct msm_bus_cldata), GFP_KERNEL);
+ if (!cldata) {
+ MSM_BUS_DBG("Failed to allocate memory for client data\n");
+ return -ENOMEM;
+ }
+ cldata->pdata = pdata;
+ cldata->index = index;
+ cldata->clid = clid;
+ cldata->file = file;
+ cldata->size = 0;
+ list_add_tail(&cldata->list, &cl_list);
+ return 0;
+}
+
+static void msm_bus_dbg_free_client(uint32_t clid)
+{
+ struct msm_bus_cldata *cldata = NULL;
+
+ list_for_each_entry(cldata, &cl_list, list) {
+ if (cldata->clid == clid) {
+ debugfs_remove(cldata->file);
+ list_del(&cldata->list);
+ kfree(cldata);
+ break;
+ }
+ }
+}
+
+static int msm_bus_dbg_fill_cl_buffer(const struct msm_bus_scale_pdata *pdata,
+ int index, uint32_t clid)
+{
+ int i = 0, j;
+ char *buf = NULL;
+ struct msm_bus_cldata *cldata = NULL;
+ struct timespec ts;
+ int found = 0;
+
+ list_for_each_entry(cldata, &cl_list, list) {
+ if (cldata->clid == clid) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found)
+ return -ENOENT;
+
+ if (cldata->file == NULL) {
+ if (pdata->name == NULL) {
+ MSM_BUS_DBG("Client doesn't have a name\n");
+ return -EINVAL;
+ }
+ cldata->file = msm_bus_dbg_create(pdata->name, S_IRUGO,
+ clients, clid);
+ }
+
+ if (cldata->size < (MAX_BUFF_SIZE - FILL_LIMIT))
+ i = cldata->size;
+ else {
+ i = 0;
+ cldata->size = 0;
+ }
+ buf = cldata->buffer;
+ ts = ktime_to_timespec(ktime_get());
+ i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%d.%d\n",
+ (int)ts.tv_sec, (int)ts.tv_nsec);
+ i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "curr : %d\n", index);
+ i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "masters: ");
+
+ for (j = 0; j < pdata->usecase->num_paths; j++)
+ i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d ",
+ pdata->usecase[index].vectors[j].src);
+ i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nslaves : ");
+ for (j = 0; j < pdata->usecase->num_paths; j++)
+ i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d ",
+ pdata->usecase[index].vectors[j].dst);
+ i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nab : ");
+ for (j = 0; j < pdata->usecase->num_paths; j++)
+ i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu ",
+ pdata->usecase[index].vectors[j].ab);
+ i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nib : ");
+ for (j = 0; j < pdata->usecase->num_paths; j++)
+ i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu ",
+ pdata->usecase[index].vectors[j].ib);
+ i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n");
+
+ for (j = 0; j < pdata->usecase->num_paths; j++)
+ trace_bus_update_request((int)ts.tv_sec, (int)ts.tv_nsec,
+ pdata->name,
+ pdata->usecase[index].vectors[j].src,
+ pdata->usecase[index].vectors[j].dst,
+ pdata->usecase[index].vectors[j].ab,
+ pdata->usecase[index].vectors[j].ib);
+
+ cldata->index = index;
+ cldata->size = i;
+ return i;
+}
+
+/*
+ * Apply a usecase index written through debugfs to a registered client.
+ * Valid indices are 0..num_usecases-1; the original check used '>' and
+ * let index == num_usecases (one past the end) through.
+ */
+static int msm_bus_dbg_update_request(struct msm_bus_cldata *cldata, int index)
+{
+	if ((index < 0) || (index >= cldata->pdata->num_usecases)) {
+		MSM_BUS_DBG("Invalid index!\n");
+		return -EINVAL;
+	}
+	return msm_bus_scale_client_update_request(cldata->clid, index);
+}
+
+/*
+ * debugfs write handler: parses "<client-name> <index>" and updates the
+ * named client's usecase.
+ *
+ * Fixes vs. original: kmalloc() never returns an ERR_PTR so the IS_ERR()
+ * check was bogus; 'buf' leaked on the cnt == 0, copy_from_user() and
+ * kstrtoul() failure paths; the self-assignment 'cldata = cldata' did
+ * nothing; and the debug print used sizeof(ubuf) (a pointer size)
+ * instead of the actual byte count.
+ */
+static ssize_t msm_bus_dbg_update_request_write(struct file *file,
+	const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	struct msm_bus_cldata *cldata;
+	unsigned long index = 0;
+	int ret = 0;
+	char *chid;
+	char *buf;
+	int found = 0;
+
+	if (cnt == 0)
+		return 0;
+
+	buf = kmalloc(cnt + 1, GFP_KERNEL);
+	if (!buf) {
+		MSM_BUS_ERR("Memory allocation for buffer failed\n");
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(buf, ubuf, cnt)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+	buf[cnt] = '\0';
+	chid = buf;
+	MSM_BUS_DBG("buffer: %s\n size: %zu\n", buf, cnt);
+
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (strnstr(chid, cldata->pdata->name, cnt)) {
+			found = 1;
+			/* Skip past the name; chid now points at the index. */
+			strsep(&chid, " ");
+			if (chid) {
+				ret = kstrtoul(chid, 10, &index);
+				if (ret) {
+					MSM_BUS_DBG("Index conversion failed\n");
+					kfree(buf);
+					return -EFAULT;
+				}
+			} else {
+				MSM_BUS_DBG("Error parsing input. Index not found\n");
+				found = 0;
+			}
+			break;
+		}
+	}
+
+	if (found)
+		msm_bus_dbg_update_request(cldata, index);
+	kfree(buf);
+	return cnt;
+}
+
+/**
+ * The following functions are used for viewing the commit data
+ * for each fabric
+ */
+/*
+ * debugfs read handler for a fabric's commit-data buffer.
+ *
+ * Fix: the original returned -ENOENT on the not-found path while still
+ * holding msm_bus_dbg_fablist_lock, deadlocking the next reader.
+ */
+static ssize_t fabric_data_read(struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	struct msm_bus_fab_list *fablist = NULL;
+	int bsize = 0;
+	ssize_t ret;
+	const char *name = file->private_data;
+	int found = 0;
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry(fablist, &fabdata_list, list) {
+		if (strcmp(fablist->name, name) == 0) {
+			found = 1;
+			break;
+		}
+	}
+	if (!found) {
+		mutex_unlock(&msm_bus_dbg_fablist_lock);
+		return -ENOENT;
+	}
+	bsize = fablist->size;
+	ret = simple_read_from_buffer(buf, count, ppos,
+		fablist->buffer, bsize);
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+	return ret;
+}
+
+static const struct file_operations fabric_data_fops = {
+ .open = client_data_open,
+ .read = fabric_data_read,
+};
+
+static ssize_t rules_dbg_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t ret;
+ memset(rules_buf, 0, MAX_BUFF_SIZE);
+ print_rules_buf(rules_buf, MAX_BUFF_SIZE);
+ ret = simple_read_from_buffer(buf, count, ppos,
+ rules_buf, MAX_BUFF_SIZE);
+ return ret;
+}
+
+static int rules_dbg_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static const struct file_operations rules_dbg_fops = {
+ .open = rules_dbg_open,
+ .read = rules_dbg_read,
+};
+
+static int msm_bus_dbg_record_fabric(const char *fabname, struct dentry *file)
+{
+ struct msm_bus_fab_list *fablist;
+ int ret = 0;
+
+ mutex_lock(&msm_bus_dbg_fablist_lock);
+ fablist = kmalloc(sizeof(struct msm_bus_fab_list), GFP_KERNEL);
+ if (!fablist) {
+ MSM_BUS_DBG("Failed to allocate memory for commit data\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ fablist->name = fabname;
+ fablist->size = 0;
+ list_add_tail(&fablist->list, &fabdata_list);
+err:
+ mutex_unlock(&msm_bus_dbg_fablist_lock);
+ return ret;
+}
+
+static void msm_bus_dbg_free_fabric(const char *fabname)
+{
+ struct msm_bus_fab_list *fablist = NULL;
+
+ mutex_lock(&msm_bus_dbg_fablist_lock);
+ list_for_each_entry(fablist, &fabdata_list, list) {
+ if (strcmp(fablist->name, fabname) == 0) {
+ debugfs_remove(fablist->file);
+ list_del(&fablist->list);
+ kfree(fablist);
+ break;
+ }
+ }
+ mutex_unlock(&msm_bus_dbg_fablist_lock);
+}
+
+/*
+ * Append a timestamped snapshot of the fabric's commit data to its
+ * debugfs buffer.
+ *
+ * Fix: the original returned -ENOENT on the not-found path while still
+ * holding msm_bus_dbg_fablist_lock.
+ *
+ * NOTE(review): the buffer itself is filled after the lock is dropped
+ * and fablist->size is updated under a fresh lock; concurrent fills
+ * could interleave — preserved from the original, verify intent.
+ */
+static int msm_bus_dbg_fill_fab_buffer(const char *fabname,
+	void *cdata, int nmasters, int nslaves,
+	int ntslaves)
+{
+	int i;
+	char *buf = NULL;
+	struct msm_bus_fab_list *fablist = NULL;
+	struct timespec ts;
+	int found = 0;
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry(fablist, &fabdata_list, list) {
+		if (strcmp(fablist->name, fabname) == 0) {
+			found = 1;
+			break;
+		}
+	}
+	if (!found) {
+		mutex_unlock(&msm_bus_dbg_fablist_lock);
+		return -ENOENT;
+	}
+
+	if (fablist->file == NULL) {
+		MSM_BUS_DBG("Fabric dbg entry does not exist\n");
+		mutex_unlock(&msm_bus_dbg_fablist_lock);
+		return -EFAULT;
+	}
+
+	/* Wrap once the buffer is nearly full. */
+	if (fablist->size < MAX_BUFF_SIZE - 256)
+		i = fablist->size;
+	else {
+		i = 0;
+		fablist->size = 0;
+	}
+	buf = fablist->buffer;
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+	ts = ktime_to_timespec(ktime_get());
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%d.%d\n",
+		(int)ts.tv_sec, (int)ts.tv_nsec);
+
+	msm_bus_rpm_fill_cdata_buffer(&i, buf, MAX_BUFF_SIZE, cdata,
+		nmasters, nslaves, ntslaves);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n");
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	fablist->size = i;
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+	return 0;
+}
+
+static const struct file_operations msm_bus_dbg_update_request_fops = {
+ .open = client_data_open,
+ .write = msm_bus_dbg_update_request_write,
+};
+
+/**
+ * msm_bus_dbg_client_data() - Add debug data for clients
+ * @pdata: Platform data of the client
+ * @index: The current index or operation to be performed
+ * @clid: Client handle obtained during registration
+ */
+void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata, int index,
+ uint32_t clid)
+{
+ struct dentry *file = NULL;
+
+ if (index == MSM_BUS_DBG_REGISTER) {
+ msm_bus_dbg_record_client(pdata, index, clid, file);
+ if (!pdata->name) {
+ MSM_BUS_DBG("Cannot create debugfs entry. Null name\n");
+ return;
+ }
+ } else if (index == MSM_BUS_DBG_UNREGISTER) {
+ msm_bus_dbg_free_client(clid);
+ MSM_BUS_DBG("Client %d unregistered\n", clid);
+ } else
+ msm_bus_dbg_fill_cl_buffer(pdata, index, clid);
+}
+EXPORT_SYMBOL(msm_bus_dbg_client_data);
+
+/**
+ * msm_bus_dbg_commit_data() - Add commit data from fabrics
+ * @fabname: Fabric name specified in platform data
+ * @cdata: Commit Data
+ * @nmasters: Number of masters attached to fabric
+ * @nslaves: Number of slaves attached to fabric
+ * @ntslaves: Number of tiered slaves attached to fabric
+ * @op: Operation to be performed
+ */
+void msm_bus_dbg_commit_data(const char *fabname, void *cdata,
+ int nmasters, int nslaves, int ntslaves, int op)
+{
+ struct dentry *file = NULL;
+
+ if (op == MSM_BUS_DBG_REGISTER)
+ msm_bus_dbg_record_fabric(fabname, file);
+ else if (op == MSM_BUS_DBG_UNREGISTER)
+ msm_bus_dbg_free_fabric(fabname);
+ else
+ msm_bus_dbg_fill_fab_buffer(fabname, cdata, nmasters,
+ nslaves, ntslaves);
+}
+EXPORT_SYMBOL(msm_bus_dbg_commit_data);
+
+/*
+ * Create the msm-bus-dbg debugfs hierarchy and back-fill entries for
+ * clients/fabrics registered before debugfs was ready.
+ *
+ * Fixes vs. original:
+ *  - the 'clients', 'shell_client' and 'commit' directory checks all
+ *    copy-paste-tested 'dir' instead of the dentry just created;
+ *  - the fablist error path jumped to 'err' with
+ *    msm_bus_dbg_fablist_lock still held;
+ *  - '&val' (a stack local of this __init function) was stored as
+ *    private data in long-lived debugfs files; none of the fops read
+ *    their data argument, so pass NULL instead of a dangling pointer.
+ */
+static int __init msm_bus_debugfs_init(void)
+{
+	struct dentry *commit, *shell_client, *rules_dbg;
+	struct msm_bus_fab_list *fablist;
+	struct msm_bus_cldata *cldata = NULL;
+
+	dir = debugfs_create_dir("msm-bus-dbg", NULL);
+	if (!dir || IS_ERR(dir)) {
+		MSM_BUS_ERR("Couldn't create msm-bus-dbg\n");
+		goto err;
+	}
+
+	clients = debugfs_create_dir("client-data", dir);
+	if (!clients || IS_ERR(clients)) {
+		MSM_BUS_ERR("Couldn't create client-data\n");
+		goto err;
+	}
+
+	shell_client = debugfs_create_dir("shell-client", dir);
+	if (!shell_client || IS_ERR(shell_client)) {
+		MSM_BUS_ERR("Couldn't create shell-client\n");
+		goto err;
+	}
+
+	commit = debugfs_create_dir("commit-data", dir);
+	if (!commit || IS_ERR(commit)) {
+		MSM_BUS_ERR("Couldn't create commit-data\n");
+		goto err;
+	}
+
+	rules_dbg = debugfs_create_dir("rules-dbg", dir);
+	if (!rules_dbg || IS_ERR(rules_dbg)) {
+		MSM_BUS_ERR("Couldn't create rules-dbg\n");
+		goto err;
+	}
+
+	if (debugfs_create_file("print_rules", S_IRUGO | S_IWUSR,
+		rules_dbg, NULL, &rules_dbg_fops) == NULL)
+		goto err;
+
+	if (debugfs_create_file("update_request", S_IRUGO | S_IWUSR,
+		shell_client, NULL, &shell_client_en_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("ib", S_IRUGO | S_IWUSR, shell_client, NULL,
+		&shell_client_ib_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("ab", S_IRUGO | S_IWUSR, shell_client, NULL,
+		&shell_client_ab_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("slv", S_IRUGO | S_IWUSR, shell_client,
+		NULL, &shell_client_slv_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("mas", S_IRUGO | S_IWUSR, shell_client,
+		NULL, &shell_client_mas_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("update-request", S_IRUGO | S_IWUSR,
+		clients, NULL, &msm_bus_dbg_update_request_fops) == NULL)
+		goto err;
+
+	rules_buf = kzalloc(MAX_BUFF_SIZE, GFP_KERNEL);
+	if (!rules_buf) {
+		MSM_BUS_ERR("Failed to alloc rules_buf");
+		goto err;
+	}
+
+	/* Back-fill debugfs files for clients registered before init. */
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (cldata->pdata) {
+			if (cldata->pdata->name == NULL) {
+				MSM_BUS_DBG("Client name not found\n");
+				continue;
+			}
+			cldata->file = msm_bus_dbg_create(cldata->
+				pdata->name, S_IRUGO, clients, cldata->clid);
+		} else if (cldata->handle) {
+			if (cldata->handle->name == NULL) {
+				MSM_BUS_DBG("Client doesn't have a name\n");
+				continue;
+			}
+			cldata->file = debugfs_create_file(cldata->handle->name,
+							S_IRUGO, clients,
+							(void *)cldata->handle,
+							&client_data_fops);
+		}
+	}
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry(fablist, &fabdata_list, list) {
+		fablist->file = debugfs_create_file(fablist->name, S_IRUGO,
+			commit, (void *)fablist->name, &fabric_data_fops);
+		if (fablist->file == NULL) {
+			MSM_BUS_DBG("Cannot create files for commit data\n");
+			mutex_unlock(&msm_bus_dbg_fablist_lock);
+			kfree(rules_buf);
+			goto err;
+		}
+	}
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+
+	msm_bus_dbg_init_vectors();
+	return 0;
+err:
+	debugfs_remove_recursive(dir);
+	return -ENODEV;
+}
+
+static void __exit msm_bus_dbg_teardown(void)
+{
+ struct msm_bus_fab_list *fablist = NULL, *fablist_temp;
+ struct msm_bus_cldata *cldata = NULL, *cldata_temp;
+
+ debugfs_remove_recursive(dir);
+ list_for_each_entry_safe(cldata, cldata_temp, &cl_list, list) {
+ list_del(&cldata->list);
+ kfree(cldata);
+ }
+ mutex_lock(&msm_bus_dbg_fablist_lock);
+ list_for_each_entry_safe(fablist, fablist_temp, &fabdata_list, list) {
+ list_del(&fablist->list);
+ kfree(fablist);
+ }
+ kfree(rules_buf);
+ mutex_unlock(&msm_bus_dbg_fablist_lock);
+}
+module_exit(msm_bus_dbg_teardown);
+MODULE_DESCRIPTION("Debugfs for msm bus scaling client");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Gagan Mac <gmac@codeaurora.org>");
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c b/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c
new file mode 100644
index 0000000000000..2714d8a42399f
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c
@@ -0,0 +1,589 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include "msm_bus_adhoc.h"
+
+struct msm_bus_floor_client_type {
+ int mas_id;
+ int slv_id;
+ struct msm_bus_client_handle *vote_handle;
+ struct device *dev;
+ u64 cur_vote_hz;
+ int active_only;
+};
+
+static struct class *bus_floor_class;
+#define MAX_VOTER_NAME (50)
+#define DEFAULT_NODE_WIDTH (8)
+#define DBG_NAME(s) (strnstr(s, "-", 7) + 1)
+
+static int get_id(void)
+{
+ static int dev_id = MSM_BUS_INT_TEST_ID;
+ int id = dev_id;
+
+ if (id >= MSM_BUS_INT_TEST_LAST)
+ id = -EINVAL;
+ else
+ dev_id++;
+
+ return id;
+}
+
+static ssize_t bus_floor_active_only_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct msm_bus_floor_client_type *cl;
+
+ cl = dev_get_drvdata(dev);
+
+ if (!cl) {
+ pr_err("%s: Can't find cl", __func__);
+ return 0;
+ }
+ return snprintf(buf, PAGE_SIZE, "%d", cl->active_only);
+}
+
+static ssize_t bus_floor_active_only_store(struct device *dev,
+ struct device_attribute *dev_attr, const char *buf,
+ size_t n)
+{
+ struct msm_bus_floor_client_type *cl;
+
+ cl = dev_get_drvdata(dev);
+
+ if (!cl) {
+ pr_err("%s: Can't find cl", __func__);
+ return 0;
+ }
+
+ if (sscanf(buf, "%d", &cl->active_only) != 1) {
+ pr_err("%s:return error", __func__);
+ return -EINVAL;
+ }
+
+ return n;
+}
+
+static ssize_t bus_floor_vote_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ struct msm_bus_floor_client_type *cl;
+
+ cl = dev_get_drvdata(dev);
+
+ if (!cl) {
+ pr_err("%s: Can't find cl", __func__);
+ return 0;
+ }
+ return snprintf(buf, PAGE_SIZE, "%llu", cl->cur_vote_hz);
+}
+
+static ssize_t bus_floor_vote_store(struct device *dev,
+ struct device_attribute *dev_attr, const char *buf,
+ size_t n)
+{
+ struct msm_bus_floor_client_type *cl;
+ int ret = 0;
+
+ cl = dev_get_drvdata(dev);
+
+ if (!cl) {
+ pr_err("%s: Can't find cl", __func__);
+ return 0;
+ }
+
+ if (sscanf(buf, "%llu", &cl->cur_vote_hz) != 1) {
+ pr_err("%s:return error", __func__);
+ return -EINVAL;
+ }
+
+ ret = msm_bus_floor_vote_context(dev_name(dev), cl->cur_vote_hz,
+ cl->active_only);
+ return ret ? ret : n;
+}
+
+static ssize_t bus_floor_vote_store_api(struct device *dev,
+ struct device_attribute *dev_attr, const char *buf,
+ size_t n)
+{
+ struct msm_bus_floor_client_type *cl;
+ int ret = 0;
+ char name[10];
+ u64 vote_khz = 0;
+
+ cl = dev_get_drvdata(dev);
+
+ if (!cl) {
+ pr_err("%s: Can't find cl", __func__);
+ return 0;
+ }
+
+ if (sscanf(buf, "%9s %llu", name, &vote_khz) != 2) {
+ pr_err("%s:return error", __func__);
+ return -EINVAL;
+ }
+
+ pr_info("%s: name %s vote %llu\n",
+ __func__, name, vote_khz);
+
+ ret = msm_bus_floor_vote(name, vote_khz);
+ return ret ? ret : n;
+}
+
+static DEVICE_ATTR(floor_vote, S_IRUGO | S_IWUSR,
+ bus_floor_vote_show, bus_floor_vote_store);
+
+static DEVICE_ATTR(floor_vote_api, S_IRUGO | S_IWUSR,
+ bus_floor_vote_show, bus_floor_vote_store_api);
+
+static DEVICE_ATTR(floor_active_only, S_IRUGO | S_IWUSR,
+ bus_floor_active_only_show, bus_floor_active_only_store);
+
+static int msm_bus_floor_init_dev(struct device *fab_dev,
+ struct device *dev, bool is_master)
+{
+ struct msm_bus_node_device_type *bus_node = NULL;
+ struct msm_bus_node_device_type *fab_node = NULL;
+ struct msm_bus_node_info_type *node_info = NULL;
+ int ret = 0;
+
+ if (!(fab_dev && dev)) {
+ ret = -ENXIO;
+ goto exit_init_bus_dev;
+ }
+
+ fab_node = fab_dev->platform_data;
+
+ if (!fab_node) {
+ pr_info("\n%s: Can't create device", __func__);
+ ret = -ENXIO;
+ goto exit_init_bus_dev;
+ }
+
+ device_initialize(dev);
+ bus_node = devm_kzalloc(dev,
+ sizeof(struct msm_bus_node_device_type), GFP_KERNEL);
+
+ if (!bus_node) {
+ pr_err("%s:Bus node alloc failed\n", __func__);
+ ret = -ENOMEM;
+ goto exit_init_bus_dev;
+ }
+
+ node_info = devm_kzalloc(dev,
+ sizeof(struct msm_bus_node_info_type), GFP_KERNEL);
+
+ if (!node_info) {
+ pr_err("%s:Bus node info alloc failed\n", __func__);
+ devm_kfree(dev, bus_node);
+ ret = -ENOMEM;
+ goto exit_init_bus_dev;
+ }
+
+ bus_node->node_info = node_info;
+ bus_node->ap_owned = true;
+ bus_node->node_info->bus_device = fab_dev;
+ bus_node->node_info->buswidth = 8;
+ dev->platform_data = bus_node;
+ dev->bus = &msm_bus_type;
+
+ bus_node->node_info->id = get_id();
+ if (bus_node->node_info->id < 0) {
+ pr_err("%s: Failed to get id for dev. Bus:%s is_master:%d",
+ __func__, fab_node->node_info->name, is_master);
+ ret = -ENXIO;
+ goto exit_init_bus_dev;
+ }
+
+ dev_set_name(dev, "testnode-%s-%s", (is_master ? "mas" : "slv"),
+ fab_node->node_info->name);
+
+ ret = device_add(dev);
+ if (ret < 0) {
+ pr_err("%s: Failed to add %s", __func__, dev_name(dev));
+ goto exit_init_bus_dev;
+ }
+
+exit_init_bus_dev:
+ return ret;
+}
+
+static int msm_bus_floor_show_info(struct device *dev, void *data)
+{
+ if (dev)
+ pr_err(" %s\n", dev_name(dev));
+ return 0;
+}
+
+static void msm_bus_floor_pr_usage(void)
+{
+ pr_err("msm_bus_floor_vote: Supported buses\n");
+ class_for_each_device(bus_floor_class, NULL, NULL,
+ msm_bus_floor_show_info);
+}
+
+static int msm_bus_floor_match(struct device *dev, const void *data)
+{
+ int ret = 0;
+
+ if (!(dev && data))
+ return ret;
+
+ if (strnstr(dev_name(dev), data, MAX_VOTER_NAME))
+ ret = 1;
+
+ return ret;
+}
+
+int msm_bus_floor_vote(const char *name, u64 floor_hz)
+{
+ int ret = -EINVAL;
+ struct msm_bus_floor_client_type *cl;
+ bool found = false;
+ struct device *dbg_voter = NULL;
+
+ if (!name) {
+ pr_err("%s: NULL name", __func__);
+ return -EINVAL;
+ }
+
+ dbg_voter = class_find_device(bus_floor_class, NULL,
+ name, msm_bus_floor_match);
+ if (dbg_voter) {
+ found = true;
+ cl = dev_get_drvdata(dbg_voter);
+
+ if (!cl) {
+ pr_err("%s: Can't find cl", __func__);
+ goto exit_bus_floor_vote;
+ }
+
+ if (!cl->vote_handle) {
+ char cl_name[MAX_VOTER_NAME];
+ snprintf(cl_name, MAX_VOTER_NAME, "%s-floor-voter",
+ dev_name(cl->dev));
+ cl->vote_handle = msm_bus_scale_register(cl->mas_id,
+ cl->slv_id, cl_name, false);
+ if (!cl->vote_handle) {
+ ret = -ENXIO;
+ goto exit_bus_floor_vote;
+ }
+ }
+
+ cl->cur_vote_hz = floor_hz;
+ ret = msm_bus_scale_update_bw(cl->vote_handle, 0,
+ (floor_hz * DEFAULT_NODE_WIDTH));
+ if (ret) {
+ pr_err("%s: Failed to update %s", __func__,
+ name);
+ goto exit_bus_floor_vote;
+ }
+ } else {
+ pr_err("\n%s:No matching voting device found for %s", __func__,
+ name);
+ msm_bus_floor_pr_usage();
+ }
+
+exit_bus_floor_vote:
+ if (dbg_voter)
+ put_device(dbg_voter);
+
+ return ret;
+}
+EXPORT_SYMBOL(msm_bus_floor_vote);
+
+int msm_bus_floor_vote_context(const char *name, u64 floor_hz,
+ bool active_only)
+{
+ int ret = -EINVAL;
+ struct msm_bus_floor_client_type *cl;
+ bool found = false;
+ struct device *dbg_voter = NULL;
+
+ if (!name) {
+ pr_err("%s: NULL name", __func__);
+ return -EINVAL;
+ }
+
+ dbg_voter = class_find_device(bus_floor_class, NULL,
+ name, msm_bus_floor_match);
+ if (dbg_voter) {
+ found = true;
+ cl = dev_get_drvdata(dbg_voter);
+
+ if (!cl) {
+ pr_err("%s: Can't find cl", __func__);
+ goto exit_bus_floor_vote_context;
+ }
+
+ if (!(cl->vote_handle &&
+ (cl->vote_handle->active_only == active_only))) {
+ char cl_name[MAX_VOTER_NAME];
+
+ if (cl->vote_handle)
+ msm_bus_scale_unregister(cl->vote_handle);
+
+ snprintf(cl_name, MAX_VOTER_NAME, "%s-floor-voter",
+ dev_name(cl->dev));
+ cl->vote_handle = msm_bus_scale_register(cl->mas_id,
+ cl->slv_id, (char *)dev_name(cl->dev),
+ active_only);
+ if (!cl->vote_handle) {
+ ret = -ENXIO;
+ goto exit_bus_floor_vote_context;
+ }
+ }
+
+ cl->cur_vote_hz = floor_hz;
+ ret = msm_bus_scale_update_bw(cl->vote_handle, 0,
+ (floor_hz * DEFAULT_NODE_WIDTH));
+ if (ret) {
+ pr_err("%s: Failed to update %s", __func__,
+ name);
+ goto exit_bus_floor_vote_context;
+ }
+ } else {
+ pr_err("\n%s:No matching voting device found for %s", __func__,
+ name);
+ msm_bus_floor_pr_usage();
+ }
+
+exit_bus_floor_vote_context:
+ if (dbg_voter)
+ put_device(dbg_voter);
+
+ return ret;
+}
+EXPORT_SYMBOL(msm_bus_floor_vote_context);
+
+static int msm_bus_floor_setup_dev_conn(struct device *mas, struct device *slv)
+{
+ int ret = 0;
+ int slv_id = 0;
+ struct msm_bus_node_device_type *mas_node = NULL;
+ struct msm_bus_node_device_type *slv_node = NULL;
+
+ if (!(mas && slv)) {
+ pr_err("\n%s: Invalid master/slave device", __func__);
+ ret = -ENXIO;
+ goto exit_setup_dev_conn;
+ }
+
+ mas_node = mas->platform_data;
+ slv_node = slv->platform_data;
+
+ if (!(mas_node && slv_node)) {
+ ret = -ENXIO;
+ goto exit_setup_dev_conn;
+ }
+
+ slv_id = slv_node->node_info->id;
+ mas_node->node_info->num_connections = 1;
+ mas_node->node_info->connections = devm_kzalloc(mas,
+ (sizeof(int) * mas_node->node_info->num_connections),
+ GFP_KERNEL);
+
+ if (!mas_node->node_info->connections) {
+ pr_err("%s:Bus node connections info alloc failed\n", __func__);
+ ret = -ENOMEM;
+ goto exit_setup_dev_conn;
+ }
+
+ mas_node->node_info->dev_connections = devm_kzalloc(mas,
+ (sizeof(struct device *) *
+ mas_node->node_info->num_connections),
+ GFP_KERNEL);
+
+ if (!mas_node->node_info->dev_connections) {
+ pr_err("%s:Bus node dev connections info alloc failed\n",
+ __func__);
+ ret = -ENOMEM;
+ goto exit_setup_dev_conn;
+ }
+ mas_node->node_info->connections[0] = slv_id;
+ mas_node->node_info->dev_connections[0] = slv;
+
+exit_setup_dev_conn:
+ return ret;
+}
+
+static int msm_bus_floor_setup_floor_dev(
+ struct msm_bus_node_device_type *mas_node,
+ struct msm_bus_node_device_type *slv_node,
+ struct msm_bus_node_device_type *bus_node)
+{
+ struct msm_bus_floor_client_type *cl_ptr = NULL;
+ int ret = 0;
+ char *name = NULL;
+
+ cl_ptr = kzalloc(sizeof(struct msm_bus_floor_client_type), GFP_KERNEL);
+ if (!cl_ptr) {
+ pr_err("\n%s: Err Allocating mem for cl ptr bus %d",
+ __func__, bus_node->node_info->id);
+ ret = -ENOMEM;
+ goto err_setup_floor_dev;
+ }
+
+ if (!bus_floor_class) {
+ bus_floor_class = class_create(THIS_MODULE, "bus-voter");
+ if (IS_ERR(bus_floor_class)) {
+ ret = -ENXIO;
+ pr_err("%s: Error creating dev class", __func__);
+ goto err_setup_floor_dev;
+ }
+ }
+
+ name = DBG_NAME(bus_node->node_info->name);
+ if (!name) {
+ pr_err("%s: Invalid name derived for %s", __func__,
+ bus_node->node_info->name);
+ ret = -EINVAL;
+ goto err_setup_floor_dev;
+ }
+
+ cl_ptr->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+ if (!cl_ptr->dev) {
+ pr_err("%s: Failed to create device bus %d", __func__, bus_node->node_info->id);
+ ret = -ENOMEM;
+ goto err_setup_floor_dev;
+ }
+
+ device_initialize(cl_ptr->dev);
+ cl_ptr->dev->class = bus_floor_class;
+ dev_set_name(cl_ptr->dev, "%s", name);
+ dev_set_drvdata(cl_ptr->dev, cl_ptr);
+ ret = device_add(cl_ptr->dev);
+
+ if (ret < 0) {
+ pr_err("%s: Failed to add device bus %d", __func__,
+ bus_node->node_info->id);
+ goto err_setup_floor_dev;
+ }
+
+ cl_ptr->mas_id = mas_node->node_info->id;
+ cl_ptr->slv_id = slv_node->node_info->id;
+
+ ret = device_create_file(cl_ptr->dev, &dev_attr_floor_vote);
+ if (ret < 0)
+ goto err_setup_floor_dev;
+
+ ret = device_create_file(cl_ptr->dev, &dev_attr_floor_vote_api);
+ if (ret < 0)
+ goto err_setup_floor_dev;
+
+ ret = device_create_file(cl_ptr->dev, &dev_attr_floor_active_only);
+ if (ret < 0)
+ goto err_setup_floor_dev;
+
+ return ret;
+
+err_setup_floor_dev:
+ kfree(cl_ptr);
+ return ret;
+}
+
+int msm_bus_floor_init(struct device *dev)
+{
+ struct device *m_dev = NULL;
+ struct device *s_dev = NULL;
+ struct msm_bus_node_device_type *mas_node = NULL;
+ struct msm_bus_node_device_type *slv_node = NULL;
+ struct msm_bus_node_device_type *bus_node = NULL;
+ int ret = 0;
+
+ if (!dev) {
+ pr_info("\n%s: Can't create voting client", __func__);
+ ret = -ENXIO;
+ goto exit_floor_init;
+ }
+
+ bus_node = dev->platform_data;
+ if (!(bus_node && bus_node->node_info->is_fab_dev)) {
+ pr_info("\n%s: Can't create voting client, not a fab device",
+ __func__);
+ ret = -ENXIO;
+ goto exit_floor_init;
+ }
+
+ m_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+ if (!m_dev) {
+ pr_err("%s:Master Device alloc failed\n", __func__);
+ m_dev = NULL;
+ ret = -ENOMEM;
+ goto exit_floor_init;
+ }
+
+ s_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+ if (!s_dev) {
+ pr_err("%s:Slave Device alloc failed\n", __func__);
+ s_dev = NULL;
+ kfree(m_dev);
+ ret = -ENOMEM;
+ m_dev = NULL;
+ goto exit_floor_init;
+ }
+
+ ret = msm_bus_floor_init_dev(dev, m_dev, true);
+ if (ret) {
+ pr_err("\n%s: Error setting up master dev, bus %d",
+ __func__, bus_node->node_info->id);
+ kfree(m_dev);
+ kfree(s_dev);
+ goto exit_floor_init;
+ }
+
+ ret = msm_bus_floor_init_dev(dev, s_dev, false);
+ if (ret) {
+ pr_err("\n%s: Error setting up slave dev, bus %d",
+ __func__, bus_node->node_info->id);
+ kfree(m_dev);
+ kfree(s_dev);
+ goto exit_floor_init;
+ }
+
+ ret = msm_bus_floor_setup_dev_conn(m_dev, s_dev);
+ if (ret) {
+ pr_err("\n%s: Error setting up connections bus %d",
+ __func__, bus_node->node_info->id);
+ goto err_floor_init;
+ }
+
+ mas_node = m_dev->platform_data;
+ slv_node = s_dev->platform_data;
+ if ((!(mas_node && slv_node))) {
+ pr_err("\n%s: Error getting mas/slv nodes bus %d",
+ __func__, bus_node->node_info->id);
+ goto err_floor_init;
+ }
+
+ ret = msm_bus_floor_setup_floor_dev(mas_node, slv_node, bus_node);
+ if (ret) {
+ pr_err("\n%s: Error getting mas/slv nodes bus %d",
+ __func__, bus_node->node_info->id);
+ goto err_floor_init;
+ }
+
+exit_floor_init:
+ return ret;
+err_floor_init:
+ device_unregister(m_dev);
+ device_unregister(s_dev);
+ kfree(m_dev);
+ kfree(s_dev);
+ return ret;
+}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric.c
new file mode 100644
index 0000000000000..b38b35197d8ab
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric.c
@@ -0,0 +1,970 @@
+/* Copyright (c) 2010-2014, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/radix-tree.h>
+#include "msm_bus_core.h"
+
+enum {
+ DISABLE,
+ ENABLE,
+};
+
+/**
+ * msm_bus_fabric_add_node() - Add a node to the fabric structure
+ * @fabric: Fabric device to which the node should be added
+ * @info: The node to be added
+ */
+static int msm_bus_fabric_add_node(struct msm_bus_fabric *fabric,
+ struct msm_bus_inode_info *info)
+{
+ int status = -ENOMEM, ctx;
+ MSM_BUS_DBG("msm_bus_fabric_add_node: ID %d Gw: %d\n",
+ info->node_info->priv_id, info->node_info->gateway);
+ status = radix_tree_preload(GFP_ATOMIC);
+ if (status)
+ goto out;
+
+ status = radix_tree_insert(&fabric->fab_tree, info->node_info->priv_id,
+ info);
+ radix_tree_preload_end();
+ if (IS_SLAVE(info->node_info->priv_id))
+ radix_tree_tag_set(&fabric->fab_tree, info->node_info->priv_id,
+ SLAVE_NODE);
+
+ for (ctx = 0; ctx < NUM_CTX; ctx++) {
+ if (info->node_info->slaveclk[ctx]) {
+ radix_tree_tag_set(&fabric->fab_tree,
+ info->node_info->priv_id, CLK_NODE);
+ break;
+ }
+
+ info->nodeclk[ctx].enable = false;
+ info->nodeclk[ctx].dirty = false;
+ }
+
+ if (info->node_info->nr_lim) {
+ int iid = msm_bus_board_get_iid(info->node_info->id);
+ struct msm_bus_fabric_device *fabdev =
+ msm_bus_get_fabric_device(GET_FABID(iid));
+
+ if (!fabdev)
+ BUG_ON(1);
+
+ radix_tree_tag_set(&fabric->fab_tree,
+ info->node_info->priv_id, MASTER_NODE);
+
+ fabdev->num_nr_lim++;
+ MSM_BUS_ERR("%s: Adding %d There are %d nodes", __func__,
+ info->node_info->id, fabdev->num_nr_lim);
+ }
+out:
+ return status;
+}
+
+/**
+ * msm_bus_add_fab() - Add a fabric (gateway) to the current fabric
+ * @fabric: Fabric device to which the gateway info should be added
+ * @info: Gateway node to be added to the fabric
+ */
+static int msm_bus_fabric_add_fab(struct msm_bus_fabric *fabric,
+ struct msm_bus_inode_info *info)
+{
+ struct msm_bus_fabnodeinfo *fabnodeinfo;
+ MSM_BUS_DBG("msm_bus_fabric_add_fab: ID %d Gw: %d\n",
+ info->node_info->priv_id, info->node_info->gateway);
+ fabnodeinfo = kzalloc(sizeof(struct msm_bus_fabnodeinfo), GFP_KERNEL);
+ if (fabnodeinfo == NULL) {
+ MSM_FAB_ERR("msm_bus_fabric_add_fab: "
+ "No Node Info\n");
+ MSM_FAB_ERR("axi: Cannot register fabric!\n");
+ return -ENOMEM;
+ }
+
+ fabnodeinfo->info = info;
+ fabnodeinfo->info->num_pnodes = -1;
+ list_add_tail(&fabnodeinfo->list, &fabric->gateways);
+ return 0;
+}
+
+/**
+ * register_fabric_info() - Create the internal fabric structure and
+ * build the topology tree from platform specific data
+ * @pdev: Platform device for getting base addresses
+ * @fabric: Fabric to which the gateways, nodes should be added
+ *
+ * This function is called from probe. Iterates over the platform data,
+ * and builds the topology
+ */
+static int register_fabric_info(struct platform_device *pdev,
+ struct msm_bus_fabric *fabric)
+{
+ int i = 0, ret = 0, err = 0;
+
+ MSM_BUS_DBG("id:%d pdata-id: %d len: %d\n", fabric->fabdev.id,
+ fabric->pdata->id, fabric->pdata->len);
+ fabric->hw_data = fabric->fabdev.hw_algo.allocate_hw_data(pdev,
+ fabric->pdata);
+ if (ZERO_OR_NULL_PTR(fabric->hw_data) && fabric->pdata->ahb == 0) {
+ MSM_BUS_ERR("Couldn't allocate hw_data for fab: %d\n", fabric->fabdev.id);
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ for (i = 0; i < fabric->pdata->len; i++) {
+ struct msm_bus_inode_info *info;
+ int ctx, j;
+
+ info = kzalloc(sizeof(struct msm_bus_inode_info), GFP_KERNEL);
+ if (info == NULL) {
+ MSM_BUS_ERR("Error allocating info\n");
+ return -ENOMEM;
+ }
+
+ info->node_info = fabric->pdata->info + i;
+ info->commit_index = -1;
+ info->num_pnodes = -1;
+
+ for (ctx = 0; ctx < NUM_CTX; ctx++) {
+ if (info->node_info->slaveclk[ctx]) {
+ info->nodeclk[ctx].clk = clk_get_sys("msm_bus",
+ info->node_info->slaveclk[ctx]);
+ if (IS_ERR(info->nodeclk[ctx].clk)) {
+ MSM_BUS_ERR("Couldn't get clk %s\n",
+ info->node_info->slaveclk[ctx]);
+ err = -EINVAL;
+ }
+ info->nodeclk[ctx].enable = false;
+ info->nodeclk[ctx].dirty = false;
+ }
+
+ if (info->node_info->memclk[ctx]) {
+ info->memclk[ctx].clk = clk_get_sys("msm_bus",
+ info->node_info->memclk[ctx]);
+ if (IS_ERR(info->memclk[ctx].clk)) {
+ MSM_BUS_ERR("Couldn't get clk %s\n",
+ info->node_info->memclk[ctx]);
+ err = -EINVAL;
+ }
+ info->memclk[ctx].enable = false;
+ info->memclk[ctx].dirty = false;
+ }
+ }
+
+ if (info->node_info->iface_clk_node) {
+ info->iface_clk.clk = clk_get_sys(info->node_info->
+ iface_clk_node, "iface_clk");
+ if (IS_ERR(info->iface_clk.clk)) {
+ MSM_BUS_ERR("ERR: Couldn't get clk %s\n",
+ info->node_info->iface_clk_node);
+ }
+ }
+
+ ret = info->node_info->gateway ?
+ msm_bus_fabric_add_fab(fabric, info) :
+ msm_bus_fabric_add_node(fabric, info);
+ if (ret) {
+ MSM_BUS_ERR("Unable to add node info, ret: %d\n", ret);
+ kfree(info);
+ goto error;
+ }
+
+ if (fabric->fabdev.hw_algo.node_init == NULL)
+ continue;
+
+ if (info->iface_clk.clk) {
+ MSM_BUS_DBG("Enabled iface clock for node init: %d\n",
+ info->node_info->priv_id);
+ clk_prepare_enable(info->iface_clk.clk);
+ }
+
+ for (j = 0; j < NUM_CTX; j++)
+ clk_prepare_enable(fabric->info.nodeclk[j].clk);
+
+ ret = fabric->fabdev.hw_algo.node_init(fabric->hw_data, info);
+ if (ret) {
+ MSM_BUS_ERR("Unable to init node info, ret: %d\n", ret);
+ kfree(info);
+ }
+
+ for (j = 0; j < NUM_CTX; j++)
+ clk_disable_unprepare(fabric->info.nodeclk[j].clk);
+
+ if (info->iface_clk.clk) {
+ MSM_BUS_DBG("Disable iface_clk after node init: %d\n",
+ info->node_info->priv_id);
+ clk_disable_unprepare(info->iface_clk.clk);
+ }
+
+
+ }
+
+ MSM_BUS_DBG("Fabric: %d nmasters: %d nslaves: %d\n"
+ " ntieredslaves: %d, rpm_enabled: %d\n",
+ fabric->fabdev.id, fabric->pdata->nmasters,
+ fabric->pdata->nslaves, fabric->pdata->ntieredslaves,
+ fabric->pdata->rpm_enabled);
+ MSM_BUS_DBG("msm_bus_register_fabric_info i: %d\n", i);
+ fabric->num_nodes = fabric->pdata->len;
+error:
+ fabric->num_nodes = i;
+ msm_bus_dbg_commit_data(fabric->fabdev.name, NULL, 0, 0, 0,
+ MSM_BUS_DBG_REGISTER);
+ return ret | err;
+}
+
+/**
+ * msm_bus_fabric_update_clks() - Set the clocks for fabrics and slaves
+ * @fabric: Fabric for which the clocks need to be updated
+ * @slave: The node for which the clocks need to be updated
+ * @index: The index for which the current clocks are set
+ * @curr_clk_hz:Current clock value
+ * @req_clk_hz: Requested clock value
+ * @bwsum: Bandwidth Sum
+ * @clk_flag: Flag determining whether fabric clock or the slave clock has to
+ * be set. If clk_flag is set, fabric clock is set, else slave clock is set.
+ */
+static int msm_bus_fabric_update_clks(struct msm_bus_fabric_device *fabdev,
+ struct msm_bus_inode_info *slave, int index,
+ uint64_t curr_clk_hz, uint64_t req_clk_hz,
+ uint64_t bwsum_hz, int clk_flag, int ctx,
+ unsigned int cl_active_flag)
+{
+ int i, status = 0;
+ uint64_t max_pclk = 0, rate;
+ uint64_t *pclk = NULL;
+ struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
+ struct nodeclk *nodeclk;
+
+ /**
+ * Integration for clock rates is not required if context is not
+ * same as client's active-only flag
+ */
+ if (ctx != cl_active_flag)
+ goto skip_set_clks;
+
+ /* Maximum for this gateway */
+ for (i = 0; i <= slave->num_pnodes; i++) {
+ if (i == index && (req_clk_hz < curr_clk_hz))
+ continue;
+ slave->pnode[i].sel_clk = &slave->pnode[i].clk[ctx];
+ max_pclk = max(max_pclk, *slave->pnode[i].sel_clk);
+ }
+
+ *slave->link_info.sel_clk =
+ max(max_pclk, max(bwsum_hz, req_clk_hz));
+ /* Is this gateway or slave? */
+ if (clk_flag && (!fabric->ahb)) {
+ struct msm_bus_fabnodeinfo *fabgw = NULL;
+ struct msm_bus_inode_info *info = NULL;
+ /* Maximum of all gateways set at fabric */
+ list_for_each_entry(fabgw, &fabric->gateways, list) {
+ info = fabgw->info;
+ if (!info)
+ continue;
+ info->link_info.sel_clk = &info->link_info.clk[ctx];
+ max_pclk = max(max_pclk, *info->link_info.sel_clk);
+ }
+ MSM_BUS_DBG("max_pclk from gateways: %llu\n", max_pclk);
+
+ /* Maximum of all slave clocks. */
+
+ for (i = 0; i < fabric->pdata->len; i++) {
+ if (fabric->pdata->info[i].gateway ||
+ (fabric->pdata->info[i].id < SLAVE_ID_KEY))
+ continue;
+ info = radix_tree_lookup(&fabric->fab_tree,
+ fabric->pdata->info[i].priv_id);
+ if (!info)
+ continue;
+ info->link_info.sel_clk = &info->link_info.clk[ctx];
+ max_pclk = max(max_pclk, *info->link_info.sel_clk);
+ }
+
+
+ MSM_BUS_DBG("max_pclk from slaves & gws: %llu\n", max_pclk);
+ fabric->info.link_info.sel_clk =
+ &fabric->info.link_info.clk[ctx];
+ pclk = fabric->info.link_info.sel_clk;
+ } else {
+ slave->link_info.sel_clk = &slave->link_info.clk[ctx];
+ pclk = slave->link_info.sel_clk;
+ }
+
+
+ *pclk = max(max_pclk, max(bwsum_hz, req_clk_hz));
+
+ if (!fabric->pdata->rpm_enabled)
+ goto skip_set_clks;
+
+ if (clk_flag) {
+ nodeclk = &fabric->info.nodeclk[ctx];
+ if (nodeclk->clk) {
+ MSM_BUS_DBG("clks: id: %d set-clk: %llu bws_hz:%llu\n",
+ fabric->fabdev.id, *pclk, bwsum_hz);
+ if (nodeclk->rate != *pclk) {
+ nodeclk->dirty = true;
+ nodeclk->rate = *pclk;
+ }
+ fabric->clk_dirty = true;
+ }
+ } else {
+ nodeclk = &slave->nodeclk[ctx];
+ if (nodeclk->clk) {
+ rate = *pclk;
+ MSM_BUS_DBG("clks: id: %d set-clk: %llu bws_hz: %llu\n",
+ slave->node_info->priv_id, rate,
+ bwsum_hz);
+ if (nodeclk->rate != rate) {
+ nodeclk->dirty = true;
+ nodeclk->rate = rate;
+ }
+ }
+ if (!status && slave->memclk[ctx].clk) {
+ rate = *slave->link_info.sel_clk;
+ if (slave->memclk[ctx].rate != rate) {
+ slave->memclk[ctx].rate = rate;
+ slave->memclk[ctx].dirty = true;
+ }
+ slave->memclk[ctx].rate = rate;
+ fabric->clk_dirty = true;
+ }
+ }
+skip_set_clks:
+ return status;
+}
+
+void msm_bus_fabric_update_bw(struct msm_bus_fabric_device *fabdev,
+ struct msm_bus_inode_info *hop, struct msm_bus_inode_info *info,
+ int64_t add_bw, int *master_tiers, int ctx)
+{
+ struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
+ void *sel_cdata;
+ long rounded_rate, cur_rate;
+
+ sel_cdata = fabric->cdata[ctx];
+
+ /* If it's an ahb fabric, don't calculate arb values */
+ if (fabric->ahb) {
+ MSM_BUS_DBG("AHB fabric, skipping bw calculation\n");
+ return;
+ }
+ if (!add_bw) {
+ MSM_BUS_DBG("No bandwidth delta. Skipping commit\n");
+ return;
+ }
+
+ if ((info->node_info->hw_sel != MSM_BUS_RPM) &&
+ fabdev->hw_algo.update_bw_reg(info->node_info->mode)) {
+ /* Enable clocks before accessing QoS registers */
+ if (fabric->info.nodeclk[DUAL_CTX].clk) {
+ if (fabric->info.nodeclk[DUAL_CTX].rate == 0) {
+ cur_rate = clk_get_rate(
+ fabric->info.nodeclk[DUAL_CTX].clk);
+ rounded_rate = clk_round_rate(
+ fabric->info.nodeclk[DUAL_CTX].clk,
+ cur_rate ? cur_rate : 1);
+ if (clk_set_rate(
+ fabric->info.nodeclk[DUAL_CTX].clk,
+ rounded_rate))
+ MSM_BUS_ERR("clk en:Node:%d rate:%ld",
+ fabric->fabdev.id, rounded_rate);
+
+ clk_prepare_enable(
+ fabric->info.nodeclk[DUAL_CTX].clk);
+ }
+ }
+
+ if (info->iface_clk.clk)
+ clk_prepare_enable(info->iface_clk.clk);
+
+ if (hop->iface_clk.clk)
+ clk_prepare_enable(hop->iface_clk.clk);
+ }
+
+ fabdev->hw_algo.update_bw(hop, info, fabric->pdata, sel_cdata,
+ master_tiers, add_bw);
+
+ if ((info->node_info->hw_sel != MSM_BUS_RPM) &&
+ fabdev->hw_algo.update_bw_reg(info->node_info->mode)) {
+ /* Disable clocks after accessing QoS registers */
+ if (fabric->info.nodeclk[DUAL_CTX].clk &&
+ fabric->info.nodeclk[DUAL_CTX].rate == 0)
+ clk_disable_unprepare(
+ fabric->info.nodeclk[DUAL_CTX].clk);
+
+ if (info->iface_clk.clk) {
+ MSM_BUS_DBG("Commented:Will disable clk for info:%d\n",
+ info->node_info->priv_id);
+ clk_disable_unprepare(info->iface_clk.clk);
+ }
+
+ if (hop->iface_clk.clk) {
+ MSM_BUS_DBG("Commented Will disable clk for hop:%d\n",
+ hop->node_info->priv_id);
+ clk_disable_unprepare(hop->iface_clk.clk);
+ }
+ }
+
+ fabric->arb_dirty = true;
+}
+
+static int msm_bus_fabric_clk_set(int enable, struct msm_bus_inode_info *info)
+{
+ int i, status = 0;
+ long rounded_rate;
+
+ for (i = 0; i < NUM_CTX; i++) {
+ if (info->nodeclk[i].dirty) {
+ if (info->nodeclk[i].rate != 0) {
+ rounded_rate = clk_round_rate(info->
+ nodeclk[i].clk, info->nodeclk[i].rate);
+ status = clk_set_rate(info->nodeclk[i].clk,
+ rounded_rate);
+ MSM_BUS_DBG("AXI: node: %d set_rate: %ld\n",
+ info->node_info->id, rounded_rate);
+ }
+
+ if (enable && !(info->nodeclk[i].enable)) {
+ clk_prepare_enable(info->nodeclk[i].clk);
+ info->nodeclk[i].dirty = false;
+ info->nodeclk[i].enable = true;
+ } else if ((info->nodeclk[i].rate == 0) && (!enable)
+ && (info->nodeclk[i].enable)) {
+ clk_disable_unprepare(info->nodeclk[i].clk);
+ info->nodeclk[i].dirty = false;
+ info->nodeclk[i].enable = false;
+ }
+ }
+
+ if (info->memclk[i].dirty) {
+ if (info->nodeclk[i].rate != 0) {
+ rounded_rate = clk_round_rate(info->
+ memclk[i].clk, info->memclk[i].rate);
+ status = clk_set_rate(info->memclk[i].clk,
+ rounded_rate);
+ MSM_BUS_DBG("AXI: node: %d set_rate: %ld\n",
+ info->node_info->id, rounded_rate);
+ }
+
+ if (enable && !(info->memclk[i].enable)) {
+ clk_prepare_enable(info->memclk[i].clk);
+ info->memclk[i].dirty = false;
+ info->memclk[i].enable = true;
+ } else if (info->memclk[i].rate == 0 && (!enable) &&
+ (info->memclk[i].enable)) {
+ clk_disable_unprepare(info->memclk[i].clk);
+ info->memclk[i].dirty = false;
+ info->memclk[i].enable = false;
+ }
+ }
+ }
+
+ return status;
+}
+
+/**
+ * msm_bus_fabric_clk_commit() - Call clock enable and update clock
+ * values.
+*/
+static int msm_bus_fabric_clk_commit(int enable, struct msm_bus_fabric *fabric)
+{
+ unsigned int i, nfound = 0, status = 0;
+ struct msm_bus_inode_info *info[fabric->pdata->nslaves];
+
+ if (fabric->clk_dirty == true)
+ status = msm_bus_fabric_clk_set(enable, &fabric->info);
+
+ if (status)
+ MSM_BUS_WARN("Error setting clocks on fabric: %d\n",
+ fabric->fabdev.id);
+
+ nfound = radix_tree_gang_lookup_tag(&fabric->fab_tree, (void **)&info,
+ fabric->fabdev.id, fabric->pdata->nslaves, CLK_NODE);
+ if (nfound == 0) {
+ MSM_BUS_DBG("No clock nodes found for fabric: %d\n",
+ fabric->fabdev.id);
+ goto out;
+ }
+
+ for (i = 0; i < nfound; i++) {
+ status = msm_bus_fabric_clk_set(enable, info[i]);
+ if (status)
+ MSM_BUS_WARN("Error setting clocks for node: %d\n",
+ info[i]->node_info->id);
+ }
+
+out:
+ return status;
+}
+
+static void msm_bus_fabric_config_limiter(
+ struct msm_bus_fabric_device *fabdev,
+ struct msm_bus_inode_info *info)
+{
+ struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
+ long rounded_rate, cur_rate;
+
+ if (fabdev->hw_algo.config_limiter == NULL)
+ return;
+
+ /* Enable clocks before accessing QoS registers */
+ if (fabric->info.nodeclk[DUAL_CTX].clk) {
+ if (fabric->info.nodeclk[DUAL_CTX].rate == 0) {
+ cur_rate = clk_get_rate(
+ fabric->info.nodeclk[DUAL_CTX].clk);
+ rounded_rate = clk_round_rate(
+ fabric->info.nodeclk[DUAL_CTX].clk,
+ cur_rate ? cur_rate : 1);
+ if (clk_set_rate(fabric->info.nodeclk[DUAL_CTX].clk,
+ rounded_rate))
+ MSM_BUS_ERR("Error: clk: en: Node: %d rate: %ld",
+ fabric->fabdev.id, rounded_rate);
+
+ clk_prepare_enable(fabric->info.nodeclk[DUAL_CTX].clk);
+ }
+ }
+
+ if (info->iface_clk.clk)
+ clk_prepare_enable(info->iface_clk.clk);
+
+ fabdev->hw_algo.config_limiter(fabric->pdata, info);
+
+ /* Disable clocks after accessing QoS registers */
+ if (fabric->info.nodeclk[DUAL_CTX].clk &&
+ fabric->info.nodeclk[DUAL_CTX].rate == 0)
+ clk_disable_unprepare(fabric->info.nodeclk[DUAL_CTX].clk);
+
+ if (info->iface_clk.clk) {
+ MSM_BUS_DBG("Commented: Will disable clock for info: %d\n",
+ info->node_info->priv_id);
+ clk_disable_unprepare(info->iface_clk.clk);
+ }
+}
+
+static void msm_bus_fabric_config_master(
+ struct msm_bus_fabric_device *fabdev,
+ struct msm_bus_inode_info *info, uint64_t req_clk, uint64_t req_bw)
+{
+ struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
+ long rounded_rate, cur_rate;
+
+ if (fabdev->hw_algo.config_master == NULL)
+ return;
+
+ /* Enable clocks before accessing QoS registers */
+ if (fabric->info.nodeclk[DUAL_CTX].clk) {
+ if (fabric->info.nodeclk[DUAL_CTX].rate == 0) {
+ cur_rate = clk_get_rate(
+ fabric->info.nodeclk[DUAL_CTX].clk);
+ rounded_rate = clk_round_rate(
+ fabric->info.nodeclk[DUAL_CTX].clk,
+ cur_rate ? cur_rate : 1);
+ if (clk_set_rate(fabric->info.nodeclk[DUAL_CTX].clk,
+ rounded_rate))
+ MSM_BUS_ERR("Error: clk: en: Node: %d rate: %ld",
+ fabric->fabdev.id, rounded_rate);
+
+ clk_prepare_enable(fabric->info.nodeclk[DUAL_CTX].clk);
+ }
+ }
+
+ if (info->iface_clk.clk)
+ clk_prepare_enable(info->iface_clk.clk);
+
+ fabdev->hw_algo.config_master(fabric->pdata, info, req_clk, req_bw);
+
+ /* Disable clocks after accessing QoS registers */
+ if (fabric->info.nodeclk[DUAL_CTX].clk &&
+ fabric->info.nodeclk[DUAL_CTX].rate == 0)
+ clk_disable_unprepare(fabric->info.nodeclk[DUAL_CTX].clk);
+
+ if (info->iface_clk.clk) {
+ MSM_BUS_DBG("Commented: Will disable clock for info: %d\n",
+ info->node_info->priv_id);
+ clk_disable_unprepare(info->iface_clk.clk);
+ }
+}
+
/**
 * msm_bus_fabric_hw_commit() - Commit the arbitration data to Hardware.
 * @fabdev: Fabric for which the data should be committed
 *
 * Returns 0 on success, otherwise the last non-zero status from the clock
 * commit.  Arbitration data is only sent when the fabric is marked
 * arb_dirty; the clock commit runs unconditionally on both sides of it.
 * */
static int msm_bus_fabric_hw_commit(struct msm_bus_fabric_device *fabdev)
{
	int status = 0;
	struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);

	/*
	 * For a non-zero bandwidth request, clocks should be enabled before
	 * sending the arbitration data to RPM, but should be disabled only
	 * after commiting the data.
	 */
	status = msm_bus_fabric_clk_commit(ENABLE, fabric);
	if (status)
		MSM_BUS_DBG("Error setting clocks on fabric: %d\n",
			fabric->fabdev.id);

	if (!fabric->arb_dirty) {
		MSM_BUS_DBG("Not committing as fabric not arb_dirty\n");
		goto skip_arb;
	}

	status = fabdev->hw_algo.commit(fabric->pdata, fabric->hw_data,
		(void **)fabric->cdata);
	if (status)
		MSM_BUS_DBG("Error committing arb data for fabric: %d\n",
			fabric->fabdev.id);

	fabric->arb_dirty = false;
skip_arb:
	/*
	 * If the bandwidth request is 0 for a fabric, the clocks
	 * should be disabled after arbitration data is committed.
	 */
	status = msm_bus_fabric_clk_commit(DISABLE, fabric);
	if (status)
		MSM_BUS_WARN("Error disabling clocks on fabric: %d\n",
			fabric->fabdev.id);
	fabric->clk_dirty = false;
	return status;
}
+
+/**
+ * msm_bus_fabric_port_halt() - Used to halt a master port
+ * @fabric: Fabric on which the current master node is present
+ * @portid: Port id of the master
+ */
+int msm_bus_fabric_port_halt(struct msm_bus_fabric_device *fabdev, int iid)
+{
+ struct msm_bus_inode_info *info = NULL;
+ uint8_t mport;
+ uint32_t haltid = 0;
+ struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
+
+ info = fabdev->algo->find_node(fabdev, iid);
+ if (!info) {
+ MSM_BUS_ERR("Error: Info not found for id: %u", iid);
+ return -EINVAL;
+ }
+
+ haltid = fabric->pdata->haltid;
+ mport = info->node_info->masterp[0];
+
+ return fabdev->hw_algo.port_halt(haltid, mport);
+}
+
+/**
+ * msm_bus_fabric_port_unhalt() - Used to unhalt a master port
+ * @fabric: Fabric on which the current master node is present
+ * @portid: Port id of the master
+ */
+int msm_bus_fabric_port_unhalt(struct msm_bus_fabric_device *fabdev, int iid)
+{
+ struct msm_bus_inode_info *info = NULL;
+ uint8_t mport;
+ uint32_t haltid = 0;
+ struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
+
+ info = fabdev->algo->find_node(fabdev, iid);
+ if (!info) {
+ MSM_BUS_ERR("Error: Info not found for id: %u", iid);
+ return -EINVAL;
+ }
+
+ haltid = fabric->pdata->haltid;
+ mport = info->node_info->masterp[0];
+ return fabdev->hw_algo.port_unhalt(haltid, mport);
+}
+
+/**
+ * msm_bus_fabric_find_gw_node() - This function finds the gateway node
+ * attached on a given fabric
+ * @id: ID of the gateway node
+ * @fabric: Fabric to find the gateway node on
+ * Function returns: Pointer to the gateway node
+ */
+static struct msm_bus_inode_info *msm_bus_fabric_find_gw_node(struct
+ msm_bus_fabric_device * fabdev, int id)
+{
+ struct msm_bus_inode_info *info = NULL;
+ struct msm_bus_fabnodeinfo *fab;
+ struct msm_bus_fabric *fabric;
+ if (!fabdev) {
+ MSM_BUS_ERR("No fabric device found!\n");
+ return NULL;
+ }
+
+ fabric = to_msm_bus_fabric(fabdev);
+ if (!fabric || IS_ERR(fabric)) {
+ MSM_BUS_ERR("No fabric type found!\n");
+ return NULL;
+ }
+ list_for_each_entry(fab, &fabric->gateways, list) {
+ if (fab->info->node_info->priv_id == id) {
+ info = fab->info;
+ break;
+ }
+ }
+
+ return info;
+}
+
+static struct msm_bus_inode_info *msm_bus_fabric_find_node(struct
+ msm_bus_fabric_device * fabdev, int id)
+{
+ struct msm_bus_inode_info *info = NULL;
+ struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
+ info = radix_tree_lookup(&fabric->fab_tree, id);
+ if (!info)
+ MSM_BUS_ERR("Null info found for id %d\n", id);
+ return info;
+}
+
+static struct list_head *msm_bus_fabric_get_gw_list(struct msm_bus_fabric_device
+ *fabdev)
+{
+ struct msm_bus_fabric *fabric = to_msm_bus_fabric(fabdev);
+ if (!fabric || IS_ERR(fabric)) {
+ MSM_BUS_ERR("No fabric found from fabdev\n");
+ return NULL;
+ }
+ return &fabric->gateways;
+
+}
/* Fabric algorithm ops table installed on every fabric device at probe. */
static struct msm_bus_fab_algorithm msm_bus_algo = {
	.update_clks = msm_bus_fabric_update_clks,
	.update_bw = msm_bus_fabric_update_bw,
	.port_halt = msm_bus_fabric_port_halt,
	.port_unhalt = msm_bus_fabric_port_unhalt,
	.commit = msm_bus_fabric_hw_commit,
	.find_node = msm_bus_fabric_find_node,
	.find_gw_node = msm_bus_fabric_find_gw_node,
	.get_gw_list = msm_bus_fabric_get_gw_list,
	.config_master = msm_bus_fabric_config_master,
	.config_limiter = msm_bus_fabric_config_limiter,
};
+
+static int msm_bus_fabric_hw_init(struct msm_bus_fabric_registration *pdata,
+ struct msm_bus_hw_algorithm *hw_algo)
+{
+ int ret = 0;
+
+ switch (pdata->hw_sel) {
+ case MSM_BUS_NOC:
+ msm_bus_noc_hw_init(pdata, hw_algo);
+ break;
+ case MSM_BUS_BIMC:
+ msm_bus_bimc_hw_init(pdata, hw_algo);
+ break;
+ default:
+ ret = msm_bus_rpm_hw_init(pdata, hw_algo);
+ if (ret) {
+ MSM_BUS_ERR("RPM initialization failed\n");
+ ret = -EINVAL;
+ }
+ break;
+ }
+ return ret;
+}
+
+static int msm_bus_fabric_probe(struct platform_device *pdev)
+{
+ int ctx, ret = 0;
+ struct msm_bus_fabric *fabric;
+ struct msm_bus_fabric_registration *pdata;
+
+ fabric = kzalloc(sizeof(struct msm_bus_fabric), GFP_KERNEL);
+ if (!fabric) {
+ MSM_BUS_ERR("Fabric alloc failed\n");
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&fabric->gateways);
+ INIT_RADIX_TREE(&fabric->fab_tree, GFP_ATOMIC);
+ fabric->num_nodes = 0;
+ fabric->fabdev.visited = false;
+
+ fabric->info.node_info = kzalloc(sizeof(struct msm_bus_node_info),
+ GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(fabric->info.node_info)) {
+ MSM_BUS_ERR("Fabric node info alloc failed\n");
+ kfree(fabric);
+ return -ENOMEM;
+ }
+
+ fabric->info.num_pnodes = -1;
+ fabric->info.link_info.clk[DUAL_CTX] = 0;
+ fabric->info.link_info.bw[DUAL_CTX] = 0;
+ fabric->info.link_info.clk[ACTIVE_CTX] = 0;
+ fabric->info.link_info.bw[ACTIVE_CTX] = 0;
+
+ /* If possible, get pdata from device-tree */
+ if (pdev->dev.of_node) {
+ pdata = msm_bus_of_get_fab_data(pdev);
+ if (IS_ERR(pdata) || ZERO_OR_NULL_PTR(pdata)) {
+ pr_err("Null platform data\n");
+ kfree(fabric->info.node_info);
+ kfree(fabric);
+ return PTR_ERR(pdata);
+ }
+ msm_bus_board_init(pdata);
+ fabric->fabdev.id = pdata->id;
+ msm_bus_of_get_nfab(pdev, pdata);
+ } else {
+ pdata = (struct msm_bus_fabric_registration *)pdev->
+ dev.platform_data;
+ fabric->fabdev.id = pdev->id;
+ }
+
+ fabric->fabdev.name = pdata->name;
+ fabric->fabdev.nr_lim_thresh = pdata->nr_lim_thresh;
+ fabric->fabdev.eff_fact = pdata->eff_fact;
+ fabric->fabdev.algo = &msm_bus_algo;
+ fabric->info.node_info->priv_id = fabric->fabdev.id;
+ fabric->info.node_info->id = fabric->fabdev.id;
+ ret = msm_bus_fabric_hw_init(pdata, &fabric->fabdev.hw_algo);
+ if (ret) {
+ MSM_BUS_ERR("Error initializing hardware for fabric: %d\n",
+ fabric->fabdev.id);
+ goto err;
+ }
+
+ fabric->ahb = pdata->ahb;
+ fabric->pdata = pdata;
+ fabric->pdata->board_algo->assign_iids(fabric->pdata,
+ fabric->fabdev.id);
+ fabric->fabdev.board_algo = fabric->pdata->board_algo;
+
+ /*
+ * clk and bw for fabric->info will contain the max bw and clk
+ * it will allow. This info will come from the boards file.
+ */
+ ret = msm_bus_fabric_device_register(&fabric->fabdev);
+ if (ret) {
+ MSM_BUS_ERR("Error registering fabric %d ret %d\n",
+ fabric->fabdev.id, ret);
+ goto err;
+ }
+
+ for (ctx = 0; ctx < NUM_CTX; ctx++) {
+ if (pdata->fabclk[ctx]) {
+ fabric->info.nodeclk[ctx].clk = clk_get(
+ &fabric->fabdev.dev, pdata->fabclk[ctx]);
+ if (IS_ERR(fabric->info.nodeclk[ctx].clk)) {
+ MSM_BUS_ERR("Couldn't get clock %s\n",
+ pdata->fabclk[ctx]);
+ ret = -EINVAL;
+ goto err;
+ }
+ fabric->info.nodeclk[ctx].enable = false;
+ fabric->info.nodeclk[ctx].dirty = false;
+ }
+ }
+
+ /* Find num. of slaves, masters, populate gateways, radix tree */
+ ret = register_fabric_info(pdev, fabric);
+ if (ret) {
+ MSM_BUS_ERR("Could not register fabric %d info, ret: %d\n",
+ fabric->fabdev.id, ret);
+ goto err;
+ }
+ if (!fabric->ahb) {
+ /* Allocate memory for commit data */
+ for (ctx = 0; ctx < NUM_CTX; ctx++) {
+ ret = fabric->fabdev.hw_algo.allocate_commit_data(
+ fabric->pdata, &fabric->cdata[ctx], ctx);
+ if (ret) {
+ MSM_BUS_ERR("Failed to alloc commit data for "
+ "fab: %d, ret = %d\n",
+ fabric->fabdev.id, ret);
+ goto err;
+ }
+ }
+ }
+
+ if (msmbus_coresight_init(pdev))
+ pr_warn("Coresight support absent for bus: %d\n", pdata->id);
+
+ return ret;
+err:
+ kfree(fabric->info.node_info);
+ kfree(fabric);
+ return ret;
+}
+
/*
 * Platform remove: tears down coresight, unregisters the fabric device,
 * empties the node radix tree and frees commit data and fabric memory.
 * Always returns 0.
 */
static int msm_bus_fabric_remove(struct platform_device *pdev)
{
	struct msm_bus_fabric_device *fabdev = NULL;
	struct msm_bus_fabric *fabric;
	int i;
	int ret = 0;

	fabdev = platform_get_drvdata(pdev);
	msmbus_coresight_remove(pdev);
	msm_bus_fabric_device_unregister(fabdev);
	fabric = to_msm_bus_fabric(fabdev);
	msm_bus_dbg_commit_data(fabric->fabdev.name, NULL, 0, 0, 0,
		MSM_BUS_DBG_UNREGISTER);
	/* Masters are keyed from fabdev.id upward */
	for (i = 0; i < fabric->pdata->nmasters; i++)
		radix_tree_delete(&fabric->fab_tree, fabric->fabdev.id + i);
	/*
	 * NOTE(review): the loop bound compares an absolute key
	 * (fabdev.id + SLAVE_ID_KEY + i) against the slave *count*
	 * (nslaves); this looks like it should be
	 * i < fabdev.id + SLAVE_ID_KEY + nslaves — confirm against the
	 * keying used at insertion time.
	 */
	for (i = (fabric->fabdev.id + SLAVE_ID_KEY); i <
		fabric->pdata->nslaves; i++)
		radix_tree_delete(&fabric->fab_tree, i);
	if (!fabric->ahb) {
		fabdev->hw_algo.free_commit_data(fabric->cdata[DUAL_CTX]);
		fabdev->hw_algo.free_commit_data(fabric->cdata[ACTIVE_CTX]);
	}

	kfree(fabric->info.node_info);
	kfree(fabric->hw_data);
	kfree(fabric);
	return ret;
}
+
+static struct of_device_id fabric_match[] = {
+ {.compatible = "msm-bus-fabric"},
+ {}
+};
+
/* Platform driver binding either by name or via the DT match table. */
static struct platform_driver msm_bus_fabric_driver = {
	.probe = msm_bus_fabric_probe,
	.remove = msm_bus_fabric_remove,
	.driver = {
		.name = "msm_bus_fabric",
		.owner = THIS_MODULE,
		.of_match_table = fabric_match,
	},
};
+
+int __init msm_bus_fabric_init_driver(void)
+{
+ static bool initialized;
+
+ if (initialized)
+ return 0;
+ else
+ initialized = true;
+
+ MSM_BUS_ERR("msm_bus_fabric_init_driver\n");
+ msm_bus_arb_setops_legacy(&arb_ops);
+ return platform_driver_register(&msm_bus_fabric_driver);
+}
+EXPORT_SYMBOL(msm_bus_fabric_init_driver);
+subsys_initcall(msm_bus_fabric_init_driver);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c
new file mode 100644
index 0000000000000..eb369aa8df764
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c
@@ -0,0 +1,1262 @@
+/* Copyright (c) 2014, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smd-rpm.h>
+#include "msm_bus_core.h"
+#include "msm_bus_adhoc.h"
+#include "msm_bus_noc.h"
+#include "msm_bus_bimc.h"
+
/* Container for the statically registered bus rules. */
struct static_rules_type {
	int num_rules;			/* number of entries in @rules */
	struct bus_rule_type *rules;	/* rule array, owned elsewhere */
};

/* File-scope store for rules registered before runtime rule updates. */
static struct static_rules_type static_rules;
+
+static int enable_nodeclk(struct nodeclk *nclk)
+{
+ int ret = 0;
+
+ if (!nclk->enable) {
+ ret = clk_prepare_enable(nclk->clk);
+
+ if (ret) {
+ MSM_BUS_ERR("%s: failed to enable clk ", __func__);
+ nclk->enable = false;
+ } else
+ nclk->enable = true;
+ }
+ return ret;
+}
+
+static int disable_nodeclk(struct nodeclk *nclk)
+{
+ int ret = 0;
+
+ if (nclk->enable) {
+ clk_disable_unprepare(nclk->clk);
+ nclk->enable = false;
+ }
+ return ret;
+}
+
+static int setrate_nodeclk(struct nodeclk *nclk, long rate)
+{
+ int ret = 0;
+
+ ret = clk_set_rate(nclk->clk, rate);
+
+ if (ret)
+ MSM_BUS_ERR("%s: failed to setrate clk", __func__);
+ return ret;
+}
+
+static int msm_bus_agg_fab_clks(struct device *bus_dev, void *data)
+{
+ struct msm_bus_node_device_type *node = NULL;
+ int ret = 0;
+ int ctx = *(int *)data;
+
+ if (ctx >= NUM_CTX) {
+ MSM_BUS_ERR("%s: Invalid Context %d", __func__, ctx);
+ goto exit_agg_fab_clks;
+ }
+
+ node = bus_dev->platform_data;
+ if (!node) {
+ MSM_BUS_ERR("%s: Can't get device info", __func__);
+ goto exit_agg_fab_clks;
+ }
+
+ if (!node->node_info->is_fab_dev) {
+ struct msm_bus_node_device_type *bus_dev = NULL;
+
+ bus_dev = node->node_info->bus_device->platform_data;
+
+ if (node->cur_clk_hz[ctx] >= bus_dev->cur_clk_hz[ctx])
+ bus_dev->cur_clk_hz[ctx] = node->cur_clk_hz[ctx];
+ }
+
+exit_agg_fab_clks:
+ return ret;
+}
+
+static int msm_bus_reset_fab_clks(struct device *bus_dev, void *data)
+{
+ struct msm_bus_node_device_type *node = NULL;
+ int ret = 0;
+ int ctx = *(int *)data;
+
+ if (ctx >= NUM_CTX) {
+ MSM_BUS_ERR("%s: Invalid Context %d", __func__, ctx);
+ goto exit_reset_fab_clks;
+ }
+
+ node = bus_dev->platform_data;
+ if (!node) {
+ MSM_BUS_ERR("%s: Can't get device info", __func__);
+ goto exit_reset_fab_clks;
+ }
+
+ if (node->node_info->is_fab_dev) {
+ node->cur_clk_hz[ctx] = 0;
+ MSM_BUS_DBG("Resetting for node %d", node->node_info->id);
+ }
+exit_reset_fab_clks:
+ return ret;
+}
+
+
/*
 * Send this node's aggregated bandwidth votes to the RPM over SMD, once
 * per RPM context (active then sleep), and once per valid master/slave
 * RPM id.  Returns 0 on success or the first RPM/lookup error.
 */
static int send_rpm_msg(struct device *device)
{
	int ret = 0;
	int ctx;
	int rsc_type;
	struct msm_bus_node_device_type *ndev =
		device->platform_data;
	struct qcom_msm_bus_req req = {
		.nbytes = sizeof(uint64_t),
		.key = RPM_MASTER_FIELD_BW,
	};

	if (!ndev) {
		MSM_BUS_ERR("%s: Error getting node info.", __func__);
		ret = -ENODEV;
		goto exit_send_rpm_msg;
	}

	for (ctx = QCOM_SMD_RPM_ACTIVE_STATE; ctx <= QCOM_SMD_RPM_SLEEP_STATE;
		ctx++) {
		/* Pick the ab vote matching the RPM context being sent */
		if (ctx == QCOM_SMD_RPM_ACTIVE_STATE)
			req.value =
				ndev->node_ab.ab[QCOM_SMD_RPM_ACTIVE_STATE];
		else {
			req.value =
				ndev->node_ab.ab[QCOM_SMD_RPM_SLEEP_STATE];
		}

		/* -1 means the node has no master-side RPM resource */
		if (ndev->node_info->mas_rpm_id != -1) {
			rsc_type = RPM_BUS_MASTER_REQ;
			ret = qcom_rpm_bus_send_message(ctx, rsc_type,
				ndev->node_info->mas_rpm_id, &req);
			if (ret) {
				MSM_BUS_ERR("%s: Failed to send RPM message:",
						__func__);
				MSM_BUS_ERR("%s:Node Id %d RPM id %d",
					__func__, ndev->node_info->id,
					ndev->node_info->mas_rpm_id);
				goto exit_send_rpm_msg;
			}
		}

		/* -1 means the node has no slave-side RPM resource */
		if (ndev->node_info->slv_rpm_id != -1) {
			rsc_type = RPM_BUS_SLAVE_REQ;
			ret = qcom_rpm_bus_send_message(ctx, rsc_type,
				ndev->node_info->slv_rpm_id, &req);
			if (ret) {
				MSM_BUS_ERR("%s: Failed to send RPM message:",
						__func__);
				MSM_BUS_ERR("%s: Node Id %d RPM id %d",
					__func__, ndev->node_info->id,
					ndev->node_info->slv_rpm_id);
				goto exit_send_rpm_msg;
			}
		}
	}
exit_send_rpm_msg:
	return ret;
}
+
+static int flush_bw_data(struct device *node_device, int ctx)
+{
+ struct msm_bus_node_device_type *node_info;
+ int ret = 0;
+
+ node_info = node_device->platform_data;
+ if (!node_info) {
+ MSM_BUS_ERR("%s: Unable to find bus device for device %d",
+ __func__, node_info->node_info->id);
+ ret = -ENODEV;
+ goto exit_flush_bw_data;
+ }
+
+ if (node_info->node_ab.dirty) {
+ if (node_info->ap_owned) {
+ struct msm_bus_node_device_type *bus_device =
+ node_info->node_info->bus_device->platform_data;
+ struct msm_bus_fab_device_type *fabdev =
+ bus_device->fabdev;
+
+ if (fabdev && fabdev->noc_ops.update_bw_reg &&
+ fabdev->noc_ops.update_bw_reg
+ (node_info->node_info->qos_params.mode))
+ ret = fabdev->noc_ops.set_bw(node_info,
+ fabdev->qos_base,
+ fabdev->base_offset,
+ fabdev->qos_off,
+ fabdev->qos_freq);
+ } else {
+ ret = send_rpm_msg(node_device);
+
+ if (ret)
+ MSM_BUS_ERR("%s: Failed to send RPM msg for%d",
+ __func__, node_info->node_info->id);
+ }
+ node_info->node_ab.dirty = false;
+ }
+
+exit_flush_bw_data:
+ return ret;
+
+}
+
/*
 * Push a node's dirty clock request to the clock framework for context
 * @ctx.  A non-zero rate is rounded, set and enabled (plus the fabric's
 * QoS clock); a zero rate disables both.  Fabric devices additionally
 * have their aggregated cur_clk_hz folded into the nodeclk first and
 * reset to 0 on exit.  Returns 0 or -ENODEV on any clk failure.
 */
static int flush_clk_data(struct device *node_device, int ctx)
{
	struct msm_bus_node_device_type *node;
	struct nodeclk *nodeclk = NULL;
	int ret = 0;

	node = node_device->platform_data;
	if (!node) {
		MSM_BUS_ERR("Unable to find bus device");
		ret = -ENODEV;
		goto exit_flush_clk_data;
	}

	nodeclk = &node->clk[ctx];
	/* Fabric devices: adopt the aggregated rate computed earlier */
	if (node->node_info->is_fab_dev) {
		if (nodeclk->rate != node->cur_clk_hz[ctx]) {
			nodeclk->rate = node->cur_clk_hz[ctx];
			nodeclk->dirty = true;
		}
	}

	if (nodeclk && nodeclk->clk && nodeclk->dirty) {
		long rounded_rate;

		if (nodeclk->rate) {
			/* Non-zero request: round, set, then enable */
			rounded_rate = clk_round_rate(nodeclk->clk,
				nodeclk->rate);
			ret = setrate_nodeclk(nodeclk, rounded_rate);

			if (ret) {
				MSM_BUS_ERR("%s: Failed to set_rate %lu for %d",
					__func__, rounded_rate,
						node->node_info->id);
				ret = -ENODEV;
				goto exit_flush_clk_data;
			}

			ret = enable_nodeclk(nodeclk);

			if ((node->node_info->is_fab_dev) &&
				!IS_ERR_OR_NULL(node->qos_clk.clk))
				ret = enable_nodeclk(&node->qos_clk);
		} else {
			/* Zero request: drop QoS clock first, then node clk */
			if ((node->node_info->is_fab_dev) &&
				!IS_ERR_OR_NULL(node->qos_clk.clk))
				ret = disable_nodeclk(&node->qos_clk);

			ret = disable_nodeclk(nodeclk);
		}

		if (ret) {
			MSM_BUS_ERR("%s: Failed to enable for %d", __func__,
				node->node_info->id);
			ret = -ENODEV;
			goto exit_flush_clk_data;
		}
		MSM_BUS_DBG("%s: Updated %d clk to %llu", __func__,
				node->node_info->id, nodeclk->rate);

	}
exit_flush_clk_data:
	/* Reset the aggregated clock rate for fab devices*/
	if (node && node->node_info->is_fab_dev)
		node->cur_clk_hz[ctx] = 0;

	if (nodeclk)
		nodeclk->dirty = 0;
	return ret;
}
+
/*
 * Commit pending bandwidth and clock requests for every node listed in
 * @dirty_nodes (length @num_dirty) under context @ctx.
 *
 * Takes ownership of @dirty_nodes and kfree()s it before returning.
 * Fabric clocks are aggregated across the whole bus before flushing and
 * reset afterwards.  Returns the status of the last flush attempted
 * (earlier per-node failures are logged but not sticky).
 */
int msm_bus_commit_data(int *dirty_nodes, int ctx, int num_dirty)
{
	int ret = 0;
	int i = 0;

	/* Aggregate the bus clocks */
	bus_for_each_dev(&msm_bus_type, NULL, (void *)&ctx,
				msm_bus_agg_fab_clks);

	for (i = 0; i < num_dirty; i++) {
		struct device *node_device =
			bus_find_device(&msm_bus_type, NULL,
				(void *)&dirty_nodes[i],
				msm_bus_device_match_adhoc);

		if (!node_device) {
			MSM_BUS_ERR("Can't find device for %d", dirty_nodes[i]);
			continue;
		}

		ret = flush_bw_data(node_device, ctx);
		if (ret)
			MSM_BUS_ERR("%s: Error flushing bw data for node %d",
					__func__, dirty_nodes[i]);

		ret = flush_clk_data(node_device, ctx);
		if (ret)
			MSM_BUS_ERR("%s: Error flushing clk data for node %d",
					__func__, dirty_nodes[i]);
	}
	kfree(dirty_nodes);
	/* Aggregate the bus clocks */
	bus_for_each_dev(&msm_bus_type, NULL, (void *)&ctx,
				msm_bus_reset_fab_clks);
	return ret;
}
+
+void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size,
+ size_t new_size, gfp_t flags)
+{
+ void *ret;
+ size_t copy_size = old_size;
+
+ if (!new_size) {
+ devm_kfree(dev, p);
+ return ZERO_SIZE_PTR;
+ }
+
+ if (new_size < old_size)
+ copy_size = new_size;
+
+ ret = devm_kzalloc(dev, new_size, flags);
+ if (!ret) {
+ MSM_BUS_ERR("%s: Error Reallocating memory", __func__);
+ goto exit_realloc_devmem;
+ }
+
+ memcpy(ret, p, copy_size);
+ devm_kfree(dev, p);
+exit_realloc_devmem:
+ return ret;
+}
+
+
+static int add_dirty_node(int **dirty_nodes, int id, int *num_dirty)
+{
+ int i;
+ int found = 0;
+ int ret = 0;
+ int *dnode = NULL;
+
+ for (i = 0; i < *num_dirty; i++) {
+ if ((*dirty_nodes)[i] == id) {
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ (*num_dirty)++;
+ dnode =
+ krealloc(*dirty_nodes, sizeof(int) * (*num_dirty),
+ GFP_KERNEL);
+
+ if (ZERO_OR_NULL_PTR(dnode)) {
+ MSM_BUS_ERR("%s: Failure allocating dirty nodes array",
+ __func__);
+ ret = -ENOMEM;
+ } else {
+ *dirty_nodes = dnode;
+ (*dirty_nodes)[(*num_dirty) - 1] = id;
+ }
+ }
+
+ return ret;
+}
+
/*
 * Recompute a node's total ab (average bandwidth) votes from its lnode
 * list and, if the active-context total changed, store the new votes
 * and mark the node dirty.  Virtual devices are skipped.  Returns 0 or
 * the error from add_dirty_node().  @add_bw and @ctx are currently not
 * used in the aggregation itself.
 */
int msm_bus_update_bw(struct msm_bus_node_device_type *nodedev, int ctx,
				int64_t add_bw, int **dirty_nodes, int *num_dirty)
{
	int ret = 0;
	int i, j;
	uint64_t cur_ab_slp = 0;
	uint64_t cur_ab_act = 0;

	if (nodedev->node_info->virt_dev)
		goto exit_update_bw;

	for (i = 0; i < NUM_CTX; i++) {
		for (j = 0; j < nodedev->num_lnodes; j++) {
			/* DUAL_CTX votes count toward both active and sleep */
			if (i == DUAL_CTX) {
				cur_ab_act +=
					nodedev->lnode_list[j].lnode_ab[i];
				cur_ab_slp +=
					nodedev->lnode_list[j].lnode_ab[i];
			} else
				cur_ab_act +=
					nodedev->lnode_list[j].lnode_ab[i];
		}
	}

	if (nodedev->node_ab.ab[QCOM_SMD_RPM_ACTIVE_STATE] != cur_ab_act) {
		nodedev->node_ab.ab[QCOM_SMD_RPM_ACTIVE_STATE] = cur_ab_act;
		nodedev->node_ab.ab[QCOM_SMD_RPM_SLEEP_STATE] = cur_ab_slp;
		nodedev->node_ab.dirty = true;
		ret = add_dirty_node(dirty_nodes, nodedev->node_info->id,
								num_dirty);

		if (ret) {
			MSM_BUS_ERR("%s: Failed to add dirty node %d", __func__,
						nodedev->node_info->id);
			goto exit_update_bw;
		}
	}

exit_update_bw:
	return ret;
}
+
/*
 * Propagate a node's requested clock (cur_clk_hz[ctx]) into both its
 * parent bus clock and its own node clock, marking each dirty and
 * adding it to the dirty list when the request differs.  The node clock
 * is only raised, never lowered, while already dirty in this pass.
 * Returns 0 or -ENODEV / the add_dirty_node() error.
 */
int msm_bus_update_clks(struct msm_bus_node_device_type *nodedev,
		int ctx, int **dirty_nodes, int *num_dirty)
{
	int status = 0;
	struct nodeclk *nodeclk;
	struct nodeclk *busclk;
	struct msm_bus_node_device_type *bus_info = NULL;
	uint64_t req_clk;

	bus_info = nodedev->node_info->bus_device->platform_data;

	if (!bus_info) {
		MSM_BUS_ERR("%s: Unable to find bus device for device %d",
			__func__, nodedev->node_info->id);
		status = -ENODEV;
		goto exit_set_clks;
	}

	req_clk = nodedev->cur_clk_hz[ctx];
	busclk = &bus_info->clk[ctx];

	/* Parent bus clock follows the node's request exactly */
	if (busclk->rate != req_clk) {
		busclk->rate = req_clk;
		busclk->dirty = 1;
		MSM_BUS_DBG("%s: Modifying bus clk %d Rate %llu", __func__,
			bus_info->node_info->id, req_clk);
		status = add_dirty_node(dirty_nodes, bus_info->node_info->id,
					num_dirty);

		if (status) {
			MSM_BUS_ERR("%s: Failed to add dirty node %d", __func__,
				bus_info->node_info->id);
			goto exit_set_clks;
		}
	}

	req_clk = nodedev->cur_clk_hz[ctx];
	nodeclk = &nodedev->clk[ctx];

	if (IS_ERR_OR_NULL(nodeclk))
		goto exit_set_clks;

	/* Node clock: take the request unless a higher dirty one exists */
	if (!nodeclk->dirty || (nodeclk->dirty && (nodeclk->rate < req_clk))) {
		nodeclk->rate = req_clk;
		nodeclk->dirty = 1;
		MSM_BUS_DBG("%s: Modifying node clk %d Rate %llu", __func__,
					nodedev->node_info->id, req_clk);
		status = add_dirty_node(dirty_nodes, nodedev->node_info->id,
						num_dirty);
		if (status) {
			MSM_BUS_ERR("%s: Failed to add dirty node %d", __func__,
						nodedev->node_info->id);
			goto exit_set_clks;
		}
	}

exit_set_clks:
	return status;
}
+
+static void msm_bus_fab_init_noc_ops(struct msm_bus_node_device_type *bus_dev)
+{
+ switch (bus_dev->fabdev->bus_type) {
+ case MSM_BUS_NOC:
+ msm_bus_noc_set_ops(bus_dev);
+ break;
+ case MSM_BUS_BIMC:
+ msm_bus_bimc_set_ops(bus_dev);
+ break;
+ default:
+ MSM_BUS_ERR("%s: Invalid Bus type", __func__);
+ }
+}
+
/*
 * Undo msm_bus_qos_enable_clk(): optionally drop the parent bus's
 * DUAL_CTX clock (when @disable_bus_qos_clk is non-zero, i.e. the
 * enable call actually turned it on) and always drop the node's own
 * QoS clock when one exists.  Returns 0 or -ENXIO / the clk error.
 */
static int msm_bus_qos_disable_clk(struct msm_bus_node_device_type *node,
				int disable_bus_qos_clk)
{
	struct msm_bus_node_device_type *bus_node = NULL;
	int ret = 0;

	if (!node) {
		ret = -ENXIO;
		goto exit_disable_qos_clk;
	}

	bus_node = node->node_info->bus_device->platform_data;

	if (!bus_node) {
		ret = -ENXIO;
		goto exit_disable_qos_clk;
	}

	if (disable_bus_qos_clk)
		ret = disable_nodeclk(&bus_node->clk[DUAL_CTX]);

	if (ret) {
		MSM_BUS_ERR("%s: Failed to disable bus clk, node %d",
			__func__, node->node_info->id);
		goto exit_disable_qos_clk;
	}

	if (!IS_ERR_OR_NULL(node->qos_clk.clk)) {
		ret = disable_nodeclk(&node->qos_clk);

		if (ret) {
			MSM_BUS_ERR("%s: Failed to disable mas qos clk,node %d",
				__func__, node->node_info->id);
			goto exit_disable_qos_clk;
		}
	}

exit_disable_qos_clk:
	return ret;
}
+
/*
 * Turn on the clocks needed to touch a node's QoS registers: the parent
 * bus's DUAL_CTX clock (rate-initialized first if still 0), the bus's
 * QoS clock, and the node's own QoS clock when present.
 *
 * Returns a negative errno on failure, otherwise 1 when the bus clock
 * was enabled here (so the caller passes it to msm_bus_qos_disable_clk()
 * as disable_bus_qos_clk).
 */
static int msm_bus_qos_enable_clk(struct msm_bus_node_device_type *node)
{
	struct msm_bus_node_device_type *bus_node = NULL;
	long rounded_rate;
	int ret = 0;
	int bus_qos_enabled = 0;

	if (!node) {
		ret = -ENXIO;
		goto exit_enable_qos_clk;
	}

	bus_node = node->node_info->bus_device->platform_data;

	if (!bus_node) {
		ret = -ENXIO;
		goto exit_enable_qos_clk;
	}

	/* Check if the bus clk is already set before trying to set it
	 * Do this only during
	 * a. Bootup
	 * b. Only for bus clks
	 **/
	if (!clk_get_rate(bus_node->clk[DUAL_CTX].clk)) {
		rounded_rate = clk_round_rate(bus_node->clk[DUAL_CTX].clk, 1);
		ret = setrate_nodeclk(&bus_node->clk[DUAL_CTX], rounded_rate);
		if (ret) {
			MSM_BUS_ERR("%s: Failed to set bus clk, node %d",
				__func__, node->node_info->id);
			goto exit_enable_qos_clk;
		}
	}

	ret = enable_nodeclk(&bus_node->clk[DUAL_CTX]);
	if (ret) {
		MSM_BUS_ERR("%s: Failed to enable bus clk, node %d",
			__func__, node->node_info->id);
		goto exit_enable_qos_clk;
	}
	bus_qos_enabled = 1;

	if (!IS_ERR_OR_NULL(bus_node->qos_clk.clk)) {
		ret = enable_nodeclk(&bus_node->qos_clk);
		if (ret) {
			MSM_BUS_ERR("%s: Failed to enable bus QOS clk, node %d",
				__func__, node->node_info->id);
			goto exit_enable_qos_clk;
		}
	}

	if (!IS_ERR_OR_NULL(node->qos_clk.clk)) {
		/* Master QoS clock also needs a valid rate before enable */
		rounded_rate = clk_round_rate(node->qos_clk.clk, 1);
		ret = setrate_nodeclk(&node->qos_clk, rounded_rate);
		if (ret) {
			MSM_BUS_ERR("%s: Failed to enable mas qos clk, node %d",
				__func__, node->node_info->id);
			goto exit_enable_qos_clk;
		}

		ret = enable_nodeclk(&node->qos_clk);
		if (ret) {
			MSM_BUS_ERR("Err enable mas qos clk, node %d ret %d",
				node->node_info->id, ret);
			goto exit_enable_qos_clk;
		}
	}
	ret = bus_qos_enabled;

exit_enable_qos_clk:
	return ret;
}
+
/*
 * Enable or disable the bandwidth limiter on an AP-owned master port,
 * capping it to @lim_bw via the fabric's limit_mport op.  Returns 0 on
 * success or -ENXIO when the node/bus device is missing or not AP-owned.
 */
int msm_bus_enable_limiter(struct msm_bus_node_device_type *node_dev,
				bool enable, uint64_t lim_bw)
{
	int ret = 0;
	struct msm_bus_node_device_type *bus_node_dev;

	if (!node_dev) {
		MSM_BUS_ERR("No device specified");
		ret = -ENXIO;
		goto exit_enable_limiter;
	}

	if (!node_dev->ap_owned) {
		MSM_BUS_ERR("Device is not AP owned %d.",
			node_dev->node_info->id);
		ret = -ENXIO;
		goto exit_enable_limiter;
	}

	bus_node_dev = node_dev->node_info->bus_device->platform_data;
	if (!bus_node_dev) {
		MSM_BUS_ERR("Unable to get bus device infofor %d",
			node_dev->node_info->id);
		ret = -ENXIO;
		goto exit_enable_limiter;
	}
	if (bus_node_dev->fabdev &&
		bus_node_dev->fabdev->noc_ops.limit_mport) {
		/*
		 * NOTE(review): ret is always 0 here so this branch is
		 * dead — it looks like a preceding
		 * "ret = msm_bus_qos_enable_clk(node_dev);" was dropped
		 * (compare msm_bus_dev_init_qos).  Confirm and restore.
		 */
		if (ret < 0) {
			MSM_BUS_ERR("Can't Enable QoS clk %d",
				node_dev->node_info->id);
			goto exit_enable_limiter;
		}
		bus_node_dev->fabdev->noc_ops.limit_mport(
				node_dev,
				bus_node_dev->fabdev->qos_base,
				bus_node_dev->fabdev->base_offset,
				bus_node_dev->fabdev->qos_off,
				bus_node_dev->fabdev->qos_freq,
				enable, lim_bw);
	}

exit_enable_limiter:
	return ret;
}
+
+static int msm_bus_dev_init_qos(struct device *dev, void *data)
+{
+ int ret = 0;
+ struct msm_bus_node_device_type *node_dev = NULL;
+
+ node_dev = dev->platform_data;
+
+ if (!node_dev) {
+ MSM_BUS_ERR("%s: Unable to get node device info" , __func__);
+ ret = -ENXIO;
+ goto exit_init_qos;
+ }
+
+ MSM_BUS_DBG("Device = %d", node_dev->node_info->id);
+
+ if (node_dev->ap_owned) {
+ struct msm_bus_node_device_type *bus_node_info;
+
+ bus_node_info = node_dev->node_info->bus_device->platform_data;
+
+ if (!bus_node_info) {
+ MSM_BUS_ERR("%s: Unable to get bus device infofor %d",
+ __func__,
+ node_dev->node_info->id);
+ ret = -ENXIO;
+ goto exit_init_qos;
+ }
+
+ if (bus_node_info->fabdev &&
+ bus_node_info->fabdev->noc_ops.qos_init) {
+ int ret = 0;
+
+ if (node_dev->ap_owned &&
+ (node_dev->node_info->qos_params.mode) != -1) {
+
+ if (bus_node_info->fabdev->bypass_qos_prg)
+ goto exit_init_qos;
+
+ ret = msm_bus_qos_enable_clk(node_dev);
+ if (ret < 0) {
+ MSM_BUS_ERR("Can't Enable QoS clk %d",
+ node_dev->node_info->id);
+ goto exit_init_qos;
+ }
+
+ bus_node_info->fabdev->noc_ops.qos_init(
+ node_dev,
+ bus_node_info->fabdev->qos_base,
+ bus_node_info->fabdev->base_offset,
+ bus_node_info->fabdev->qos_off,
+ bus_node_info->fabdev->qos_freq);
+ ret = msm_bus_qos_disable_clk(node_dev, ret);
+ }
+ } else
+ MSM_BUS_ERR("%s: Skipping QOS init for %d",
+ __func__, node_dev->node_info->id);
+ }
+exit_init_qos:
+ return ret;
+}
+
/*
 * Allocate and fill a msm_bus_fab_device_type for a (non-virtual) bus
 * device from @pdata, install NOC/BIMC ops and devm_ioremap the QoS
 * register window.  Returns 0 on success, -ENXIO/-ENOMEM on failure;
 * virtual devices are skipped with success.
 */
static int msm_bus_fabric_init(struct device *dev,
			struct msm_bus_node_device_type *pdata)
{
	struct msm_bus_fab_device_type *fabdev;
	struct msm_bus_node_device_type *node_dev = NULL;
	int ret = 0;

	node_dev = dev->platform_data;
	if (!node_dev) {
		MSM_BUS_ERR("%s: Unable to get bus device info" , __func__);
		ret = -ENXIO;
		goto exit_fabric_init;
	}

	if (node_dev->node_info->virt_dev) {
		MSM_BUS_ERR("%s: Skip Fab init for virtual device %d", __func__,
						node_dev->node_info->id);
		goto exit_fabric_init;
	}

	fabdev = devm_kzalloc(dev, sizeof(struct msm_bus_fab_device_type),
								GFP_KERNEL);
	if (!fabdev) {
		MSM_BUS_ERR("Fabric alloc failed\n");
		ret = -ENOMEM;
		goto exit_fabric_init;
	}

	/* Copy the QoS window description from platform data */
	node_dev->fabdev = fabdev;
	fabdev->pqos_base = pdata->fabdev->pqos_base;
	fabdev->qos_range = pdata->fabdev->qos_range;
	fabdev->base_offset = pdata->fabdev->base_offset;
	fabdev->qos_off = pdata->fabdev->qos_off;
	fabdev->qos_freq = pdata->fabdev->qos_freq;
	fabdev->bus_type = pdata->fabdev->bus_type;
	fabdev->bypass_qos_prg = pdata->fabdev->bypass_qos_prg;
	fabdev->util_fact = pdata->fabdev->util_fact;
	fabdev->vrail_comp = pdata->fabdev->vrail_comp;
	msm_bus_fab_init_noc_ops(node_dev);

	fabdev->qos_base = devm_ioremap(dev,
				fabdev->pqos_base, fabdev->qos_range);
	if (!fabdev->qos_base) {
		MSM_BUS_ERR("%s: Error remapping address 0x%zx :bus device %d",
			__func__,
			(size_t)fabdev->pqos_base, node_dev->node_info->id);
		ret = -ENOMEM;
		goto exit_fabric_init;
	}

exit_fabric_init:
	return ret;
}
+
+static int msm_bus_init_clk(struct device *bus_dev,
+ struct msm_bus_node_device_type *pdata)
+{
+ unsigned int ctx;
+ int ret = 0;
+ struct msm_bus_node_device_type *node_dev = bus_dev->platform_data;
+
+ for (ctx = 0; ctx < NUM_CTX; ctx++) {
+ if (!IS_ERR_OR_NULL(pdata->clk[ctx].clk)) {
+ node_dev->clk[ctx].clk = pdata->clk[ctx].clk;
+ node_dev->clk[ctx].enable = false;
+ node_dev->clk[ctx].dirty = false;
+ MSM_BUS_ERR("%s: Valid node clk node %d ctx %d",
+ __func__, node_dev->node_info->id, ctx);
+ }
+ }
+
+ if (!IS_ERR_OR_NULL(pdata->qos_clk.clk)) {
+ node_dev->qos_clk.clk = pdata->qos_clk.clk;
+ node_dev->qos_clk.enable = false;
+ MSM_BUS_ERR("%s: Valid Iface clk node %d", __func__,
+ node_dev->node_info->id);
+ }
+
+ return ret;
+}
+
/*
 * Deep-copy node_info from platform data @pdata into the bus device's
 * own node_info: scalar fields and QoS parameters by assignment, then
 * devm-allocated copies of the connections, blacklist and qport arrays
 * (dev_connections/black_connections are allocated empty, to be filled
 * later).  Returns 0 or -ENXIO/-ENOMEM; on allocation failure earlier
 * arrays are devm_kfree'd before returning.
 */
static int msm_bus_copy_node_info(struct msm_bus_node_device_type *pdata,
				struct device *bus_dev)
{
	int ret = 0;
	struct msm_bus_node_info_type *node_info = NULL;
	struct msm_bus_node_info_type *pdata_node_info = NULL;
	struct msm_bus_node_device_type *bus_node = NULL;

	bus_node = bus_dev->platform_data;

	if (!bus_node || !pdata) {
		ret = -ENXIO;
		MSM_BUS_ERR("%s: Invalid pointers pdata %p, bus_node %p",
			__func__, pdata, bus_node);
		goto exit_copy_node_info;
	}

	node_info = bus_node->node_info;
	pdata_node_info = pdata->node_info;

	/* Scalar identity and QoS fields copy directly */
	node_info->name = pdata_node_info->name;
	node_info->id =  pdata_node_info->id;
	node_info->bus_device_id = pdata_node_info->bus_device_id;
	node_info->mas_rpm_id = pdata_node_info->mas_rpm_id;
	node_info->slv_rpm_id = pdata_node_info->slv_rpm_id;
	node_info->num_connections = pdata_node_info->num_connections;
	node_info->num_blist = pdata_node_info->num_blist;
	node_info->num_qports = pdata_node_info->num_qports;
	node_info->buswidth = pdata_node_info->buswidth;
	node_info->virt_dev = pdata_node_info->virt_dev;
	node_info->is_fab_dev = pdata_node_info->is_fab_dev;
	node_info->qos_params.mode = pdata_node_info->qos_params.mode;
	node_info->qos_params.prio1 = pdata_node_info->qos_params.prio1;
	node_info->qos_params.prio0 = pdata_node_info->qos_params.prio0;
	node_info->qos_params.prio_lvl = pdata_node_info->qos_params.prio_lvl;
	node_info->qos_params.prio_rd = pdata_node_info->qos_params.prio_rd;
	node_info->qos_params.prio_wr = pdata_node_info->qos_params.prio_wr;
	node_info->qos_params.gp = pdata_node_info->qos_params.gp;
	node_info->qos_params.thmp = pdata_node_info->qos_params.thmp;
	node_info->qos_params.ws = pdata_node_info->qos_params.ws;
	node_info->qos_params.bw_buffer = pdata_node_info->qos_params.bw_buffer;

	/* Device pointers resolved later; allocate zeroed slots only */
	node_info->dev_connections = devm_kzalloc(bus_dev,
			sizeof(struct device *) *
				pdata_node_info->num_connections,
			GFP_KERNEL);
	if (!node_info->dev_connections) {
		MSM_BUS_ERR("%s:Bus dev connections alloc failed\n", __func__);
		ret = -ENOMEM;
		goto exit_copy_node_info;
	}

	node_info->connections = devm_kzalloc(bus_dev,
			sizeof(int) * pdata_node_info->num_connections,
			GFP_KERNEL);
	if (!node_info->connections) {
		MSM_BUS_ERR("%s:Bus connections alloc failed\n", __func__);
		devm_kfree(bus_dev, node_info->dev_connections);
		ret = -ENOMEM;
		goto exit_copy_node_info;
	}

	memcpy(node_info->connections,
		pdata_node_info->connections,
		sizeof(int) * pdata_node_info->num_connections);

	node_info->black_connections = devm_kzalloc(bus_dev,
			sizeof(struct device *) *
				pdata_node_info->num_blist,
			GFP_KERNEL);
	if (!node_info->black_connections) {
		MSM_BUS_ERR("%s: Bus black connections alloc failed\n",
			__func__);
		devm_kfree(bus_dev, node_info->dev_connections);
		devm_kfree(bus_dev, node_info->connections);
		ret = -ENOMEM;
		goto exit_copy_node_info;
	}

	node_info->black_listed_connections = devm_kzalloc(bus_dev,
			pdata_node_info->num_blist * sizeof(int),
			GFP_KERNEL);
	if (!node_info->black_listed_connections) {
		MSM_BUS_ERR("%s:Bus black list connections alloc failed\n",
					__func__);
		devm_kfree(bus_dev, node_info->black_connections);
		devm_kfree(bus_dev, node_info->dev_connections);
		devm_kfree(bus_dev, node_info->connections);
		ret = -ENOMEM;
		goto exit_copy_node_info;
	}

	memcpy(node_info->black_listed_connections,
		pdata_node_info->black_listed_connections,
		sizeof(int) * pdata_node_info->num_blist);

	node_info->qport = devm_kzalloc(bus_dev,
			sizeof(int) * pdata_node_info->num_qports,
			GFP_KERNEL);
	if (!node_info->qport) {
		MSM_BUS_ERR("%s:Bus qport allocation failed\n", __func__);
		devm_kfree(bus_dev, node_info->dev_connections);
		devm_kfree(bus_dev, node_info->connections);
		devm_kfree(bus_dev, node_info->black_listed_connections);
		ret = -ENOMEM;
		goto exit_copy_node_info;
	}

	memcpy(node_info->qport,
		pdata_node_info->qport,
		sizeof(int) * pdata_node_info->num_qports);

exit_copy_node_info:
	return ret;
}
+
+/*
+ * msm_bus_device_init() - Allocate, initialize and register a struct device
+ * for one bus node described by @pdata.  Node-private allocations are
+ * devm-managed against the new device.
+ * Returns the new device on success, NULL on any failure.
+ */
+static struct device *msm_bus_device_init(
+ struct msm_bus_node_device_type *pdata)
+{
+ struct device *bus_dev = NULL;
+ struct msm_bus_node_device_type *bus_node = NULL;
+ struct msm_bus_node_info_type *node_info = NULL;
+ int ret = 0;
+
+ bus_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+ if (!bus_dev) {
+ MSM_BUS_ERR("%s:Device alloc failed\n", __func__);
+ bus_dev = NULL;
+ goto exit_device_init;
+ }
+ /**
+ * Init here so we can use devm calls
+ */
+ device_initialize(bus_dev);
+
+ bus_node = devm_kzalloc(bus_dev,
+ sizeof(struct msm_bus_node_device_type), GFP_KERNEL);
+ if (!bus_node) {
+ MSM_BUS_ERR("%s:Bus node alloc failed\n", __func__);
+ kfree(bus_dev);
+ bus_dev = NULL;
+ goto exit_device_init;
+ }
+
+ node_info = devm_kzalloc(bus_dev,
+ sizeof(struct msm_bus_node_info_type), GFP_KERNEL);
+ if (!node_info) {
+ MSM_BUS_ERR("%s:Bus node info alloc failed\n", __func__);
+ devm_kfree(bus_dev, bus_node);
+ kfree(bus_dev);
+ bus_dev = NULL;
+ goto exit_device_init;
+ }
+
+ bus_node->node_info = node_info;
+ bus_node->ap_owned = pdata->ap_owned;
+ bus_dev->platform_data = bus_node;
+
+ if (msm_bus_copy_node_info(pdata, bus_dev) < 0) {
+ devm_kfree(bus_dev, bus_node);
+ devm_kfree(bus_dev, node_info);
+ kfree(bus_dev);
+ bus_dev = NULL;
+ goto exit_device_init;
+ }
+
+ bus_dev->bus = &msm_bus_type;
+ /* Node names come from platform/DT data; pass them as an argument,
+ * never as the format string itself, so a '%' in a name cannot be
+ * interpreted by the printf machinery.
+ */
+ dev_set_name(bus_dev, "%s", bus_node->node_info->name);
+
+ ret = device_add(bus_dev);
+ if (ret < 0) {
+ MSM_BUS_ERR("%s: Error registering device %d",
+ __func__, pdata->node_info->id);
+ devm_kfree(bus_dev, bus_node);
+ devm_kfree(bus_dev, node_info->dev_connections);
+ devm_kfree(bus_dev, node_info->connections);
+ devm_kfree(bus_dev, node_info->black_connections);
+ devm_kfree(bus_dev, node_info->black_listed_connections);
+ devm_kfree(bus_dev, node_info);
+ kfree(bus_dev);
+ bus_dev = NULL;
+ goto exit_device_init;
+ }
+
+exit_device_init:
+ return bus_dev;
+}
+
+/*
+ * msm_bus_setup_dev_conn() - bus_for_each_dev() callback that resolves a
+ * node's integer ids (parent bus, connections, blacklist entries) into
+ * struct device pointers via bus_find_device().
+ * Returns 0 on success; -ENODEV/-ENXIO when a referenced device is missing.
+ */
+static int msm_bus_setup_dev_conn(struct device *bus_dev, void *data)
+{
+ struct msm_bus_node_device_type *bus_node = NULL;
+ int ret = 0;
+ int j;
+
+ bus_node = bus_dev->platform_data;
+ if (!bus_node) {
+ MSM_BUS_ERR("%s: Can't get device info", __func__);
+ ret = -ENODEV;
+ goto exit_setup_dev_conn;
+ }
+
+ /* Setup parent bus device for this node */
+ if (!bus_node->node_info->is_fab_dev) {
+ struct device *bus_parent_device =
+ bus_find_device(&msm_bus_type, NULL,
+ (void *)&bus_node->node_info->bus_device_id,
+ msm_bus_device_match_adhoc);
+
+ if (!bus_parent_device) {
+ MSM_BUS_ERR("%s: Error finding parentdev %d parent %d",
+ __func__,
+ bus_node->node_info->id,
+ bus_node->node_info->bus_device_id);
+ ret = -ENXIO;
+ goto exit_setup_dev_conn;
+ }
+ bus_node->node_info->bus_device = bus_parent_device;
+ }
+
+ bus_node->node_info->is_traversed = false;
+
+ /* Resolve each forward connection id to its struct device */
+ for (j = 0; j < bus_node->node_info->num_connections; j++) {
+ bus_node->node_info->dev_connections[j] =
+ bus_find_device(&msm_bus_type, NULL,
+ (void *)&bus_node->node_info->connections[j],
+ msm_bus_device_match_adhoc);
+
+ if (!bus_node->node_info->dev_connections[j]) {
+ MSM_BUS_ERR("%s: Error finding conn %d for device %d",
+ __func__, bus_node->node_info->connections[j],
+ bus_node->node_info->id);
+ ret = -ENODEV;
+ goto exit_setup_dev_conn;
+ }
+ }
+
+ /* Resolve each black-listed connection id the same way */
+ for (j = 0; j < bus_node->node_info->num_blist; j++) {
+ bus_node->node_info->black_connections[j] =
+ bus_find_device(&msm_bus_type, NULL,
+ (void *)&bus_node->node_info->
+ black_listed_connections[j],
+ msm_bus_device_match_adhoc);
+
+ if (!bus_node->node_info->black_connections[j]) {
+ MSM_BUS_ERR("%s: Error finding conn %d for device %d\n",
+ __func__, bus_node->node_info->
+ black_listed_connections[j],
+ bus_node->node_info->id);
+ ret = -ENODEV;
+ goto exit_setup_dev_conn;
+ }
+ }
+
+exit_setup_dev_conn:
+ return ret;
+}
+
+/*
+ * msm_bus_node_debug() - bus_for_each_dev() callback: log a node's id,
+ * buswidth and resolved connections.  Also calls msm_bus_floor_init() for
+ * fabric devices, so it is not purely a debug hook despite the name.
+ */
+static int msm_bus_node_debug(struct device *bus_dev, void *data)
+{
+ int j;
+ int ret = 0;
+ struct msm_bus_node_device_type *bus_node = NULL;
+
+ bus_node = bus_dev->platform_data;
+ if (!bus_node) {
+ MSM_BUS_ERR("%s: Can't get device info", __func__);
+ ret = -ENODEV;
+ goto exit_node_debug;
+ }
+
+ MSM_BUS_DBG("Device = %d buswidth %u", bus_node->node_info->id,
+ bus_node->node_info->buswidth);
+ for (j = 0; j < bus_node->node_info->num_connections; j++) {
+ struct msm_bus_node_device_type *bdev =
+ (struct msm_bus_node_device_type *)
+ bus_node->node_info->dev_connections[j]->platform_data;
+ MSM_BUS_DBG("\n\t Connection[%d] %d", j, bdev->node_info->id);
+ }
+
+ if (bus_node->node_info->is_fab_dev)
+ msm_bus_floor_init(bus_dev);
+
+exit_node_debug:
+ return ret;
+}
+
+/*
+ * msm_bus_device_probe() - Platform probe for the adhoc msm bus topology.
+ * Builds one device per node, initializes clocks/fabrics, then wires up
+ * inter-node connections and QoS across all registered devices.
+ * Returns 0 on success or a negative errno.
+ */
+static int msm_bus_device_probe(struct platform_device *pdev)
+{
+ unsigned int i;
+ /* Must be signed: carries negative errno values (-ENODATA, -ENXIO) */
+ int ret = 0;
+ struct msm_bus_device_node_registration *pdata;
+
+ /* If possible, get pdata from device-tree */
+ if (pdev->dev.of_node)
+ pdata = msm_bus_of_to_pdata(pdev);
+ else {
+ pdata = (struct msm_bus_device_node_registration *)pdev->
+ dev.platform_data;
+ }
+
+ if (IS_ERR_OR_NULL(pdata)) {
+ MSM_BUS_ERR("No platform data found");
+ ret = -ENODATA;
+ goto exit_device_probe;
+ }
+
+ for (i = 0; i < pdata->num_devices; i++) {
+ struct device *node_dev = NULL;
+
+ node_dev = msm_bus_device_init(&pdata->info[i]);
+
+ if (!node_dev) {
+ MSM_BUS_ERR("%s: Error during dev init for %d",
+ __func__, pdata->info[i].node_info->id);
+ ret = -ENXIO;
+ goto exit_device_probe;
+ }
+
+ /* NOTE(review): clk init result is not acted upon here (it is
+ * overwritten below); confirm whether failures should abort.
+ */
+ ret = msm_bus_init_clk(node_dev, &pdata->info[i]);
+ /*Is this a fabric device ?*/
+ if (pdata->info[i].node_info->is_fab_dev) {
+ MSM_BUS_DBG("%s: %d is a fab", __func__,
+ pdata->info[i].node_info->id);
+ ret = msm_bus_fabric_init(node_dev, &pdata->info[i]);
+ if (ret) {
+ MSM_BUS_ERR("%s: Error intializing fab %d",
+ __func__, pdata->info[i].node_info->id);
+ goto exit_device_probe;
+ }
+ }
+ }
+
+ ret = bus_for_each_dev(&msm_bus_type, NULL, NULL,
+ msm_bus_setup_dev_conn);
+ if (ret) {
+ MSM_BUS_ERR("%s: Error setting up dev connections", __func__);
+ goto exit_device_probe;
+ }
+
+ ret = bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_dev_init_qos);
+ if (ret) {
+ MSM_BUS_ERR("%s: Error during qos init", __func__);
+ goto exit_device_probe;
+ }
+
+
+ /* Register the arb layer ops */
+ msm_bus_arb_setops_adhoc(&arb_ops);
+ bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_node_debug);
+
+ devm_kfree(&pdev->dev, pdata->info);
+ devm_kfree(&pdev->dev, pdata);
+exit_device_probe:
+ return ret;
+}
+
+/*
+ * msm_bus_device_rules_probe() - Probe for the static bandwidth-rules
+ * device: parse rules from DT and register them with the rules engine.
+ * Always returns 0; absence of rule data is not treated as an error.
+ */
+static int msm_bus_device_rules_probe(struct platform_device *pdev)
+{
+ struct bus_rule_type *rule_data = NULL;
+ int num_rules = 0;
+
+ num_rules = msm_bus_of_get_static_rules(pdev, &rule_data);
+
+ if (!rule_data)
+ goto exit_rules_probe;
+
+ msm_rule_register(num_rules, rule_data, NULL);
+ static_rules.num_rules = num_rules;
+ static_rules.rules = rule_data;
+ pdev->dev.platform_data = &static_rules;
+
+exit_rules_probe:
+ return 0;
+}
+
+/*
+ * msm_bus_device_rules_remove() - Unregister any static rules that were
+ * registered at probe time.  Always returns 0.
+ */
+int msm_bus_device_rules_remove(struct platform_device *pdev)
+{
+ struct static_rules_type *static_rules = NULL;
+
+ static_rules = pdev->dev.platform_data;
+ if (static_rules)
+ msm_rule_unregister(static_rules->num_rules,
+ static_rules->rules, NULL);
+ return 0;
+}
+
+/*
+ * msm_bus_free_dev() - bus_for_each_dev() callback that unregisters one
+ * bus node device (devm-managed node data is released with it).
+ * NOTE(review): the removal message is logged at error level, and the
+ * kzalloc'd struct device itself is not kfree'd here - verify against
+ * the device release path.
+ */
+static int msm_bus_free_dev(struct device *dev, void *data)
+{
+ struct msm_bus_node_device_type *bus_node = NULL;
+
+ bus_node = dev->platform_data;
+
+ if (bus_node)
+ MSM_BUS_ERR("\n%s: Removing device %d", __func__,
+ bus_node->node_info->id);
+ device_unregister(dev);
+ return 0;
+}
+
+/* Platform remove: unregister every node device on the msm bus. */
+int msm_bus_device_remove(struct platform_device *pdev)
+{
+ bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_free_dev);
+ return 0;
+}
+
+/* DT match table and platform driver for the static bandwidth-rules node */
+static struct of_device_id rules_match[] = {
+ {.compatible = "qcom,msm-bus-static-bw-rules"},
+ {}
+};
+
+static struct platform_driver msm_bus_rules_driver = {
+ .probe = msm_bus_device_rules_probe,
+ .remove = msm_bus_device_rules_remove,
+ .driver = {
+ .name = "msm_bus_rules_device",
+ .owner = THIS_MODULE,
+ .of_match_table = rules_match,
+ },
+};
+
+/* DT match table and platform driver for the bus fabric topology node */
+static struct of_device_id fabric_match[] = {
+ {.compatible = "qcom,msm-bus-device"},
+ {}
+};
+
+static struct platform_driver msm_bus_device_driver = {
+ .probe = msm_bus_device_probe,
+ .remove = msm_bus_device_remove,
+ .driver = {
+ .name = "msm_bus_device",
+ .owner = THIS_MODULE,
+ .of_match_table = fabric_match,
+ },
+};
+
+/*
+ * msm_bus_device_init_driver() - Register the fabric and rules platform
+ * drivers at subsys init.  If the rules driver fails to register, the
+ * already-registered device driver is rolled back so no half-initialized
+ * state is left behind.
+ */
+int __init msm_bus_device_init_driver(void)
+{
+ int rc;
+
+ MSM_BUS_ERR("msm_bus_fabric_init_driver\n");
+ rc = platform_driver_register(&msm_bus_device_driver);
+
+ if (rc) {
+ MSM_BUS_ERR("Failed to register bus device driver");
+ return rc;
+ }
+
+ rc = platform_driver_register(&msm_bus_rules_driver);
+ if (rc) {
+ MSM_BUS_ERR("Failed to register bus rules driver");
+ platform_driver_unregister(&msm_bus_device_driver);
+ }
+ return rc;
+}
+subsys_initcall(msm_bus_device_init_driver);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_id.c b/drivers/soc/qcom/msm_bus/msm_bus_id.c
new file mode 100644
index 0000000000000..3238161c2aad2
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_id.c
@@ -0,0 +1,94 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include "msm_bus_core.h"
+#include "msm_bus_noc.h"
+#include "msm_bus_bimc.h"
+
+static uint32_t master_iids[MSM_BUS_MASTER_LAST];
+static uint32_t slave_iids[MSM_BUS_SLAVE_LAST - SLAVE_ID_KEY];
+
+/*
+ * msm_bus_assign_iids() - Assign private (internal) ids to every node of
+ * a fabric and record them in the master/slave lookup tables.  Gateway
+ * nodes keep their public id; out-of-range ids are skipped with a WARN.
+ */
+static void msm_bus_assign_iids(struct msm_bus_fabric_registration
+ *fabreg, int fabid)
+{
+ int i;
+ for (i = 0; i < fabreg->len; i++) {
+ if (!fabreg->info[i].gateway) {
+ fabreg->info[i].priv_id = fabid + fabreg->info[i].id;
+ if (fabreg->info[i].id < SLAVE_ID_KEY) {
+ /* Master id: bounds-check before indexing */
+ if (fabreg->info[i].id >= MSM_BUS_MASTER_LAST) {
+ WARN(1, "id %d exceeds array size!\n",
+ fabreg->info[i].id);
+ continue;
+ }
+
+ master_iids[fabreg->info[i].id] =
+ fabreg->info[i].priv_id;
+ } else {
+ /* Slave id: offset by SLAVE_ID_KEY */
+ if ((fabreg->info[i].id - SLAVE_ID_KEY) >=
+ (MSM_BUS_SLAVE_LAST - SLAVE_ID_KEY)) {
+ WARN(1, "id %d exceeds array size!\n",
+ fabreg->info[i].id);
+ continue;
+ }
+
+ slave_iids[fabreg->info[i].id - (SLAVE_ID_KEY)]
+ = fabreg->info[i].priv_id;
+ }
+ } else {
+ fabreg->info[i].priv_id = fabreg->info[i].id;
+ }
+ }
+}
+
+/*
+ * msm_bus_get_iid() - Translate a public master/slave id into the
+ * internal iid assigned by msm_bus_assign_iids().
+ * Returns -EINVAL for ids outside the lookup tables.
+ */
+static int msm_bus_get_iid(int id)
+{
+ /* Also reject negative ids: they would index the lookup arrays
+ * out of bounds.
+ */
+ if (id < 0 || (id < SLAVE_ID_KEY && id >= MSM_BUS_MASTER_LAST) ||
+ id >= MSM_BUS_SLAVE_LAST) {
+ MSM_BUS_ERR("Cannot get iid. Invalid id %d passed\n", id);
+ return -EINVAL;
+ }
+
+ return CHECK_ID(((id < SLAVE_ID_KEY) ? master_iids[id] :
+ slave_iids[id - SLAVE_ID_KEY]), id);
+}
+
+/* Board algorithm ops: id-translation hooks used by the fabric core */
+static struct msm_bus_board_algorithm msm_bus_id_algo = {
+ .get_iid = msm_bus_get_iid,
+ .assign_iids = msm_bus_assign_iids,
+};
+
+/* Interleaved RPM ids are not supported on this target */
+int msm_bus_board_rpm_get_il_ids(uint16_t *id)
+{
+ return -ENXIO;
+}
+
+/* Attach the id-translation algorithm to a fabric registration */
+void msm_bus_board_init(struct msm_bus_fabric_registration *pdata)
+{
+ pdata->board_algo = &msm_bus_id_algo;
+}
+
+/*
+ * msm_bus_board_set_nfab() - Record the number of fabrics on the board.
+ * Non-positive values are ignored.
+ */
+void msm_bus_board_set_nfab(struct msm_bus_fabric_registration *pdata,
+ int nfab)
+{
+ if (nfab <= 0)
+ return;
+
+ msm_bus_id_algo.board_nfab = nfab;
+}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_noc.c b/drivers/soc/qcom/msm_bus/msm_bus_noc.c
new file mode 100644
index 0000000000000..b3458df5c11bc
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_noc.c
@@ -0,0 +1,769 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "AXI: NOC: %s(): " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/msm-bus-board.h>
+#include "msm_bus_core.h"
+#include "msm_bus_noc.h"
+#include "msm_bus_adhoc.h"
+
+/* NOC_QOS generic */
+#define __CLZ(x) ((8 * sizeof(uint32_t)) - 1 - __fls(x))
+#define SAT_SCALE 16 /* 16 bytes minimum for saturation */
+#define BW_SCALE 256 /* 1/256 byte per cycle unit */
+#define QOS_DEFAULT_BASEOFFSET 0x00003000
+#define QOS_DEFAULT_DELTA 0x80
+#define MAX_BW_FIELD (NOC_QOS_BWn_BW_BMSK >> NOC_QOS_BWn_BW_SHFT)
+#define MAX_SAT_FIELD (NOC_QOS_SATn_SAT_BMSK >> NOC_QOS_SATn_SAT_SHFT)
+
+#define NOC_QOS_REG_BASE(b, o) ((b) + (o))
+
+#define NOC_QOS_ID_COREIDn_ADDR(b, o, n, d) \
+ (NOC_QOS_REG_BASE(b, o) + (d) * (n))
+enum noc_qos_id_coreidn {
+ NOC_QOS_ID_COREIDn_RMSK = 0xffffffff,
+ NOC_QOS_ID_COREIDn_MAXn = 32,
+ NOC_QOS_ID_COREIDn_CORECHSUM_BMSK = 0xffffff00,
+ NOC_QOS_ID_COREIDn_CORECHSUM_SHFT = 0x8,
+ NOC_QOS_ID_COREIDn_CORETYPEID_BMSK = 0xff,
+ NOC_QOS_ID_COREIDn_CORETYPEID_SHFT = 0x0,
+};
+
+#define NOC_QOS_ID_REVISIONIDn_ADDR(b, o, n, d) \
+ (NOC_QOS_REG_BASE(b, o) + 0x4 + (d) * (n))
+enum noc_qos_id_revisionidn {
+ NOC_QOS_ID_REVISIONIDn_RMSK = 0xffffffff,
+ NOC_QOS_ID_REVISIONIDn_MAXn = 32,
+ NOC_QOS_ID_REVISIONIDn_FLEXNOCID_BMSK = 0xffffff00,
+ NOC_QOS_ID_REVISIONIDn_FLEXNOCID_SHFT = 0x8,
+ NOC_QOS_ID_REVISIONIDn_USERID_BMSK = 0xff,
+ NOC_QOS_ID_REVISIONIDn_USERID_SHFT = 0x0,
+};
+
+#define NOC_QOS_PRIORITYn_ADDR(b, o, n, d) \
+ (NOC_QOS_REG_BASE(b, o) + 0x8 + (d) * (n))
+enum noc_qos_id_priorityn {
+ NOC_QOS_PRIORITYn_RMSK = 0x0000000f,
+ NOC_QOS_PRIORITYn_MAXn = 32,
+ NOC_QOS_PRIORITYn_P1_BMSK = 0xc,
+ NOC_QOS_PRIORITYn_P1_SHFT = 0x2,
+ NOC_QOS_PRIORITYn_P0_BMSK = 0x3,
+ NOC_QOS_PRIORITYn_P0_SHFT = 0x0,
+};
+
+#define NOC_QOS_MODEn_ADDR(b, o, n, d) \
+ (NOC_QOS_REG_BASE(b, o) + 0xC + (d) * (n))
+enum noc_qos_id_moden_rmsk {
+ NOC_QOS_MODEn_RMSK = 0x00000003,
+ NOC_QOS_MODEn_MAXn = 32,
+ NOC_QOS_MODEn_MODE_BMSK = 0x3,
+ NOC_QOS_MODEn_MODE_SHFT = 0x0,
+};
+
+#define NOC_QOS_BWn_ADDR(b, o, n, d) \
+ (NOC_QOS_REG_BASE(b, o) + 0x10 + (d) * (n))
+enum noc_qos_id_bwn {
+ NOC_QOS_BWn_RMSK = 0x0000ffff,
+ NOC_QOS_BWn_MAXn = 32,
+ NOC_QOS_BWn_BW_BMSK = 0xffff,
+ NOC_QOS_BWn_BW_SHFT = 0x0,
+};
+
+/* QOS Saturation registers */
+#define NOC_QOS_SATn_ADDR(b, o, n, d) \
+ (NOC_QOS_REG_BASE(b, o) + 0x14 + (d) * (n))
+enum noc_qos_id_saturationn {
+ NOC_QOS_SATn_RMSK = 0x000003ff,
+ NOC_QOS_SATn_MAXn = 32,
+ NOC_QOS_SATn_SAT_BMSK = 0x3ff,
+ NOC_QOS_SATn_SAT_SHFT = 0x0,
+};
+
+/*
+ * noc_div() - do_div() wrapper: *a becomes the quotient and the remainder
+ * is returned.  For 0 < *a < b the dividend is left unchanged and 1 is
+ * returned, so small non-zero values are not rounded down to zero.
+ */
+static int noc_div(uint64_t *a, uint32_t b)
+{
+ if ((*a > 0) && (*a < b))
+ return 1;
+ else
+ return do_div(*a, b);
+}
+
+/**
+ * Calculates bw hardware is using from register values
+ * bw returned is in bytes/sec
+ * @bw_field: value read from the QOS_BWn register
+ * @qos_freq: QoS clock in KHz
+ */
+static uint64_t noc_bw(uint32_t bw_field, uint32_t qos_freq)
+{
+ uint64_t res;
+ uint32_t rem, scale;
+
+ res = 2 * qos_freq * bw_field;
+ scale = BW_SCALE * 1000;
+ rem = noc_div(&res, scale);
+ MSM_BUS_DBG("NOC: Calculated bw: %llu\n", res * 1000000ULL);
+ return res * 1000000ULL;
+}
+
+/*
+ * noc_bw_ceil() - Same scaling as noc_bw(); used via MAX_BW to compute
+ * the largest representable bandwidth for a given QoS clock.
+ * NOTE(review): the uint32_t return truncates results above ~4GB/s -
+ * confirm this is acceptable for the supported QoS clock range.
+ */
+static uint32_t noc_bw_ceil(long int bw_field, uint32_t qos_freq)
+{
+ uint64_t bw_temp = 2 * qos_freq * bw_field;
+ uint32_t scale = 1000 * BW_SCALE;
+ noc_div(&bw_temp, scale);
+ return bw_temp * 1000000;
+}
+#define MAX_BW(timebase) noc_bw_ceil(MAX_BW_FIELD, (timebase))
+
+/**
+ * Calculates ws hardware is using from register values
+ * ws returned is in nanoseconds
+ * @bw: bandwidth in bytes/sec; @sat: SAT register field; @qos_freq: KHz.
+ * Returns 0 when bw or qos_freq is zero (window size undefined).
+ */
+static uint32_t noc_ws(uint64_t bw, uint32_t sat, uint32_t qos_freq)
+{
+ if (bw && qos_freq) {
+ uint32_t bwf = bw * qos_freq;
+ uint64_t scale = 1000000000000LL * BW_SCALE *
+ SAT_SCALE * sat;
+ noc_div(&scale, bwf);
+ MSM_BUS_DBG("NOC: Calculated ws: %llu\n", scale);
+ return scale;
+ }
+
+ return 0;
+}
+#define MAX_WS(bw, timebase) noc_ws((bw), MAX_SAT_FIELD, (timebase))
+
+/* Calculate bandwidth field value for requested bandwidth
+ * @bw: requested bandwidth in bytes/sec; @qos_freq: QoS clock in KHz.
+ * The result is capped at MAX_BW_FIELD (register field width).
+ */
+static uint32_t noc_bw_field(uint64_t bw, uint32_t qos_freq)
+{
+ uint32_t bw_field = 0;
+
+ if (bw) {
+ uint32_t rem; /* remainder from noc_div(); currently unused */
+ uint64_t bw_capped = min_t(uint64_t, bw, MAX_BW(qos_freq));
+ uint64_t bwc = bw_capped * BW_SCALE;
+ uint64_t qf = 2 * qos_freq * 1000;
+
+ rem = noc_div(&bwc, qf);
+ bw_field = (uint32_t)min_t(uint64_t, bwc, MAX_BW_FIELD);
+ }
+
+ MSM_BUS_DBG("NOC: bw_field: %u\n", bw_field);
+ return bw_field;
+}
+
+/*
+ * noc_sat_field() - Compute the SATn register field for bandwidth @bw
+ * (bytes/sec) and window size @ws (ns) at QoS clock @qos_freq (KHz).
+ * The result is capped at MAX_SAT_FIELD.
+ */
+static uint32_t noc_sat_field(uint64_t bw, uint32_t ws, uint32_t qos_freq)
+{
+ uint32_t sat_field = 0, win;
+
+ if (bw) {
+ /* Limit to max bw and scale bw to 100 KB increments */
+ uint64_t tbw, tscale;
+ uint64_t bw_scaled = min_t(uint64_t, bw, MAX_BW(qos_freq));
+ uint32_t rem = noc_div(&bw_scaled, 100000);
+
+ /**
+ * Calculate saturation from windows size.
+ * WS must be at least one arb period.
+ * Saturation must not exceed max field size
+ *
+ * Bandwidth is in 100KB increments
+ * Window size is in ns
+ * qos_freq is in KHz
+ */
+ win = max(ws, 1000000 / qos_freq);
+ tbw = bw_scaled * win * qos_freq;
+ tscale = 10000000ULL * BW_SCALE * SAT_SCALE;
+ rem = noc_div(&tbw, tscale);
+ sat_field = (uint32_t)min_t(uint64_t, tbw, MAX_SAT_FIELD);
+ }
+
+ MSM_BUS_DBG("NOC: sat_field: %d\n", sat_field);
+ return sat_field;
+}
+
+/*
+ * noc_set_qos_mode() - Program the QoS mode register for master port
+ * @mport, but only when @mode is one of the modes permitted by the
+ * @perm_mode bitmask (NOC_QOS_PERM_MODE_*).
+ */
+static void noc_set_qos_mode(void __iomem *base, uint32_t qos_off,
+ uint32_t mport, uint32_t qos_delta, uint8_t mode,
+ uint8_t perm_mode)
+{
+ if (mode < NOC_QOS_MODE_MAX &&
+ ((1 << mode) & perm_mode)) {
+ uint32_t reg_val;
+
+ reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
+ mport, qos_delta)) & NOC_QOS_MODEn_RMSK;
+ writel_relaxed(((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK))) |
+ (mode & NOC_QOS_MODEn_MODE_BMSK)),
+ NOC_QOS_MODEn_ADDR(base, qos_off, mport, qos_delta));
+ }
+ /* Ensure qos mode is set before exiting */
+ wmb();
+}
+
+/*
+ * noc_set_qos_priority() - Program the P1 then P0 priority fields of the
+ * QoS priority register for master port @mport (read-modify-write each).
+ */
+static void noc_set_qos_priority(void __iomem *base, uint32_t qos_off,
+ uint32_t mport, uint32_t qos_delta,
+ struct msm_bus_noc_qos_priority *priority)
+{
+ uint32_t reg_val, val;
+
+ reg_val = readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport,
+ qos_delta)) & NOC_QOS_PRIORITYn_RMSK;
+ val = priority->p1 << NOC_QOS_PRIORITYn_P1_SHFT;
+ writel_relaxed(((reg_val & (~(NOC_QOS_PRIORITYn_P1_BMSK))) |
+ (val & NOC_QOS_PRIORITYn_P1_BMSK)),
+ NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport, qos_delta));
+
+ reg_val = readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport,
+ qos_delta))
+ & NOC_QOS_PRIORITYn_RMSK;
+ writel_relaxed(((reg_val & (~(NOC_QOS_PRIORITYn_P0_BMSK))) |
+ (priority->p0 & NOC_QOS_PRIORITYn_P0_BMSK)),
+ NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport, qos_delta));
+ /* Ensure qos priority is set before exiting */
+ wmb();
+}
+
+/*
+ * msm_bus_noc_set_qos_bw() - Program bandwidth/saturation limits for one
+ * QoS port.  Requires a non-zero QoS clock and only acts when @perm_mode
+ * (NOC_QOS_PERM_MODE_* bitmask) permits Limiter or Regulator mode.  The
+ * port is switched to fixed mode while BW/SAT are updated, then restored.
+ */
+static void msm_bus_noc_set_qos_bw(void __iomem *base, uint32_t qos_off,
+ uint32_t qos_freq, uint32_t mport, uint32_t qos_delta,
+ uint8_t perm_mode, struct msm_bus_noc_qos_bw *qbw)
+{
+ uint32_t reg_val, val, mode;
+
+ if (!qos_freq) {
+ MSM_BUS_DBG("Zero QoS Freq\n");
+ return;
+ }
+
+
+ /* If Limiter or Regulator modes are not supported, bw not available*/
+ if (perm_mode & (NOC_QOS_PERM_MODE_LIMITER |
+ NOC_QOS_PERM_MODE_REGULATOR)) {
+ uint32_t bw_val = noc_bw_field(qbw->bw, qos_freq);
+ uint32_t sat_val = noc_sat_field(qbw->bw, qbw->ws,
+ qos_freq);
+
+ MSM_BUS_DBG("NOC: BW: perm_mode: %d bw_val: %d, sat_val: %d\n",
+ perm_mode, bw_val, sat_val);
+ /*
+ * If in Limiter/Regulator mode, first go to fixed mode.
+ * Clear QoS accumulator
+ **/
+ mode = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
+ mport, qos_delta)) & NOC_QOS_MODEn_MODE_BMSK;
+ if (mode == NOC_QOS_MODE_REGULATOR || mode ==
+ NOC_QOS_MODE_LIMITER) {
+ reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(
+ base, qos_off, mport, qos_delta));
+ val = NOC_QOS_MODE_FIXED;
+ writel_relaxed((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK)))
+ | (val & NOC_QOS_MODEn_MODE_BMSK),
+ NOC_QOS_MODEn_ADDR(base, qos_off, mport,
+ qos_delta));
+ }
+
+ reg_val = readl_relaxed(NOC_QOS_BWn_ADDR(base, qos_off, mport,
+ qos_delta));
+ val = bw_val << NOC_QOS_BWn_BW_SHFT;
+ writel_relaxed(((reg_val & (~(NOC_QOS_BWn_BW_BMSK))) |
+ (val & NOC_QOS_BWn_BW_BMSK)),
+ NOC_QOS_BWn_ADDR(base, qos_off, mport, qos_delta));
+
+ MSM_BUS_DBG("NOC: BW: Wrote value: 0x%x\n", ((reg_val &
+ (~NOC_QOS_BWn_BW_BMSK)) | (val &
+ NOC_QOS_BWn_BW_BMSK)));
+
+ reg_val = readl_relaxed(NOC_QOS_SATn_ADDR(base, qos_off,
+ mport, qos_delta));
+ val = sat_val << NOC_QOS_SATn_SAT_SHFT;
+ writel_relaxed(((reg_val & (~(NOC_QOS_SATn_SAT_BMSK))) |
+ (val & NOC_QOS_SATn_SAT_BMSK)),
+ NOC_QOS_SATn_ADDR(base, qos_off, mport, qos_delta));
+
+ MSM_BUS_DBG("NOC: SAT: Wrote value: 0x%x\n", ((reg_val &
+ (~NOC_QOS_SATn_SAT_BMSK)) | (val &
+ NOC_QOS_SATn_SAT_BMSK)));
+
+ /* Set mode back to what it was initially */
+ reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
+ mport, qos_delta));
+ writel_relaxed((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK)))
+ | (mode & NOC_QOS_MODEn_MODE_BMSK),
+ NOC_QOS_MODEn_ADDR(base, qos_off, mport, qos_delta));
+ /* Ensure that all writes for bandwidth registers have
+ * completed before returning
+ */
+ wmb();
+ }
+}
+
+/*
+ * msm_bus_noc_get_qos_mode() - Report the QoS mode for a port.  When all
+ * modes are permitted the hardware register is read back; otherwise the
+ * highest permitted mode bit of @mode is returned.
+ */
+uint8_t msm_bus_noc_get_qos_mode(void __iomem *base, uint32_t qos_off,
+ uint32_t mport, uint32_t qos_delta, uint32_t mode, uint32_t perm_mode)
+{
+ if (NOC_QOS_MODES_ALL_PERM == perm_mode)
+ return readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
+ mport, qos_delta)) & NOC_QOS_MODEn_MODE_BMSK;
+ else
+ return 31 - __CLZ(mode &
+ NOC_QOS_MODES_ALL_PERM);
+}
+
+/* Read back the P1/P0 priority fields for master port @mport. */
+void msm_bus_noc_get_qos_priority(void __iomem *base, uint32_t qos_off,
+ uint32_t mport, uint32_t qos_delta,
+ struct msm_bus_noc_qos_priority *priority)
+{
+ priority->p1 = (readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off,
+ mport, qos_delta)) & NOC_QOS_PRIORITYn_P1_BMSK) >>
+ NOC_QOS_PRIORITYn_P1_SHFT;
+
+ priority->p0 = (readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off,
+ mport, qos_delta)) & NOC_QOS_PRIORITYn_P0_BMSK) >>
+ NOC_QOS_PRIORITYn_P0_SHFT;
+}
+
+/*
+ * msm_bus_noc_get_qos_bw() - Read back a port's bandwidth/window settings.
+ * Reports zero when the port permits neither Limiter nor Regulator mode.
+ */
+void msm_bus_noc_get_qos_bw(void __iomem *base, uint32_t qos_off,
+ uint32_t qos_freq,
+ uint32_t mport, uint32_t qos_delta, uint8_t perm_mode,
+ struct msm_bus_noc_qos_bw *qbw)
+{
+ if (perm_mode & (NOC_QOS_PERM_MODE_LIMITER |
+ NOC_QOS_PERM_MODE_REGULATOR)) {
+ uint32_t bw_val = readl_relaxed(NOC_QOS_BWn_ADDR(
+ base, qos_off, mport, qos_delta)) & NOC_QOS_BWn_BW_BMSK;
+ uint32_t sat = readl_relaxed(NOC_QOS_SATn_ADDR(
+ base, qos_off, mport, qos_delta))
+ & NOC_QOS_SATn_SAT_BMSK;
+
+ qbw->bw = noc_bw(bw_val, qos_freq);
+ qbw->ws = noc_ws(qbw->bw, sat, qos_freq);
+ } else {
+ qbw->bw = 0;
+ qbw->ws = 0;
+ }
+}
+
+/*
+ * msm_bus_noc_mas_init() - Per-master init: cache QoS priorities in
+ * info->hw_data and program priority/bw/mode for each QoS port.
+ * Returns 0 on success, -ENOMEM when the priority cache allocation fails.
+ * (hw_data ownership passes to the caller's teardown path.)
+ */
+static int msm_bus_noc_mas_init(struct msm_bus_noc_info *ninfo,
+ struct msm_bus_inode_info *info)
+{
+ int i;
+ struct msm_bus_noc_qos_priority *prio;
+ prio = kzalloc(sizeof(struct msm_bus_noc_qos_priority),
+ GFP_KERNEL);
+ if (!prio) {
+ MSM_BUS_WARN("Couldn't alloc prio data for node: %d\n",
+ info->node_info->id);
+ return -ENOMEM;
+ }
+
+ prio->read_prio = info->node_info->prio_rd;
+ prio->write_prio = info->node_info->prio_wr;
+ prio->p1 = info->node_info->prio1;
+ prio->p0 = info->node_info->prio0;
+ info->hw_data = (void *)prio;
+
+ if (!info->node_info->qport) {
+ MSM_BUS_DBG("No QoS Ports to init\n");
+ return 0;
+ }
+
+ for (i = 0; i < info->node_info->num_mports; i++) {
+ if (info->node_info->mode != NOC_QOS_MODE_BYPASS) {
+ noc_set_qos_priority(ninfo->base, ninfo->qos_baseoffset,
+ info->node_info->qport[i], ninfo->qos_delta,
+ prio);
+
+ if (info->node_info->mode != NOC_QOS_MODE_FIXED) {
+ struct msm_bus_noc_qos_bw qbw;
+ qbw.ws = info->node_info->ws;
+ qbw.bw = 0;
+ msm_bus_noc_set_qos_bw(ninfo->base,
+ ninfo->qos_baseoffset,
+ ninfo->qos_freq, info->node_info->
+ qport[i], ninfo->qos_delta,
+ info->node_info->perm_mode,
+ &qbw);
+ }
+ }
+
+ noc_set_qos_mode(ninfo->base, ninfo->qos_baseoffset,
+ info->node_info->qport[i], ninfo->qos_delta,
+ info->node_info->mode,
+ info->node_info->perm_mode);
+ }
+
+ return 0;
+}
+
+/*
+ * msm_bus_noc_node_init() - Node init hook: only masters (non-slave ids)
+ * not owned by the RPM get local QoS programming.
+ */
+static void msm_bus_noc_node_init(void *hw_data,
+ struct msm_bus_inode_info *info)
+{
+ struct msm_bus_noc_info *ninfo =
+ (struct msm_bus_noc_info *)hw_data;
+
+ if (!IS_SLAVE(info->node_info->priv_id))
+ if (info->node_info->hw_sel != MSM_BUS_RPM)
+ msm_bus_noc_mas_init(ninfo, info);
+}
+
+/*
+ * msm_bus_noc_allocate_commit_data() - Allocate a commit-data container
+ * whose mas/slv pointers alias the fabric's per-context hw info arrays.
+ */
+static int msm_bus_noc_allocate_commit_data(struct msm_bus_fabric_registration
+ *fab_pdata, void **cdata, int ctx)
+{
+ struct msm_bus_noc_commit **cd = (struct msm_bus_noc_commit **)cdata;
+ struct msm_bus_noc_info *ninfo =
+ (struct msm_bus_noc_info *)fab_pdata->hw_data;
+
+ *cd = kzalloc(sizeof(struct msm_bus_noc_commit), GFP_KERNEL);
+ if (!*cd) {
+ MSM_BUS_DBG("Couldn't alloc mem for cdata\n");
+ return -ENOMEM;
+ }
+
+ (*cd)->mas = ninfo->cdata[ctx].mas;
+ (*cd)->slv = ninfo->cdata[ctx].slv;
+
+ return 0;
+}
+
+/*
+ * msm_bus_noc_allocate_noc_data() - Allocate the NoC hw-specific info
+ * (commit arrays, master modes, register mapping) for one fabric.
+ * Returns the new msm_bus_noc_info, or NULL with all partial allocations
+ * freed on error.
+ */
+static void *msm_bus_noc_allocate_noc_data(struct platform_device *pdev,
+ struct msm_bus_fabric_registration *fab_pdata)
+{
+ struct resource *noc_mem;
+ struct resource *noc_io;
+ struct msm_bus_noc_info *ninfo;
+ int i;
+
+ ninfo = kzalloc(sizeof(struct msm_bus_noc_info), GFP_KERNEL);
+ if (!ninfo) {
+ MSM_BUS_DBG("Couldn't alloc mem for noc info\n");
+ return NULL;
+ }
+
+ ninfo->nmasters = fab_pdata->nmasters;
+ ninfo->nqos_masters = fab_pdata->nmasters;
+ ninfo->nslaves = fab_pdata->nslaves;
+ ninfo->qos_freq = fab_pdata->qos_freq;
+
+ if (!fab_pdata->qos_baseoffset)
+ ninfo->qos_baseoffset = QOS_DEFAULT_BASEOFFSET;
+ else
+ ninfo->qos_baseoffset = fab_pdata->qos_baseoffset;
+
+ if (!fab_pdata->qos_delta)
+ ninfo->qos_delta = QOS_DEFAULT_DELTA;
+ else
+ ninfo->qos_delta = fab_pdata->qos_delta;
+
+ ninfo->mas_modes = kzalloc(sizeof(uint32_t) * fab_pdata->nmasters,
+ GFP_KERNEL);
+ if (!ninfo->mas_modes) {
+ MSM_BUS_DBG("Couldn't alloc mem for noc master-modes\n");
+ /* Was a bare "return NULL" which leaked ninfo */
+ goto err;
+ }
+
+ for (i = 0; i < NUM_CTX; i++) {
+ ninfo->cdata[i].mas = kzalloc(sizeof(struct
+ msm_bus_node_hw_info) * fab_pdata->nmasters * 2,
+ GFP_KERNEL);
+ if (!ninfo->cdata[i].mas) {
+ MSM_BUS_DBG("Couldn't alloc mem for noc master-bw\n");
+ goto err;
+ }
+
+ ninfo->cdata[i].slv = kzalloc(sizeof(struct
+ msm_bus_node_hw_info) * fab_pdata->nslaves * 2,
+ GFP_KERNEL);
+ if (!ninfo->cdata[i].slv) {
+ MSM_BUS_DBG("Couldn't alloc mem for noc slave-bw\n");
+ goto err;
+ }
+ }
+
+ /* If it's a virtual fabric, don't get memory info */
+ if (fab_pdata->virt)
+ goto skip_mem;
+
+ noc_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!noc_mem && !fab_pdata->virt) {
+ MSM_BUS_ERR("Cannot get NoC Base address\n");
+ goto err;
+ }
+
+ noc_io = request_mem_region(noc_mem->start,
+ resource_size(noc_mem), pdev->name);
+ if (!noc_io) {
+ MSM_BUS_ERR("NoC memory unavailable\n");
+ goto err;
+ }
+
+ ninfo->base = ioremap(noc_mem->start, resource_size(noc_mem));
+ if (!ninfo->base) {
+ MSM_BUS_ERR("IOremap failed for NoC!\n");
+ release_mem_region(noc_mem->start, resource_size(noc_mem));
+ goto err;
+ }
+
+skip_mem:
+ fab_pdata->hw_data = (void *)ninfo;
+ return (void *)ninfo;
+
+err:
+ /* Unified error path: ninfo is kzalloc'd and kfree(NULL) is a
+ * no-op, so partially completed allocations (including earlier
+ * cdata[] iterations, previously leaked) are all released here.
+ */
+ for (i = 0; i < NUM_CTX; i++) {
+ kfree(ninfo->cdata[i].mas);
+ kfree(ninfo->cdata[i].slv);
+ }
+ kfree(ninfo->mas_modes);
+ kfree(ninfo);
+ return NULL;
+}
+
+/*
+ * free_commit_data() - Free a commit-data container.
+ * NOTE(review): cd->mas/cd->slv alias the ninfo->cdata[ctx] arrays set up
+ * in msm_bus_noc_allocate_commit_data(); freeing them here leaves the
+ * msm_bus_noc_info copies dangling - confirm teardown ordering.
+ */
+static void free_commit_data(void *cdata)
+{
+ struct msm_bus_noc_commit *cd = (struct msm_bus_noc_commit *)cdata;
+
+ kfree(cd->mas);
+ kfree(cd->slv);
+ kfree(cd);
+}
+
+/*
+ * msm_bus_noc_update_bw_reg() - Whether a QoS mode requires bandwidth
+ * register reprogramming (true for Limiter and Regulator modes only).
+ */
+static bool msm_bus_noc_update_bw_reg(int mode)
+{
+ bool ret = false;
+
+ if ((mode == NOC_QOS_MODE_LIMITER) ||
+ (mode == NOC_QOS_MODE_REGULATOR))
+ ret = true;
+
+ return ret;
+}
+
+/*
+ * msm_bus_noc_update_bw() - Aggregate a bandwidth delta into the commit
+ * data for a master (@info) / slave (@hop) pair and, for AP-owned masters
+ * in Limiter or Regulator mode, program the QoS bandwidth registers.
+ */
+static void msm_bus_noc_update_bw(struct msm_bus_inode_info *hop,
+ struct msm_bus_inode_info *info,
+ struct msm_bus_fabric_registration *fab_pdata,
+ void *sel_cdata, int *master_tiers,
+ int64_t add_bw)
+{
+ struct msm_bus_noc_info *ninfo;
+ struct msm_bus_noc_qos_bw qos_bw;
+ int i, ports;
+ int64_t bw;
+ struct msm_bus_noc_commit *sel_cd =
+ (struct msm_bus_noc_commit *)sel_cdata;
+
+ ninfo = (struct msm_bus_noc_info *)fab_pdata->hw_data;
+ if (!ninfo->qos_freq) {
+ MSM_BUS_DBG("NOC: No qos frequency to update bw\n");
+ return;
+ }
+
+ if (info->node_info->num_mports == 0) {
+ MSM_BUS_DBG("NOC: Skip Master BW\n");
+ goto skip_mas_bw;
+ }
+
+ ports = info->node_info->num_mports;
+ bw = INTERLEAVED_BW(fab_pdata, add_bw, ports);
+
+ MSM_BUS_DBG("NOC: Update bw for: %d: %lld\n",
+ info->node_info->priv_id, add_bw);
+ for (i = 0; i < ports; i++) {
+ sel_cd->mas[info->node_info->masterp[i]].bw += bw;
+ sel_cd->mas[info->node_info->masterp[i]].hw_id =
+ info->node_info->mas_hw_id;
+ MSM_BUS_DBG("NOC: Update mas_bw: ID: %d, BW: %llu ports:%d\n",
+ info->node_info->priv_id,
+ sel_cd->mas[info->node_info->masterp[i]].bw,
+ ports);
+ /* Check if info is a shared master.
+ * If it is, mark it dirty
+ * If it isn't, then set QOS Bandwidth
+ **/
+ if (info->node_info->hw_sel == MSM_BUS_RPM)
+ sel_cd->mas[info->node_info->masterp[i]].dirty = 1;
+ else {
+ if (!info->node_info->qport) {
+ MSM_BUS_DBG("No qos ports to update!\n");
+ break;
+ }
+
+ /* Only Limiter/Regulator modes take QoS bandwidth
+ * programming.  The previous condition
+ * (!(mode == REGULATOR) || (mode == LIMITER))
+ * mis-grouped the negation and skipped Limiter mode.
+ */
+ if (!((info->node_info->mode == NOC_QOS_MODE_REGULATOR)
+ || (info->node_info->mode ==
+ NOC_QOS_MODE_LIMITER))) {
+ MSM_BUS_DBG("Skip QoS reg programming\n");
+ break;
+ }
+ qos_bw.bw = sel_cd->mas[info->node_info->masterp[i]].
+ bw;
+ qos_bw.ws = info->node_info->ws;
+ msm_bus_noc_set_qos_bw(ninfo->base,
+ ninfo->qos_baseoffset,
+ ninfo->qos_freq,
+ info->node_info->qport[i], ninfo->qos_delta,
+ info->node_info->perm_mode, &qos_bw);
+ MSM_BUS_DBG("NOC: QoS: Update mas_bw: ws: %u\n",
+ qos_bw.ws);
+ }
+ }
+
+skip_mas_bw:
+ ports = hop->node_info->num_sports;
+ for (i = 0; i < ports; i++) {
+ sel_cd->slv[hop->node_info->slavep[i]].bw += add_bw;
+ sel_cd->slv[hop->node_info->slavep[i]].hw_id =
+ hop->node_info->slv_hw_id;
+ MSM_BUS_DBG("NOC: Update slave_bw for ID: %d -> %llu\n",
+ hop->node_info->priv_id,
+ sel_cd->slv[hop->node_info->slavep[i]].bw);
+ MSM_BUS_DBG("NOC: Update slave_bw for hw_id: %d, index: %d\n",
+ hop->node_info->slv_hw_id, hop->node_info->slavep[i]);
+ /* Check if hop is a shared slave.
+ * If it is, mark it dirty
+ * If it isn't, then nothing to be done as the
+ * slaves are in bypass mode.
+ **/
+ if (hop->node_info->hw_sel == MSM_BUS_RPM)
+ sel_cd->slv[hop->node_info->slavep[i]].dirty = 1;
+ }
+}
+
+/* Commit aggregated bandwidth votes to the remote (RPM) layer. */
+static int msm_bus_noc_commit(struct msm_bus_fabric_registration
+ *fab_pdata, void *hw_data, void **cdata)
+{
+ MSM_BUS_DBG("\nReached NOC Commit\n");
+ msm_bus_remote_hw_commit(fab_pdata, hw_data, cdata);
+ return 0;
+}
+
+/* Port halt is a no-op on NoC fabrics. */
+static int msm_bus_noc_port_halt(uint32_t haltid, uint8_t mport)
+{
+ return 0;
+}
+
+/* Port unhalt is a no-op on NoC fabrics. */
+static int msm_bus_noc_port_unhalt(uint32_t haltid, uint8_t mport)
+{
+ return 0;
+}
+
+/*
+ * msm_bus_noc_qos_init() - Program priority, bandwidth and mode for each
+ * of a node's QoS ports according to its qos_params.
+ * Returns 0 (nodes without qports are silently skipped).
+ */
+static int msm_bus_noc_qos_init(struct msm_bus_node_device_type *info,
+ void __iomem *qos_base,
+ uint32_t qos_off, uint32_t qos_delta,
+ uint32_t qos_freq)
+{
+ struct msm_bus_noc_qos_priority prio;
+ int ret = 0;
+ int i;
+
+ prio.p1 = info->node_info->qos_params.prio1;
+ prio.p0 = info->node_info->qos_params.prio0;
+
+ if (!info->node_info->qport) {
+ MSM_BUS_DBG("No QoS Ports to init\n");
+ ret = 0;
+ goto err_qos_init;
+ }
+
+ for (i = 0; i < info->node_info->num_qports; i++) {
+ if (info->node_info->qos_params.mode != NOC_QOS_MODE_BYPASS) {
+ noc_set_qos_priority(qos_base, qos_off,
+ info->node_info->qport[i], qos_delta,
+ &prio);
+
+ if (info->node_info->qos_params.mode !=
+ NOC_QOS_MODE_FIXED) {
+ struct msm_bus_noc_qos_bw qbw;
+ qbw.ws = info->node_info->qos_params.ws;
+ qbw.bw = 0;
+ /* perm_mode is a NOC_QOS_PERM_MODE_*
+ * bitmask; passing the raw mode value
+ * made the Limiter case a no-op.
+ */
+ msm_bus_noc_set_qos_bw(qos_base, qos_off,
+ qos_freq,
+ info->node_info->qport[i],
+ qos_delta,
+ (1 << info->node_info->qos_params.mode),
+ &qbw);
+ }
+ }
+
+ noc_set_qos_mode(qos_base, qos_off, info->node_info->qport[i],
+ qos_delta, info->node_info->qos_params.mode,
+ (1 << info->node_info->qos_params.mode));
+ }
+err_qos_init:
+ return ret;
+}
+
+/*
+ * msm_bus_noc_set_bw() - Distribute the node's dual-context AB vote
+ * evenly across its QoS ports and program it, for Limiter/Regulator
+ * modes only.  Always returns 0.
+ */
+static int msm_bus_noc_set_bw(struct msm_bus_node_device_type *dev,
+ void __iomem *qos_base,
+ uint32_t qos_off, uint32_t qos_delta,
+ uint32_t qos_freq)
+{
+ int ret = 0;
+ uint64_t bw = 0;
+ int i;
+ struct msm_bus_node_info_type *info = dev->node_info;
+
+ if (info && info->num_qports &&
+ ((info->qos_params.mode == NOC_QOS_MODE_REGULATOR) ||
+ (info->qos_params.mode ==
+ NOC_QOS_MODE_LIMITER))) {
+ struct msm_bus_noc_qos_bw qos_bw;
+
+ /* Per-port share of the dual-context AB vote */
+ bw = msm_bus_div64(info->num_qports,
+ dev->node_ab.ab[DUAL_CTX]);
+
+ for (i = 0; i < info->num_qports; i++) {
+ if (!info->qport) {
+ MSM_BUS_DBG("No qos ports to update!\n");
+ break;
+ }
+
+ qos_bw.bw = bw;
+ qos_bw.ws = info->qos_params.ws;
+ msm_bus_noc_set_qos_bw(qos_base, qos_off, qos_freq,
+ info->qport[i], qos_delta,
+ info->qos_params.mode, &qos_bw);
+ MSM_BUS_DBG("NOC: QoS: Update mas_bw: ws: %u\n",
+ qos_bw.ws);
+ }
+ }
+ return ret;
+}
+/*
+ * msm_bus_noc_hw_init() - Wire up the NoC implementations of the generic
+ * hw algorithm ops for a fabric registration.  Always returns 0.
+ */
+int msm_bus_noc_hw_init(struct msm_bus_fabric_registration *pdata,
+ struct msm_bus_hw_algorithm *hw_algo)
+{
+ /* Set interleaving to true by default */
+ pdata->il_flag = true;
+ hw_algo->allocate_commit_data = msm_bus_noc_allocate_commit_data;
+ hw_algo->allocate_hw_data = msm_bus_noc_allocate_noc_data;
+ hw_algo->node_init = msm_bus_noc_node_init;
+ hw_algo->free_commit_data = free_commit_data;
+ hw_algo->update_bw = msm_bus_noc_update_bw;
+ hw_algo->commit = msm_bus_noc_commit;
+ hw_algo->port_halt = msm_bus_noc_port_halt;
+ hw_algo->port_unhalt = msm_bus_noc_port_unhalt;
+ hw_algo->update_bw_reg = msm_bus_noc_update_bw_reg;
+ hw_algo->config_master = NULL;
+ hw_algo->config_limiter = NULL;
+
+ return 0;
+}
+
+/*
+ * msm_bus_noc_set_ops() - Install the NoC QoS ops on a fabric device.
+ * Returns -ENODEV when @bus_dev is NULL, 0 otherwise.
+ */
+int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev)
+{
+ if (!bus_dev)
+ return -ENODEV;
+ else {
+ bus_dev->fabdev->noc_ops.qos_init = msm_bus_noc_qos_init;
+ bus_dev->fabdev->noc_ops.set_bw = msm_bus_noc_set_bw;
+ bus_dev->fabdev->noc_ops.limit_mport = NULL;
+ bus_dev->fabdev->noc_ops.update_bw_reg =
+ msm_bus_noc_update_bw_reg;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(msm_bus_noc_set_ops);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_noc.h b/drivers/soc/qcom/msm_bus/msm_bus_noc.h
new file mode 100644
index 0000000000000..3995f63e79e3e
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_noc.h
@@ -0,0 +1,76 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Guard renamed from _ARCH_ARM_MACH_MSM_BUS_BIMC_H: that name belongs to
+ * the BIMC header, so a file including both msm_bus_bimc.h and this
+ * header would silently drop one of them. The closing #endif comment
+ * already named the NOC guard.
+ */
+#ifndef _ARCH_ARM_MACH_MSM_BUS_NOC_H
+#define _ARCH_ARM_MACH_MSM_BUS_NOC_H
+
+/* QoS arbitration modes supported by NOC QoS ports. */
+enum msm_bus_noc_qos_mode_type {
+ NOC_QOS_MODE_FIXED = 0,
+ NOC_QOS_MODE_LIMITER,
+ NOC_QOS_MODE_BYPASS,
+ NOC_QOS_MODE_REGULATOR,
+ NOC_QOS_MODE_MAX,
+};
+
+/* Bit-mask encoding of the modes above, used as a permission mask. */
+enum msm_bus_noc_qos_mode_perm {
+ NOC_QOS_PERM_MODE_FIXED = (1 << NOC_QOS_MODE_FIXED),
+ NOC_QOS_PERM_MODE_LIMITER = (1 << NOC_QOS_MODE_LIMITER),
+ NOC_QOS_PERM_MODE_BYPASS = (1 << NOC_QOS_MODE_BYPASS),
+ NOC_QOS_PERM_MODE_REGULATOR = (1 << NOC_QOS_MODE_REGULATOR),
+};
+
+#define NOC_QOS_MODES_ALL_PERM (NOC_QOS_PERM_MODE_FIXED | \
+ NOC_QOS_PERM_MODE_LIMITER | NOC_QOS_PERM_MODE_BYPASS | \
+ NOC_QOS_PERM_MODE_REGULATOR)
+
+struct msm_bus_noc_commit {
+ struct msm_bus_node_hw_info *mas;
+ struct msm_bus_node_hw_info *slv;
+};
+
+struct msm_bus_noc_info {
+ void __iomem *base;
+ uint32_t base_addr;
+ uint32_t nmasters;
+ uint32_t nqos_masters;
+ uint32_t nslaves;
+ uint32_t qos_freq; /* QOS Clock in KHz */
+ uint32_t qos_baseoffset;
+ uint32_t qos_delta;
+ uint32_t *mas_modes;
+ struct msm_bus_noc_commit cdata[NUM_CTX];
+};
+
+struct msm_bus_noc_qos_priority {
+ uint32_t high_prio;
+ uint32_t low_prio;
+ uint32_t read_prio;
+ uint32_t write_prio;
+ uint32_t p1;
+ uint32_t p0;
+};
+
+struct msm_bus_noc_qos_bw {
+ uint64_t bw; /* Bandwidth in bytes per second */
+ uint32_t ws; /* Window size in nano seconds */
+};
+
+void msm_bus_noc_init(struct msm_bus_noc_info *ninfo);
+uint8_t msm_bus_noc_get_qos_mode(void __iomem *base, uint32_t qos_off,
+ uint32_t mport, uint32_t qos_delta, uint32_t mode, uint32_t perm_mode);
+void msm_bus_noc_get_qos_priority(void __iomem *base, uint32_t qos_off,
+ uint32_t mport, uint32_t qos_delta,
+ struct msm_bus_noc_qos_priority *qprio);
+void msm_bus_noc_get_qos_bw(void __iomem *base, uint32_t qos_off,
+ uint32_t qos_freq, uint32_t mport, uint32_t qos_delta,
+ uint8_t perm_mode, struct msm_bus_noc_qos_bw *qbw);
+#endif /*_ARCH_ARM_MACH_MSM_BUS_NOC_H */
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_of.c b/drivers/soc/qcom/msm_bus/msm_bus_of.c
new file mode 100644
index 0000000000000..b625a6c8336eb
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_of.c
@@ -0,0 +1,703 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include "msm_bus_core.h"
+
+/*
+ * DT string tables for "qcom,hw-sel" / "qcom,mode"-style properties.
+ * Index order is significant: get_num() returns the array index as the
+ * numeric selector value. Each table is NULL-terminated (get_num()'s
+ * sentinel).
+ */
+static const char * const hw_sel_name[] = {"RPM", "NoC", "BIMC", NULL};
+static const char * const mode_sel_name[] = {"Fixed", "Limiter", "Bypass",
+ "Regulator", NULL};
+
+/*
+ * get_num() - Map @name to its index in the NULL-terminated table @str.
+ *
+ * Returns the matching index, or -EINVAL (after logging) when the name
+ * is not present. Note the do/while shape: str[0] is compared before the
+ * sentinel test, so the table must contain at least one non-NULL entry.
+ */
+static int get_num(const char *const str[], const char *name)
+{
+ int i = 0;
+
+ do {
+ if (!strcmp(name, str[i]))
+ return i;
+
+ i++;
+ } while (str[i] != NULL);
+
+ pr_err("Error: string %s not found\n", name);
+ return -EINVAL;
+}
+
+/*
+ * get_pdata() - Build msm_bus_scale_pdata from a client's DT node.
+ * @pdev: platform device owning the devm allocations.
+ * @of_node: node carrying the qcom,msm-bus,* scaling properties.
+ *
+ * Parses name, usecase count, path count and the flattened
+ * "qcom,msm-bus,vectors-KBps" array (4 cells per path: src, dst, ab, ib;
+ * ab/ib are converted from KB/s to B/s). Returns the populated pdata, or
+ * NULL on any parse/allocation failure.
+ *
+ * All memory is devm-managed. The original error path released it with
+ * plain kfree(), which is invalid for devm_kzalloc() memory (it corrupts
+ * the devres list); use devm_kfree() instead, and release on every error
+ * path so failed parses do not pin memory until device unbind.
+ */
+static struct msm_bus_scale_pdata *get_pdata(struct platform_device *pdev,
+ struct device_node *of_node)
+{
+ struct msm_bus_scale_pdata *pdata = NULL;
+ struct msm_bus_paths *usecase = NULL;
+ int i = 0, j, ret, num_usecases = 0, num_paths, len;
+ const uint32_t *vec_arr = NULL;
+
+ if (!pdev) {
+ pr_err("Error: Null Platform device\n");
+ return NULL;
+ }
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(struct msm_bus_scale_pdata),
+ GFP_KERNEL);
+ if (!pdata) {
+ pr_err("Error: Memory allocation for pdata failed\n");
+ goto err;
+ }
+
+ ret = of_property_read_string(of_node, "qcom,msm-bus,name",
+ &pdata->name);
+ if (ret) {
+ pr_err("Error: Client name not found\n");
+ goto err;
+ }
+
+ ret = of_property_read_u32(of_node, "qcom,msm-bus,num-cases",
+ &num_usecases);
+ if (ret) {
+ pr_err("Error: num-usecases not found\n");
+ goto err;
+ }
+
+ pdata->num_usecases = num_usecases;
+
+ if (of_property_read_bool(of_node, "qcom,msm-bus,active-only"))
+ pdata->active_only = 1;
+ else {
+ pr_debug("active_only flag absent.\n");
+ pr_debug("Using dual context by default\n");
+ }
+
+ usecase = devm_kzalloc(&pdev->dev, (sizeof(struct msm_bus_paths) *
+ pdata->num_usecases), GFP_KERNEL);
+ if (!usecase) {
+ pr_err("Error: Memory allocation for paths failed\n");
+ goto err;
+ }
+
+ ret = of_property_read_u32(of_node, "qcom,msm-bus,num-paths",
+ &num_paths);
+ if (ret) {
+ pr_err("Error: num_paths not found\n");
+ goto err;
+ }
+
+ vec_arr = of_get_property(of_node, "qcom,msm-bus,vectors-KBps", &len);
+ if (vec_arr == NULL) {
+ pr_err("Error: Vector array not found\n");
+ goto err;
+ }
+
+ /* 4 u32 cells per path: src, dst, ab (KB/s), ib (KB/s) */
+ if (len != num_usecases * num_paths * sizeof(uint32_t) * 4) {
+ pr_err("Error: Length-error on getting vectors\n");
+ goto err;
+ }
+
+ for (i = 0; i < num_usecases; i++) {
+ usecase[i].num_paths = num_paths;
+ usecase[i].vectors = devm_kzalloc(&pdev->dev, num_paths *
+ sizeof(struct msm_bus_vectors), GFP_KERNEL);
+ if (!usecase[i].vectors) {
+ pr_err("Error: Mem alloc failure in vectors\n");
+ goto err;
+ }
+
+ for (j = 0; j < num_paths; j++) {
+ int index = ((i * num_paths) + j) * 4;
+ usecase[i].vectors[j].src = be32_to_cpu(vec_arr[index]);
+ usecase[i].vectors[j].dst =
+ be32_to_cpu(vec_arr[index + 1]);
+ usecase[i].vectors[j].ab = (uint64_t)
+ KBTOB(be32_to_cpu(vec_arr[index + 2]));
+ usecase[i].vectors[j].ib = (uint64_t)
+ KBTOB(be32_to_cpu(vec_arr[index + 3]));
+ }
+ }
+
+ pdata->usecase = usecase;
+ return pdata;
+err:
+ /* devm memory must be released with devm_kfree(), never kfree(). */
+ for (; i > 0; i--)
+ devm_kfree(&pdev->dev, usecase[i-1].vectors);
+
+ if (usecase)
+ devm_kfree(&pdev->dev, usecase);
+ if (pdata)
+ devm_kfree(&pdev->dev, pdata);
+
+ return NULL;
+}
+
+/**
+ * msm_bus_cl_get_pdata() - Generate bus client data from device tree
+ * provided by clients.
+ *
+ * @pdev: Platform device whose own of_node carries the bus-scaling
+ * properties to extract.
+ *
+ * The function returns a valid pointer to the allocated bus-scale-pdata
+ * if the vectors were correctly read from the client's device node.
+ * Any error in reading or parsing the device node will return NULL
+ * to the caller. The returned memory is devm-managed against @pdev.
+ */
+struct msm_bus_scale_pdata *msm_bus_cl_get_pdata(struct platform_device *pdev)
+{
+ struct device_node *of_node;
+ struct msm_bus_scale_pdata *pdata = NULL;
+
+ if (!pdev) {
+ pr_err("Error: Null Platform device\n");
+ return NULL;
+ }
+
+ of_node = pdev->dev.of_node;
+ pdata = get_pdata(pdev, of_node);
+ if (!pdata) {
+ pr_err("client has to provide missing entry for successful registration\n");
+ return NULL;
+ }
+
+ return pdata;
+}
+EXPORT_SYMBOL(msm_bus_cl_get_pdata);
+
+/**
+ * msm_bus_pdata_from_node() - Generate bus client data from a device tree
+ * node provided by clients. This function should be used when a client
+ * driver needs to register multiple bus-clients from a single device-tree
+ * node associated with the platform-device.
+ * (Comment previously named this msm_bus_cl_pdata_from_node; corrected to
+ * match the actual symbol.)
+ *
+ * @of_node: The subnode containing information about the bus scaling
+ * data
+ *
+ * @pdev: Platform device associated with the device-tree node
+ *
+ * The function returns a valid pointer to the allocated bus-scale-pdata
+ * if the vectors were correctly read from the client's device node.
+ * Any error in reading or parsing the device node will return NULL
+ * to the caller.
+ */
+struct msm_bus_scale_pdata *msm_bus_pdata_from_node(
+ struct platform_device *pdev, struct device_node *of_node)
+{
+ struct msm_bus_scale_pdata *pdata = NULL;
+
+ if (!pdev) {
+ pr_err("Error: Null Platform device\n");
+ return NULL;
+ }
+
+ if (!of_node) {
+ pr_err("Error: Null of_node passed to bus driver\n");
+ return NULL;
+ }
+
+ pdata = get_pdata(pdev, of_node);
+ if (!pdata) {
+ pr_err("client has to provide missing entry for successful registration\n");
+ return NULL;
+ }
+
+ return pdata;
+}
+EXPORT_SYMBOL(msm_bus_pdata_from_node);
+
+/**
+ * msm_bus_cl_clear_pdata() - Release client pdata built from device-tree.
+ * @pdata: pdata previously returned by msm_bus_cl_get_pdata() or
+ * msm_bus_pdata_from_node().
+ *
+ * NOTE(review): get_pdata() allocates all of this memory with
+ * devm_kzalloc(), but this routine frees it with plain kfree().
+ * kfree() on devm-managed memory corrupts the devres list; this
+ * function would need the owning struct device to call devm_kfree()
+ * correctly — confirm the intended allocator with the callers.
+ */
+void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata)
+{
+ int i;
+
+ for (i = 0; i < pdata->num_usecases; i++)
+ kfree(pdata->usecase[i].vectors);
+
+ kfree(pdata->usecase);
+ kfree(pdata);
+}
+EXPORT_SYMBOL(msm_bus_cl_clear_pdata);
+
+/*
+ * get_arr() - Read an integer-array DT property into devm memory.
+ * @pdev: device owning the allocation.
+ * @node: node to read @prop from.
+ * @nports: out: number of u32 entries; set to 0 when the property is
+ * absent or empty (NULL is returned in those cases).
+ *
+ * Returns the devm-allocated array, or NULL on absence/alloc/read
+ * failure. On a read failure the buffer is devm_kfree()d first.
+ */
+static int *get_arr(struct platform_device *pdev,
+ const struct device_node *node, const char *prop,
+ int *nports)
+{
+ int size = 0, ret;
+ int *arr = NULL;
+
+ if (of_get_property(node, prop, &size)) {
+ *nports = size / sizeof(int);
+ } else {
+ pr_debug("Property %s not available\n", prop);
+ *nports = 0;
+ return NULL;
+ }
+
+ if (!size) {
+ *nports = 0;
+ return NULL;
+ }
+
+ arr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(arr)) {
+ pr_err("Error: Failed to alloc mem for %s\n", prop);
+ return NULL;
+ }
+
+ ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports);
+ if (ret) {
+ pr_err("Error in reading property: %s\n", prop);
+ goto err;
+ }
+
+ return arr;
+err:
+ devm_kfree(&pdev->dev, arr);
+ return NULL;
+}
+
+/*
+ * get_th_params() - Read a u32-array DT property and convert each entry
+ * from KB/s to bytes/s, returning a devm-allocated u64 array.
+ * @nports: out: entry count; 0 (and NULL return) when @prop is absent
+ * or empty.
+ *
+ * Uses a temporary kzalloc buffer for the raw u32 read; the temp buffer
+ * is kfree()d on both success and failure paths. On read failure the
+ * devm result array is released and NULL returned.
+ * NOTE(review): if the temp kzalloc fails, ret_arr is left to devres to
+ * reclaim at unbind rather than freed here — confirm this is acceptable.
+ */
+static u64 *get_th_params(struct platform_device *pdev,
+ const struct device_node *node, const char *prop,
+ int *nports)
+{
+ int size = 0, ret;
+ u64 *ret_arr = NULL;
+ int *arr = NULL;
+ int i;
+
+ if (of_get_property(node, prop, &size)) {
+ *nports = size / sizeof(int);
+ } else {
+ pr_debug("Property %s not available\n", prop);
+ *nports = 0;
+ return NULL;
+ }
+
+ if (!size) {
+ *nports = 0;
+ return NULL;
+ }
+
+ ret_arr = devm_kzalloc(&pdev->dev, (*nports * sizeof(u64)),
+ GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(ret_arr)) {
+ pr_err("Error: Failed to alloc mem for ret arr %s\n", prop);
+ return NULL;
+ }
+
+ arr = kzalloc(size, GFP_KERNEL);
+ if ((ZERO_OR_NULL_PTR(arr))) {
+ pr_err("Error: Failed to alloc temp mem for %s\n", prop);
+ return NULL;
+ }
+
+ ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports);
+ if (ret) {
+ pr_err("Error in reading property: %s\n", prop);
+ goto err;
+ }
+
+ for (i = 0; i < *nports; i++)
+ ret_arr[i] = (uint64_t)KBTOB(arr[i]);
+
+ MSM_BUS_DBG("%s: num entries %d prop %s", __func__, *nports, prop);
+
+ for (i = 0; i < *nports; i++)
+ MSM_BUS_DBG("Th %d val %llu", i, ret_arr[i]);
+
+ kfree(arr);
+ return ret_arr;
+err:
+ kfree(arr);
+ devm_kfree(&pdev->dev, ret_arr);
+ return NULL;
+}
+
+/*
+ * get_nodes() - Parse every child of @of_node into an array of
+ * msm_bus_node_info, populating @pdata->len and the running
+ * nmasters/nslaves totals.
+ *
+ * Each child supplies identity (label, cell-id), port arrays (masterp,
+ * slavep, qport, tier), QoS mode/threshold strings mapped through
+ * mode_sel_name/hw_sel_name, BIMC dual-config thresholds, priorities,
+ * and per-context clock names. Returns the devm-allocated info array,
+ * or NULL on any parse failure (already-parsed devm memory is left to
+ * devres).
+ */
+static struct msm_bus_node_info *get_nodes(struct device_node *of_node,
+ struct platform_device *pdev,
+ struct msm_bus_fabric_registration *pdata)
+{
+ struct msm_bus_node_info *info;
+ struct device_node *child_node = NULL;
+ int i = 0, ret;
+ int num_bw = 0;
+ u32 temp;
+
+ /* First pass: count children to size the info array. */
+ for_each_child_of_node(of_node, child_node) {
+ i++;
+ }
+
+ pdata->len = i;
+ info = (struct msm_bus_node_info *)
+ devm_kzalloc(&pdev->dev, sizeof(struct msm_bus_node_info) *
+ pdata->len, GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(info)) {
+ pr_err("Failed to alloc memory for nodes: %d\n", pdata->len);
+ goto err;
+ }
+
+ i = 0;
+ child_node = NULL;
+ /*
+ * NOTE(review): the goto err exits below leave the loop without
+ * dropping the reference for_each_child_of_node() holds on
+ * child_node — confirm against kernel OF refcounting rules.
+ */
+ for_each_child_of_node(of_node, child_node) {
+ const char *sel_str;
+
+ ret = of_property_read_string(child_node, "label",
+ &info[i].name);
+ if (ret)
+ pr_err("Error reading node label\n");
+
+ ret = of_property_read_u32(child_node, "cell-id", &info[i].id);
+ if (ret) {
+ pr_err("Error reading node id\n");
+ goto err;
+ }
+
+ if (of_property_read_bool(child_node, "qcom,gateway"))
+ info[i].gateway = 1;
+
+ of_property_read_u32(child_node, "qcom,mas-hw-id",
+ &info[i].mas_hw_id);
+
+ of_property_read_u32(child_node, "qcom,slv-hw-id",
+ &info[i].slv_hw_id);
+ info[i].masterp = get_arr(pdev, child_node,
+ "qcom,masterp", &info[i].num_mports);
+ /* No need to store number of qports */
+ info[i].qport = get_arr(pdev, child_node,
+ "qcom,qport", &ret);
+ pdata->nmasters += info[i].num_mports;
+
+
+ info[i].slavep = get_arr(pdev, child_node,
+ "qcom,slavep", &info[i].num_sports);
+ pdata->nslaves += info[i].num_sports;
+
+
+ info[i].tier = get_arr(pdev, child_node,
+ "qcom,tier", &info[i].num_tiers);
+
+ if (of_property_read_bool(child_node, "qcom,ahb"))
+ info[i].ahb = 1;
+
+ ret = of_property_read_string(child_node, "qcom,hw-sel",
+ &sel_str);
+ if (ret)
+ info[i].hw_sel = 0;
+ else {
+ ret = get_num(hw_sel_name, sel_str);
+ if (ret < 0) {
+ pr_err("Invalid hw-sel\n");
+ goto err;
+ }
+
+ info[i].hw_sel = ret;
+ }
+
+ of_property_read_u32(child_node, "qcom,buswidth",
+ &info[i].buswidth);
+ of_property_read_u32(child_node, "qcom,ws", &info[i].ws);
+
+ info[i].dual_conf =
+ of_property_read_bool(child_node, "qcom,dual-conf");
+
+
+ info[i].th = get_th_params(pdev, child_node, "qcom,thresh",
+ &info[i].num_thresh);
+
+ info[i].bimc_bw = get_th_params(pdev, child_node,
+ "qcom,bimc,bw", &num_bw);
+
+ /* Dual-config requires one bandwidth entry per threshold. */
+ if (num_bw != info[i].num_thresh) {
+ pr_err("%s:num_bw %d must equal num_thresh %d",
+ __func__, num_bw, info[i].num_thresh);
+ pr_err("%s:Err setting up dual conf for %s",
+ __func__, info[i].name);
+ goto err;
+ }
+
+ of_property_read_u32(child_node, "qcom,bimc,gp",
+ &info[i].bimc_gp);
+ of_property_read_u32(child_node, "qcom,bimc,thmp",
+ &info[i].bimc_thmp);
+
+ ret = of_property_read_string(child_node, "qcom,mode-thresh",
+ &sel_str);
+ if (ret)
+ info[i].mode_thresh = 0;
+ else {
+ ret = get_num(mode_sel_name, sel_str);
+ if (ret < 0) {
+ pr_err("Unknown mode :%s\n", sel_str);
+ goto err;
+ }
+
+ info[i].mode_thresh = ret;
+ MSM_BUS_DBG("AXI: THreshold mode set: %d\n",
+ info[i].mode_thresh);
+ }
+
+ ret = of_property_read_string(child_node, "qcom,mode",
+ &sel_str);
+
+ if (ret)
+ info[i].mode = 0;
+ else {
+ ret = get_num(mode_sel_name, sel_str);
+ if (ret < 0) {
+ pr_err("Unknown mode :%s\n", sel_str);
+ goto err;
+ }
+
+ info[i].mode = ret;
+ }
+
+ info[i].nr_lim =
+ of_property_read_bool(child_node, "qcom,nr-lim");
+
+ ret = of_property_read_u32(child_node, "qcom,ff",
+ &info[i].ff);
+ if (ret) {
+ pr_debug("fudge factor not present %d", info[i].id);
+ info[i].ff = 0;
+ }
+
+ ret = of_property_read_u32(child_node, "qcom,floor-bw",
+ &temp);
+ if (ret) {
+ pr_debug("fabdev floor bw not present %d", info[i].id);
+ info[i].floor_bw = 0;
+ } else {
+ info[i].floor_bw = KBTOB(temp);
+ }
+
+ info[i].rt_mas =
+ of_property_read_bool(child_node, "qcom,rt-mas");
+
+ ret = of_property_read_string(child_node, "qcom,perm-mode",
+ &sel_str);
+ if (ret)
+ info[i].perm_mode = 0;
+ else {
+ ret = get_num(mode_sel_name, sel_str);
+ if (ret < 0)
+ goto err;
+
+ /* perm_mode is stored as a bit-mask of the mode index. */
+ info[i].perm_mode = 1 << ret;
+ }
+
+ of_property_read_u32(child_node, "qcom,prio-lvl",
+ &info[i].prio_lvl);
+ of_property_read_u32(child_node, "qcom,prio-rd",
+ &info[i].prio_rd);
+ of_property_read_u32(child_node, "qcom,prio-wr",
+ &info[i].prio_wr);
+ of_property_read_u32(child_node, "qcom,prio0", &info[i].prio0);
+ of_property_read_u32(child_node, "qcom,prio1", &info[i].prio1);
+ ret = of_property_read_string(child_node, "qcom,slaveclk-dual",
+ &info[i].slaveclk[DUAL_CTX]);
+ if (!ret)
+ pr_debug("Got slaveclk_dual: %s\n",
+ info[i].slaveclk[DUAL_CTX]);
+ else
+ info[i].slaveclk[DUAL_CTX] = NULL;
+
+ ret = of_property_read_string(child_node,
+ "qcom,slaveclk-active", &info[i].slaveclk[ACTIVE_CTX]);
+ if (!ret)
+ pr_debug("Got slaveclk_active\n");
+ else
+ info[i].slaveclk[ACTIVE_CTX] = NULL;
+
+ ret = of_property_read_string(child_node, "qcom,memclk-dual",
+ &info[i].memclk[DUAL_CTX]);
+ if (!ret)
+ pr_debug("Got memclk_dual\n");
+ else
+ info[i].memclk[DUAL_CTX] = NULL;
+
+ ret = of_property_read_string(child_node, "qcom,memclk-active",
+ &info[i].memclk[ACTIVE_CTX]);
+ if (!ret)
+ pr_debug("Got memclk_active\n");
+ else
+ info[i].memclk[ACTIVE_CTX] = NULL;
+
+ ret = of_property_read_string(child_node, "qcom,iface-clk-node",
+ &info[i].iface_clk_node);
+ if (!ret)
+ pr_debug("Got iface_clk_node\n");
+ else
+ info[i].iface_clk_node = NULL;
+
+ pr_debug("Node name: %s\n", info[i].name);
+ /*
+ * NOTE(review): for_each_child_of_node() manages the child
+ * reference itself (the next iteration puts it); this extra
+ * of_node_put() looks like a double put — confirm.
+ */
+ of_node_put(child_node);
+ i++;
+ }
+
+ pr_debug("Bus %d added: %d masters\n", pdata->id, pdata->nmasters);
+ pr_debug("Bus %d added: %d slaves\n", pdata->id, pdata->nslaves);
+ return info;
+err:
+ return NULL;
+}
+
+/*
+ * msm_bus_of_get_nfab() - Read the optional "qcom,nfab" bus count from
+ * the device's DT node and forward it to the board layer.
+ *
+ * If the property is absent, nfab stays 0 and is still passed to
+ * msm_bus_board_set_nfab(). A NULL @pdev is logged and ignored.
+ */
+void msm_bus_of_get_nfab(struct platform_device *pdev,
+ struct msm_bus_fabric_registration *pdata)
+{
+ struct device_node *of_node;
+ int ret, nfab = 0;
+
+ if (!pdev) {
+ pr_err("Error: Null platform device\n");
+ return;
+ }
+
+ of_node = pdev->dev.of_node;
+ ret = of_property_read_u32(of_node, "qcom,nfab",
+ &nfab);
+ if (!ret)
+ pr_debug("Fab_of: Read number of buses: %u\n", nfab);
+
+ msm_bus_board_set_nfab(pdata, nfab);
+}
+
+/*
+ * msm_bus_of_get_fab_data() - Build fabric registration data from the
+ * platform device's DT node.
+ *
+ * Reads fabric identity (label, cell-id), per-context fabric clock
+ * names, tiered-slave count, QoS base/offset/frequency, hw-sel mapped
+ * through hw_sel_name, RPM/virtual flags, NR-limit threshold and
+ * efficiency factor, then parses all child nodes via get_nodes().
+ * Returns the devm-allocated pdata, or NULL on a parse failure
+ * (devm memory is reclaimed at device unbind).
+ *
+ * Fix: the error message for a failed "cell-id" read previously said
+ * "num-usecases not found" — a copy/paste from the client-pdata parser.
+ */
+struct msm_bus_fabric_registration
+ *msm_bus_of_get_fab_data(struct platform_device *pdev)
+{
+ struct device_node *of_node;
+ struct msm_bus_fabric_registration *pdata;
+ bool mem_err = false;
+ int ret = 0;
+ const char *sel_str;
+ u32 temp;
+
+ if (!pdev) {
+ pr_err("Error: Null platform device\n");
+ return NULL;
+ }
+
+ of_node = pdev->dev.of_node;
+ pdata = devm_kzalloc(&pdev->dev,
+ sizeof(struct msm_bus_fabric_registration), GFP_KERNEL);
+ if (!pdata) {
+ pr_err("Error: Memory allocation for pdata failed\n");
+ mem_err = true;
+ goto err;
+ }
+
+ ret = of_property_read_string(of_node, "label", &pdata->name);
+ if (ret) {
+ pr_err("Error: label not found\n");
+ goto err;
+ }
+ pr_debug("Fab_of: Read name: %s\n", pdata->name);
+
+ ret = of_property_read_u32(of_node, "cell-id",
+ &pdata->id);
+ if (ret) {
+ pr_err("Error: cell-id not found\n");
+ goto err;
+ }
+ pr_debug("Fab_of: Read id: %u\n", pdata->id);
+
+ if (of_property_read_bool(of_node, "qcom,ahb"))
+ pdata->ahb = 1;
+
+ ret = of_property_read_string(of_node, "qcom,fabclk-dual",
+ &pdata->fabclk[DUAL_CTX]);
+ if (ret) {
+ pr_debug("fabclk_dual not available\n");
+ pdata->fabclk[DUAL_CTX] = NULL;
+ } else
+ pr_debug("Fab_of: Read clk dual ctx: %s\n",
+ pdata->fabclk[DUAL_CTX]);
+ ret = of_property_read_string(of_node, "qcom,fabclk-active",
+ &pdata->fabclk[ACTIVE_CTX]);
+ if (ret) {
+ pr_debug("Error: fabclk_active not available\n");
+ pdata->fabclk[ACTIVE_CTX] = NULL;
+ } else
+ pr_debug("Fab_of: Read clk act ctx: %s\n",
+ pdata->fabclk[ACTIVE_CTX]);
+
+ ret = of_property_read_u32(of_node, "qcom,ntieredslaves",
+ &pdata->ntieredslaves);
+ if (ret) {
+ pr_err("Error: ntieredslaves not found\n");
+ goto err;
+ }
+
+ ret = of_property_read_u32(of_node, "qcom,qos-freq", &pdata->qos_freq);
+ if (ret)
+ pr_debug("qos_freq not available\n");
+
+ ret = of_property_read_string(of_node, "qcom,hw-sel", &sel_str);
+ if (ret) {
+ pr_err("Error: hw_sel not found\n");
+ goto err;
+ } else {
+ ret = get_num(hw_sel_name, sel_str);
+ if (ret < 0)
+ goto err;
+
+ pdata->hw_sel = ret;
+ }
+
+ if (of_property_read_bool(of_node, "qcom,virt"))
+ pdata->virt = true;
+
+ ret = of_property_read_u32(of_node, "qcom,qos-baseoffset",
+ &pdata->qos_baseoffset);
+ if (ret)
+ pr_debug("%s:qos_baseoffset not available\n", __func__);
+
+ ret = of_property_read_u32(of_node, "qcom,qos-delta",
+ &pdata->qos_delta);
+ if (ret)
+ pr_debug("%s:qos_delta not available\n", __func__);
+
+ if (of_property_read_bool(of_node, "qcom,rpm-en"))
+ pdata->rpm_enabled = 1;
+
+ ret = of_property_read_u32(of_node, "qcom,nr-lim-thresh",
+ &temp);
+
+ if (ret) {
+ pr_err("nr-lim threshold not specified");
+ pdata->nr_lim_thresh = 0;
+ } else {
+ pdata->nr_lim_thresh = KBTOB(temp);
+ }
+
+ ret = of_property_read_u32(of_node, "qcom,eff-fact",
+ &pdata->eff_fact);
+ if (ret) {
+ pr_err("Fab eff-factor not present");
+ pdata->eff_fact = 0;
+ }
+
+ pdata->info = get_nodes(of_node, pdev, pdata);
+ return pdata;
+err:
+ return NULL;
+}
+EXPORT_SYMBOL(msm_bus_of_get_fab_data);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c
new file mode 100644
index 0000000000000..109febcaa68cc
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c
@@ -0,0 +1,642 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm_bus_rules.h>
+#include "msm_bus_core.h"
+#include "msm_bus_adhoc.h"
+
+#define DEFAULT_QOS_FREQ 19200
+#define DEFAULT_UTIL_FACT 100
+#define DEFAULT_VRAIL_COMP 100
+
<br/>
+/*
+ * get_qos_mode() - Map a "qcom,qos-mode" DT string to its index in
+ * qos_names ("fixed"=0, "limiter"=1, "bypass"=2, "regulator"=3).
+ *
+ * Returns the index, or -1 when @qos_mode is NULL or unrecognized.
+ * NOTE(review): the error log says "using Bypass" but the function
+ * still returns -1, not the bypass index — presumably callers treat
+ * -1 as bypass; confirm downstream handling.
+ */
+static int get_qos_mode(struct platform_device *pdev,
+ struct device_node *node, const char *qos_mode)
+{
+ const char *qos_names[] = {"fixed", "limiter", "bypass", "regulator"};
+ int i = 0;
+ int ret = -1;
+
+ if (!qos_mode)
+ goto exit_get_qos_mode;
+
+ for (i = 0; i < ARRAY_SIZE(qos_names); i++) {
+ if (!strcmp(qos_mode, qos_names[i]))
+ break;
+ }
+ if (i == ARRAY_SIZE(qos_names))
+ dev_err(&pdev->dev, "Cannot match mode qos %s using Bypass",
+ qos_mode);
+ else
+ ret = i;
+
+exit_get_qos_mode:
+ return ret;
+}
+
+/*
+ * get_arr() - Ad-hoc variant: read an integer-array DT property into
+ * devm memory.
+ * @nports: out: number of u32 entries; 0 (with NULL return) when the
+ * property is absent.
+ *
+ * Returns the devm buffer, or NULL on absence/alloc/read failure (the
+ * buffer is devm_kfree()d on a failed read). The size>0 guard on the
+ * ZERO_OR_NULL_PTR check permits a zero-length property to pass through
+ * with a ZERO_SIZE_PTR result.
+ */
+static int *get_arr(struct platform_device *pdev,
+ struct device_node *node, const char *prop,
+ int *nports)
+{
+ int size = 0, ret;
+ int *arr = NULL;
+
+ if (of_get_property(node, prop, &size)) {
+ *nports = size / sizeof(int);
+ } else {
+ dev_dbg(&pdev->dev, "Property %s not available\n", prop);
+ *nports = 0;
+ return NULL;
+ }
+
+ arr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if ((size > 0) && ZERO_OR_NULL_PTR(arr)) {
+ dev_err(&pdev->dev, "Error: Failed to alloc mem for %s\n",
+ prop);
+ return NULL;
+ }
+
+ ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports);
+ if (ret) {
+ dev_err(&pdev->dev, "Error in reading property: %s\n", prop);
+ goto arr_err;
+ }
+
+ return arr;
+arr_err:
+ devm_kfree(&pdev->dev, arr);
+ return NULL;
+}
+
+/*
+ * get_fab_device_info() - Build per-fabric device info from a DT node.
+ *
+ * Resolves the QoS register region via "qcom,base-name" against the
+ * platform device's named MEM resources, then reads base/qos offsets,
+ * bus type (required), QoS clock frequency, utilization factor and
+ * voltage-rail compensation — the last three falling back to
+ * DEFAULT_QOS_FREQ / DEFAULT_UTIL_FACT / DEFAULT_VRAIL_COMP.
+ * Returns the devm-allocated struct, or NULL on failure (the struct is
+ * devm_kfree()d first).
+ */
+static struct msm_bus_fab_device_type *get_fab_device_info(
+ struct device_node *dev_node,
+ struct platform_device *pdev)
+{
+ struct msm_bus_fab_device_type *fab_dev;
+ unsigned int ret;
+ struct resource *res;
+ const char *base_name;
+
+ fab_dev = devm_kzalloc(&pdev->dev,
+ sizeof(struct msm_bus_fab_device_type),
+ GFP_KERNEL);
+ if (!fab_dev) {
+ dev_err(&pdev->dev,
+ "Error: Unable to allocate memory for fab_dev\n");
+ return NULL;
+ }
+
+ ret = of_property_read_string(dev_node, "qcom,base-name", &base_name);
+ if (ret) {
+ dev_err(&pdev->dev, "Error: Unable to get base address name\n");
+ goto fab_dev_err;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, base_name);
+ if (!res) {
+ dev_err(&pdev->dev, "Error getting qos base addr %s\n",
+ base_name);
+ goto fab_dev_err;
+ }
+ fab_dev->pqos_base = res->start;
+ fab_dev->qos_range = resource_size(res);
+ fab_dev->bypass_qos_prg = of_property_read_bool(dev_node,
+ "qcom,bypass-qos-prg");
+
+ ret = of_property_read_u32(dev_node, "qcom,base-offset",
+ &fab_dev->base_offset);
+ if (ret)
+ dev_dbg(&pdev->dev, "Bus base offset is missing\n");
+
+ ret = of_property_read_u32(dev_node, "qcom,qos-off",
+ &fab_dev->qos_off);
+ if (ret)
+ dev_dbg(&pdev->dev, "Bus qos off is missing\n");
+
+
+ ret = of_property_read_u32(dev_node, "qcom,bus-type",
+ &fab_dev->bus_type);
+ if (ret) {
+ dev_warn(&pdev->dev, "Bus type is missing\n");
+ goto fab_dev_err;
+ }
+
+ ret = of_property_read_u32(dev_node, "qcom,qos-freq",
+ &fab_dev->qos_freq);
+ if (ret) {
+ dev_dbg(&pdev->dev, "Bus qos freq is missing\n");
+ fab_dev->qos_freq = DEFAULT_QOS_FREQ;
+ }
+
+ ret = of_property_read_u32(dev_node, "qcom,util-fact",
+ &fab_dev->util_fact);
+ if (ret) {
+ dev_info(&pdev->dev, "Util-fact is missing, default to %d\n",
+ DEFAULT_UTIL_FACT);
+ fab_dev->util_fact = DEFAULT_UTIL_FACT;
+ }
+
+ ret = of_property_read_u32(dev_node, "qcom,vrail-comp",
+ &fab_dev->vrail_comp);
+ if (ret) {
+ dev_info(&pdev->dev, "Vrail-comp is missing, default to %d\n",
+ DEFAULT_VRAIL_COMP);
+ fab_dev->vrail_comp = DEFAULT_VRAIL_COMP;
+ }
+
+ return fab_dev;
+
+fab_dev_err:
+ devm_kfree(&pdev->dev, fab_dev);
+ fab_dev = 0;
+ return NULL;
+}
+
+/*
+ * get_qos_params() - Fill node_info->qos_params from optional DT
+ * properties.
+ *
+ * qos_params.mode is set to -1 when "qcom,qos-mode" is absent or
+ * unmatched (see get_qos_mode()). All priority/GP/thmp/ws reads are
+ * best-effort: absent properties leave the zero-initialized defaults.
+ * "qcom,bw_buffer" is converted from KB/s to bytes/s; absent -> 0.
+ */
+static void get_qos_params(
+ struct device_node * const dev_node,
+ struct platform_device * const pdev,
+ struct msm_bus_node_info_type *node_info)
+{
+ const char *qos_mode = NULL;
+ unsigned int ret;
+ unsigned int temp;
+
+ ret = of_property_read_string(dev_node, "qcom,qos-mode", &qos_mode);
+
+ if (ret)
+ node_info->qos_params.mode = -1;
+ else
+ node_info->qos_params.mode = get_qos_mode(pdev, dev_node,
+ qos_mode);
+
+ of_property_read_u32(dev_node, "qcom,prio-lvl",
+ &node_info->qos_params.prio_lvl);
+
+ of_property_read_u32(dev_node, "qcom,prio1",
+ &node_info->qos_params.prio1);
+
+ of_property_read_u32(dev_node, "qcom,prio0",
+ &node_info->qos_params.prio0);
+
+ of_property_read_u32(dev_node, "qcom,prio-rd",
+ &node_info->qos_params.prio_rd);
+
+ of_property_read_u32(dev_node, "qcom,prio-wr",
+ &node_info->qos_params.prio_wr);
+
+ of_property_read_u32(dev_node, "qcom,gp",
+ &node_info->qos_params.gp);
+
+ of_property_read_u32(dev_node, "qcom,thmp",
+ &node_info->qos_params.thmp);
+
+ of_property_read_u32(dev_node, "qcom,ws",
+ &node_info->qos_params.ws);
+
+ ret = of_property_read_u32(dev_node, "qcom,bw_buffer", &temp);
+
+ if (ret)
+ node_info->qos_params.bw_buffer = 0;
+ else
+ node_info->qos_params.bw_buffer = KBTOB(temp);
+
+}
+
+
+/*
+ * get_node_info_data() - Build a node's info struct from its DT node.
+ *
+ * Requires "cell-id" and "label"; then reads qports, connection and
+ * blacklist phandle arrays (resolved to the target nodes' cell-ids),
+ * the owning bus device id, fab/virt flags, bus width (default 8
+ * bytes), master/slave RPM ids (default -1) and the QoS parameters.
+ * Returns the devm-allocated struct, or NULL on failure (struct is
+ * devm_kfree()d first; arrays filled so far are left to devres).
+ *
+ * NOTE(review): the devm_kzalloc results for connections and
+ * black_listed_connections are not NULL-checked before the loops below
+ * write into them — an allocation failure would deref NULL. Also, the
+ * goto node_info_err paths after of_parse_phandle() skip of_node_put()
+ * on con_node — confirm both against OF refcounting rules.
+ */
+static struct msm_bus_node_info_type *get_node_info_data(
+ struct device_node * const dev_node,
+ struct platform_device * const pdev)
+{
+ struct msm_bus_node_info_type *node_info;
+ unsigned int ret;
+ int size;
+ int i;
+ struct device_node *con_node;
+ struct device_node *bus_dev;
+
+ node_info = devm_kzalloc(&pdev->dev,
+ sizeof(struct msm_bus_node_info_type),
+ GFP_KERNEL);
+ if (!node_info) {
+ dev_err(&pdev->dev,
+ "Error: Unable to allocate memory for node_info\n");
+ return NULL;
+ }
+
+ ret = of_property_read_u32(dev_node, "cell-id", &node_info->id);
+ if (ret) {
+ dev_warn(&pdev->dev, "Bus node is missing cell-id\n");
+ goto node_info_err;
+ }
+ ret = of_property_read_string(dev_node, "label", &node_info->name);
+ if (ret) {
+ dev_warn(&pdev->dev, "Bus node is missing name\n");
+ goto node_info_err;
+ }
+ node_info->qport = get_arr(pdev, dev_node, "qcom,qport",
+ &node_info->num_qports);
+
+ if (of_get_property(dev_node, "qcom,connections", &size)) {
+ node_info->num_connections = size / sizeof(int);
+ node_info->connections = devm_kzalloc(&pdev->dev, size,
+ GFP_KERNEL);
+ } else {
+ node_info->num_connections = 0;
+ node_info->connections = 0;
+ }
+
+ for (i = 0; i < node_info->num_connections; i++) {
+ con_node = of_parse_phandle(dev_node, "qcom,connections", i);
+ if (IS_ERR_OR_NULL(con_node))
+ goto node_info_err;
+
+ if (of_property_read_u32(con_node, "cell-id",
+ &node_info->connections[i]))
+ goto node_info_err;
+ of_node_put(con_node);
+ }
+
+ if (of_get_property(dev_node, "qcom,blacklist", &size)) {
+ node_info->num_blist = size/sizeof(u32);
+ node_info->black_listed_connections = devm_kzalloc(&pdev->dev,
+ size, GFP_KERNEL);
+ } else {
+ node_info->num_blist = 0;
+ node_info->black_listed_connections = 0;
+ }
+
+ for (i = 0; i < node_info->num_blist; i++) {
+ con_node = of_parse_phandle(dev_node, "qcom,blacklist", i);
+ if (IS_ERR_OR_NULL(con_node))
+ goto node_info_err;
+
+ if (of_property_read_u32(con_node, "cell-id",
+ &node_info->black_listed_connections[i]))
+ goto node_info_err;
+ of_node_put(con_node);
+ }
+
+ bus_dev = of_parse_phandle(dev_node, "qcom,bus-dev", 0);
+ if (!IS_ERR_OR_NULL(bus_dev)) {
+ if (of_property_read_u32(bus_dev, "cell-id",
+ &node_info->bus_device_id)) {
+ dev_err(&pdev->dev, "Can't find bus device. Node %d",
+ node_info->id);
+ goto node_info_err;
+ }
+
+ of_node_put(bus_dev);
+ } else
+ dev_dbg(&pdev->dev, "Can't find bdev phandle for %d",
+ node_info->id);
+
+ node_info->is_fab_dev = of_property_read_bool(dev_node, "qcom,fab-dev");
+ node_info->virt_dev = of_property_read_bool(dev_node, "qcom,virt-dev");
+
+ ret = of_property_read_u32(dev_node, "qcom,buswidth",
+ &node_info->buswidth);
+ if (ret) {
+ dev_dbg(&pdev->dev, "Using default 8 bytes %d", node_info->id);
+ node_info->buswidth = 8;
+ }
+
+ ret = of_property_read_u32(dev_node, "qcom,mas-rpm-id",
+ &node_info->mas_rpm_id);
+ if (ret) {
+ dev_dbg(&pdev->dev, "mas rpm id is missing\n");
+ node_info->mas_rpm_id = -1;
+ }
+
+ ret = of_property_read_u32(dev_node, "qcom,slv-rpm-id",
+ &node_info->slv_rpm_id);
+ if (ret) {
+ dev_dbg(&pdev->dev, "slv rpm id is missing\n");
+ node_info->slv_rpm_id = -1;
+ }
+ get_qos_params(dev_node, pdev, node_info);
+
+ return node_info;
+
+node_info_err:
+ devm_kfree(&pdev->dev, node_info);
+ node_info = 0;
+ return NULL;
+}
+
+/*
+ * get_bus_node_device_data() - Populate one bus node device from DT.
+ *
+ * Parses node info, the ap-owned flag, and — for fabric devices —
+ * the fabric register info (unless virtual), bus/bus_a/qos clocks and
+ * coresight setup; non-fabric nodes get only qos/node clocks. Clock
+ * lookups are best-effort (failures are logged, not fatal).
+ * Returns 0 on success, -ENODATA when node or fabric info is missing.
+ * NOTE(review): return type is unsigned int yet -ENODATA (negative) is
+ * returned; callers appear to test for nonzero only — consider int.
+ */
+static unsigned int get_bus_node_device_data(
+ struct device_node * const dev_node,
+ struct platform_device * const pdev,
+ struct msm_bus_node_device_type * const node_device)
+{
+ node_device->node_info = get_node_info_data(dev_node, pdev);
+ if (IS_ERR_OR_NULL(node_device->node_info)) {
+ dev_err(&pdev->dev, "Error: Node info missing\n");
+ return -ENODATA;
+ }
+ node_device->ap_owned = of_property_read_bool(dev_node,
+ "qcom,ap-owned");
+
+ if (node_device->node_info->is_fab_dev) {
+ dev_err(&pdev->dev, "Dev %d\n", node_device->node_info->id);
+
+ if (!node_device->node_info->virt_dev) {
+ node_device->fabdev =
+ get_fab_device_info(dev_node, pdev);
+ if (IS_ERR_OR_NULL(node_device->fabdev)) {
+ dev_err(&pdev->dev,
+ "Error: Fabric device info missing\n");
+ devm_kfree(&pdev->dev, node_device->node_info);
+ return -ENODATA;
+ }
+ }
+ node_device->clk[DUAL_CTX].clk = of_clk_get_by_name(dev_node,
+ "bus_clk");
+
+ if (IS_ERR_OR_NULL(node_device->clk[DUAL_CTX].clk))
+ dev_err(&pdev->dev,
+ "%s:Failed to get bus clk for bus%d ctx%d",
+ __func__, node_device->node_info->id,
+ DUAL_CTX);
+
+ node_device->clk[ACTIVE_CTX].clk = of_clk_get_by_name(dev_node,
+ "bus_a_clk");
+ if (IS_ERR_OR_NULL(node_device->clk[ACTIVE_CTX].clk))
+ dev_err(&pdev->dev,
+ "Failed to get bus clk for bus%d ctx%d",
+ node_device->node_info->id, ACTIVE_CTX);
+
+ node_device->qos_clk.clk = of_clk_get_by_name(dev_node,
+ "bus_qos_clk");
+
+ if (IS_ERR_OR_NULL(node_device->qos_clk.clk))
+ dev_dbg(&pdev->dev,
+ "%s:Failed to get bus qos clk for %d",
+ __func__, node_device->node_info->id);
+
+ if (msmbus_coresight_init_adhoc(pdev, dev_node))
+ dev_warn(&pdev->dev,
+ "Coresight support absent for bus: %d\n",
+ node_device->node_info->id);
+ } else {
+ node_device->qos_clk.clk = of_clk_get_by_name(dev_node,
+ "bus_qos_clk");
+
+ if (IS_ERR_OR_NULL(node_device->qos_clk.clk))
+ dev_dbg(&pdev->dev,
+ "%s:Failed to get bus qos clk for mas%d",
+ __func__, node_device->node_info->id);
+
+ node_device->clk[DUAL_CTX].clk = of_clk_get_by_name(dev_node,
+ "node_clk");
+
+ if (IS_ERR_OR_NULL(node_device->clk[DUAL_CTX].clk))
+ dev_dbg(&pdev->dev,
+ "%s:Failed to get bus clk for bus%d ctx%d",
+ __func__, node_device->node_info->id,
+ DUAL_CTX);
+
+ }
+ return 0;
+}
+
+/**
+ * msm_bus_of_to_pdata() - Build bus-node registration data from DT
+ * @pdev: platform device whose of_node lists one child per bus node
+ *
+ * Allocates (devm) a registration table with one entry per child node
+ * and fills each entry via get_bus_node_device_data().  Returns the
+ * table, or NULL on any failure (allocations are devm-freed on error).
+ */
+struct msm_bus_device_node_registration
+	*msm_bus_of_to_pdata(struct platform_device *pdev)
+{
+	struct device_node *of_node, *child_node;
+	struct msm_bus_device_node_registration *pdata;
+	unsigned int i = 0, j;
+	int ret;	/* int, not unsigned: holds negative errnos */
+
+	if (!pdev) {
+		pr_err("Error: Null platform device\n");
+		return NULL;
+	}
+
+	of_node = pdev->dev.of_node;
+
+	pdata = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_device_node_registration),
+			GFP_KERNEL);
+	if (!pdata) {
+		dev_err(&pdev->dev,
+			"Error: Memory allocation for pdata failed\n");
+		return NULL;
+	}
+
+	pdata->num_devices = of_get_child_count(of_node);
+
+	pdata->info = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_node_device_type) *
+			pdata->num_devices, GFP_KERNEL);
+	if (!pdata->info) {
+		dev_err(&pdev->dev,
+			"Error: Memory allocation for pdata->info failed\n");
+		goto node_reg_err;
+	}
+
+	for_each_child_of_node(of_node, child_node) {
+		ret = get_bus_node_device_data(child_node, pdev,
+				&pdata->info[i]);
+		if (ret) {
+			dev_err(&pdev->dev, "Error: unable to initialize bus nodes\n");
+			/* drop the ref for_each_child_of_node() holds */
+			of_node_put(child_node);
+			goto node_reg_err_1;
+		}
+		i++;
+	}
+
+	/* Dump the parsed topology for debugging */
+	dev_dbg(&pdev->dev, "bus topology:\n");
+	for (i = 0; i < pdata->num_devices; i++) {
+		dev_dbg(&pdev->dev, "id %d\nnum_qports %d\nnum_connections %d",
+			pdata->info[i].node_info->id,
+			pdata->info[i].node_info->num_qports,
+			pdata->info[i].node_info->num_connections);
+		dev_dbg(&pdev->dev, "\nbus_device_id %d\n buswidth %d\n",
+			pdata->info[i].node_info->bus_device_id,
+			pdata->info[i].node_info->buswidth);
+		for (j = 0; j < pdata->info[i].node_info->num_connections;
+									j++) {
+			dev_dbg(&pdev->dev, "connection[%d]: %d\n", j,
+				pdata->info[i].node_info->connections[j]);
+		}
+		for (j = 0; j < pdata->info[i].node_info->num_blist;
+									j++) {
+			dev_dbg(&pdev->dev, "black_listed_node[%d]: %d\n", j,
+				pdata->info[i].node_info->
+				black_listed_connections[j]);
+		}
+		if (pdata->info[i].fabdev)
+			dev_dbg(&pdev->dev, "base_addr %zu\nbus_type %d\n",
+					(size_t)pdata->info[i].
+						fabdev->pqos_base,
+					pdata->info[i].fabdev->bus_type);
+	}
+	return pdata;
+
+node_reg_err_1:
+	devm_kfree(&pdev->dev, pdata->info);
+node_reg_err:
+	devm_kfree(&pdev->dev, pdata);
+	return NULL;
+}
+
+/**
+ * msm_bus_of_get_ids() - Read a phandle list and collect each node's cell-id
+ * @pdev: owning platform device (used for devm allocation and logging)
+ * @dev_node: DT node that carries the phandle-list property
+ * @dev_ids: out: devm-allocated id array (valid only on success)
+ * @num_ids: out: number of entries in @dev_ids
+ * @prop_name: name of the phandle-list property to parse
+ *
+ * Returns 0 on success, negative errno otherwise.
+ */
+static int msm_bus_of_get_ids(struct platform_device *pdev,
+			struct device_node *dev_node, int **dev_ids,
+			int *num_ids, char *prop_name)
+{
+	int ret = 0;
+	int size, i;
+	struct device_node *rule_node;
+	int *ids = NULL;
+
+	if (!of_get_property(dev_node, prop_name, &size)) {
+		dev_err(&pdev->dev, "No rule nodes, skipping node");
+		return -ENXIO;
+	}
+
+	*num_ids = size / sizeof(int);
+	ids = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	if (!ids)	/* was unchecked: ids[i] below would oops */
+		return -ENOMEM;
+
+	*dev_ids = ids;
+	for (i = 0; i < *num_ids; i++) {
+		rule_node = of_parse_phandle(dev_node, prop_name, i);
+		if (IS_ERR_OR_NULL(rule_node)) {
+			dev_err(&pdev->dev, "Can't get rule node id");
+			/* do not of_node_put() an ERR/NULL pointer */
+			ret = -ENXIO;
+			goto err_get_ids;
+		}
+
+		if (of_property_read_u32(rule_node, "cell-id",
+				&ids[i])) {
+			dev_err(&pdev->dev, "Can't get rule node id");
+			of_node_put(rule_node);
+			ret = -ENXIO;
+			goto err_get_ids;
+		}
+		of_node_put(rule_node);
+	}
+	return 0;
+
+err_get_ids:
+	devm_kfree(&pdev->dev, ids);
+	*dev_ids = NULL;	/* don't leave a dangling pointer behind */
+	return ret;
+}
+
+/**
+ * msm_bus_of_get_static_rules() - Parse static throttle rules from DT
+ * @pdev: platform device whose of_node has one child per rule
+ * @static_rules: out: devm-allocated rule array (NULL on failure)
+ *
+ * Returns the number of parsed rules, or a negative errno on failure.
+ */
+int msm_bus_of_get_static_rules(struct platform_device *pdev,
+					struct bus_rule_type **static_rules)
+{
+	int ret = 0;
+	struct device_node *of_node, *child_node;
+	int num_rules = 0;
+	int rule_idx = 0;
+	int bw_fld = 0;
+	int i;
+	struct bus_rule_type *static_rule = NULL;
+
+	of_node = pdev->dev.of_node;
+	num_rules = of_get_child_count(of_node);
+	static_rule = devm_kzalloc(&pdev->dev,
+				sizeof(struct bus_rule_type) * num_rules,
+				GFP_KERNEL);
+	if (!static_rule)	/* devm_kzalloc returns NULL, never ERR_PTR */
+		return -ENOMEM;
+
+	*static_rules = static_rule;
+	for_each_child_of_node(of_node, child_node) {
+		/*
+		 * Missing src/dst lists are tolerated (return values
+		 * intentionally ignored, as before); the mandatory
+		 * properties below still fail the parse.
+		 */
+		ret = msm_bus_of_get_ids(pdev, child_node,
+					&static_rule[rule_idx].src_id,
+					&static_rule[rule_idx].num_src,
+					"qcom,src-nodes");
+
+		ret = msm_bus_of_get_ids(pdev, child_node,
+					&static_rule[rule_idx].dst_node,
+					&static_rule[rule_idx].num_dst,
+					"qcom,dest-node");
+
+		ret = of_property_read_u32(child_node, "qcom,src-field",
+					&static_rule[rule_idx].src_field);
+		if (ret) {
+			dev_err(&pdev->dev, "src-field missing");
+			ret = -ENXIO;
+			goto err_static_rules;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,src-op",
+					&static_rule[rule_idx].op);
+		if (ret) {
+			dev_err(&pdev->dev, "src-op missing");
+			ret = -ENXIO;
+			goto err_static_rules;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,mode",
+					&static_rule[rule_idx].mode);
+		if (ret) {
+			dev_err(&pdev->dev, "mode missing");
+			ret = -ENXIO;
+			goto err_static_rules;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,thresh", &bw_fld);
+		if (ret) {
+			dev_err(&pdev->dev, "thresh missing");
+			ret = -ENXIO;
+			goto err_static_rules;
+		}
+		static_rule[rule_idx].thresh = KBTOB(bw_fld);
+
+		/* dest-bw is optional and defaults to 0 */
+		ret = of_property_read_u32(child_node, "qcom,dest-bw",
+					&bw_fld);
+		if (ret)
+			static_rule[rule_idx].dst_bw = 0;
+		else
+			static_rule[rule_idx].dst_bw = KBTOB(bw_fld);
+
+		rule_idx++;
+	}
+	return rule_idx;
+
+err_static_rules:
+	/* drop the ref for_each_child_of_node() still holds */
+	of_node_put(child_node);
+	/* free per-rule id arrays first, then the array itself ONCE */
+	for (i = 0; i < num_rules; i++) {
+		if (!IS_ERR_OR_NULL(static_rule[i].src_id))
+			devm_kfree(&pdev->dev, static_rule[i].src_id);
+		if (!IS_ERR_OR_NULL(static_rule[i].dst_node))
+			devm_kfree(&pdev->dev, static_rule[i].dst_node);
+	}
+	devm_kfree(&pdev->dev, static_rule);
+	*static_rules = NULL;	/* was "static_rules = NULL": nulled the local */
+	return ret;
+}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c b/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c
new file mode 100644
index 0000000000000..be64c78b7f137
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c
@@ -0,0 +1,257 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include "msm_bus_core.h"
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+/* Stubs for backward compatibility */
+/* No-op stub kept for backward compatibility with older callers. */
+void msm_bus_rpm_set_mt_mask(void)
+{
+}
+
+/* Stub: this RPM-SMD path always reports memory as interleaved. */
+bool msm_bus_rpm_is_mem_interleaved(void)
+{
+ return true;
+}
+
+/* Per-fabric commit payload: per-master and per-slave arbitration votes. */
+struct commit_data {
+ struct msm_bus_node_hw_info *mas_arb;
+ struct msm_bus_node_hw_info *slv_arb;
+};
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * Append the master and slave bandwidth votes of @cdata to the debugfs
+ * buffer @buf, advancing *curr.  scnprintf() bounds every write to
+ * max_size.  @ntslaves is unused here but kept for the shared prototype.
+ */
+void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf, const int max_size,
+ void *cdata, int nmasters, int nslaves, int ntslaves)
+{
+ int c;
+ struct commit_data *cd = (struct commit_data *)cdata;
+
+ *curr += scnprintf(buf + *curr, max_size - *curr, "\nMas BW:\n");
+ for (c = 0; c < nmasters; c++)
+ *curr += scnprintf(buf + *curr, max_size - *curr,
+ "%d: %llu\t", cd->mas_arb[c].hw_id,
+ cd->mas_arb[c].bw);
+ *curr += scnprintf(buf + *curr, max_size - *curr, "\nSlave BW:\n");
+ for (c = 0; c < nslaves; c++) {
+ *curr += scnprintf(buf + *curr, max_size - *curr,
+ "%d: %llu\t", cd->slv_arb[c].hw_id,
+ cd->slv_arb[c].bw);
+ }
+}
+#endif
+
+/*
+ * Compare the master and slave arbitration data of two commit sets.
+ * Returns 0 when identical, a non-zero memcmp result otherwise.
+ * NOTE(review): the "* 2" sizing mirrors the fabric layer's allocation
+ * (apparently two contexts per node) -- confirm against the allocator.
+ */
+static int msm_bus_rpm_compare_cdata(
+	struct msm_bus_fabric_registration *fab_pdata,
+	struct commit_data *cd1, struct commit_data *cd2)
+{
+	size_t n;
+	int ret;
+
+	n = sizeof(struct msm_bus_node_hw_info) * fab_pdata->nmasters * 2;
+	ret = memcmp(cd1->mas_arb, cd2->mas_arb, n);
+	if (ret) {
+		MSM_BUS_DBG("Master Arb Data not equal\n");
+		return ret;
+	}
+
+	n = sizeof(struct msm_bus_node_hw_info) * fab_pdata->nslaves * 2;
+	ret = memcmp(cd1->slv_arb, cd2->slv_arb, n);
+	if (ret) {
+		/* was a copy-paste "Master" message in the slave branch */
+		MSM_BUS_DBG("Slave Arb Data not equal\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * msm_bus_rpm_req() - Send one master/slave bandwidth KVP to the RPM
+ * @ctx: ACTIVE_CTX or DUAL_CTX, translated to the SMD-RPM state
+ * @rsc_type: RPM resource type (master or slave bus request)
+ * @key: KVP key (bandwidth field)
+ * @hw_info: node whose hw_id and bw are sent
+ * @valid: false sends a zero-length payload, invalidating the request
+ *
+ * Returns the result of qcom_rpm_bus_send_message().
+ * The legacy msm_rpm_* sequence that used to sit here behind "#if 0"
+ * has been removed; the SMD-RPM call below is the live path.
+ */
+static int msm_bus_rpm_req(int ctx, uint32_t rsc_type, uint32_t key,
+	struct msm_bus_node_hw_info *hw_info, bool valid)
+{
+	struct qcom_msm_bus_req req = {
+		.key = key,
+	};
+
+	if (ctx == ACTIVE_CTX)
+		ctx = QCOM_SMD_RPM_ACTIVE_STATE;
+	else if (ctx == DUAL_CTX)
+		ctx = QCOM_SMD_RPM_SLEEP_STATE;
+
+	if (valid) {
+		req.value = hw_info->bw;
+		req.nbytes = sizeof(uint64_t);
+	} else {
+		/* zero-length payload invalidates the previous request */
+		req.value = 0;
+		req.nbytes = 0;
+	}
+
+	return qcom_rpm_bus_send_message(ctx, rsc_type, hw_info->hw_id, &req);
+}
+
+/*
+ * Send all dirty master votes and then all dirty slave votes of @cd to
+ * the RPM for the given context, clearing each dirty flag on success.
+ * Stops at the first failure and returns its status (0 otherwise).
+ * @rpm_data is accepted for the common prototype but unused here.
+ */
+static int msm_bus_rpm_commit_arb(struct msm_bus_fabric_registration
+ *fab_pdata, int ctx, void *rpm_data,
+ struct commit_data *cd, bool valid)
+{
+ int i, status = 0, rsc_type, key;
+
+ MSM_BUS_DBG("Context: %d\n", ctx);
+ rsc_type = RPM_BUS_MASTER_REQ;
+ key = RPM_MASTER_FIELD_BW;
+ for (i = 0; i < fab_pdata->nmasters; i++) {
+ if (cd->mas_arb[i].dirty) {
+ MSM_BUS_DBG("MAS HWID: %d, BW: %llu DIRTY: %d\n",
+ cd->mas_arb[i].hw_id,
+ cd->mas_arb[i].bw,
+ cd->mas_arb[i].dirty);
+ status = msm_bus_rpm_req(ctx, rsc_type, key,
+ &cd->mas_arb[i], valid);
+ if (status) {
+ MSM_BUS_ERR("RPM: Req fail: mas:%d, bw:%llu\n",
+ cd->mas_arb[i].hw_id,
+ cd->mas_arb[i].bw);
+ break;
+ } else {
+ cd->mas_arb[i].dirty = false;
+ }
+ }
+ }
+
+ rsc_type = RPM_BUS_SLAVE_REQ;
+ key = RPM_SLAVE_FIELD_BW;
+ for (i = 0; i < fab_pdata->nslaves; i++) {
+ if (cd->slv_arb[i].dirty) {
+ MSM_BUS_DBG("SLV HWID: %d, BW: %llu DIRTY: %d\n",
+ cd->slv_arb[i].hw_id,
+ cd->slv_arb[i].bw,
+ cd->slv_arb[i].dirty);
+ status = msm_bus_rpm_req(ctx, rsc_type, key,
+ &cd->slv_arb[i], valid);
+ if (status) {
+ MSM_BUS_ERR("RPM: Req fail: slv:%d, bw:%llu\n",
+ cd->slv_arb[i].hw_id,
+ cd->slv_arb[i].bw);
+ break;
+ } else {
+ cd->slv_arb[i].dirty = false;
+ }
+ }
+ }
+
+ return status;
+}
+
+/**
+ * msm_bus_remote_hw_commit() - Commit the arbitration data to RPM
+ * @fab_pdata: fabric for which the data should be committed
+ * @hw_data: RPM handle, passed through to the commit helper
+ * @cdata: per-context commit data (DUAL_CTX and ACTIVE_CTX entries)
+ *
+ * If the active and sleep sets differ, both are committed as-is; if
+ * they are identical the sleep set is invalidated instead.  Returns the
+ * status of the ACTIVE_CTX commit (a DUAL_CTX failure is logged but, as
+ * before, not propagated).
+ */
+int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
+	*fab_pdata, void *hw_data, void **cdata)
+{
+	int ret;
+	bool valid;
+	struct commit_data *dual_cd, *act_cd;
+	void *rpm_data = hw_data;
+
+	MSM_BUS_DBG("\nReached RPM Commit\n");
+	dual_cd = (struct commit_data *)cdata[DUAL_CTX];
+	act_cd = (struct commit_data *)cdata[ACTIVE_CTX];
+
+	/*
+	 * If the arb data for active set and sleep set is different,
+	 * commit both sets; if identical, invalidate the sleep set.
+	 */
+	ret = msm_bus_rpm_compare_cdata(fab_pdata, act_cd, dual_cd);
+	valid = ret ? true : false;
+
+	ret = msm_bus_rpm_commit_arb(fab_pdata, DUAL_CTX, rpm_data,
+		dual_cd, valid);
+	if (ret)
+		MSM_BUS_ERR("Error committing fabric:%d in %d ctx\n",
+			fab_pdata->id, DUAL_CTX);
+
+	valid = true;
+	ret = msm_bus_rpm_commit_arb(fab_pdata, ACTIVE_CTX, rpm_data, act_cd,
+		valid);
+	if (ret)
+		MSM_BUS_ERR("Error committing fabric:%d in %d ctx\n",
+			fab_pdata->id, ACTIVE_CTX);
+
+	return ret;
+}
+
+/*
+ * Mark non-AHB (RPM-managed) fabrics as rpm_enabled.  @hw_algo is
+ * accepted for the common init prototype but not modified here.
+ * Always returns 0.
+ */
+int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata,
+ struct msm_bus_hw_algorithm *hw_algo)
+{
+ if (!pdata->ahb)
+ pdata->rpm_enabled = 1;
+ return 0;
+}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rules.c b/drivers/soc/qcom/msm_bus/msm_bus_rules.c
new file mode 100644
index 0000000000000..43422a7fddd9c
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rules.c
@@ -0,0 +1,634 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/list_sort.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm_bus_rules.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+/* Cached per-source vote values used when evaluating a rule. */
+struct node_vote_info {
+ int id;
+ u64 ib;
+ u64 ab;
+ u64 clk;
+};
+
+/* One instantiated rule attached to a rule_node_info. */
+struct rules_def {
+ int rule_id;
+ int num_src;
+ int state; /* RULE_STATE_APPLIED / RULE_STATE_NOT_APPLIED */
+ struct node_vote_info *src_info;
+ struct bus_rule_type rule_ops;
+ bool state_change; /* set when state flipped since last apply_rule() */
+ struct list_head link;
+};
+
+/* A destination node (or NB_ID notifier client) and its sorted rules. */
+struct rule_node_info {
+ int id;
+ void *data; /* notifier_block pointer for NB_ID nodes */
+ struct raw_notifier_head rule_notify_list;
+ int cur_rule; /* rule_id currently applied, -1 if none */
+ int num_rules;
+ struct list_head node_rules;
+ struct list_head link;
+ struct rule_apply_rcm_info apply;
+};
+
+/* Serializes all rule-list mutation and evaluation. */
+DEFINE_MUTEX(msm_bus_rules_lock);
+/* All registered rule nodes (linked via rule_node_info.link). */
+static LIST_HEAD(node_list);
+static struct rule_node_info *get_node(u32 id, void *data);
+
+/* Comparison helpers used by do_compare_op() */
+#define LE(op1, op2) (op1 <= op2)
+#define LT(op1, op2) (op1 < op2)
+#define GE(op1, op2) (op1 >= op2)
+#define GT(op1, op2) (op1 > op2)
+/* Pseudo destination id used for notifier-based (callback) rules */
+#define NB_ID (0x201)
+
+/*
+ * Look up an existing rule node by @id.  Notifier (NB_ID) entries must
+ * additionally match the @data pointer.  Returns NULL when not found.
+ */
+static struct rule_node_info *get_node(u32 id, void *data)
+{
+	struct rule_node_info *node;
+
+	list_for_each_entry(node, &node_list, link) {
+		if (node->id != id)
+			continue;
+		if (id != NB_ID || node->data == data)
+			return node;
+	}
+
+	return NULL;
+}
+
+/*
+ * Return the rule node with @id, creating and list-adding a fresh one
+ * (cur_rule = -1, empty rule list) if absent.  Returns NULL only on
+ * allocation failure.  Unlike get_node(), lookup matches on id alone.
+ */
+static struct rule_node_info *gen_node(u32 id, void *data)
+{
+ struct rule_node_info *node_it = NULL;
+ struct rule_node_info *node_match = NULL;
+
+ list_for_each_entry(node_it, &node_list, link) {
+ if (node_it->id == id) {
+ node_match = node_it;
+ break;
+ }
+ }
+
+ if (!node_match) {
+ node_match = kzalloc(sizeof(struct rule_node_info), GFP_KERNEL);
+ if (!node_match) {
+ pr_err("%s: Cannot allocate memory", __func__);
+ goto exit_node_match;
+ }
+
+ node_match->id = id;
+ node_match->cur_rule = -1;
+ node_match->num_rules = 0;
+ node_match->data = data;
+ list_add_tail(&node_match->link, &node_list);
+ INIT_LIST_HEAD(&node_match->node_rules);
+ RAW_INIT_NOTIFIER_HEAD(&node_match->rule_notify_list);
+ pr_debug("Added new node %d to list\n", id);
+ }
+exit_node_match:
+ return node_match;
+}
+
+/* Evaluate "op1 <op> op2" for the rule comparison operators. */
+static bool do_compare_op(u64 op1, u64 op2, int op)
+{
+	switch (op) {
+	case OP_LE:
+		return op1 <= op2;
+	case OP_LT:
+		return op1 < op2;
+	case OP_GT:
+		return op1 > op2;
+	case OP_GE:
+		return op1 >= op2;
+	case OP_NOOP:
+		return true;
+	default:
+		pr_info("Invalid OP %d", op);
+		return false;
+	}
+}
+
+/*
+ * Refresh the cached ib/ab/clk votes of every rule source in @rule_node
+ * that matches the updated path node @inp_node.
+ */
+static void update_src_id_vote(struct rule_update_path_info *inp_node,
+ struct rule_node_info *rule_node)
+{
+ struct rules_def *rule;
+ int i;
+
+ list_for_each_entry(rule, &rule_node->node_rules, link) {
+ for (i = 0; i < rule->num_src; i++) {
+ if (rule->src_info[i].id == inp_node->id) {
+ rule->src_info[i].ib = inp_node->ib;
+ rule->src_info[i].ab = inp_node->ab;
+ rule->src_info[i].clk = inp_node->clk;
+ }
+ }
+ }
+}
+
+/*
+ * Sum the selected vote field (ib/ab/clk, per rule_ops.src_field) over
+ * all sources of @rule.
+ * NOTE(review): @src_id is unused -- the sum always covers every
+ * source, regardless of which source triggered the update.
+ */
+static u64 get_field(struct rules_def *rule, int src_id)
+{
+ u64 field = 0;
+ int i;
+
+ for (i = 0; i < rule->num_src; i++) {
+ switch (rule->rule_ops.src_field) {
+ case FLD_IB:
+ field += rule->src_info[i].ib;
+ break;
+ case FLD_AB:
+ field += rule->src_info[i].ab;
+ break;
+ case FLD_CLK:
+ field += rule->src_info[i].clk;
+ break;
+ }
+ }
+
+ return field;
+}
+
+/*
+ * Evaluate @rule against its cached source votes.  Returns true when
+ * the summed source field satisfies the rule's threshold comparison;
+ * a zero field sum never matches.  Only the four relational ops are
+ * supported here.
+ */
+static bool check_rule(struct rules_def *rule,
+ struct rule_update_path_info *inp)
+{
+ bool ret = false;
+
+ if (!rule)
+ return ret;
+
+ switch (rule->rule_ops.op) {
+ case OP_LE:
+ case OP_LT:
+ case OP_GT:
+ case OP_GE:
+ {
+ u64 src_field = get_field(rule, inp->id);
+ if (!src_field)
+ ret = false;
+ else
+ ret = do_compare_op(src_field, rule->rule_ops.thresh,
+ rule->rule_ops.op);
+ break;
+ }
+ default:
+ pr_err("Unsupported op %d", rule->rule_ops.op);
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Re-evaluate the rules of @node after @inp_node's votes changed.  The
+ * first rule that matches becomes cur_rule and transitions to APPLIED;
+ * all other previously-APPLIED rules are demoted to NOT_APPLIED.  Nodes
+ * with no rule sourced from @inp_node are left untouched.
+ * NOTE(review): match_found is only set on a NOT_APPLIED -> APPLIED
+ * transition; a rule that still matches but was already APPLIED leaves
+ * match_found false, so cur_rule is reset to -1 and the demotion loop
+ * below un-applies it -- confirm this is intended.
+ */
+static void match_rule(struct rule_update_path_info *inp_node,
+ struct rule_node_info *node)
+{
+ struct rules_def *rule;
+ int i;
+ bool match_found = false;
+ bool relevant_trans = false;
+
+ list_for_each_entry(rule, &node->node_rules, link) {
+ for (i = 0; i < rule->num_src; i++) {
+ if (rule->src_info[i].id == inp_node->id) {
+ relevant_trans = true;
+ if (check_rule(rule, inp_node)) {
+ node->cur_rule = rule->rule_id;
+ if (rule->state ==
+ RULE_STATE_NOT_APPLIED) {
+ rule->state =
+ RULE_STATE_APPLIED;
+ rule->state_change = true;
+ match_found = true;
+ }
+ break;
+ }
+ }
+ }
+ if (match_found)
+ break;
+ }
+
+ if (!relevant_trans)
+ return;
+
+ if (!match_found)
+ node->cur_rule = -1;
+
+ list_for_each_entry(rule, &node->node_rules, link) {
+ if (rule->rule_id != node->cur_rule) {
+ if (rule->state == RULE_STATE_APPLIED) {
+ rule->state = RULE_STATE_NOT_APPLIED;
+ rule->state_change = true;
+ }
+ }
+ }
+}
+
+/*
+ * For NB_ID nodes, fire the notifier chain for every rule whose state
+ * changed since the last pass.  For throttle nodes, queue the node's
+ * apply info on @output_list for each rule currently APPLIED.
+ * NOTE(review): node->apply is a single embedded element; if more than
+ * one rule of this node is APPLIED, list_add_tail() re-links the same
+ * entry and only the last rule's values survive -- confirm intended.
+ */
+static void apply_rule(struct rule_node_info *node,
+ struct list_head *output_list)
+{
+ struct rules_def *rule;
+
+ list_for_each_entry(rule, &node->node_rules, link) {
+ if (node->id == NB_ID) {
+ if (rule->state_change) {
+ rule->state_change = false;
+ raw_notifier_call_chain(&node->rule_notify_list,
+ rule->state, (void *)&rule->rule_ops);
+ }
+ } else {
+ rule->state_change = false;
+ if ((rule->state == RULE_STATE_APPLIED)) {
+ node->apply.id = rule->rule_ops.dst_node[0];
+ node->apply.throttle = rule->rule_ops.mode;
+ node->apply.lim_bw = rule->rule_ops.dst_bw;
+ list_add_tail(&node->apply.link, output_list);
+ }
+ }
+ }
+
+}
+
+/*
+ * Push a batch of path vote updates through all registered rule nodes:
+ * refresh cached votes, re-match rules, then collect the resulting
+ * apply actions on @output_list.  Serialized by msm_bus_rules_lock.
+ * Always returns 0.
+ */
+int msm_rules_update_path(struct list_head *input_list,
+ struct list_head *output_list)
+{
+ int ret = 0;
+ struct rule_update_path_info *inp_node;
+ struct rule_node_info *node_it = NULL;
+
+ mutex_lock(&msm_bus_rules_lock);
+ list_for_each_entry(inp_node, input_list, link) {
+ list_for_each_entry(node_it, &node_list, link) {
+ update_src_id_vote(inp_node, node_it);
+ match_rule(inp_node, node_it);
+ }
+ }
+
+ list_for_each_entry(node_it, &node_list, link)
+ apply_rule(node_it, output_list);
+
+ mutex_unlock(&msm_bus_rules_lock);
+ return ret;
+}
+
+/*
+ * Treat two relational ops as equivalent when their enum values are
+ * identical or adjacent (pairing LT with LE and GT with GE).
+ * NOTE(review): correctness depends on the OP_* enum ordering placing
+ * only those pairs adjacently -- confirm against the header.
+ */
+static bool ops_equal(int op1, int op2)
+{
+ bool ret = false;
+
+ switch (op1) {
+ case OP_GT:
+ case OP_GE:
+ case OP_LT:
+ case OP_LE:
+ if (abs(op1 - op2) <= 1)
+ ret = true;
+ break;
+ default:
+ ret = (op1 == op2);
+ }
+
+ return ret;
+}
+
+/*
+ * list_sort() comparator for a node's rule list.  THROTTLE_ON rules
+ * sort before THROTTLE_OFF; within the same mode and equivalent op,
+ * lower thresholds come first for LT/LE and higher thresholds first
+ * for GT/GE; otherwise ops are ordered by their enum difference.
+ * Defaults to -1 (keep order) for the remaining mode combinations.
+ */
+static int node_rules_compare(void *priv, struct list_head *a,
+ struct list_head *b)
+{
+ struct rules_def *ra = container_of(a, struct rules_def, link);
+ struct rules_def *rb = container_of(b, struct rules_def, link);
+ int ret = -1;
+ int64_t th_diff = 0;
+
+
+ if (ra->rule_ops.mode == rb->rule_ops.mode) {
+ if (ops_equal(ra->rule_ops.op, rb->rule_ops.op)) {
+ if ((ra->rule_ops.op == OP_LT) ||
+ (ra->rule_ops.op == OP_LE)) {
+ th_diff = ra->rule_ops.thresh -
+ rb->rule_ops.thresh;
+ if (th_diff > 0)
+ ret = 1;
+ else
+ ret = -1;
+ } else if ((ra->rule_ops.op == OP_GT) ||
+ (ra->rule_ops.op == OP_GE)) {
+ th_diff = rb->rule_ops.thresh -
+ ra->rule_ops.thresh;
+ if (th_diff > 0)
+ ret = 1;
+ else
+ ret = -1;
+ }
+ } else
+ ret = ra->rule_ops.op - rb->rule_ops.op;
+ } else if ((ra->rule_ops.mode == THROTTLE_OFF) &&
+ (rb->rule_ops.mode == THROTTLE_ON)) {
+ ret = 1;
+ } else if ((ra->rule_ops.mode == THROTTLE_ON) &&
+ (rb->rule_ops.mode == THROTTLE_OFF)) {
+ ret = -1;
+ }
+
+ return ret;
+}
+
+/* Dump one node's rules at pr_info level (debug aid). */
+static void print_rules(struct rule_node_info *node_it)
+{
+	struct rules_def *node_rule = NULL;
+	int i;
+
+	if (!node_it) {
+		/* was the garbled "no node for found" */
+		pr_err("%s: no node found", __func__);
+		return;
+	}
+
+	pr_info("\n Now printing rules for Node %d cur rule %d\n",
+			node_it->id, node_it->cur_rule);
+	list_for_each_entry(node_rule, &node_it->node_rules, link) {
+		pr_info("\n num Rules %d rule Id %d\n",
+				node_it->num_rules, node_rule->rule_id);
+		pr_info("Rule: src_field %d\n", node_rule->rule_ops.src_field);
+		for (i = 0; i < node_rule->rule_ops.num_src; i++)
+			pr_info("Rule: src %d\n",
+					node_rule->rule_ops.src_id[i]);
+		for (i = 0; i < node_rule->rule_ops.num_dst; i++)
+			pr_info("Rule: dst %d dst_bw %llu\n",
+					node_rule->rule_ops.dst_node[i],
+					node_rule->rule_ops.dst_bw);
+		pr_info("Rule: thresh %llu op %d mode %d State %d\n",
+					node_rule->rule_ops.thresh,
+					node_rule->rule_ops.op,
+					node_rule->rule_ops.mode,
+					node_rule->state);
+	}
+}
+
+/* Dump every registered rule node via print_rules() (debug aid). */
+void print_all_rules(void)
+{
+ struct rule_node_info *node_it = NULL;
+
+ list_for_each_entry(node_it, &node_list, link)
+ print_rules(node_it);
+}
+
+/*
+ * Render every registered rule node into @buf (at most @max_buf bytes;
+ * scnprintf() clamps each write).  Debugfs helper.
+ */
+void print_rules_buf(char *buf, int max_buf)
+{
+	struct rule_node_info *node_it = NULL;
+	struct rules_def *node_rule = NULL;
+	int i;
+	int cnt = 0;
+
+	list_for_each_entry(node_it, &node_list, link) {
+		cnt += scnprintf(buf + cnt, max_buf - cnt,
+			"\n Now printing rules for Node %d cur_rule %d\n",
+			node_it->id, node_it->cur_rule);
+		list_for_each_entry(node_rule, &node_it->node_rules, link) {
+			cnt += scnprintf(buf + cnt, max_buf - cnt,
+				"\nNum Rules:%d ruleId %d STATE:%d change:%d\n",
+				node_it->num_rules, node_rule->rule_id,
+				node_rule->state, node_rule->state_change);
+			cnt += scnprintf(buf + cnt, max_buf - cnt,
+				"Src_field %d\n",
+				node_rule->rule_ops.src_field);
+			for (i = 0; i < node_rule->rule_ops.num_src; i++)
+				cnt += scnprintf(buf + cnt, max_buf - cnt,
+					"Src %d Cur Ib %llu Ab %llu\n",
+					node_rule->rule_ops.src_id[i],
+					node_rule->src_info[i].ib,
+					node_rule->src_info[i].ab);
+			for (i = 0; i < node_rule->rule_ops.num_dst; i++)
+				cnt += scnprintf(buf + cnt, max_buf - cnt,
+					"Dst %d dst_bw %llu\n",
+					/* was dst_node[0]: printed only the
+					 * first destination for every i */
+					node_rule->rule_ops.dst_node[i],
+					node_rule->rule_ops.dst_bw);
+			cnt += scnprintf(buf + cnt, max_buf - cnt,
+				"Thresh %llu op %d mode %d\n",
+				node_rule->rule_ops.thresh,
+				node_rule->rule_ops.op,
+				node_rule->rule_ops.mode);
+		}
+	}
+}
+
+/*
+ * Deep-copy @src into @node_rule: duplicate the src_id list, the dst
+ * list (only for non-notifier rules; notifier rules keep the caller's
+ * dst pointer from the struct copy), and build the src_info vote cache.
+ * Returns 0 or -ENOMEM; on failure all allocations made here are freed
+ * so the caller only needs to free @node_rule itself.
+ */
+static int copy_rule(struct bus_rule_type *src, struct rules_def *node_rule,
+		struct notifier_block *nb)
+{
+	int i;
+
+	memcpy(&node_rule->rule_ops, src,
+		sizeof(struct bus_rule_type));
+	node_rule->rule_ops.src_id = kzalloc(
+			(sizeof(int) * node_rule->rule_ops.num_src),
+			GFP_KERNEL);
+	if (!node_rule->rule_ops.src_id) {
+		pr_err("%s:Failed to allocate for src_id",
+					__func__);
+		return -ENOMEM;
+	}
+	memcpy(node_rule->rule_ops.src_id, src->src_id,
+		sizeof(int) * src->num_src);
+
+	if (!nb) {
+		node_rule->rule_ops.dst_node = kzalloc(
+			(sizeof(int) * node_rule->rule_ops.num_dst),
+			GFP_KERNEL);
+		if (!node_rule->rule_ops.dst_node) {
+			pr_err("%s:Failed to allocate for dst_node",
+					__func__);
+			goto err_free_src;
+		}
+		memcpy(node_rule->rule_ops.dst_node, src->dst_node,
+			sizeof(int) * src->num_dst);
+	}
+
+	node_rule->num_src = src->num_src;
+	node_rule->src_info = kzalloc(
+		(sizeof(struct node_vote_info) * node_rule->rule_ops.num_src),
+		GFP_KERNEL);
+	if (!node_rule->src_info) {
+		pr_err("%s:Failed to allocate for src_info",
+					__func__);
+		goto err_free_dst;
+	}
+	for (i = 0; i < src->num_src; i++)
+		node_rule->src_info[i].id = src->src_id[i];
+
+	return 0;
+
+err_free_dst:
+	/* only free dst_node when we allocated it above */
+	if (!nb) {
+		kfree(node_rule->rule_ops.dst_node);
+		node_rule->rule_ops.dst_node = NULL;
+	}
+err_free_src:
+	kfree(node_rule->rule_ops.src_id);
+	node_rule->rule_ops.src_id = NULL;
+	return -ENOMEM;
+}
+
+/*
+ * Register @num_rules rules.  With a notifier block every rule attaches
+ * to the single NB_ID node and @nb is added to its notify chain;
+ * otherwise one copy of the rule is attached to each destination node.
+ * Each node's rule list is kept sorted with node_rules_compare().
+ */
+void msm_rule_register(int num_rules, struct bus_rule_type *rule,
+					struct notifier_block *nb)
+{
+	struct rule_node_info *node = NULL;
+	int i, j;
+	struct rules_def *node_rule = NULL;
+	int num_dst = 0;
+
+	if (!rule)
+		return;
+
+	mutex_lock(&msm_bus_rules_lock);
+	for (i = 0; i < num_rules; i++) {
+		if (nb)
+			num_dst = 1;
+		else
+			num_dst = rule[i].num_dst;
+
+		for (j = 0; j < num_dst; j++) {
+			int id = nb ? NB_ID : rule[i].dst_node[j];
+
+			node = gen_node(id, nb);
+			if (!node) {
+				pr_info("Error getting rule");
+				goto exit_rule_register;
+			}
+			node_rule = kzalloc(sizeof(struct rules_def),
+						GFP_KERNEL);
+			if (!node_rule) {
+				pr_err("%s: Failed to allocate for rule",
+						__func__);
+				goto exit_rule_register;
+			}
+
+			if (copy_rule(&rule[i], node_rule, nb)) {
+				pr_err("Error copying rule");
+				kfree(node_rule); /* was leaked here */
+				goto exit_rule_register;
+			}
+
+			node_rule->rule_id = node->num_rules++;
+			if (nb)
+				node->data = nb;
+
+			list_add_tail(&node_rule->link, &node->node_rules);
+			/*
+			 * Sort per node as we insert; sorting once after
+			 * the loops only covered the last node touched.
+			 */
+			list_sort(NULL, &node->node_rules,
+					node_rules_compare);
+		}
+	}
+
+	if (nb && node)
+		raw_notifier_chain_register(&node->rule_notify_list, nb);
+exit_rule_register:
+	mutex_unlock(&msm_bus_rules_lock);
+}
+
+/*
+ * Compare two rules for equality; returns 0 when equal (memcmp style).
+ * Equality requires identical src list, dst list, dst_bw, op and
+ * thresh.
+ * NOTE(review): mode and src_field are not compared -- confirm that two
+ * rules differing only in those fields should match here.
+ */
+static int comp_rules(struct bus_rule_type *rulea, struct bus_rule_type *ruleb)
+{
+ int ret = 1;
+
+ if (rulea->num_src == ruleb->num_src)
+ ret = memcmp(rulea->src_id, ruleb->src_id,
+ (sizeof(int) * rulea->num_src));
+ if (!ret && (rulea->num_dst == ruleb->num_dst))
+ ret = memcmp(rulea->dst_node, ruleb->dst_node,
+ (sizeof(int) * rulea->num_dst));
+ if (!ret && (rulea->dst_bw == ruleb->dst_bw) &&
+ (rulea->op == ruleb->op) && (rulea->thresh == ruleb->thresh))
+ ret = 0;
+
+ return ret;
+}
+
+/*
+ * Unregister rules.  With @nb, the whole NB_ID node's rule list is torn
+ * down and @nb removed from its notify chain; otherwise each given rule
+ * is matched (comp_rules) against every node and removed where found.
+ * Nodes left with no rules are deleted.  @rule must be non-NULL even in
+ * the notifier case (early return otherwise).
+ */
+void msm_rule_unregister(int num_rules, struct bus_rule_type *rule,
+ struct notifier_block *nb)
+{
+ int i;
+ struct rule_node_info *node = NULL;
+ struct rule_node_info *node_tmp = NULL;
+ struct rules_def *node_rule;
+ struct rules_def *node_rule_tmp;
+ bool match_found = false;
+
+ if (!rule)
+ return;
+
+ mutex_lock(&msm_bus_rules_lock);
+ if (nb) {
+ node = get_node(NB_ID, nb);
+ if (!node) {
+ pr_err("%s: Can't find node", __func__);
+ goto exit_unregister_rule;
+ }
+
+ /* drop every rule held by this notifier client */
+ list_for_each_entry_safe(node_rule, node_rule_tmp,
+ &node->node_rules, link) {
+ list_del(&node_rule->link);
+ kfree(node_rule);
+ node->num_rules--;
+ }
+ raw_notifier_chain_unregister(&node->rule_notify_list, nb);
+ } else {
+ for (i = 0; i < num_rules; i++) {
+ match_found = false;
+
+ list_for_each_entry(node, &node_list, link) {
+ list_for_each_entry_safe(node_rule,
+ node_rule_tmp, &node->node_rules, link) {
+ if (comp_rules(&node_rule->rule_ops,
+ &rule[i]) == 0) {
+ list_del(&node_rule->link);
+ kfree(node_rule);
+ match_found = true;
+ node->num_rules--;
+ list_sort(NULL,
+ &node->node_rules,
+ node_rules_compare);
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ /* reap nodes whose last rule was just removed */
+ list_for_each_entry_safe(node, node_tmp,
+ &node_list, link) {
+ if (!node->num_rules) {
+ pr_debug("Deleting Rule node %d", node->id);
+ list_del(&node->link);
+ kfree(node);
+ }
+ }
+exit_unregister_rule:
+ mutex_unlock(&msm_bus_rules_lock);
+}
+
+/* True when at least one rule node is currently registered. */
+bool msm_rule_are_rules_registered(void)
+{
+	return !list_empty(&node_list);
+}
+
diff --git a/drivers/soc/qcom/msm_bus/msm_buspm_coresight.c b/drivers/soc/qcom/msm_bus/msm_buspm_coresight.c
new file mode 100644
index 0000000000000..cf8216c860df3
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_buspm_coresight.c
@@ -0,0 +1,158 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/of_coresight.h>
+#include <linux/coresight.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+/* Per-device state for the bus coresight source. */
+struct msmbus_coresight_drvdata {
+ struct device *dev;
+ struct coresight_device *csdev;
+ struct clk *clk; /* fabric debug clock from clk_get_sys() */
+ const char *clk_name; /* DT "qcom,fabclk-dual" */
+ const char *clknode; /* DT "label" */
+};
+
+/* coresight source enable: prepare+enable the fabric debug clock. */
+static int msmbus_coresight_enable(struct coresight_device *csdev)
+{
+ struct msmbus_coresight_drvdata *drvdata =
+ dev_get_drvdata(csdev->dev.parent);
+
+ return clk_prepare_enable(drvdata->clk);
+}
+
+/* coresight source disable: undo msmbus_coresight_enable(). */
+static void msmbus_coresight_disable(struct coresight_device *csdev)
+{
+ struct msmbus_coresight_drvdata *drvdata =
+ dev_get_drvdata(csdev->dev.parent);
+
+ clk_disable_unprepare(drvdata->clk);
+}
+
+/* Source-only coresight operations: clock gating on enable/disable. */
+static const struct coresight_ops_source msmbus_coresight_source_ops = {
+ .enable = msmbus_coresight_enable,
+ .disable = msmbus_coresight_disable,
+};
+
+static const struct coresight_ops msmbus_coresight_cs_ops = {
+ .source_ops = &msmbus_coresight_source_ops,
+};
+
+/*
+ * Tear down the coresight source registered by msmbus_coresight_init():
+ * gate the clock, unregister the csdev and clear drvdata.
+ * NOTE(review): the clock obtained via clk_get_sys() in init is never
+ * clk_put() here -- confirm whether a put is required; the devm_kfree()
+ * is redundant (devres frees on detach) but harmless.
+ */
+void msmbus_coresight_remove(struct platform_device *pdev)
+{
+ struct msmbus_coresight_drvdata *drvdata = platform_get_drvdata(pdev);
+
+ msmbus_coresight_disable(drvdata->csdev);
+ coresight_unregister(drvdata->csdev);
+ devm_kfree(&pdev->dev, drvdata);
+ platform_set_drvdata(pdev, NULL);
+}
+EXPORT_SYMBOL(msmbus_coresight_remove);
+
+/*
+ * Resolve the fabric debug clock: clock name from "qcom,fabclk-dual",
+ * device (connection) name from "label", looked up via clk_get_sys().
+ * Returns 0 on success, -EINVAL on any failure.
+ * NOTE(review): a failing clk_get_sys() is reported as -EINVAL, masking
+ * the underlying PTR_ERR value; callers only test for non-zero today.
+ */
+static int buspm_of_get_clk(struct device_node *of_node,
+ struct msmbus_coresight_drvdata *drvdata)
+{
+ if (of_property_read_string(of_node, "qcom,fabclk-dual",
+ &drvdata->clk_name)) {
+ pr_err("Error: Unable to find clock from of_node\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_string(of_node, "label", &drvdata->clknode)) {
+ pr_err("Error: Unable to find clock-node from of_node\n");
+ return -EINVAL;
+ }
+
+ drvdata->clk = clk_get_sys(drvdata->clknode, drvdata->clk_name);
+ if (IS_ERR(drvdata->clk)) {
+ pr_err("Error: clk_get_sys failed for: %s\n",
+ drvdata->clknode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Register this bus as a coresight source: fetch coresight platform
+ * data from DT, resolve the fabric debug clock and register a
+ * source-type csdev whose enable/disable gates that clock.
+ * Returns 0 on success, negative errno on failure (the clk_get_sys()
+ * reference is released on every error path; it used to leak).
+ */
+int msmbus_coresight_init(struct platform_device *pdev)
+{
+	int ret;
+	struct device *dev = &pdev->dev;
+	struct coresight_platform_data *pdata;
+	struct msmbus_coresight_drvdata *drvdata;
+	struct coresight_desc *desc;
+
+	if (pdev->dev.of_node) {
+		pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node);
+		if (IS_ERR(pdata))
+			return PTR_ERR(pdata);
+		pdev->dev.platform_data = pdata;
+	}
+
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata) {
+		pr_err("coresight: Alloc for drvdata failed\n");
+		return -ENOMEM;
+	}
+
+	drvdata->dev = &pdev->dev;
+	platform_set_drvdata(pdev, drvdata);
+	ret = buspm_of_get_clk(pdev->dev.of_node, drvdata);
+	if (ret) {
+		pr_err("Error getting clocks\n");
+		ret = -ENXIO;
+		goto err_drvdata;
+	}
+
+	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+	if (!desc) {
+		pr_err("coresight: Error allocating memory\n");
+		ret = -ENOMEM;
+		goto err_clk;
+	}
+
+	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
+	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_BUS;
+	desc->ops = &msmbus_coresight_cs_ops;
+	desc->pdata = pdev->dev.platform_data;
+	desc->dev = &pdev->dev;
+	desc->owner = THIS_MODULE;
+	drvdata->csdev = coresight_register(desc);
+	if (IS_ERR(drvdata->csdev)) {
+		pr_err("coresight: Coresight register failed\n");
+		ret = PTR_ERR(drvdata->csdev);
+		goto err_clk;
+	}
+
+	dev_info(dev, "msmbus_coresight initialized\n");
+
+	return 0;
+
+err_clk:
+	clk_put(drvdata->clk);	/* balance clk_get_sys() */
+err_drvdata:
+	platform_set_drvdata(pdev, NULL);
+	return ret;
+}
+EXPORT_SYMBOL(msmbus_coresight_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM BusPM CoreSight Driver");
diff --git a/drivers/soc/qcom/msm_bus/msm_buspm_coresight_adhoc.c b/drivers/soc/qcom/msm_bus/msm_buspm_coresight_adhoc.c
new file mode 100644
index 0000000000000..c154878ed621f
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_buspm_coresight_adhoc.c
@@ -0,0 +1,189 @@
+/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/of_coresight.h>
+#include <linux/coresight.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/list.h>
+
+/* One bus clock tracked for a CoreSight node, keyed by coresight id. */
+struct msmbus_coresight_adhoc_clock_drvdata {
+ int id;
+ struct clk *clk;
+ struct list_head list;
+};
+
+/* Driver state: the registered CoreSight source plus its clock list. */
+struct msmbus_coresight_adhoc_drvdata {
+ struct device *dev;
+ struct coresight_device *csdev;
+ struct coresight_desc *desc;
+ struct list_head clocks;
+};
+
+/*
+ * Enable tracing for @csdev: locate its clock by id, program the lowest
+ * supported rate and turn the clock on.
+ * Returns 0 on success, -ENOENT when no clock is registered for the id.
+ */
+static int msmbus_coresight_enable_adhoc(struct coresight_device *csdev)
+{
+ struct msmbus_coresight_adhoc_clock_drvdata *clk;
+ struct msmbus_coresight_adhoc_drvdata *drvdata =
+ dev_get_drvdata(csdev->dev.parent);
+ long rate;
+
+ list_for_each_entry(clk, &drvdata->clocks, list) {
+ if (clk->id == csdev->id) {
+ /* round_rate(1) yields the minimum supported rate */
+ rate = clk_round_rate(clk->clk, 1L);
+ /* NOTE(review): clk_set_rate() result is ignored */
+ clk_set_rate(clk->clk, rate);
+ return clk_prepare_enable(clk->clk);
+ }
+ }
+
+ return -ENOENT;
+}
+
+/* Disable tracing for @csdev by gating its registered clock (if any). */
+static void msmbus_coresight_disable_adhoc(struct coresight_device *csdev)
+{
+ struct msmbus_coresight_adhoc_clock_drvdata *clk;
+ struct msmbus_coresight_adhoc_drvdata *drvdata =
+ dev_get_drvdata(csdev->dev.parent);
+
+ list_for_each_entry(clk, &drvdata->clocks, list) {
+ if (clk->id == csdev->id)
+ clk_disable_unprepare(clk->clk);
+ }
+}
+
+/* CoreSight source hooks: only enable/disable are meaningful here. */
+static const struct coresight_ops_source msmbus_coresight_adhoc_source_ops = {
+ .enable = msmbus_coresight_enable_adhoc,
+ .disable = msmbus_coresight_disable_adhoc,
+};
+
+static const struct coresight_ops msmbus_coresight_cs_ops = {
+ .source_ops = &msmbus_coresight_adhoc_source_ops,
+};
+
+/*
+ * Tear down the CoreSight source: disable the clock, unregister the
+ * device and release the clock list and descriptor allocations.
+ *
+ * NOTE(review): disable is called unconditionally, even when the source
+ * was never enabled -- confirm clk_disable_unprepare() refcounting
+ * cannot go negative here.
+ */
+void msmbus_coresight_remove_adhoc(struct platform_device *pdev)
+{
+ struct msmbus_coresight_adhoc_clock_drvdata *clk, *next_clk;
+ struct msmbus_coresight_adhoc_drvdata *drvdata =
+ platform_get_drvdata(pdev);
+
+ msmbus_coresight_disable_adhoc(drvdata->csdev);
+ coresight_unregister(drvdata->csdev);
+ list_for_each_entry_safe(clk, next_clk, &drvdata->clocks, list) {
+ list_del(&clk->list);
+ devm_kfree(&pdev->dev, clk);
+ }
+ devm_kfree(&pdev->dev, drvdata->desc);
+ devm_kfree(&pdev->dev, drvdata);
+ platform_set_drvdata(pdev, NULL);
+}
+EXPORT_SYMBOL(msmbus_coresight_remove_adhoc);
+
+/*
+ * Look up the "bus_clk" clock for @of_node and append it, tagged with
+ * @id, to the drvdata clock list.
+ * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL when the
+ * clock cannot be resolved.
+ *
+ * NOTE(review): the error path discards PTR_ERR(clk->clk) and returns
+ * -EINVAL, which loses -EPROBE_DEFER -- consider propagating it.
+ */
+static int buspm_of_get_clk_adhoc(struct device_node *of_node,
+ struct msmbus_coresight_adhoc_drvdata *drvdata, int id)
+{
+ struct msmbus_coresight_adhoc_clock_drvdata *clk;
+ clk = devm_kzalloc(drvdata->dev, sizeof(*clk), GFP_KERNEL);
+
+ if (!clk)
+ return -ENOMEM;
+
+ clk->id = id;
+
+ clk->clk = of_clk_get_by_name(of_node, "bus_clk");
+ if (IS_ERR(clk->clk)) {
+ pr_err("Error: unable to get clock for coresight node %d\n",
+ id);
+ goto err;
+ }
+
+ list_add(&clk->list, &drvdata->clocks);
+ return 0;
+
+err:
+ devm_kfree(drvdata->dev, clk);
+ return -EINVAL;
+}
+
+/*
+ * msmbus_coresight_init_adhoc() - register one DT node of an MSM bus as
+ * a CoreSight trace source.
+ *
+ * May be called multiple times for the same platform device: the shared
+ * drvdata is allocated on the first call and reused afterwards, with
+ * each call adding one clock entry to the list.
+ *
+ * Returns 0 on success or a negative errno on failure.
+ *
+ * NOTE(review): the err1 path frees and clears drvdata even when it was
+ * installed by an earlier successful call, leaving previously added
+ * clock entries dangling -- confirm this teardown is intended.
+ */
+int msmbus_coresight_init_adhoc(struct platform_device *pdev,
+ struct device_node *of_node)
+{
+ int ret;
+ struct device *dev = &pdev->dev;
+ struct coresight_platform_data *pdata;
+ struct msmbus_coresight_adhoc_drvdata *drvdata;
+ struct coresight_desc *desc;
+
+ pdata = of_get_coresight_platform_data(dev, of_node);
+ if (IS_ERR(pdata))
+ return PTR_ERR(pdata);
+
+ /* First caller allocates the shared drvdata and clock list. */
+ drvdata = platform_get_drvdata(pdev);
+ if (IS_ERR_OR_NULL(drvdata)) {
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata) {
+ pr_err("coresight: Alloc for drvdata failed\n");
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&drvdata->clocks);
+ drvdata->dev = &pdev->dev;
+ platform_set_drvdata(pdev, drvdata);
+ }
+ ret = buspm_of_get_clk_adhoc(of_node, drvdata, pdata->id);
+ if (ret) {
+ pr_err("Error getting clocks\n");
+ ret = -ENXIO;
+ goto err1;
+ }
+
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc) {
+ pr_err("coresight: Error allocating memory\n");
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ desc->type = CORESIGHT_DEV_TYPE_SOURCE;
+ desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_BUS;
+ desc->ops = &msmbus_coresight_cs_ops;
+ desc->pdata = pdata;
+ desc->dev = &pdev->dev;
+ desc->owner = THIS_MODULE;
+ drvdata->desc = desc;
+ drvdata->csdev = coresight_register(desc);
+ if (IS_ERR(drvdata->csdev)) {
+ pr_err("coresight: Coresight register failed\n");
+ ret = PTR_ERR(drvdata->csdev);
+ goto err0;
+ }
+
+ dev_info(dev, "msmbus_coresight initialized\n");
+
+ return 0;
+err0:
+ devm_kfree(dev, desc);
+err1:
+ devm_kfree(dev, drvdata);
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+}
+EXPORT_SYMBOL(msmbus_coresight_init_adhoc);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM BusPM Adhoc CoreSight Driver");
diff --git a/drivers/soc/qcom/msm_bus/qcom_rpm_msm_bus.c b/drivers/soc/qcom/msm_bus/qcom_rpm_msm_bus.c
new file mode 100644
index 0000000000000..cc786252223e1
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/qcom_rpm_msm_bus.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/msm-bus.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+/* Handle to the parent SMD RPM device, filled in at probe time. */
+struct qcom_rpm_msm_bus_info {
+ struct qcom_smd_rpm *rpm;
+};
+
+static struct qcom_rpm_msm_bus_info rpm_bus_info;
+
+/*
+ * Forward a bus request @req of resource @rsc_type/@id to the RPM over
+ * SMD in context @ctx. Returns the qcom_rpm_smd_write() result.
+ *
+ * NOTE(review): rpm_bus_info.rpm is NULL until rpm_msm_bus_probe() has
+ * run -- callers invoking this earlier would pass NULL to
+ * qcom_rpm_smd_write(); confirm ordering or add a guard.
+ */
+int qcom_rpm_bus_send_message(int ctx, int rsc_type, int id,
+ struct qcom_msm_bus_req *req)
+{
+ return qcom_rpm_smd_write(rpm_bus_info.rpm, ctx, rsc_type, id, req,
+ sizeof(*req));
+}
+EXPORT_SYMBOL(qcom_rpm_bus_send_message);
+
+/*
+ * Probe: cache the parent device's qcom_smd_rpm handle for use by
+ * qcom_rpm_bus_send_message(). Fails with -ENODEV when the parent has
+ * no drvdata (i.e. the RPM driver has not bound).
+ */
+static int rpm_msm_bus_probe(struct platform_device *pdev)
+{
+ rpm_bus_info.rpm = dev_get_drvdata(pdev->dev.parent);
+ if (!rpm_bus_info.rpm) {
+ dev_err(&pdev->dev, "unable to retrieve handle to rpm\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/* Matches the "qcom,rpm-msm-bus" child node of the RPM device. */
+static const struct of_device_id rpm_msm_bus_dt_match[] = {
+ { .compatible = "qcom,rpm-msm-bus", },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, rpm_msm_bus_dt_match);
+
+/* No remove callback: the cached rpm handle lives for module lifetime. */
+static struct platform_driver rpm_msm_bus_driver = {
+ .driver = {
+ .name = "rpm-msm-bus",
+ .of_match_table = rpm_msm_bus_dt_match,
+ },
+ .probe = rpm_msm_bus_probe,
+};
+
+module_platform_driver(rpm_msm_bus_driver);
+
+MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
+MODULE_DESCRIPTION("QCOM RPM msm bus driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
index 731fa066f712e..5296e5189a1a4 100644
--- a/drivers/soc/qcom/smd-rpm.c
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -20,23 +20,29 @@
#include <linux/slab.h>
#include <linux/soc/qcom/smd.h>
+#include <soc/qcom/glink.h>
#include <linux/soc/qcom/smd-rpm.h>
#define RPM_REQUEST_TIMEOUT (5 * HZ)
+#define SMD_RPM BIT(0)
+#define GLINK_RPM BIT(1)
+
/**
* struct qcom_smd_rpm - state of the rpm device driver
- * @rpm_channel: reference to the smd channel
+ * @smd_channel: reference to the smd channel
* @ack: completion for acks
* @lock: mutual exclusion around the send/complete pair
* @ack_status: result of the rpm request
*/
struct qcom_smd_rpm {
- struct qcom_smd_channel *rpm_channel;
-
+ struct device *dev;
+ struct qcom_smd_channel *smd_channel;
+ void *glink_channel;
struct completion ack;
struct mutex lock;
int ack_status;
+ u8 flag;
};
/**
@@ -132,7 +138,12 @@ int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
pkt->req.data_len = cpu_to_le32(count);
memcpy(pkt->payload, buf, count);
- ret = qcom_smd_send(rpm->rpm_channel, pkt, size);
+ if (rpm->flag & SMD_RPM) {
+ ret = qcom_smd_send(rpm->smd_channel, pkt, size);
+ } else {
+ ret = glink_tx(rpm->glink_channel, pkt, pkt, size,
+ GLINK_TX_SINGLE_THREADED);
+ }
if (ret)
goto out;
@@ -149,14 +160,13 @@ out:
}
EXPORT_SYMBOL(qcom_rpm_smd_write);
-static int qcom_smd_rpm_callback(struct qcom_smd_device *qsdev,
+static int qcom_ipc_rpm_callback(struct qcom_smd_rpm *rpm,
const void *data,
size_t count)
{
const struct qcom_rpm_header *hdr = data;
size_t hdr_length = le32_to_cpu(hdr->length);
const struct qcom_rpm_message *msg;
- struct qcom_smd_rpm *rpm = dev_get_drvdata(&qsdev->dev);
const u8 *buf = data + sizeof(struct qcom_rpm_header);
const u8 *end = buf + hdr_length;
char msgbuf[32];
@@ -165,13 +175,14 @@ static int qcom_smd_rpm_callback(struct qcom_smd_device *qsdev,
if (le32_to_cpu(hdr->service_type) != RPM_SERVICE_TYPE_REQUEST ||
hdr_length < sizeof(struct qcom_rpm_message)) {
- dev_err(&qsdev->dev, "invalid request\n");
+ dev_err(rpm->dev, "invalid request\n");
return 0;
}
while (buf < end) {
msg = (struct qcom_rpm_message *)buf;
msg_length = le32_to_cpu(msg->length);
+
switch (le32_to_cpu(msg->msg_type)) {
case RPM_MSG_TYPE_MSG_ID:
break;
@@ -191,64 +202,114 @@ static int qcom_smd_rpm_callback(struct qcom_smd_device *qsdev,
}
rpm->ack_status = status;
+
complete(&rpm->ack);
return 0;
}
-static int qcom_smd_rpm_probe(struct qcom_smd_device *sdev)
+/*
+ * Transport-specific rx wrappers: recover the qcom_smd_rpm instance
+ * from the channel's private data and hand the payload to the common
+ * qcom_ipc_rpm_callback().
+ */
+static int qcom_smd_rpm_callback(void *dev,
+ const void *data,
+ size_t count)
+{
+ struct qcom_smd_rpm *rpm = qcom_smd_get_drvdata(dev);
+ return qcom_ipc_rpm_callback(rpm, data, count);
+}
+
+static int qcom_glink_rpm_callback(void *dev,
+ const void *data,
+ size_t count)
+{
+ struct qcom_smd_rpm *rpm = qcom_glink_get_drvdata(dev);
+ return qcom_ipc_rpm_callback(rpm, data, count);
+}
+
+static int qcom_ipc_rpm_probe(struct qcom_smd_device *idev)
{
struct qcom_smd_rpm *rpm;
- rpm = devm_kzalloc(&sdev->dev, sizeof(*rpm), GFP_KERNEL);
+ rpm = devm_kzalloc(&idev->dev, sizeof(*rpm), GFP_KERNEL);
if (!rpm)
return -ENOMEM;
+ if (of_device_is_compatible(idev->dev.of_node, "qcom,rpm-msm8996")) {
+ rpm->flag |= GLINK_RPM;
+ rpm->glink_channel = idev->channel;
+ qcom_glink_set_drvdata(rpm->glink_channel, rpm);
+ } else { /* default behaviour */
+ rpm->flag |= SMD_RPM;
+ rpm->smd_channel = idev->channel;
+ qcom_smd_set_drvdata(rpm->smd_channel, rpm);
+ }
+
mutex_init(&rpm->lock);
init_completion(&rpm->ack);
- rpm->rpm_channel = sdev->channel;
-
- dev_set_drvdata(&sdev->dev, rpm);
+ rpm->dev = &idev->dev;
+ dev_set_drvdata(&idev->dev, rpm);
- return of_platform_populate(sdev->dev.of_node, NULL, NULL, &sdev->dev);
+ return of_platform_populate(idev->dev.of_node, NULL, NULL, &idev->dev);
}
-static void qcom_smd_rpm_remove(struct qcom_smd_device *sdev)
+static void qcom_ipc_rpm_remove(struct qcom_smd_device *idev)
{
- of_platform_depopulate(&sdev->dev);
+ of_platform_depopulate(&idev->dev);
}
static const struct of_device_id qcom_smd_rpm_of_match[] = {
{ .compatible = "qcom,rpm-apq8084" },
{ .compatible = "qcom,rpm-msm8916" },
{ .compatible = "qcom,rpm-msm8974" },
+ /*
+ * NOTE(review): duplicates the "qcom,rpm-msm8916" entry above --
+ * likely a typo for a different SoC; drop or correct this entry.
+ */
+ { .compatible = "qcom,rpm-msm8916" },
{}
};
MODULE_DEVICE_TABLE(of, qcom_smd_rpm_of_match);
+static const struct of_device_id qcom_glink_rpm_of_match[] = {
+ { .compatible = "qcom,rpm-msm8996" },
+ {}
+};
+
static struct qcom_smd_driver qcom_smd_rpm_driver = {
- .probe = qcom_smd_rpm_probe,
- .remove = qcom_smd_rpm_remove,
+ .probe = qcom_ipc_rpm_probe,
+ .remove = qcom_ipc_rpm_remove,
.callback = qcom_smd_rpm_callback,
.driver = {
- .name = "qcom_smd_rpm",
+ .name = "qcom_ipc_rpm",
.owner = THIS_MODULE,
.of_match_table = qcom_smd_rpm_of_match,
},
};
+/*
+ * GLINK-transport variant of the RPM driver; shares probe/remove with
+ * the SMD driver and differs only in the rx callback and match table.
+ *
+ * NOTE(review): .name "qcom_ipc_rpm" is identical to
+ * qcom_smd_rpm_driver's name above -- two drivers registered with the
+ * same name will collide in sysfs/driver registration; confirm the
+ * registration paths keep them apart or rename one.
+ */
+static struct qcom_smd_driver qcom_glink_rpm_driver = {
+ .probe = qcom_ipc_rpm_probe,
+ .remove = qcom_ipc_rpm_remove,
+ .callback = qcom_glink_rpm_callback,
+ .driver = {
+ .name = "qcom_ipc_rpm",
+ .owner = THIS_MODULE,
+ .of_match_table = qcom_glink_rpm_of_match,
+ },
+};
+
static int __init qcom_smd_rpm_init(void)
{
return qcom_smd_driver_register(&qcom_smd_rpm_driver);
}
-arch_initcall(qcom_smd_rpm_init);
+subsys_initcall(qcom_smd_rpm_init);
+
+static int __init qcom_glink_rpm_init(void)
+{
+ return qcom_glink_driver_register(&qcom_glink_rpm_driver);
+}
+subsys_initcall(qcom_glink_rpm_init);
-static void __exit qcom_smd_rpm_exit(void)
+static void __exit qcom_ipc_rpm_exit(void)
{
qcom_smd_driver_unregister(&qcom_smd_rpm_driver);
+ qcom_glink_driver_unregister(&qcom_glink_rpm_driver);
}
-module_exit(qcom_smd_rpm_exit);
+module_exit(qcom_ipc_rpm_exit);
MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
-MODULE_DESCRIPTION("Qualcomm SMD backed RPM driver");
+MODULE_DESCRIPTION("Qualcomm SMD/GLINK backed RPM driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c
index 498fd0581a451..ba90e9512fc06 100644
--- a/drivers/soc/qcom/smd.c
+++ b/drivers/soc/qcom/smd.c
@@ -69,6 +69,8 @@ struct smd_channel_info_pair;
struct smd_channel_info_word;
struct smd_channel_info_word_pair;
+static struct bus_type *ipc_bus;
+
#define SMD_ALLOC_TBL_COUNT 2
#define SMD_ALLOC_TBL_SIZE 64
@@ -106,9 +108,9 @@ static const struct {
* @channels: list of all channels detected on this edge
* @channels_lock: guard for modifications of @channels
* @allocated: array of bitmaps representing already allocated channels
- * @need_rescan: flag that the @work needs to scan smem for new channels
* @smem_available: last available amount of smem triggering a channel scan
- * @work: work item for edge house keeping
+ * @scan_work: work item for discovering new channels
+ * @state_work: work item for edge state changes
*/
struct qcom_smd_edge {
struct qcom_smd *smd;
@@ -127,10 +129,12 @@ struct qcom_smd_edge {
DECLARE_BITMAP(allocated[SMD_ALLOC_TBL_COUNT], SMD_ALLOC_TBL_SIZE);
- bool need_rescan;
unsigned smem_available;
- struct work_struct work;
+ wait_queue_head_t new_channel_event;
+
+ struct work_struct scan_work;
+ struct work_struct state_work;
};
/*
@@ -169,7 +173,7 @@ enum smd_channel_state {
struct qcom_smd_channel {
struct qcom_smd_edge *edge;
- struct qcom_smd_device *qsdev;
+ struct qcom_smd_device *qidev;
char *name;
enum smd_channel_state state;
@@ -186,13 +190,16 @@ struct qcom_smd_channel {
int fifo_size;
void *bounce_buffer;
- int (*cb)(struct qcom_smd_device *, const void *, size_t);
+ qcom_smd_cb_t cb;
spinlock_t recv_lock;
int pkt_size;
+ void *drvdata;
+
struct list_head list;
+ struct list_head dev_list;
};
/**
@@ -378,6 +385,19 @@ static void qcom_smd_channel_reset(struct qcom_smd_channel *channel)
}
/*
+ * Set the callback for a channel, with appropriate locking
+ */
+static void qcom_smd_channel_set_callback(struct qcom_smd_channel *channel,
+ qcom_smd_cb_t cb)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&channel->recv_lock, flags);
+ channel->cb = cb;
+ spin_unlock_irqrestore(&channel->recv_lock, flags);
+};
+
+/*
* Calculate the amount of data available in the rx fifo
*/
static size_t qcom_smd_channel_get_rx_avail(struct qcom_smd_channel *channel)
@@ -497,7 +517,6 @@ static void qcom_smd_channel_advance(struct qcom_smd_channel *channel,
*/
static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel)
{
- struct qcom_smd_device *qsdev = channel->qsdev;
unsigned tail;
size_t len;
void *ptr;
@@ -517,7 +536,8 @@ static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel)
len = channel->pkt_size;
}
- ret = channel->cb(qsdev, ptr, len);
+ ret = channel->cb(channel, ptr, len);
+
if (ret < 0)
return ret;
@@ -601,7 +621,8 @@ static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
struct qcom_smd_edge *edge = data;
struct qcom_smd_channel *channel;
unsigned available;
- bool kick_worker = false;
+ bool kick_scanner = false;
+ bool kick_state = false;
/*
* Handle state changes or data on each of the channels on this edge
@@ -609,7 +630,7 @@ static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
spin_lock(&edge->channels_lock);
list_for_each_entry(channel, &edge->channels, list) {
spin_lock(&channel->recv_lock);
- kick_worker |= qcom_smd_channel_intr(channel);
+ kick_state |= qcom_smd_channel_intr(channel);
spin_unlock(&channel->recv_lock);
}
spin_unlock(&edge->channels_lock);
@@ -622,12 +643,13 @@ static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
available = qcom_smem_get_free_space(edge->remote_pid);
if (available != edge->smem_available) {
edge->smem_available = available;
- edge->need_rescan = true;
- kick_worker = true;
+ kick_scanner = true;
}
- if (kick_worker)
- schedule_work(&edge->work);
+ if (kick_scanner)
+ schedule_work(&edge->scan_work);
+ if (kick_state)
+ schedule_work(&edge->state_work);
return IRQ_HANDLED;
}
@@ -762,24 +784,24 @@ out:
}
EXPORT_SYMBOL(qcom_smd_send);
-static struct qcom_smd_device *to_smd_device(struct device *dev)
+static struct qcom_smd_device *to_ipc_device(struct device *dev)
{
return container_of(dev, struct qcom_smd_device, dev);
}
-static struct qcom_smd_driver *to_smd_driver(struct device *dev)
+static struct qcom_smd_driver *to_ipc_driver(struct device *dev)
{
- struct qcom_smd_device *qsdev = to_smd_device(dev);
+ struct qcom_smd_device *qidev = to_ipc_device(dev);
- return container_of(qsdev->dev.driver, struct qcom_smd_driver, driver);
+ return container_of(qidev->dev.driver, struct qcom_smd_driver, driver);
}
-static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv)
+static int qcom_ipc_dev_match(struct device *dev, struct device_driver *drv)
{
- struct qcom_smd_device *qsdev = to_smd_device(dev);
- struct qcom_smd_driver *qsdrv = container_of(drv, struct qcom_smd_driver, driver);
- const struct qcom_smd_id *match = qsdrv->smd_match_table;
- const char *name = qsdev->channel->name;
+ struct qcom_smd_device *qidev = to_ipc_device(dev);
+ struct qcom_smd_driver *qidrv = container_of(drv, struct qcom_smd_driver, driver);
+ const struct qcom_smd_id *match = qidrv->smd_match_table;
+ const char *name = qidev->channel->name;
if (match) {
while (match->name[0]) {
@@ -793,18 +815,12 @@ static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv)
}
/*
- * Probe the smd client.
- *
- * The remote side have indicated that it want the channel to be opened, so
- * complete the state handshake and probe our client driver.
+ * Helper for opening a channel
*/
-static int qcom_smd_dev_probe(struct device *dev)
+static int qcom_smd_channel_open(struct qcom_smd_channel *channel,
+ qcom_smd_cb_t cb)
{
- struct qcom_smd_device *qsdev = to_smd_device(dev);
- struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
- struct qcom_smd_channel *channel = qsdev->channel;
size_t bb_size;
- int ret;
/*
* Packets are maximum 4k, but reduce if the fifo is smaller
@@ -814,13 +830,45 @@ static int qcom_smd_dev_probe(struct device *dev)
if (!channel->bounce_buffer)
return -ENOMEM;
- channel->cb = qsdrv->callback;
-
+ qcom_smd_channel_set_callback(channel, cb);
qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING);
-
qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED);
- ret = qsdrv->probe(qsdev);
+ return 0;
+}
+
+/*
+ * Helper for closing and resetting a channel
+ */
+static void qcom_smd_channel_close(struct qcom_smd_channel *channel)
+{
+ qcom_smd_channel_set_callback(channel, NULL);
+
+ kfree(channel->bounce_buffer);
+ channel->bounce_buffer = NULL;
+
+ qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
+ qcom_smd_channel_reset(channel);
+}
+
+/*
+ * Probe the smd client.
+ *
+ * The remote side have indicated that it want the channel to be opened, so
+ * complete the state handshake and probe our client driver.
+ */
+static int qcom_ipc_dev_probe(struct device *dev)
+{
+ struct qcom_smd_device *qidev = to_ipc_device(dev);
+ struct qcom_smd_driver *qidrv = to_ipc_driver(dev);
+ struct qcom_smd_channel *channel = qidev->channel;
+ int ret;
+
+ ret = qcom_smd_channel_open(channel, qidrv->callback);
+ if (ret)
+ return ret;
+
+ ret = qidrv->probe(qidev);
if (ret)
goto err;
@@ -829,13 +877,9 @@ static int qcom_smd_dev_probe(struct device *dev)
return 0;
err:
- dev_err(&qsdev->dev, "probe failed\n");
+ dev_err(&qidev->dev, "probe failed\n");
- channel->cb = NULL;
- kfree(channel->bounce_buffer);
- channel->bounce_buffer = NULL;
-
- qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
+ qcom_smd_channel_close(channel);
return ret;
}
@@ -845,21 +889,20 @@ err:
* The channel is going away, for some reason, so remove the smd client and
* reset the channel state.
*/
-static int qcom_smd_dev_remove(struct device *dev)
+static int qcom_ipc_dev_remove(struct device *dev)
{
- struct qcom_smd_device *qsdev = to_smd_device(dev);
- struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
- struct qcom_smd_channel *channel = qsdev->channel;
- unsigned long flags;
+ struct qcom_smd_device *qidev = to_ipc_device(dev);
+ struct qcom_smd_driver *qidrv = to_ipc_driver(dev);
+ struct qcom_smd_channel *channel = qidev->channel;
+ struct qcom_smd_channel *tmp;
+ struct qcom_smd_channel *ch;
qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSING);
/*
* Make sure we don't race with the code receiving data.
*/
- spin_lock_irqsave(&channel->recv_lock, flags);
- channel->cb = NULL;
- spin_unlock_irqrestore(&channel->recv_lock, flags);
+ qcom_smd_channel_set_callback(channel, NULL);
/* Wake up any sleepers in qcom_smd_send() */
wake_up_interruptible(&channel->fblockread_event);
@@ -868,38 +911,37 @@ static int qcom_smd_dev_remove(struct device *dev)
* We expect that the client might block in remove() waiting for any
* outstanding calls to qcom_smd_send() to wake up and finish.
*/
- if (qsdrv->remove)
- qsdrv->remove(qsdev);
+ if (qidrv->remove)
+ qidrv->remove(qidev);
/*
- * The client is now gone, cleanup and reset the channel state.
+ * The client is now gone, close and release all channels associated
+ * with this sdev
*/
- channel->qsdev = NULL;
- kfree(channel->bounce_buffer);
- channel->bounce_buffer = NULL;
-
- qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
-
- qcom_smd_channel_reset(channel);
+ list_for_each_entry_safe(ch, tmp, &channel->dev_list, dev_list) {
+ qcom_smd_channel_close(ch);
+ list_del(&ch->dev_list);
+ ch->qidev = NULL;
+ }
return 0;
}
-static struct bus_type qcom_smd_bus = {
+static struct bus_type qcom_ipc_bus = {
.name = "qcom_smd",
- .match = qcom_smd_dev_match,
- .probe = qcom_smd_dev_probe,
- .remove = qcom_smd_dev_remove,
+ .match = qcom_ipc_dev_match,
+ .probe = qcom_ipc_dev_probe,
+ .remove = qcom_ipc_dev_remove,
};
/*
* Release function for the qcom_smd_device object.
*/
-static void qcom_smd_release_device(struct device *dev)
+static void qcom_ipc_release_device(struct device *dev)
{
- struct qcom_smd_device *qsdev = to_smd_device(dev);
+ struct qcom_smd_device *qidev = to_ipc_device(dev);
- kfree(qsdev);
+ kfree(qidev);
}
/*
@@ -929,41 +971,40 @@ static struct device_node *qcom_smd_match_channel(struct device_node *edge_node,
/*
* Create a smd client device for channel that is being opened.
*/
-static int qcom_smd_create_device(struct qcom_smd_channel *channel)
+static int qcom_ipc_create_device(struct qcom_smd_channel *channel)
{
- struct qcom_smd_device *qsdev;
+ struct qcom_smd_device *qidev;
struct qcom_smd_edge *edge = channel->edge;
struct device_node *node;
struct qcom_smd *smd = edge->smd;
int ret;
- if (channel->qsdev)
+ if (channel->qidev)
return -EEXIST;
dev_dbg(smd->dev, "registering '%s'\n", channel->name);
- qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL);
- if (!qsdev)
+ qidev = kzalloc(sizeof(*qidev), GFP_KERNEL);
+ if (!qidev)
return -ENOMEM;
node = qcom_smd_match_channel(edge->of_node, channel->name);
- dev_set_name(&qsdev->dev, "%s.%s",
+ dev_set_name(&qidev->dev, "%s.%s",
edge->of_node->name,
node ? node->name : channel->name);
- qsdev->dev.parent = smd->dev;
- qsdev->dev.bus = &qcom_smd_bus;
- qsdev->dev.release = qcom_smd_release_device;
- qsdev->dev.of_node = node;
-
- qsdev->channel = channel;
+ qidev->dev.parent = smd->dev;
+ qidev->dev.bus = &qcom_ipc_bus;
+ qidev->dev.release = qcom_ipc_release_device;
+ qidev->dev.of_node = node;
- channel->qsdev = qsdev;
+ qidev->channel = channel;
+ channel->qidev = qidev;
- ret = device_register(&qsdev->dev);
+ ret = device_register(&qidev->dev);
if (ret) {
dev_err(smd->dev, "device_register failed: %d\n", ret);
- put_device(&qsdev->dev);
+ put_device(&qidev->dev);
}
return ret;
@@ -972,13 +1013,13 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel)
/*
* Destroy a smd client device for a channel that's going away.
*/
-static void qcom_smd_destroy_device(struct qcom_smd_channel *channel)
+static void qcom_ipc_destroy_device(struct qcom_smd_channel *channel)
{
struct device *dev;
- BUG_ON(!channel->qsdev);
+ BUG_ON(!channel->qidev);
- dev = &channel->qsdev->dev;
+ dev = &channel->qidev->dev;
device_unregister(dev);
of_node_put(dev->of_node);
@@ -987,25 +1028,117 @@ static void qcom_smd_destroy_device(struct qcom_smd_channel *channel)
/**
* qcom_smd_driver_register - register a smd driver
- * @qsdrv: qcom_smd_driver struct
+ * @qidrv: qcom_smd_driver struct
*/
-int qcom_smd_driver_register(struct qcom_smd_driver *qsdrv)
+int qcom_smd_driver_register(struct qcom_smd_driver *qidrv)
{
- qsdrv->driver.bus = &qcom_smd_bus;
- return driver_register(&qsdrv->driver);
+ if (!ipc_bus)
+ return 0;
+
+ qidrv->driver.bus = ipc_bus;
+ return driver_register(&qidrv->driver);
}
EXPORT_SYMBOL(qcom_smd_driver_register);
+/*
+ * Per-channel private-data accessors. @ch is an opaque pointer to a
+ * struct qcom_smd_channel; kept void * so clients need not see the
+ * channel definition.
+ */
+void *qcom_smd_get_drvdata(void *ch)
+{
+ struct qcom_smd_channel *channel = ch;
+ return channel->drvdata;
+}
+EXPORT_SYMBOL(qcom_smd_get_drvdata);
+
+void qcom_smd_set_drvdata(void *ch, void *data)
+{
+ struct qcom_smd_channel *channel = ch;
+ channel->drvdata = data;
+}
+EXPORT_SYMBOL(qcom_smd_set_drvdata);
+
/**
* qcom_smd_driver_unregister - unregister a smd driver
- * @qsdrv: qcom_smd_driver struct
+ * @qidrv: qcom_smd_driver struct
*/
-void qcom_smd_driver_unregister(struct qcom_smd_driver *qsdrv)
+void qcom_smd_driver_unregister(struct qcom_smd_driver *qidrv)
{
- driver_unregister(&qsdrv->driver);
+ if (!ipc_bus)
+ return;
+
+ driver_unregister(&qidrv->driver);
}
EXPORT_SYMBOL(qcom_smd_driver_unregister);
+/*
+ * Find the channel named @name on @edge whose remote side is in the
+ * OPENING or OPENED state. Returns the channel or NULL if not found.
+ * Takes edge->channels_lock to walk the list safely.
+ */
+static struct qcom_smd_channel *
+qcom_smd_find_channel(struct qcom_smd_edge *edge, const char *name)
+{
+ struct qcom_smd_channel *channel;
+ struct qcom_smd_channel *ret = NULL;
+ unsigned long flags;
+ unsigned state;
+
+ spin_lock_irqsave(&edge->channels_lock, flags);
+ list_for_each_entry(channel, &edge->channels, list) {
+ if (strcmp(channel->name, name))
+ continue;
+
+ state = GET_RX_CHANNEL_INFO(channel, state);
+ if (state != SMD_CHANNEL_OPENING &&
+ state != SMD_CHANNEL_OPENED)
+ continue;
+
+ ret = channel;
+ break;
+ }
+ spin_unlock_irqrestore(&edge->channels_lock, flags);
+
+ return ret;
+}
+
+/**
+ * qcom_smd_open_channel() - claim additional channels on the same edge
+ * @parent: already-open channel whose edge and device are reused
+ * @name: channel name
+ * @cb: callback method to use for incoming data
+ *
+ * Returns a channel handle on success, or an ERR_PTR on failure
+ * (-ETIMEDOUT if the channel does not appear within HZ, -EBUSY if it
+ * is not closed).
+ *
+ * NOTE(review): wait_event_interruptible_timeout() may also return
+ * -ERESTARTSYS on a signal; only the ret == 0 timeout case is handled,
+ * so an interrupted wait falls through with @channel possibly NULL and
+ * it is dereferenced below. The channel->state read is also done
+ * without edge->channels_lock -- confirm both.
+ */
+struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *parent,
+ const char *name,
+ qcom_smd_cb_t cb)
+{
+ struct qcom_smd_channel *channel;
+ struct qcom_smd_device *sdev = parent->qidev;
+ struct qcom_smd_edge *edge = parent->edge;
+ int ret;
+
+ /* Wait up to HZ for the channel to appear */
+ ret = wait_event_interruptible_timeout(edge->new_channel_event,
+ (channel = qcom_smd_find_channel(edge, name)) != NULL,
+ HZ);
+ if (!ret)
+ return ERR_PTR(-ETIMEDOUT);
+
+ if (channel->state != SMD_CHANNEL_CLOSED) {
+ dev_err(&sdev->dev, "channel %s is busy\n", channel->name);
+ return ERR_PTR(-EBUSY);
+ }
+
+ channel->qidev = sdev;
+ ret = qcom_smd_channel_open(channel, cb);
+ if (ret) {
+ channel->qidev = NULL;
+ return ERR_PTR(ret);
+ }
+
+ /*
+ * Append the list of channel to the channels associated with the sdev
+ */
+ list_add_tail(&channel->dev_list, &sdev->channel->dev_list);
+
+ return channel;
+}
+EXPORT_SYMBOL(qcom_smd_open_channel);
+
/*
* Allocate the qcom_smd_channel object for a newly found smd channel,
* retrieving and validating the smem items involved.
@@ -1027,6 +1160,7 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
if (!channel)
return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&channel->dev_list);
channel->edge = edge;
channel->name = devm_kstrdup(smd->dev, name, GFP_KERNEL);
if (!channel->name)
@@ -1089,8 +1223,9 @@ free_name_and_channel:
* qcom_smd_create_channel() to create representations of these and add
* them to the edge's list of channels.
*/
-static void qcom_discover_channels(struct qcom_smd_edge *edge)
+static void qcom_channel_scan_worker(struct work_struct *work)
{
+ struct qcom_smd_edge *edge = container_of(work, struct qcom_smd_edge, scan_work);
struct qcom_smd_alloc_entry *alloc_tbl;
struct qcom_smd_alloc_entry *entry;
struct qcom_smd_channel *channel;
@@ -1140,10 +1275,12 @@ static void qcom_discover_channels(struct qcom_smd_edge *edge)
dev_dbg(smd->dev, "new channel found: '%s'\n", channel->name);
set_bit(i, edge->allocated[tbl]);
+
+ wake_up_interruptible(&edge->new_channel_event);
}
}
- schedule_work(&edge->work);
+ schedule_work(&edge->state_work);
}
/*
@@ -1151,29 +1288,23 @@ static void qcom_discover_channels(struct qcom_smd_edge *edge)
* then scans all registered channels for state changes that should be handled
* by creating or destroying smd client devices for the registered channels.
*
- * LOCKING: edge->channels_lock is not needed to be held during the traversal
- * of the channels list as it's done synchronously with the only writer.
+ * LOCKING: edge->channels_lock only needs to cover the list operations, as the
+ * worker is killed before any channels are deallocated
*/
static void qcom_channel_state_worker(struct work_struct *work)
{
struct qcom_smd_channel *channel;
struct qcom_smd_edge *edge = container_of(work,
struct qcom_smd_edge,
- work);
+ state_work);
unsigned remote_state;
-
- /*
- * Rescan smem if we have reason to belive that there are new channels.
- */
- if (edge->need_rescan) {
- edge->need_rescan = false;
- qcom_discover_channels(edge);
- }
+ unsigned long flags;
/*
* Register a device for any closed channel where the remote processor
* is showing interest in opening the channel.
*/
+ spin_lock_irqsave(&edge->channels_lock, flags);
list_for_each_entry(channel, &edge->channels, list) {
if (channel->state != SMD_CHANNEL_CLOSED)
continue;
@@ -1183,7 +1314,9 @@ static void qcom_channel_state_worker(struct work_struct *work)
remote_state != SMD_CHANNEL_OPENED)
continue;
- qcom_smd_create_device(channel);
+ spin_unlock_irqrestore(&edge->channels_lock, flags);
+ qcom_ipc_create_device(channel);
+ spin_lock_irqsave(&edge->channels_lock, flags);
}
/*
@@ -1200,8 +1333,11 @@ static void qcom_channel_state_worker(struct work_struct *work)
remote_state == SMD_CHANNEL_OPENED)
continue;
- qcom_smd_destroy_device(channel);
+ spin_unlock_irqrestore(&edge->channels_lock, flags);
+ qcom_ipc_destroy_device(channel);
+ spin_lock_irqsave(&edge->channels_lock, flags);
}
+ spin_unlock_irqrestore(&edge->channels_lock, flags);
}
/*
@@ -1219,7 +1355,8 @@ static int qcom_smd_parse_edge(struct device *dev,
INIT_LIST_HEAD(&edge->channels);
spin_lock_init(&edge->channels_lock);
- INIT_WORK(&edge->work, qcom_channel_state_worker);
+ INIT_WORK(&edge->scan_work, qcom_channel_scan_worker);
+ INIT_WORK(&edge->state_work, qcom_channel_state_worker);
edge->of_node = of_node_get(node);
@@ -1303,13 +1440,13 @@ static int qcom_smd_probe(struct platform_device *pdev)
for_each_available_child_of_node(pdev->dev.of_node, node) {
edge = &smd->edges[i++];
edge->smd = smd;
+ init_waitqueue_head(&edge->new_channel_event);
ret = qcom_smd_parse_edge(&pdev->dev, node, edge);
if (ret)
continue;
- edge->need_rescan = true;
- schedule_work(&edge->work);
+ schedule_work(&edge->scan_work);
}
platform_set_drvdata(pdev, smd);
@@ -1332,13 +1469,15 @@ static int qcom_smd_remove(struct platform_device *pdev)
edge = &smd->edges[i];
disable_irq(edge->irq);
- cancel_work_sync(&edge->work);
+ cancel_work_sync(&edge->scan_work);
+ cancel_work_sync(&edge->state_work);
+ /* No need to lock here, because the writer is gone */
list_for_each_entry(channel, &edge->channels, list) {
- if (!channel->qsdev)
+ if (!channel->qidev)
continue;
- qcom_smd_destroy_device(channel);
+ qcom_ipc_destroy_device(channel);
}
}
@@ -1360,27 +1499,38 @@ static struct platform_driver qcom_smd_driver = {
},
};
-static int __init qcom_smd_init(void)
-{
- int ret;
+static const struct of_device_id smd_of_device_ids[] __initconst = {
+ { .compatible = "qcom,smd" },
+ {}
+};
- ret = bus_register(&qcom_smd_bus);
- if (ret) {
- pr_err("failed to register smd bus: %d\n", ret);
- return ret;
+static int __init qcom_ipc_init(void)
+{
+ struct device_node *np;
+ int ret = 0;
+
+ np = of_find_matching_node(NULL, smd_of_device_ids);
+ if (np) {
+ ret = bus_register(&qcom_ipc_bus);
+ if (ret) {
+ pr_err("failed to register smd bus: %d\n", ret);
+ return ret;
+ }
+ ipc_bus = &qcom_ipc_bus;
+ return platform_driver_register(&qcom_smd_driver);
}
- return platform_driver_register(&qcom_smd_driver);
+ return ret;
}
-postcore_initcall(qcom_smd_init);
+postcore_initcall(qcom_ipc_init);
-static void __exit qcom_smd_exit(void)
+static void __exit qcom_ipc_exit(void)
{
platform_driver_unregister(&qcom_smd_driver);
- bus_unregister(&qcom_smd_bus);
+ bus_unregister(&qcom_ipc_bus);
}
-module_exit(qcom_smd_exit);
+module_exit(qcom_ipc_exit);
MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@sonymobile.com>");
-MODULE_DESCRIPTION("Qualcomm Shared Memory Driver");
+MODULE_DESCRIPTION("Qualcomm SMD ipc Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 19019aa092e86..2e1aa9f130f4d 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -684,8 +684,7 @@ static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev,
smem->regions[i].aux_base = (u32)r.start;
smem->regions[i].size = resource_size(&r);
- smem->regions[i].virt_base = devm_ioremap_nocache(dev, r.start,
- resource_size(&r));
+ smem->regions[i].virt_base = devm_ioremap_wc(dev, r.start, resource_size(&r));
if (!smem->regions[i].virt_base)
return -ENOMEM;
diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c
index 5548a31e1a39a..cd2747a2f5be7 100644
--- a/drivers/soc/qcom/spm.c
+++ b/drivers/soc/qcom/spm.c
@@ -20,11 +20,14 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/qcom_scm.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
#include <asm/cpuidle.h>
#include <asm/proc-fns.h>
@@ -36,6 +39,9 @@
#define SPM_CTL_INDEX_SHIFT 4
#define SPM_CTL_EN BIT(0)
+/* Specifies the PMIC internal slew rate in uV/us. */
+#define REGULATOR_SLEW_RATE 1250
+
enum pm_sleep_mode {
PM_SLEEP_MODE_STBY,
PM_SLEEP_MODE_RET,
@@ -51,6 +57,8 @@ enum spm_reg {
SPM_REG_PMIC_DLY,
SPM_REG_PMIC_DATA_0,
SPM_REG_PMIC_DATA_1,
+ SPM_REG_RST,
+ SPM_REG_STS_1,
SPM_REG_VCTL,
SPM_REG_SEQ_ENTRY,
SPM_REG_SPM_STS,
@@ -68,9 +76,23 @@ struct spm_reg_data {
u8 start_index[PM_SLEEP_MODE_NR];
};
+struct spm_vlevel_data {
+ struct spm_driver_data *drv;
+ unsigned selector;
+};
+
+struct saw2_vreg {
+ struct regulator_desc rdesc;
+ struct regulator_dev *rdev;
+ unsigned int uV;
+ u32 vlevel;
+ struct spm_driver_data *drv;
+};
+
struct spm_driver_data {
void __iomem *reg_base;
const struct spm_reg_data *reg_data;
+ struct saw2_vreg *vreg;
};
static const u8 spm_reg_offset_v2_1[SPM_REG_NR] = {
@@ -94,10 +116,13 @@ static const struct spm_reg_data spm_reg_8974_8084_cpu = {
static const u8 spm_reg_offset_v1_1[SPM_REG_NR] = {
[SPM_REG_CFG] = 0x08,
+ [SPM_REG_STS_1] = 0x10,
+ [SPM_REG_VCTL] = 0x14,
[SPM_REG_SPM_CTL] = 0x20,
[SPM_REG_PMIC_DLY] = 0x24,
[SPM_REG_PMIC_DATA_0] = 0x28,
[SPM_REG_PMIC_DATA_1] = 0x2C,
+ [SPM_REG_RST] = 0x30,
[SPM_REG_SEQ_ENTRY] = 0x80,
};
@@ -282,6 +307,146 @@ static struct cpuidle_ops qcom_cpuidle_ops __initdata = {
CPUIDLE_METHOD_OF_DECLARE(qcom_idle_v1, "qcom,kpss-acc-v1", &qcom_cpuidle_ops);
CPUIDLE_METHOD_OF_DECLARE(qcom_idle_v2, "qcom,kpss-acc-v2", &qcom_cpuidle_ops);
+static const unsigned int saw2_volt_table[] = {
+ 850000, 862500, 875000, 887500, 900000, 912500,
+ 925000, 937500, 950000, 962500, 975000, 987500,
+ 1000000, 1012500, 1025000, 1037500, 1050000, 1062500,
+ 1075000, 1087500, 1100000, 1112500, 1125000, 1137000,
+ 1137500, 1150000, 1162500, 1175000, 1187500, 1200000,
+ 1212500, 1225000, 1237500, 1250000, 1287500
+};
+
+static const u32 vlevels[] = {
+ 0x94, 0x94, 0x94, 0x94, 0x94, 0x94,
+ 0x94, 0x96, 0x96, 0x96, 0x98, 0x98,
+ 0x98, 0x9a, 0x9a, 0x9e, 0xa0, 0xa0,
+ 0xa2, 0xa6, 0xa8, 0xa8, 0xaa, 0xaa,
+ 0xac, 0xac, 0xac, 0xac, 0xac, 0xac,
+ 0xac, 0xac, 0xac, 0xac, 0xac
+};
+
+static int saw2_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct spm_driver_data *drv = rdev_get_drvdata(rdev);
+
+ return drv->vreg->uV;
+}
+
+static void spm_smp_set_vdd(void *data)
+{
+ struct spm_vlevel_data *vdata = (struct spm_vlevel_data *)data;
+ struct spm_driver_data *drv = vdata->drv;
+ struct saw2_vreg *vreg = drv->vreg;
+ unsigned long sel = vdata->selector;
+ u32 new_vlevel;
+ u32 vctl, data0, data1;
+ int timeout_us = 50;
+
+ if (vreg->vlevel == vlevels[sel])
+ return;
+
+ vctl = spm_register_read(drv, SPM_REG_VCTL);
+ data0 = spm_register_read(drv, SPM_REG_PMIC_DATA_0);
+ data1 = spm_register_read(drv, SPM_REG_PMIC_DATA_1);
+
+ vctl &= ~0xff;
+ vctl |= vlevels[sel];
+
+ data0 &= ~0xff;
+ data0 |= vlevels[sel];
+
+ data1 &= ~0x3f;
+ data1 |= (vlevels[sel] & 0x3f);
+ data1 &= ~0x3F0000;
+ data1 |= ((vlevels[sel] & 0x3f) << 16);
+
+ spm_register_write(drv, SPM_REG_RST, 1);
+ spm_register_write(drv, SPM_REG_VCTL, vctl);
+ spm_register_write(drv, SPM_REG_PMIC_DATA_0, data0);
+ spm_register_write(drv, SPM_REG_PMIC_DATA_1, data1);
+
+ do {
+ new_vlevel = spm_register_read(drv, SPM_REG_STS_1) & 0xff;
+ if (new_vlevel == vlevels[sel])
+ break;
+ udelay(1);
+ } while (--timeout_us);
+
+ if (!timeout_us) {
+ pr_info("%s: Voltage not changed %#x\n", __func__, new_vlevel);
+ return;
+ }
+
+ if (saw2_volt_table[sel] > vreg->uV) {
+ /* Wait for voltage to stabilize. */
+ udelay((saw2_volt_table[sel] - vreg->uV) / REGULATOR_SLEW_RATE);
+ }
+
+ vreg->uV = saw2_volt_table[sel];
+ vreg->vlevel = vlevels[sel];
+}
+
+static int saw2_regulator_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct spm_driver_data *drv = rdev_get_drvdata(rdev);
+ struct spm_vlevel_data data;
+ int ret;
+ int cpu = rdev_get_id(rdev);
+
+ data.drv = drv;
+ data.selector = selector;
+
+ ret = smp_call_function_single(cpu, spm_smp_set_vdd, &data, true);
+
+ return ret;
+}
+
+static struct regulator_ops saw2_regulator_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .get_voltage = saw2_regulator_get_voltage,
+ .set_voltage_sel = saw2_regulator_set_voltage_sel,
+};
+
+static struct regulator_desc saw2_regulator = {
+ .owner = THIS_MODULE,
+ .type = REGULATOR_VOLTAGE,
+ .ops = &saw2_regulator_ops,
+ .volt_table = saw2_volt_table,
+ .n_voltages = ARRAY_SIZE(saw2_volt_table),
+};
+
+static int register_saw2_regulator(struct spm_driver_data *drv,
+ struct platform_device *pdev, int cpu)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct saw2_vreg *vreg;
+ struct regulator_config config = { };
+
+ vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL);
+ if (!vreg)
+ return -ENOMEM;
+
+ drv->vreg = vreg;
+ config.driver_data = drv;
+ config.dev = &pdev->dev;
+ config.of_node = np;
+
+ vreg->rdesc = saw2_regulator;
+ vreg->rdesc.id = cpu;
+ vreg->rdesc.name = of_get_property(np, "regulator-name", NULL);
+ config.init_data = of_get_regulator_init_data(&pdev->dev,
+ pdev->dev.of_node,
+ &vreg->rdesc);
+
+ vreg->rdev = devm_regulator_register(&pdev->dev, &vreg->rdesc, &config);
+ if (IS_ERR(vreg->rdev))
+ return PTR_ERR(vreg->rdev);
+
+ return 0;
+}
+
static struct spm_driver_data *spm_get_drv(struct platform_device *pdev,
int *spm_cpu)
{
@@ -327,7 +492,7 @@ static int spm_dev_probe(struct platform_device *pdev)
struct resource *res;
const struct of_device_id *match_id;
void __iomem *addr;
- int cpu;
+ int cpu, ret;
drv = spm_get_drv(pdev, &cpu);
if (!drv)
@@ -356,6 +521,7 @@ static int spm_dev_probe(struct platform_device *pdev)
* machine, before the sequences are completely written.
*/
spm_register_write(drv, SPM_REG_CFG, drv->reg_data->spm_cfg);
+
spm_register_write(drv, SPM_REG_DLY, drv->reg_data->spm_dly);
spm_register_write(drv, SPM_REG_PMIC_DLY, drv->reg_data->pmic_dly);
spm_register_write(drv, SPM_REG_PMIC_DATA_0,
@@ -368,6 +534,10 @@ static int spm_dev_probe(struct platform_device *pdev)
per_cpu(cpu_spm_drv, cpu) = drv;
+ ret = register_saw2_regulator(drv, pdev, cpu);
+ if (ret)
+ dev_err(&pdev->dev, "error registering SAW2 regulator\n");
+
return 0;
}
diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c
index 7a986f881d5c8..de07a782d7b88 100644
--- a/drivers/soc/qcom/wcnss_ctrl.c
+++ b/drivers/soc/qcom/wcnss_ctrl.c
@@ -106,11 +106,11 @@ struct wcnss_download_nv_resp {
*
* Handles any incoming packets from the remote WCNSS_CTRL service.
*/
-static int wcnss_ctrl_smd_callback(struct qcom_smd_device *qsdev,
+static int wcnss_ctrl_smd_callback(struct qcom_smd_channel *channel,
const void *data,
size_t count)
{
- struct wcnss_ctrl *wcnss = dev_get_drvdata(&qsdev->dev);
+ struct wcnss_ctrl *wcnss = qcom_smd_get_drvdata(channel);
const struct wcnss_download_nv_resp *nvresp;
const struct wcnss_version_resp *version;
const struct wcnss_msg_hdr *hdr = data;
@@ -246,6 +246,7 @@ static int wcnss_ctrl_probe(struct qcom_smd_device *sdev)
init_completion(&wcnss->ack);
INIT_WORK(&wcnss->download_nv_work, wcnss_download_nv);
+ qcom_smd_set_drvdata(wcnss->channel, wcnss);
dev_set_drvdata(&sdev->dev, wcnss);
return wcnss_request_version(wcnss);
diff --git a/include/dt-bindings/arm/qcom-ids.h b/include/dt-bindings/arm/qcom-ids.h
new file mode 100644
index 0000000000000..a18f34e7d9656
--- /dev/null
+++ b/include/dt-bindings/arm/qcom-ids.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DT_BINDINGS_QCOM_IDS_H
+#define __DT_BINDINGS_QCOM_IDS_H
+
+/* qcom,msm-id */
+#define QCOM_ID_MSM8916 206
+#define QCOM_ID_APQ8016 247
+#define QCOM_ID_MSM8216 248
+#define QCOM_ID_MSM8116 249
+#define QCOM_ID_MSM8616 250
+
+/* qcom,board-id */
+#define QCOM_BRD_ID(a, major, minor) \
+ (((major & 0xff) << 16) | ((minor & 0xff) << 8) | QCOM_BRD_ID_##a)
+
+#define QCOM_BRD_ID_MTP 8
+#define QCOM_BRD_ID_DRAGONBRD 10
+#define QCOM_BRD_ID_SBC 24
+
+#define QCOM_BRD_SUBTYPE_DEFAULT 0
+#define QCOM_BRD_SUBTYPE_MTP8916_SMB1360 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8960.h b/include/dt-bindings/clock/qcom,gcc-msm8960.h
index 7d20eedfee989..e02742fc81cc7 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8960.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8960.h
@@ -319,5 +319,7 @@
#define CE3_SRC 303
#define CE3_CORE_CLK 304
#define CE3_H_CLK 305
+#define PLL16 306
+#define PLL17 307
#endif
diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8974.h b/include/dt-bindings/clock/qcom,mmcc-msm8974.h
index 28651e54c9aee..8be9d54b98b13 100644
--- a/include/dt-bindings/clock/qcom,mmcc-msm8974.h
+++ b/include/dt-bindings/clock/qcom,mmcc-msm8974.h
@@ -29,7 +29,6 @@
#define VFE0_CLK_SRC 12
#define VFE1_CLK_SRC 13
#define MDP_CLK_SRC 14
-#define GFX3D_CLK_SRC 15
#define JPEG0_CLK_SRC 16
#define JPEG1_CLK_SRC 17
#define JPEG2_CLK_SRC 18
diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8996.h b/include/dt-bindings/clock/qcom,mmcc-msm8996.h
index 7d3a7fa1a1bd8..5abc445ad8152 100644
--- a/include/dt-bindings/clock/qcom,mmcc-msm8996.h
+++ b/include/dt-bindings/clock/qcom,mmcc-msm8996.h
@@ -298,5 +298,6 @@
#define FD_GDSC 12
#define MDSS_GDSC 13
#define GPU_GX_GDSC 14
+#define MMAGIC_BIMC_GDSC 15
#endif
diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h
new file mode 100644
index 0000000000000..ee37c5c22f5f0
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,rpmcc.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2015 Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_RPMCC_H
+#define _DT_BINDINGS_CLK_MSM_RPMCC_H
+
+/* apq8064 */
+#define RPM_PXO_CLK 0
+#define RPM_PXO_A_CLK 1
+#define RPM_CXO_CLK 2
+#define RPM_CXO_A_CLK 3
+#define RPM_APPS_FABRIC_CLK 4
+#define RPM_APPS_FABRIC_A_CLK 5
+#define RPM_CFPB_CLK 6
+#define RPM_CFPB_A_CLK 7
+#define RPM_QDSS_CLK 8
+#define RPM_QDSS_A_CLK 9
+#define RPM_DAYTONA_FABRIC_CLK 10
+#define RPM_DAYTONA_FABRIC_A_CLK 11
+#define RPM_EBI1_CLK 12
+#define RPM_EBI1_A_CLK 13
+#define RPM_MM_FABRIC_CLK 14
+#define RPM_MM_FABRIC_A_CLK 15
+#define RPM_MMFPB_CLK 16
+#define RPM_MMFPB_A_CLK 17
+#define RPM_SYS_FABRIC_CLK 18
+#define RPM_SYS_FABRIC_A_CLK 19
+#define RPM_SFPB_CLK 20
+#define RPM_SFPB_A_CLK 21
+
+/* msm8916 */
+#define RPM_SMD_XO_CLK_SRC 0
+#define RPM_SMD_XO_A_CLK_SRC 1
+#define RPM_SMD_PCNOC_CLK 2
+#define RPM_SMD_PCNOC_A_CLK 3
+#define RPM_SMD_SNOC_CLK 4
+#define RPM_SMD_SNOC_A_CLK 5
+#define RPM_SMD_BIMC_CLK 6
+#define RPM_SMD_BIMC_A_CLK 7
+#define RPM_SMD_QDSS_CLK 8
+#define RPM_SMD_QDSS_A_CLK 9
+#define RPM_SMD_BB_CLK1 10
+#define RPM_SMD_BB_CLK1_A 11
+#define RPM_SMD_BB_CLK2 12
+#define RPM_SMD_BB_CLK2_A 13
+#define RPM_SMD_RF_CLK1 14
+#define RPM_SMD_RF_CLK1_A 15
+#define RPM_SMD_RF_CLK2 16
+#define RPM_SMD_RF_CLK2_A 17
+#define RPM_SMD_BB_CLK1_PIN 18
+#define RPM_SMD_BB_CLK1_A_PIN 19
+#define RPM_SMD_BB_CLK2_PIN 20
+#define RPM_SMD_BB_CLK2_A_PIN 21
+#define RPM_SMD_RF_CLK1_PIN 22
+#define RPM_SMD_RF_CLK1_A_PIN 23
+#define RPM_SMD_RF_CLK2_PIN 24
+#define RPM_SMD_RF_CLK2_A_PIN 25
+#define RPM_BB_CLK1 10
+#define RPM_BB_CLK1_A 11
+#define RPM_BB_CLK2 12
+#define RPM_BB_CLK2_A 13
+#define RPM_RF_CLK1 14
+#define RPM_RF_CLK1_A 15
+#define RPM_RF_CLK2 16
+#define RPM_RF_CLK2_A 17
+#define RPM_BB_CLK1_PIN 18
+#define RPM_BB_CLK1_A_PIN 19
+#define RPM_BB_CLK2_PIN 20
+#define RPM_BB_CLK2_A_PIN 21
+#define RPM_RF_CLK1_PIN 22
+#define RPM_RF_CLK1_A_PIN 23
+#define RPM_RF_CLK2_PIN 24
+#define RPM_RF_CLK2_A_PIN 25
+#define RPM_AGGR1_NOC_CLK 26
+#define RPM_AGGR1_NOC_A_CLK 27
+#define RPM_AGGR2_NOC_CLK 28
+#define RPM_AGGR2_NOC_A_CLK 29
+#define RPM_CNOC_CLK 30
+#define RPM_CNOC_A_CLK 31
+#define RPM_MMAXI_CLK 32
+#define RPM_MMAXI_A_CLK 33
+#define RPM_IPA_CLK 34
+#define RPM_IPA_A_CLK 35
+#define RPM_CE1_CLK 36
+#define RPM_CE1_A_CLK 37
+#define RPM_DIV_CLK1 38
+#define RPM_DIV_CLK1_AO 39
+#define RPM_DIV_CLK2 40
+#define RPM_DIV_CLK2_AO 41
+#define RPM_DIV_CLK3 42
+#define RPM_DIV_CLK3_AO 43
+#define RPM_LN_BB_CLK 44
+#define RPM_LN_BB_A_CLK 45
+
+#endif
diff --git a/include/dt-bindings/soc/msm-bus-ids.h b/include/dt-bindings/soc/msm-bus-ids.h
new file mode 100644
index 0000000000000..9ae56db1d2358
--- /dev/null
+++ b/include/dt-bindings/soc/msm-bus-ids.h
@@ -0,0 +1,661 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_BUS_IDS_H
+#define __MSM_BUS_IDS_H
+
+/* Topology related enums */
+#define MSM_BUS_FAB_DEFAULT 0
+#define MSM_BUS_FAB_APPSS 0
+#define MSM_BUS_FAB_SYSTEM 1024
+#define MSM_BUS_FAB_MMSS 2048
+#define MSM_BUS_FAB_SYSTEM_FPB 3072
+#define MSM_BUS_FAB_CPSS_FPB 4096
+
+#define MSM_BUS_FAB_BIMC 0
+#define MSM_BUS_FAB_SYS_NOC 1024
+#define MSM_BUS_FAB_MMSS_NOC 2048
+#define MSM_BUS_FAB_OCMEM_NOC 3072
+#define MSM_BUS_FAB_PERIPH_NOC 4096
+#define MSM_BUS_FAB_CONFIG_NOC 5120
+#define MSM_BUS_FAB_OCMEM_VNOC 6144
+
+#define MSM_BUS_MASTER_FIRST 1
+#define MSM_BUS_MASTER_AMPSS_M0 1
+#define MSM_BUS_MASTER_AMPSS_M1 2
+#define MSM_BUS_APPSS_MASTER_FAB_MMSS 3
+#define MSM_BUS_APPSS_MASTER_FAB_SYSTEM 4
+#define MSM_BUS_SYSTEM_MASTER_FAB_APPSS 5
+#define MSM_BUS_MASTER_SPS 6
+#define MSM_BUS_MASTER_ADM_PORT0 7
+#define MSM_BUS_MASTER_ADM_PORT1 8
+#define MSM_BUS_SYSTEM_MASTER_ADM1_PORT0 9
+#define MSM_BUS_MASTER_ADM1_PORT1 10
+#define MSM_BUS_MASTER_LPASS_PROC 11
+#define MSM_BUS_MASTER_MSS_PROCI 12
+#define MSM_BUS_MASTER_MSS_PROCD 13
+#define MSM_BUS_MASTER_MSS_MDM_PORT0 14
+#define MSM_BUS_MASTER_LPASS 15
+#define MSM_BUS_SYSTEM_MASTER_CPSS_FPB 16
+#define MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB 17
+#define MSM_BUS_SYSTEM_MASTER_MMSS_FPB 18
+#define MSM_BUS_MASTER_ADM1_CI 19
+#define MSM_BUS_MASTER_ADM0_CI 20
+#define MSM_BUS_MASTER_MSS_MDM_PORT1 21
+#define MSM_BUS_MASTER_MDP_PORT0 22
+#define MSM_BUS_MASTER_MDP_PORT1 23
+#define MSM_BUS_MMSS_MASTER_ADM1_PORT0 24
+#define MSM_BUS_MASTER_ROTATOR 25
+#define MSM_BUS_MASTER_GRAPHICS_3D 26
+#define MSM_BUS_MASTER_JPEG_DEC 27
+#define MSM_BUS_MASTER_GRAPHICS_2D_CORE0 28
+#define MSM_BUS_MASTER_VFE 29
+#define MSM_BUS_MASTER_VPE 30
+#define MSM_BUS_MASTER_JPEG_ENC 31
+#define MSM_BUS_MASTER_GRAPHICS_2D_CORE1 32
+#define MSM_BUS_MMSS_MASTER_APPS_FAB 33
+#define MSM_BUS_MASTER_HD_CODEC_PORT0 34
+#define MSM_BUS_MASTER_HD_CODEC_PORT1 35
+#define MSM_BUS_MASTER_SPDM 36
+#define MSM_BUS_MASTER_RPM 37
+#define MSM_BUS_MASTER_MSS 38
+#define MSM_BUS_MASTER_RIVA 39
+#define MSM_BUS_SYSTEM_MASTER_UNUSED_6 40
+#define MSM_BUS_MASTER_MSS_SW_PROC 41
+#define MSM_BUS_MASTER_MSS_FW_PROC 42
+#define MSM_BUS_MMSS_MASTER_UNUSED_2 43
+#define MSM_BUS_MASTER_GSS_NAV 44
+#define MSM_BUS_MASTER_PCIE 45
+#define MSM_BUS_MASTER_SATA 46
+#define MSM_BUS_MASTER_CRYPTO 47
+#define MSM_BUS_MASTER_VIDEO_CAP 48
+#define MSM_BUS_MASTER_GRAPHICS_3D_PORT1 49
+#define MSM_BUS_MASTER_VIDEO_ENC 50
+#define MSM_BUS_MASTER_VIDEO_DEC 51
+#define MSM_BUS_MASTER_LPASS_AHB 52
+#define MSM_BUS_MASTER_QDSS_BAM 53
+#define MSM_BUS_MASTER_SNOC_CFG 54
+#define MSM_BUS_MASTER_CRYPTO_CORE0 55
+#define MSM_BUS_MASTER_CRYPTO_CORE1 56
+#define MSM_BUS_MASTER_MSS_NAV 57
+#define MSM_BUS_MASTER_OCMEM_DMA 58
+#define MSM_BUS_MASTER_WCSS 59
+#define MSM_BUS_MASTER_QDSS_ETR 60
+#define MSM_BUS_MASTER_USB3 61
+#define MSM_BUS_MASTER_JPEG 62
+#define MSM_BUS_MASTER_VIDEO_P0 63
+#define MSM_BUS_MASTER_VIDEO_P1 64
+#define MSM_BUS_MASTER_MSS_PROC 65
+#define MSM_BUS_MASTER_JPEG_OCMEM 66
+#define MSM_BUS_MASTER_MDP_OCMEM 67
+#define MSM_BUS_MASTER_VIDEO_P0_OCMEM 68
+#define MSM_BUS_MASTER_VIDEO_P1_OCMEM 69
+#define MSM_BUS_MASTER_VFE_OCMEM 70
+#define MSM_BUS_MASTER_CNOC_ONOC_CFG 71
+#define MSM_BUS_MASTER_RPM_INST 72
+#define MSM_BUS_MASTER_RPM_DATA 73
+#define MSM_BUS_MASTER_RPM_SYS 74
+#define MSM_BUS_MASTER_DEHR 75
+#define MSM_BUS_MASTER_QDSS_DAP 76
+#define MSM_BUS_MASTER_TIC 77
+#define MSM_BUS_MASTER_SDCC_1 78
+#define MSM_BUS_MASTER_SDCC_3 79
+#define MSM_BUS_MASTER_SDCC_4 80
+#define MSM_BUS_MASTER_SDCC_2 81
+#define MSM_BUS_MASTER_TSIF 82
+#define MSM_BUS_MASTER_BAM_DMA 83
+#define MSM_BUS_MASTER_BLSP_2 84
+#define MSM_BUS_MASTER_USB_HSIC 85
+#define MSM_BUS_MASTER_BLSP_1 86
+#define MSM_BUS_MASTER_USB_HS 87
+#define MSM_BUS_MASTER_PNOC_CFG 88
+#define MSM_BUS_MASTER_V_OCMEM_GFX3D 89
+#define MSM_BUS_MASTER_IPA 90
+#define MSM_BUS_MASTER_QPIC 91
+#define MSM_BUS_MASTER_MDPE 92
+#define MSM_BUS_MASTER_USB_HS2 93
+#define MSM_BUS_MASTER_VPU 94
+#define MSM_BUS_MASTER_UFS 95
+#define MSM_BUS_MASTER_BCAST 96
+#define MSM_BUS_MASTER_CRYPTO_CORE2 97
+#define MSM_BUS_MASTER_EMAC 98
+#define MSM_BUS_MASTER_VPU_1 99
+#define MSM_BUS_MASTER_PCIE_1 100
+#define MSM_BUS_MASTER_USB3_1 101
+#define MSM_BUS_MASTER_CNOC_MNOC_MMSS_CFG 102
+#define MSM_BUS_MASTER_CNOC_MNOC_CFG 103
+#define MSM_BUS_MASTER_TCU_0 104
+#define MSM_BUS_MASTER_TCU_1 105
+#define MSM_BUS_MASTER_CPP 106
+#define MSM_BUS_MASTER_AUDIO 107
+#define MSM_BUS_MASTER_LAST 108
+
+#define MSM_BUS_SYSTEM_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB
+#define MSM_BUS_CPSS_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_CPSS_FPB
+
+#define MSM_BUS_SNOC_MM_INT_0 10000
+#define MSM_BUS_SNOC_MM_INT_1 10001
+#define MSM_BUS_SNOC_MM_INT_2 10002
+#define MSM_BUS_SNOC_MM_INT_BIMC 10003
+#define MSM_BUS_SNOC_INT_0 10004
+#define MSM_BUS_SNOC_INT_1 10005
+#define MSM_BUS_SNOC_INT_BIMC 10006
+#define MSM_BUS_SNOC_BIMC_0_MAS 10007
+#define MSM_BUS_SNOC_BIMC_1_MAS 10008
+#define MSM_BUS_SNOC_QDSS_INT 10009
+#define MSM_BUS_PNOC_SNOC_MAS 10010
+#define MSM_BUS_PNOC_SNOC_SLV 10011
+#define MSM_BUS_PNOC_INT_0 10012
+#define MSM_BUS_PNOC_INT_1 10013
+#define MSM_BUS_PNOC_M_0 10014
+#define MSM_BUS_PNOC_M_1 10015
+#define MSM_BUS_BIMC_SNOC_MAS 10016
+#define MSM_BUS_BIMC_SNOC_SLV 10017
+#define MSM_BUS_PNOC_SLV_0 10018
+#define MSM_BUS_PNOC_SLV_1 10019
+#define MSM_BUS_PNOC_SLV_2 10020
+#define MSM_BUS_PNOC_SLV_3 10021
+#define MSM_BUS_PNOC_SLV_4 10022
+#define MSM_BUS_PNOC_SLV_8 10023
+#define MSM_BUS_PNOC_SLV_9 10024
+#define MSM_BUS_SNOC_BIMC_0_SLV 10025
+#define MSM_BUS_SNOC_BIMC_1_SLV 10026
+#define MSM_BUS_MNOC_BIMC_MAS 10027
+#define MSM_BUS_MNOC_BIMC_SLV 10028
+#define MSM_BUS_BIMC_MNOC_MAS 10029
+#define MSM_BUS_BIMC_MNOC_SLV 10030
+#define MSM_BUS_SNOC_BIMC_MAS 10031
+#define MSM_BUS_SNOC_BIMC_SLV 10032
+#define MSM_BUS_CNOC_SNOC_MAS 10033
+#define MSM_BUS_CNOC_SNOC_SLV 10034
+#define MSM_BUS_SNOC_CNOC_MAS 10035
+#define MSM_BUS_SNOC_CNOC_SLV 10036
+#define MSM_BUS_OVNOC_SNOC_MAS 10037
+#define MSM_BUS_OVNOC_SNOC_SLV 10038
+#define MSM_BUS_SNOC_OVNOC_MAS 10039
+#define MSM_BUS_SNOC_OVNOC_SLV 10040
+#define MSM_BUS_SNOC_PNOC_MAS 10041
+#define MSM_BUS_SNOC_PNOC_SLV 10042
+#define MSM_BUS_BIMC_INT_APPS_EBI 10043
+#define MSM_BUS_BIMC_INT_APPS_SNOC 10044
+#define MSM_BUS_SNOC_BIMC_2_MAS 10045
+#define MSM_BUS_SNOC_BIMC_2_SLV 10046
+#define MSM_BUS_PNOC_SLV_5 10047
+#define MSM_BUS_PNOC_SLV_7 10048
+#define MSM_BUS_PNOC_INT_2 10049
+#define MSM_BUS_PNOC_INT_3 10050
+#define MSM_BUS_PNOC_INT_4 10051
+#define MSM_BUS_PNOC_INT_5 10052
+#define MSM_BUS_PNOC_INT_6 10053
+#define MSM_BUS_PNOC_INT_7 10054
+#define MSM_BUS_INT_LAST 10055
+
+#define MSM_BUS_INT_TEST_ID 20000
+#define MSM_BUS_INT_TEST_LAST 20050
+
+#define MSM_BUS_SLAVE_FIRST 512
+#define MSM_BUS_SLAVE_EBI_CH0 512
+#define MSM_BUS_SLAVE_EBI_CH1 513
+#define MSM_BUS_SLAVE_AMPSS_L2 514
+#define MSM_BUS_APPSS_SLAVE_FAB_MMSS 515
+#define MSM_BUS_APPSS_SLAVE_FAB_SYSTEM 516
+#define MSM_BUS_SYSTEM_SLAVE_FAB_APPS 517
+#define MSM_BUS_SLAVE_SPS 518
+#define MSM_BUS_SLAVE_SYSTEM_IMEM 519
+#define MSM_BUS_SLAVE_AMPSS 520
+#define MSM_BUS_SLAVE_MSS 521
+#define MSM_BUS_SLAVE_LPASS 522
+#define MSM_BUS_SYSTEM_SLAVE_CPSS_FPB 523
+#define MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB 524
+#define MSM_BUS_SYSTEM_SLAVE_MMSS_FPB 525
+#define MSM_BUS_SLAVE_CORESIGHT 526
+#define MSM_BUS_SLAVE_RIVA 527
+#define MSM_BUS_SLAVE_SMI 528
+#define MSM_BUS_MMSS_SLAVE_FAB_APPS 529
+#define MSM_BUS_MMSS_SLAVE_FAB_APPS_1 530
+#define MSM_BUS_SLAVE_MM_IMEM 531
+#define MSM_BUS_SLAVE_CRYPTO 532
+#define MSM_BUS_SLAVE_SPDM 533
+#define MSM_BUS_SLAVE_RPM 534
+#define MSM_BUS_SLAVE_RPM_MSG_RAM 535
+#define MSM_BUS_SLAVE_MPM 536
+#define MSM_BUS_SLAVE_PMIC1_SSBI1_A 537
+#define MSM_BUS_SLAVE_PMIC1_SSBI1_B 538
+#define MSM_BUS_SLAVE_PMIC1_SSBI1_C 539
+#define MSM_BUS_SLAVE_PMIC2_SSBI2_A 540
+#define MSM_BUS_SLAVE_PMIC2_SSBI2_B 541
+#define MSM_BUS_SLAVE_GSBI1_UART 542
+#define MSM_BUS_SLAVE_GSBI2_UART 543
+#define MSM_BUS_SLAVE_GSBI3_UART 544
+#define MSM_BUS_SLAVE_GSBI4_UART 545
+#define MSM_BUS_SLAVE_GSBI5_UART 546
+#define MSM_BUS_SLAVE_GSBI6_UART 547
+#define MSM_BUS_SLAVE_GSBI7_UART 548
+#define MSM_BUS_SLAVE_GSBI8_UART 549
+#define MSM_BUS_SLAVE_GSBI9_UART 550
+#define MSM_BUS_SLAVE_GSBI10_UART 551
+#define MSM_BUS_SLAVE_GSBI11_UART 552
+#define MSM_BUS_SLAVE_GSBI12_UART 553
+#define MSM_BUS_SLAVE_GSBI1_QUP 554
+#define MSM_BUS_SLAVE_GSBI2_QUP 555
+#define MSM_BUS_SLAVE_GSBI3_QUP 556
+#define MSM_BUS_SLAVE_GSBI4_QUP 557
+#define MSM_BUS_SLAVE_GSBI5_QUP 558
+#define MSM_BUS_SLAVE_GSBI6_QUP 559
+#define MSM_BUS_SLAVE_GSBI7_QUP 560
+#define MSM_BUS_SLAVE_GSBI8_QUP 561
+#define MSM_BUS_SLAVE_GSBI9_QUP 562
+#define MSM_BUS_SLAVE_GSBI10_QUP 563
+#define MSM_BUS_SLAVE_GSBI11_QUP 564
+#define MSM_BUS_SLAVE_GSBI12_QUP 565
+#define MSM_BUS_SLAVE_EBI2_NAND 566
+#define MSM_BUS_SLAVE_EBI2_CS0 567
+#define MSM_BUS_SLAVE_EBI2_CS1 568
+#define MSM_BUS_SLAVE_EBI2_CS2 569
+#define MSM_BUS_SLAVE_EBI2_CS3 570
+#define MSM_BUS_SLAVE_EBI2_CS4 571
+#define MSM_BUS_SLAVE_EBI2_CS5 572
+#define MSM_BUS_SLAVE_USB_FS1 573
+#define MSM_BUS_SLAVE_USB_FS2 574
+#define MSM_BUS_SLAVE_TSIF 575
+#define MSM_BUS_SLAVE_MSM_TSSC 576
+#define MSM_BUS_SLAVE_MSM_PDM 577
+#define MSM_BUS_SLAVE_MSM_DIMEM 578
+#define MSM_BUS_SLAVE_MSM_TCSR 579
+#define MSM_BUS_SLAVE_MSM_PRNG 580
+#define MSM_BUS_SLAVE_GSS 581
+#define MSM_BUS_SLAVE_SATA 582
+#define MSM_BUS_SLAVE_USB3 583
+#define MSM_BUS_SLAVE_WCSS 584
+#define MSM_BUS_SLAVE_OCIMEM 585
+#define MSM_BUS_SLAVE_SNOC_OCMEM 586
+#define MSM_BUS_SLAVE_SERVICE_SNOC 587
+#define MSM_BUS_SLAVE_QDSS_STM 588
+#define MSM_BUS_SLAVE_CAMERA_CFG 589
+#define MSM_BUS_SLAVE_DISPLAY_CFG 590
+#define MSM_BUS_SLAVE_OCMEM_CFG 591
+#define MSM_BUS_SLAVE_CPR_CFG 592
+#define MSM_BUS_SLAVE_CPR_XPU_CFG 593
+#define MSM_BUS_SLAVE_MISC_CFG 594
+#define MSM_BUS_SLAVE_MISC_XPU_CFG 595
+#define MSM_BUS_SLAVE_VENUS_CFG 596
+#define MSM_BUS_SLAVE_MISC_VENUS_CFG 597
+#define MSM_BUS_SLAVE_GRAPHICS_3D_CFG 598
+#define MSM_BUS_SLAVE_MMSS_CLK_CFG 599
+#define MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG 600
+#define MSM_BUS_SLAVE_MNOC_MPU_CFG 601
+#define MSM_BUS_SLAVE_ONOC_MPU_CFG 602
+#define MSM_BUS_SLAVE_SERVICE_MNOC 603
+#define MSM_BUS_SLAVE_OCMEM 604
+#define MSM_BUS_SLAVE_SERVICE_ONOC 605
+#define MSM_BUS_SLAVE_SDCC_1 606
+#define MSM_BUS_SLAVE_SDCC_3 607
+#define MSM_BUS_SLAVE_SDCC_2 608
+#define MSM_BUS_SLAVE_SDCC_4 609
+#define MSM_BUS_SLAVE_BAM_DMA 610
+#define MSM_BUS_SLAVE_BLSP_2 611
+#define MSM_BUS_SLAVE_USB_HSIC 612
+#define MSM_BUS_SLAVE_BLSP_1 613
+#define MSM_BUS_SLAVE_USB_HS 614
+#define MSM_BUS_SLAVE_PDM 615
+#define MSM_BUS_SLAVE_PERIPH_APU_CFG 616
+#define MSM_BUS_SLAVE_PNOC_MPU_CFG 617
+#define MSM_BUS_SLAVE_PRNG 618
+#define MSM_BUS_SLAVE_SERVICE_PNOC 619
+#define MSM_BUS_SLAVE_CLK_CTL 620
+#define MSM_BUS_SLAVE_CNOC_MSS 621
+#define MSM_BUS_SLAVE_SECURITY 622
+#define MSM_BUS_SLAVE_TCSR 623
+#define MSM_BUS_SLAVE_TLMM 624
+#define MSM_BUS_SLAVE_CRYPTO_0_CFG 625
+#define MSM_BUS_SLAVE_CRYPTO_1_CFG 626
+#define MSM_BUS_SLAVE_IMEM_CFG 627
+#define MSM_BUS_SLAVE_MESSAGE_RAM 628
+#define MSM_BUS_SLAVE_BIMC_CFG 629
+#define MSM_BUS_SLAVE_BOOT_ROM 630
+#define MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG 631
+#define MSM_BUS_SLAVE_PMIC_ARB 632
+#define MSM_BUS_SLAVE_SPDM_WRAPPER 633
+#define MSM_BUS_SLAVE_DEHR_CFG 634
+#define MSM_BUS_SLAVE_QDSS_CFG 635
+#define MSM_BUS_SLAVE_RBCPR_CFG 636
+#define MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG 637
+#define MSM_BUS_SLAVE_SNOC_MPU_CFG 638
+#define MSM_BUS_SLAVE_CNOC_ONOC_CFG 639
+#define MSM_BUS_SLAVE_CNOC_MNOC_CFG 640
+#define MSM_BUS_SLAVE_PNOC_CFG 641
+#define MSM_BUS_SLAVE_SNOC_CFG 642
+#define MSM_BUS_SLAVE_EBI1_DLL_CFG 643
+#define MSM_BUS_SLAVE_PHY_APU_CFG 644
+#define MSM_BUS_SLAVE_EBI1_PHY_CFG 645
+#define MSM_BUS_SLAVE_SERVICE_CNOC 646
+#define MSM_BUS_SLAVE_IPS_CFG 647
+#define MSM_BUS_SLAVE_QPIC 648
+#define MSM_BUS_SLAVE_DSI_CFG 649
+#define MSM_BUS_SLAVE_UFS_CFG 650
+#define MSM_BUS_SLAVE_RBCPR_CX_CFG 651
+#define MSM_BUS_SLAVE_RBCPR_MX_CFG 652
+#define MSM_BUS_SLAVE_PCIE_CFG 653
+#define MSM_BUS_SLAVE_USB_PHYS_CFG 654
+#define MSM_BUS_SLAVE_VIDEO_CAP_CFG 655
+#define MSM_BUS_SLAVE_AVSYNC_CFG 656
+#define MSM_BUS_SLAVE_CRYPTO_2_CFG 657
+#define MSM_BUS_SLAVE_VPU_CFG 658
+#define MSM_BUS_SLAVE_BCAST_CFG 659
+#define MSM_BUS_SLAVE_KLM_CFG 660
+#define MSM_BUS_SLAVE_GENI_IR_CFG 661
+#define MSM_BUS_SLAVE_OCMEM_GFX 662
+#define MSM_BUS_SLAVE_CATS_128 663
+#define MSM_BUS_SLAVE_OCMEM_64 664
+#define MSM_BUS_SLAVE_PCIE_0 665
+#define MSM_BUS_SLAVE_PCIE_1 666
+#define MSM_BUS_SLAVE_PCIE_0_CFG 667
+#define MSM_BUS_SLAVE_PCIE_1_CFG 668
+#define MSM_BUS_SLAVE_SRVC_MNOC 669
+#define MSM_BUS_SLAVE_USB_HS2 670
+#define MSM_BUS_SLAVE_AUDIO 671
+#define MSM_BUS_SLAVE_TCU 672
+#define MSM_BUS_SLAVE_APPSS 673
+#define MSM_BUS_SLAVE_PCIE_PARF 674
+#define MSM_BUS_SLAVE_USB3_PHY_CFG 675
+#define MSM_BUS_SLAVE_IPA_CFG 676
+#define MSM_BUS_SLAVE_LAST 677
+
+#define MSM_BUS_SYSTEM_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB
+#define MSM_BUS_CPSS_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_CPSS_FPB
+
+/*
+ * ID's used in RPM messages
+ */
+#define ICBID_MASTER_APPSS_PROC 0
+#define ICBID_MASTER_MSS_PROC 1
+#define ICBID_MASTER_MNOC_BIMC 2
+#define ICBID_MASTER_SNOC_BIMC 3
+#define ICBID_MASTER_SNOC_BIMC_0 ICBID_MASTER_SNOC_BIMC
+#define ICBID_MASTER_CNOC_MNOC_MMSS_CFG 4
+#define ICBID_MASTER_CNOC_MNOC_CFG 5
+#define ICBID_MASTER_GFX3D 6
+#define ICBID_MASTER_JPEG 7
+#define ICBID_MASTER_MDP 8
+#define ICBID_MASTER_MDP0 ICBID_MASTER_MDP
+#define ICBID_MASTER_MDPS ICBID_MASTER_MDP
+#define ICBID_MASTER_VIDEO 9
+#define ICBID_MASTER_VIDEO_P0 ICBID_MASTER_VIDEO
+#define ICBID_MASTER_VIDEO_P1 10
+#define ICBID_MASTER_VFE 11
+#define ICBID_MASTER_CNOC_ONOC_CFG 12
+#define ICBID_MASTER_JPEG_OCMEM 13
+#define ICBID_MASTER_MDP_OCMEM 14
+#define ICBID_MASTER_VIDEO_P0_OCMEM 15
+#define ICBID_MASTER_VIDEO_P1_OCMEM 16
+#define ICBID_MASTER_VFE_OCMEM 17
+#define ICBID_MASTER_LPASS_AHB 18
+#define ICBID_MASTER_QDSS_BAM 19
+#define ICBID_MASTER_SNOC_CFG 20
+#define ICBID_MASTER_BIMC_SNOC 21
+#define ICBID_MASTER_CNOC_SNOC 22
+#define ICBID_MASTER_CRYPTO 23
+#define ICBID_MASTER_CRYPTO_CORE0 ICBID_MASTER_CRYPTO
+#define ICBID_MASTER_CRYPTO_CORE1 24
+#define ICBID_MASTER_LPASS_PROC 25
+#define ICBID_MASTER_MSS 26
+#define ICBID_MASTER_MSS_NAV 27
+#define ICBID_MASTER_OCMEM_DMA 28
+#define ICBID_MASTER_PNOC_SNOC 29
+#define ICBID_MASTER_WCSS 30
+#define ICBID_MASTER_QDSS_ETR 31
+#define ICBID_MASTER_USB3 32
+#define ICBID_MASTER_USB3_0 ICBID_MASTER_USB3
+#define ICBID_MASTER_SDCC_1 33
+#define ICBID_MASTER_SDCC_3 34
+#define ICBID_MASTER_SDCC_2 35
+#define ICBID_MASTER_SDCC_4 36
+#define ICBID_MASTER_TSIF 37
+#define ICBID_MASTER_BAM_DMA 38
+#define ICBID_MASTER_BLSP_2 39
+#define ICBID_MASTER_USB_HSIC 40
+#define ICBID_MASTER_BLSP_1 41
+#define ICBID_MASTER_USB_HS 42
+#define ICBID_MASTER_USB_HS1 ICBID_MASTER_USB_HS
+#define ICBID_MASTER_PNOC_CFG 43
+#define ICBID_MASTER_SNOC_PNOC 44
+#define ICBID_MASTER_RPM_INST 45
+#define ICBID_MASTER_RPM_DATA 46
+#define ICBID_MASTER_RPM_SYS 47
+#define ICBID_MASTER_DEHR 48
+#define ICBID_MASTER_QDSS_DAP 49
+#define ICBID_MASTER_SPDM 50
+#define ICBID_MASTER_TIC 51
+#define ICBID_MASTER_SNOC_CNOC 52
+#define ICBID_MASTER_GFX3D_OCMEM 53
+#define ICBID_MASTER_GFX3D_GMEM ICBID_MASTER_GFX3D_OCMEM
+#define ICBID_MASTER_OVIRT_SNOC 54
+#define ICBID_MASTER_SNOC_OVIRT 55
+#define ICBID_MASTER_SNOC_GVIRT ICBID_MASTER_SNOC_OVIRT
+#define ICBID_MASTER_ONOC_OVIRT 56
+#define ICBID_MASTER_USB_HS2 57
+#define ICBID_MASTER_QPIC 58
+#define ICBID_MASTER_IPA 59
+#define ICBID_MASTER_DSI 60
+#define ICBID_MASTER_MDP1 61
+#define ICBID_MASTER_MDPE ICBID_MASTER_MDP1
+#define ICBID_MASTER_VPU_PROC 62
+#define ICBID_MASTER_VPU 63
+#define ICBID_MASTER_VPU0 ICBID_MASTER_VPU
+#define ICBID_MASTER_CRYPTO_CORE2 64
+#define ICBID_MASTER_PCIE_0 65
+#define ICBID_MASTER_PCIE_1 66
+#define ICBID_MASTER_SATA 67
+#define ICBID_MASTER_UFS 68
+#define ICBID_MASTER_USB3_1 69
+#define ICBID_MASTER_VIDEO_OCMEM 70
+#define ICBID_MASTER_VPU1 71
+#define ICBID_MASTER_VCAP 72
+#define ICBID_MASTER_EMAC 73
+#define ICBID_MASTER_BCAST 74
+#define ICBID_MASTER_MMSS_PROC 75
+#define ICBID_MASTER_SNOC_BIMC_1 76
+#define ICBID_MASTER_SNOC_PCNOC 77
+#define ICBID_MASTER_AUDIO 78
+#define ICBID_MASTER_MM_INT_0 79
+#define ICBID_MASTER_MM_INT_1 80
+#define ICBID_MASTER_MM_INT_2 81
+#define ICBID_MASTER_MM_INT_BIMC 82
+#define ICBID_MASTER_MSS_INT 83
+#define ICBID_MASTER_PCNOC_CFG 84
+#define ICBID_MASTER_PCNOC_INT_0 85
+#define ICBID_MASTER_PCNOC_INT_1 86
+#define ICBID_MASTER_PCNOC_M_0 87
+#define ICBID_MASTER_PCNOC_M_1 88
+#define ICBID_MASTER_PCNOC_S_0 89
+#define ICBID_MASTER_PCNOC_S_1 90
+#define ICBID_MASTER_PCNOC_S_2 91
+#define ICBID_MASTER_PCNOC_S_3 92
+#define ICBID_MASTER_PCNOC_S_4 93
+#define ICBID_MASTER_PCNOC_S_6 94
+#define ICBID_MASTER_PCNOC_S_7 95
+#define ICBID_MASTER_PCNOC_S_8 96
+#define ICBID_MASTER_PCNOC_S_9 97
+#define ICBID_MASTER_QDSS_INT 98
+#define ICBID_MASTER_SNOC_INT_0 99
+#define ICBID_MASTER_SNOC_INT_1 100
+#define ICBID_MASTER_SNOC_INT_BIMC 101
+#define ICBID_MASTER_TCU_0 102
+#define ICBID_MASTER_TCU_1 103
+#define ICBID_MASTER_BIMC_INT_0 104
+#define ICBID_MASTER_BIMC_INT_1 105
+#define ICBID_MASTER_CAMERA 106
+#define ICBID_MASTER_RICA 107
+#define ICBID_MASTER_PCNOC_S_5 129
+#define ICBID_MASTER_PCNOC_INT_2 124
+#define ICBID_MASTER_PCNOC_INT_3 125
+#define ICBID_MASTER_PCNOC_INT_4 126
+#define ICBID_MASTER_PCNOC_INT_5 127
+#define ICBID_MASTER_PCNOC_INT_6 128
+
+#define ICBID_SLAVE_EBI1 0
+#define ICBID_SLAVE_APPSS_L2 1
+#define ICBID_SLAVE_BIMC_SNOC 2
+#define ICBID_SLAVE_CAMERA_CFG 3
+#define ICBID_SLAVE_DISPLAY_CFG 4
+#define ICBID_SLAVE_OCMEM_CFG 5
+#define ICBID_SLAVE_CPR_CFG 6
+#define ICBID_SLAVE_CPR_XPU_CFG 7
+#define ICBID_SLAVE_MISC_CFG 8
+#define ICBID_SLAVE_MISC_XPU_CFG 9
+#define ICBID_SLAVE_VENUS_CFG 10
+#define ICBID_SLAVE_GFX3D_CFG 11
+#define ICBID_SLAVE_MMSS_CLK_CFG 12
+#define ICBID_SLAVE_MMSS_CLK_XPU_CFG 13
+#define ICBID_SLAVE_MNOC_MPU_CFG 14
+#define ICBID_SLAVE_ONOC_MPU_CFG 15
+#define ICBID_SLAVE_MNOC_BIMC 16
+#define ICBID_SLAVE_SERVICE_MNOC 17
+#define ICBID_SLAVE_OCMEM 18
+#define ICBID_SLAVE_GMEM ICBID_SLAVE_OCMEM
+#define ICBID_SLAVE_SERVICE_ONOC 19
+#define ICBID_SLAVE_APPSS 20
+#define ICBID_SLAVE_LPASS 21
+#define ICBID_SLAVE_USB3 22
+#define ICBID_SLAVE_USB3_0 ICBID_SLAVE_USB3
+#define ICBID_SLAVE_WCSS 23
+#define ICBID_SLAVE_SNOC_BIMC 24
+#define ICBID_SLAVE_SNOC_BIMC_0 ICBID_SLAVE_SNOC_BIMC
+#define ICBID_SLAVE_SNOC_CNOC 25
+#define ICBID_SLAVE_IMEM 26
+#define ICBID_SLAVE_OCIMEM ICBID_SLAVE_IMEM
+#define ICBID_SLAVE_SNOC_OVIRT 27
+#define ICBID_SLAVE_SNOC_GVIRT ICBID_SLAVE_SNOC_OVIRT
+#define ICBID_SLAVE_SNOC_PNOC 28
+#define ICBID_SLAVE_SNOC_PCNOC ICBID_SLAVE_SNOC_PNOC
+#define ICBID_SLAVE_SERVICE_SNOC 29
+#define ICBID_SLAVE_QDSS_STM 30
+#define ICBID_SLAVE_SDCC_1 31
+#define ICBID_SLAVE_SDCC_3 32
+#define ICBID_SLAVE_SDCC_2 33
+#define ICBID_SLAVE_SDCC_4 34
+#define ICBID_SLAVE_TSIF 35
+#define ICBID_SLAVE_BAM_DMA 36
+#define ICBID_SLAVE_BLSP_2 37
+#define ICBID_SLAVE_USB_HSIC 38
+#define ICBID_SLAVE_BLSP_1 39
+#define ICBID_SLAVE_USB_HS 40
+#define ICBID_SLAVE_USB_HS1 ICBID_SLAVE_USB_HS
+#define ICBID_SLAVE_PDM 41
+#define ICBID_SLAVE_PERIPH_APU_CFG 42
+#define ICBID_SLAVE_PNOC_MPU_CFG 43
+#define ICBID_SLAVE_PRNG 44
+#define ICBID_SLAVE_PNOC_SNOC 45
+#define ICBID_SLAVE_PCNOC_SNOC ICBID_SLAVE_PNOC_SNOC
+#define ICBID_SLAVE_SERVICE_PNOC 46
+#define ICBID_SLAVE_CLK_CTL 47
+#define ICBID_SLAVE_CNOC_MSS 48
+#define ICBID_SLAVE_PCNOC_MSS ICBID_SLAVE_CNOC_MSS
+#define ICBID_SLAVE_SECURITY 49
+#define ICBID_SLAVE_TCSR 50
+#define ICBID_SLAVE_TLMM 51
+#define ICBID_SLAVE_CRYPTO_0_CFG 52
+#define ICBID_SLAVE_CRYPTO_1_CFG 53
+#define ICBID_SLAVE_IMEM_CFG 54
+#define ICBID_SLAVE_MESSAGE_RAM 55
+#define ICBID_SLAVE_BIMC_CFG 56
+#define ICBID_SLAVE_BOOT_ROM 57
+#define ICBID_SLAVE_CNOC_MNOC_MMSS_CFG 58
+#define ICBID_SLAVE_PMIC_ARB 59
+#define ICBID_SLAVE_SPDM_WRAPPER 60
+#define ICBID_SLAVE_DEHR_CFG 61
+#define ICBID_SLAVE_MPM 62
+#define ICBID_SLAVE_QDSS_CFG 63
+#define ICBID_SLAVE_RBCPR_CFG 64
+#define ICBID_SLAVE_RBCPR_CX_CFG ICBID_SLAVE_RBCPR_CFG
+#define ICBID_SLAVE_RBCPR_QDSS_APU_CFG 65
+#define ICBID_SLAVE_CNOC_MNOC_CFG 66
+#define ICBID_SLAVE_SNOC_MPU_CFG 67
+#define ICBID_SLAVE_CNOC_ONOC_CFG 68
+#define ICBID_SLAVE_PNOC_CFG 69
+#define ICBID_SLAVE_SNOC_CFG 70
+#define ICBID_SLAVE_EBI1_DLL_CFG 71
+#define ICBID_SLAVE_PHY_APU_CFG 72
+#define ICBID_SLAVE_EBI1_PHY_CFG 73
+#define ICBID_SLAVE_RPM 74
+#define ICBID_SLAVE_CNOC_SNOC 75
+#define ICBID_SLAVE_SERVICE_CNOC 76
+#define ICBID_SLAVE_OVIRT_SNOC 77
+#define ICBID_SLAVE_OVIRT_OCMEM 78
+#define ICBID_SLAVE_USB_HS2 79
+#define ICBID_SLAVE_QPIC 80
+#define ICBID_SLAVE_IPS_CFG 81
+#define ICBID_SLAVE_DSI_CFG 82
+#define ICBID_SLAVE_USB3_1 83
+#define ICBID_SLAVE_PCIE_0 84
+#define ICBID_SLAVE_PCIE_1 85
+#define ICBID_SLAVE_PSS_SMMU_CFG 86
+#define ICBID_SLAVE_CRYPTO_2_CFG 87
+#define ICBID_SLAVE_PCIE_0_CFG 88
+#define ICBID_SLAVE_PCIE_1_CFG 89
+#define ICBID_SLAVE_SATA_CFG 90
+#define ICBID_SLAVE_SPSS_GENI_IR 91
+#define ICBID_SLAVE_UFS_CFG 92
+#define ICBID_SLAVE_AVSYNC_CFG 93
+#define ICBID_SLAVE_VPU_CFG 94
+#define ICBID_SLAVE_USB_PHY_CFG 95
+#define ICBID_SLAVE_RBCPR_MX_CFG 96
+#define ICBID_SLAVE_PCIE_PARF 97
+#define ICBID_SLAVE_VCAP_CFG 98
+#define ICBID_SLAVE_EMAC_CFG 99
+#define ICBID_SLAVE_BCAST_CFG 100
+#define ICBID_SLAVE_KLM_CFG 101
+#define ICBID_SLAVE_DISPLAY_PWM 102
+#define ICBID_SLAVE_GENI 103
+#define ICBID_SLAVE_SNOC_BIMC_1 104
+#define ICBID_SLAVE_AUDIO 105
+#define ICBID_SLAVE_CATS_0 106
+#define ICBID_SLAVE_CATS_1 107
+#define ICBID_SLAVE_MM_INT_0 108
+#define ICBID_SLAVE_MM_INT_1 109
+#define ICBID_SLAVE_MM_INT_2 110
+#define ICBID_SLAVE_MM_INT_BIMC 111
+#define ICBID_SLAVE_MMU_MODEM_XPU_CFG 112
+#define ICBID_SLAVE_MSS_INT 113
+#define ICBID_SLAVE_PCNOC_INT_0 114
+#define ICBID_SLAVE_PCNOC_INT_1 115
+#define ICBID_SLAVE_PCNOC_M_0 116
+#define ICBID_SLAVE_PCNOC_M_1 117
+#define ICBID_SLAVE_PCNOC_S_0 118
+#define ICBID_SLAVE_PCNOC_S_1 119
+#define ICBID_SLAVE_PCNOC_S_2 120
+#define ICBID_SLAVE_PCNOC_S_3 121
+#define ICBID_SLAVE_PCNOC_S_4 122
+#define ICBID_SLAVE_PCNOC_S_6 123
+#define ICBID_SLAVE_PCNOC_S_7 124
+#define ICBID_SLAVE_PCNOC_S_8 125
+#define ICBID_SLAVE_PCNOC_S_9 126
+#define ICBID_SLAVE_PRNG_XPU_CFG 127
+#define ICBID_SLAVE_QDSS_INT 128
+#define ICBID_SLAVE_RPM_XPU_CFG 129
+#define ICBID_SLAVE_SNOC_INT_0 130
+#define ICBID_SLAVE_SNOC_INT_1 131
+#define ICBID_SLAVE_SNOC_INT_BIMC 132
+#define ICBID_SLAVE_TCU 133
+#define ICBID_SLAVE_BIMC_INT_0 134
+#define ICBID_SLAVE_BIMC_INT_1 135
+#define ICBID_SLAVE_RICA_CFG 136
+#define ICBID_SLAVE_PCNOC_S_5 189
+#define ICBID_SLAVE_PCNOC_INT_2 184
+#define ICBID_SLAVE_PCNOC_INT_3 185
+#define ICBID_SLAVE_PCNOC_INT_4 186
+#define ICBID_SLAVE_PCNOC_INT_5 187
+#define ICBID_SLAVE_PCNOC_INT_6 188
+#define ICBID_SLAVE_USB3_PHY_CFG 182
+#define ICBID_SLAVE_IPA_CFG 183
+
+#endif
diff --git a/include/dt-bindings/soc/msm-bus-rule-ops.h b/include/dt-bindings/soc/msm-bus-rule-ops.h
new file mode 100644
index 0000000000000..5ef9c6d77ff12
--- /dev/null
+++ b/include/dt-bindings/soc/msm-bus-rule-ops.h
@@ -0,0 +1,32 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_BUS_RULE_OPS_H
+#define __MSM_BUS_RULE_OPS_H
+
+#define FLD_IB 0
+#define FLD_AB 1
+#define FLD_CLK 2
+
+#define OP_LE 0
+#define OP_LT 1
+#define OP_GE 2
+#define OP_GT 3
+#define OP_NOOP 4
+
+#define RULE_STATE_NOT_APPLIED 0
+#define RULE_STATE_APPLIED 1
+
+#define THROTTLE_ON 0
+#define THROTTLE_OFF 1
+
+#endif
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index da95258127aac..0b238f0798b0e 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -203,6 +203,8 @@ struct clk_ops {
struct clk_rate_request *req);
int (*set_parent)(struct clk_hw *hw, u8 index);
u8 (*get_parent)(struct clk_hw *hw);
+ struct clk_hw *(*get_safe_parent)(struct clk_hw *hw,
+ unsigned long *safe_freq);
int (*set_rate)(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate);
int (*set_rate_and_parent)(struct clk_hw *hw,
@@ -440,7 +442,7 @@ void clk_unregister_divider(struct clk *clk);
struct clk_mux {
struct clk_hw hw;
void __iomem *reg;
- u32 *table;
+ unsigned int *table;
u32 mask;
u8 shift;
u8 flags;
@@ -458,6 +460,11 @@ struct clk_mux {
extern const struct clk_ops clk_mux_ops;
extern const struct clk_ops clk_mux_ro_ops;
+unsigned int clk_mux_get_parent(struct clk_hw *hw, unsigned int val,
+ unsigned int *table, unsigned long flags);
+unsigned int clk_mux_reindex(u8 index, unsigned int *table,
+ unsigned long flags);
+
struct clk *clk_register_mux(struct device *dev, const char *name,
const char * const *parent_names, u8 num_parents,
unsigned long flags,
@@ -468,7 +475,7 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
const char * const *parent_names, u8 num_parents,
unsigned long flags,
void __iomem *reg, u8 shift, u32 mask,
- u8 clk_mux_flags, u32 *table, spinlock_t *lock);
+ u8 clk_mux_flags, unsigned int *table, spinlock_t *lock);
void clk_unregister_mux(struct clk *clk);
@@ -661,11 +668,11 @@ void devm_clk_unregister(struct device *dev, struct clk *clk);
/* helper functions */
const char *__clk_get_name(const struct clk *clk);
const char *clk_hw_get_name(const struct clk_hw *hw);
+struct clk *clk_hw_get_clk(const struct clk_hw *hw);
struct clk_hw *__clk_get_hw(struct clk *clk);
unsigned int clk_hw_get_num_parents(const struct clk_hw *hw);
struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw);
-struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw,
- unsigned int index);
+struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw, u8 index);
unsigned int __clk_get_enable_count(struct clk *clk);
unsigned long clk_hw_get_rate(const struct clk_hw *hw);
unsigned long __clk_get_flags(struct clk *clk);
@@ -692,7 +699,7 @@ static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
/*
* FIXME clock api without lock protection
*/
-unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate);
+long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate);
struct of_device_id;
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 718e8725de8aa..da1ebdfe3d612 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -67,6 +67,8 @@ struct cpufreq_policy {
unsigned int cpu; /* cpu managing this policy, must be online */
struct clk *clk;
+ struct clk *l2_clk; /* L2 clock */
+ unsigned int l2_rate[3]; /* L2 bus clock rate thresholds */
struct cpufreq_cpuinfo cpuinfo;/* see above */
unsigned int min; /* in kHz */
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h
index 5246239a49538..27f73c931a6a7 100644
--- a/include/linux/dma-attrs.h
+++ b/include/linux/dma-attrs.h
@@ -19,6 +19,8 @@ enum dma_attr {
DMA_ATTR_SKIP_CPU_SYNC,
DMA_ATTR_FORCE_CONTIGUOUS,
DMA_ATTR_ALLOC_SINGLE_PAGES,
+ DMA_ATTR_STRONGLY_ORDERED,
+ DMA_ATTR_SKIP_ZEROING,
DMA_ATTR_MAX,
};
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index ef7a6ecd85846..1dd543ca15a53 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -269,6 +269,7 @@ struct device *iommu_device_create(struct device *parent, void *drvdata,
void iommu_device_destroy(struct device *dev);
int iommu_device_link(struct device *dev, struct device *link);
void iommu_device_unlink(struct device *dev, struct device *link);
+int iommu_bus_add_dev(struct device *dev);
/* Window handling function prototypes */
extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
diff --git a/include/linux/ipc_logging.h b/include/linux/ipc_logging.h
new file mode 100644
index 0000000000000..780a82d2f1b4b
--- /dev/null
+++ b/include/linux/ipc_logging.h
@@ -0,0 +1,290 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPC_LOGGING_H
+#define _IPC_LOGGING_H
+
+#include <linux/types.h>
+
+#define MAX_MSG_SIZE 255
+
+enum {
+ TSV_TYPE_MSG_START = 1,
+ TSV_TYPE_SKB = TSV_TYPE_MSG_START,
+ TSV_TYPE_STRING,
+ TSV_TYPE_MSG_END = TSV_TYPE_STRING,
+};
+
+struct tsv_header {
+ unsigned char type;
+ unsigned char size; /* size of data field */
+};
+
+struct encode_context {
+ struct tsv_header hdr;
+ char buff[MAX_MSG_SIZE];
+ int offset;
+};
+
+struct decode_context {
+ int output_format; /* 0 = debugfs */
+ char *buff; /* output buffer */
+ int size; /* size of output buffer */
+};
+
+#if defined(CONFIG_IPC_LOGGING)
+/*
+ * ipc_log_context_create: Create a debug log context
+ * Should not be called from atomic context
+ *
+ * @max_num_pages: Number of pages of logging space required (max. 10)
+ * @modname: Name of the directory entry under DEBUGFS
+ * @user_version : Version number of user-defined message formats
+ *
+ * returns context id on success, NULL on failure
+ */
+void *ipc_log_context_create(int max_num_pages, const char *modname,
+ uint16_t user_version);
+
+/*
+ * msg_encode_start: Start encoding a log message
+ *
+ * @ectxt: Temporary storage to hold the encoded message
+ * @type: Root event type defined by the module which is logging
+ */
+void msg_encode_start(struct encode_context *ectxt, uint32_t type);
+
+/*
+ * tsv_timestamp_write: Writes the current timestamp count
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ */
+int tsv_timestamp_write(struct encode_context *ectxt);
+
+/*
+ * tsv_qtimer_write: Writes the current QTimer timestamp count
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ */
+int tsv_qtimer_write(struct encode_context *ectxt);
+
+/*
+ * tsv_pointer_write: Writes a data pointer
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ * @pointer: Pointer value to write
+ */
+int tsv_pointer_write(struct encode_context *ectxt, void *pointer);
+
+/*
+ * tsv_int32_write: Writes a 32-bit integer value
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ * @n: Integer to write
+ */
+int tsv_int32_write(struct encode_context *ectxt, int32_t n);
+
+/*
+ * tsv_byte_array_write: Writes a byte array
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ * @data: Data to write; @data_size: size of the data in bytes
+ */
+int tsv_byte_array_write(struct encode_context *ectxt,
+ void *data, int data_size);
+
+/*
+ * msg_encode_end: Complete the message encode process
+ *
+ * @ectxt: Temporary storage which holds the encoded message
+ */
+void msg_encode_end(struct encode_context *ectxt);
+
+/*
+ * ipc_log_write: Writes the encoded message to the log context
+ *
+ * @ctxt: Log context; @ectxt: storage holding the encoded message
+ */
+void ipc_log_write(void *ctxt, struct encode_context *ectxt);
+
+/*
+ * ipc_log_string: Helper function to log a string
+ *
+ * @ilctxt: Debug Log Context created using ipc_log_context_create()
+ * @fmt: Data specified using format specifiers
+ */
+int ipc_log_string(void *ilctxt, const char *fmt, ...) __printf(2, 3);
+
+/**
+ * ipc_log_extract - Reads and deserializes log
+ *
+ * @ilctxt: logging context
+ * @buff: buffer to receive the data
+ * @size: size of the buffer
+ * @returns: 0 if no data read; >0 number of bytes read; < 0 error
+ *
+ * If no data is available to be read, then the ilctxt::read_avail
+ * completion is reinitialized. This allows clients to block
+ * until new log data is saved.
+ */
+int ipc_log_extract(void *ilctxt, char *buff, int size);
+
+/*
+ * Print a string to decode context.
+ * @dctxt Decode context
+ * @args printf args
+ */
+#define IPC_SPRINTF_DECODE(dctxt, args...) \
+do { \
+ int i; \
+ i = scnprintf(dctxt->buff, dctxt->size, args); \
+ dctxt->buff += i; \
+ dctxt->size -= i; \
+} while (0)
+
+/*
+ * tsv_timestamp_read: Reads a timestamp
+ *
+ * @ectxt: Context retrieved by reading from log space
+ * @dctxt: Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_timestamp_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_qtimer_read: Reads a QTimer timestamp
+ *
+ * @ectxt: Context retrieved by reading from log space
+ * @dctxt: Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_qtimer_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_pointer_read: Reads a data pointer
+ *
+ * @ectxt: Context retrieved by reading from log space
+ * @dctxt: Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_pointer_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_int32_read: Reads a 32-bit integer value
+ *
+ * @ectxt: Context retrieved by reading from log space
+ * @dctxt: Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+int32_t tsv_int32_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_byte_array_read: Reads a byte array
+ *
+ * @ectxt: Context retrieved by reading from log space
+ * @dctxt: Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_byte_array_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format);
+
+/*
+ * add_deserialization_func: Register a deserialization function to
+ * to unpack the subevents of a main event
+ *
+ * @ctxt: Debug log context to which the deserialization function has
+ * to be registered
+ * @type: Main/Root event, defined by the module which is logging, to
+ * which this deserialization function has to be registered.
+ * @dfunc: Deserialization function to be registered
+ *
+ * return 0 on success, -ve value on FAILURE
+ */
+int add_deserialization_func(void *ctxt, int type,
+ void (*dfunc)(struct encode_context *,
+ struct decode_context *));
+
+/*
+ * ipc_log_context_destroy: Destroy debug log context
+ *
+ * @ctxt: debug log context created by calling ipc_log_context_create API.
+ */
+int ipc_log_context_destroy(void *ctxt);
+
+#else
+
+static inline void *ipc_log_context_create(int max_num_pages,
+ const char *modname, uint16_t user_version)
+{ return NULL; }
+
+static inline void msg_encode_start(struct encode_context *ectxt,
+ uint32_t type) { }
+
+static inline int tsv_timestamp_write(struct encode_context *ectxt)
+{ return -EINVAL; }
+
+static inline int tsv_qtimer_write(struct encode_context *ectxt)
+{ return -EINVAL; }
+
+static inline int tsv_pointer_write(struct encode_context *ectxt, void *pointer)
+{ return -EINVAL; }
+
+static inline int tsv_int32_write(struct encode_context *ectxt, int32_t n)
+{ return -EINVAL; }
+
+static inline int tsv_byte_array_write(struct encode_context *ectxt,
+ void *data, int data_size)
+{ return -EINVAL; }
+
+static inline void msg_encode_end(struct encode_context *ectxt) { }
+
+static inline void ipc_log_write(void *ctxt, struct encode_context *ectxt) { }
+
+static inline int ipc_log_string(void *ilctxt, const char *fmt, ...)
+{ return -EINVAL; }
+
+static inline int ipc_log_extract(void *ilctxt, char *buff, int size)
+{ return -EINVAL; }
+
+#define IPC_SPRINTF_DECODE(dctxt, args...) do { } while (0)
+
+static inline void tsv_timestamp_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format) { }
+
+static inline void tsv_qtimer_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format) { }
+
+static inline void tsv_pointer_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format) { }
+
+static inline int32_t tsv_int32_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format)
+{ return 0; }
+
+static inline void tsv_byte_array_read(struct encode_context *ectxt,
+ struct decode_context *dctxt, const char *format) { }
+
+static inline int add_deserialization_func(void *ctxt, int type,
+ void (*dfunc)(struct encode_context *,
+ struct decode_context *))
+{ return 0; }
+
+static inline int ipc_log_context_destroy(void *ctxt)
+{ return 0; }
+
+#endif
+
+#endif
diff --git a/include/linux/msm-bus-board.h b/include/linux/msm-bus-board.h
new file mode 100644
index 0000000000000..d2be552694382
--- /dev/null
+++ b/include/linux/msm-bus-board.h
@@ -0,0 +1,198 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ARCH_MSM_BUS_BOARD_H
+#define __ASM_ARCH_MSM_BUS_BOARD_H
+
+#include <linux/types.h>
+#include <linux/input.h>
+
+enum context {
+ DUAL_CTX,
+ ACTIVE_CTX,
+ NUM_CTX
+};
+
+struct msm_bus_fabric_registration {
+ unsigned int id;
+ const char *name;
+ struct msm_bus_node_info *info;
+ unsigned int len;
+ int ahb;
+ const char *fabclk[NUM_CTX];
+ const char *iface_clk;
+ unsigned int offset;
+ unsigned int haltid;
+ unsigned int rpm_enabled;
+ unsigned int nmasters;
+ unsigned int nslaves;
+ unsigned int ntieredslaves;
+ bool il_flag;
+ const struct msm_bus_board_algorithm *board_algo;
+ int hw_sel;
+ void *hw_data;
+ uint32_t qos_freq;
+ uint32_t qos_baseoffset;
+ u64 nr_lim_thresh;
+ uint32_t eff_fact;
+ uint32_t qos_delta;
+ bool virt;
+};
+
+struct msm_bus_device_node_registration {
+ struct msm_bus_node_device_type *info;
+ unsigned int num_devices;
+ bool virt;
+};
+
+enum msm_bus_bw_tier_type {
+ MSM_BUS_BW_TIER1 = 1,
+ MSM_BUS_BW_TIER2,
+ MSM_BUS_BW_COUNT,
+ MSM_BUS_BW_SIZE = 0x7FFFFFFF,
+};
+
+struct msm_bus_halt_vector {
+ uint32_t haltval;
+ uint32_t haltmask;
+};
+
+extern struct msm_bus_fabric_registration msm_bus_apps_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_sys_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_cpss_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_def_fab_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_8960_apps_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_sg_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_sys_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_cpss_fpb_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_8064_apps_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8064_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8064_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8064_sys_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8064_cpss_fpb_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_9615_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_9615_def_fab_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_8930_apps_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8930_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8930_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8930_sys_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8930_cpss_fpb_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_8974_sys_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_mmss_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_bimc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_ocmem_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_periph_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_config_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_ocmem_vnoc_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_9625_sys_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_9625_bimc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_9625_periph_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_9625_config_noc_pdata;
+
+extern int msm_bus_device_match_adhoc(struct device *dev, void *id);
+
+void msm_bus_rpm_set_mt_mask(void);
+int msm_bus_board_rpm_get_il_ids(uint16_t *id);
+int msm_bus_board_get_iid(int id);
+
+#define NFAB_MSM8226 6
+#define NFAB_MSM8610 5
+
+/*
+ * These macros specify the convention followed for allocating
+ * ids to fabrics, masters and slaves for 8x60.
+ *
+ * A node can be identified as a master/slave/fabric by using
+ * these ids.
+ */
+#define FABRIC_ID_KEY 1024
+#define SLAVE_ID_KEY ((FABRIC_ID_KEY) >> 1)
+#define MAX_FAB_KEY 7168 /* OR(All fabric ids) */
+#define INT_NODE_START 10000
+
+#define GET_FABID(id) ((id) & MAX_FAB_KEY)
+
+#define NODE_ID(id) ((id) & (FABRIC_ID_KEY - 1))
+#define IS_SLAVE(id) ((NODE_ID(id)) >= SLAVE_ID_KEY ? 1 : 0)
+#define CHECK_ID(iid, id) (((iid & id) != id) ? -ENXIO : iid)
+
+/*
+ * The following macros are used to format the data for port halt
+ * and unhalt requests.
+ */
+#define MSM_BUS_CLK_HALT 0x1
+#define MSM_BUS_CLK_HALT_MASK 0x1
+#define MSM_BUS_CLK_HALT_FIELDSIZE 0x1
+#define MSM_BUS_CLK_UNHALT 0x0
+
+#define MSM_BUS_MASTER_SHIFT(master, fieldsize) \
+ ((master) * (fieldsize))
+
+#define MSM_BUS_SET_BITFIELD(word, fieldmask, fieldvalue) \
+ { \
+ (word) &= ~(fieldmask); \
+ (word) |= (fieldvalue); \
+ }
+
+
+#define MSM_BUS_MASTER_HALT(u32haltmask, u32haltval, master) \
+ MSM_BUS_SET_BITFIELD(u32haltmask, \
+ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE), \
+ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE))\
+ MSM_BUS_SET_BITFIELD(u32haltval, \
+ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE), \
+ MSM_BUS_CLK_HALT<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE))\
+
+#define MSM_BUS_MASTER_UNHALT(u32haltmask, u32haltval, master) \
+ MSM_BUS_SET_BITFIELD(u32haltmask, \
+ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE), \
+ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE))\
+ MSM_BUS_SET_BITFIELD(u32haltval, \
+ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE), \
+ MSM_BUS_CLK_UNHALT<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE))\
+
+#define RPM_BUS_SLAVE_REQ 0x766c7362
+#define RPM_BUS_MASTER_REQ 0x73616d62
+
+enum msm_bus_rpm_slave_field_type {
+ RPM_SLAVE_FIELD_BW = 0x00007762,
+};
+
+enum msm_bus_rpm_mas_field_type {
+ RPM_MASTER_FIELD_BW = 0x00007762,
+ RPM_MASTER_FIELD_BW_T0 = 0x30747762,
+ RPM_MASTER_FIELD_BW_T1 = 0x31747762,
+ RPM_MASTER_FIELD_BW_T2 = 0x32747762,
+};
+
+#include <dt-bindings/soc/msm-bus-ids.h>
+
+
+#endif /*__ASM_ARCH_MSM_BUS_BOARD_H */
diff --git a/include/linux/msm-bus.h b/include/linux/msm-bus.h
new file mode 100644
index 0000000000000..eccd2ae6b4e64
--- /dev/null
+++ b/include/linux/msm-bus.h
@@ -0,0 +1,200 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_H
+#define _ARCH_ARM_MACH_MSM_BUS_H
+
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+
+/*
+ * Macros for clients to convert their data to ib and ab
+ * Ws : Time window over which to transfer the data in SECONDS
+ * Bs : Size of the data block in bytes
+ * Per : Recurrence period
+ * Tb : Throughput bandwidth to prevent stalling
+ * R : Ratio of actual bandwidth used to Tb
+ * Ib : Instantaneous bandwidth
+ * Ab : Arbitrated bandwidth
+ *
+ * IB_RECURRBLOCK and AB_RECURRBLOCK:
+ * These are used if the requirement is to transfer a
+ * recurring block of data over a known time window.
+ *
+ * IB_THROUGHPUTBW and AB_THROUGHPUTBW:
+ * These are used for CPU style masters. Here the requirement
+ * is to have minimum throughput bandwidth available to avoid
+ * stalling.
+ */
+#define IB_RECURRBLOCK(Ws, Bs) ((Ws) == 0 ? 0 : ((Bs)/(Ws)))
+#define AB_RECURRBLOCK(Bs, Per) ((Per) == 0 ? 0 : ((Bs)/(Per)))
+#define IB_THROUGHPUTBW(Tb) (Tb)
+#define AB_THROUGHPUTBW(Tb, R) ((Tb) * (R))
+
+struct qcom_msm_bus_req {
+ u32 key;
+ u32 nbytes;
+ u64 value;
+};
+
+struct msm_bus_vectors {
+ int src; /* Master */
+ int dst; /* Slave */
+ uint64_t ab; /* Arbitrated bandwidth */
+ uint64_t ib; /* Instantaneous bandwidth */
+};
+
+struct msm_bus_paths {
+ int num_paths;
+ struct msm_bus_vectors *vectors;
+};
+
+struct msm_bus_scale_pdata {
+ struct msm_bus_paths *usecase;
+ int num_usecases;
+ const char *name;
+ /*
+ * If the active_only flag is set to 1, the BW request is applied
+ * only when at least one CPU is active (powered on). If the flag
+ * is set to 0, then the BW request is always applied irrespective
+ * of the CPU state.
+ */
+ unsigned int active_only;
+};
+
+struct msm_bus_client_handle {
+ char *name;
+ int mas;
+ int slv;
+ int first_hop;
+ u64 cur_ib;
+ u64 cur_ab;
+ bool active_only;
+};
+
+int qcom_rpm_bus_send_message(int ctx, int type, int id,
+ struct qcom_msm_bus_req *req);
+
+/* Scaling APIs */
+
+/*
+ * This function returns a handle to the client. This should be used to
+ * call msm_bus_scale_client_update_request.
+ * The function returns 0 if bus driver is unable to register a client
+ */
+
+#if (defined(CONFIG_MSM_BUS_SCALING) || defined(CONFIG_BUS_TOPOLOGY_ADHOC))
+int __init msm_bus_fabric_init_driver(void);
+uint32_t msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata);
+int msm_bus_scale_client_update_request(uint32_t cl, unsigned int index);
+void msm_bus_scale_unregister_client(uint32_t cl);
+
+struct msm_bus_client_handle*
+msm_bus_scale_register(uint32_t mas, uint32_t slv, char *name,
+ bool active_only);
+void msm_bus_scale_unregister(struct msm_bus_client_handle *cl);
+int msm_bus_scale_update_bw(struct msm_bus_client_handle *cl, u64 ab, u64 ib);
+/* AXI Port configuration APIs */
+int msm_bus_axi_porthalt(int master_port);
+int msm_bus_axi_portunhalt(int master_port);
+
+#else
+static inline int __init msm_bus_fabric_init_driver(void) { return 0; }
+static struct msm_bus_client_handle dummy_cl;
+
+static inline uint32_t
+msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata)
+{
+ return 1;
+}
+
+static inline int
+msm_bus_scale_client_update_request(uint32_t cl, unsigned int index)
+{
+ return 0;
+}
+
+static inline void
+msm_bus_scale_unregister_client(uint32_t cl)
+{
+}
+
+static inline int msm_bus_axi_porthalt(int master_port)
+{
+ return 0;
+}
+
+static inline int msm_bus_axi_portunhalt(int master_port)
+{
+ return 0;
+}
+
+static inline struct msm_bus_client_handle*
+msm_bus_scale_register(uint32_t mas, uint32_t slv, char *name,
+ bool active_only)
+{
+ return &dummy_cl;
+}
+
+static inline void msm_bus_scale_unregister(struct msm_bus_client_handle *cl)
+{
+}
+
+static inline int
+msm_bus_scale_update_bw(struct msm_bus_client_handle *cl, u64 ab, u64 ib)
+{
+ return 0;
+}
+
+#endif
+
+#if defined(CONFIG_OF) && defined(CONFIG_MSM_BUS_SCALING)
+struct msm_bus_scale_pdata *msm_bus_pdata_from_node(
+ struct platform_device *pdev, struct device_node *of_node);
+struct msm_bus_scale_pdata *msm_bus_cl_get_pdata(struct platform_device *pdev);
+void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata);
+#else
+static inline struct msm_bus_scale_pdata
+*msm_bus_cl_get_pdata(struct platform_device *pdev)
+{
+ return NULL;
+}
+
+static inline struct msm_bus_scale_pdata *msm_bus_pdata_from_node(
+ struct platform_device *pdev, struct device_node *of_node)
+{
+ return NULL;
+}
+
+static inline void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata)
+{
+}
+#endif
+
+#ifdef CONFIG_DEBUG_BUS_VOTER
+int msm_bus_floor_vote_context(const char *name, u64 floor_hz,
+ bool active_only);
+int msm_bus_floor_vote(const char *name, u64 floor_hz);
+#else
+static inline int msm_bus_floor_vote(const char *name, u64 floor_hz)
+{
+ return -EINVAL;
+}
+
+static inline int msm_bus_floor_vote_context(const char *name, u64 floor_hz,
+ bool active_only)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_DEBUG_BUS_VOTER */
+#endif /*_ARCH_ARM_MACH_MSM_BUS_H*/
diff --git a/include/linux/msm_audio.h b/include/linux/msm_audio.h
new file mode 100644
index 0000000000000..04d4e5b56b21b
--- /dev/null
+++ b/include/linux/msm_audio.h
@@ -0,0 +1,367 @@
+/* include/linux/msm_audio.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (c) 2012 The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_MSM_AUDIO_H
+#define __LINUX_MSM_AUDIO_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/* PCM Audio */
+
+#define AUDIO_IOCTL_MAGIC 'a'
+
+#define AUDIO_START _IOW(AUDIO_IOCTL_MAGIC, 0, unsigned)
+#define AUDIO_STOP _IOW(AUDIO_IOCTL_MAGIC, 1, unsigned)
+#define AUDIO_FLUSH _IOW(AUDIO_IOCTL_MAGIC, 2, unsigned)
+#define AUDIO_GET_CONFIG _IOR(AUDIO_IOCTL_MAGIC, 3, unsigned)
+#define AUDIO_SET_CONFIG _IOW(AUDIO_IOCTL_MAGIC, 4, unsigned)
+#define AUDIO_GET_STATS _IOR(AUDIO_IOCTL_MAGIC, 5, unsigned)
+#define AUDIO_ENABLE_AUDPP _IOW(AUDIO_IOCTL_MAGIC, 6, unsigned)
+#define AUDIO_SET_ADRC _IOW(AUDIO_IOCTL_MAGIC, 7, unsigned)
+#define AUDIO_SET_EQ _IOW(AUDIO_IOCTL_MAGIC, 8, unsigned)
+#define AUDIO_SET_RX_IIR _IOW(AUDIO_IOCTL_MAGIC, 9, unsigned)
+#define AUDIO_SET_VOLUME _IOW(AUDIO_IOCTL_MAGIC, 10, unsigned)
+#define AUDIO_PAUSE _IOW(AUDIO_IOCTL_MAGIC, 11, unsigned)
+#define AUDIO_PLAY_DTMF _IOW(AUDIO_IOCTL_MAGIC, 12, unsigned)
+#define AUDIO_GET_EVENT _IOR(AUDIO_IOCTL_MAGIC, 13, unsigned)
+#define AUDIO_ABORT_GET_EVENT _IOW(AUDIO_IOCTL_MAGIC, 14, unsigned)
+#define AUDIO_REGISTER_PMEM _IOW(AUDIO_IOCTL_MAGIC, 15, unsigned)
+#define AUDIO_DEREGISTER_PMEM _IOW(AUDIO_IOCTL_MAGIC, 16, unsigned)
+#define AUDIO_ASYNC_WRITE _IOW(AUDIO_IOCTL_MAGIC, 17, unsigned)
+#define AUDIO_ASYNC_READ _IOW(AUDIO_IOCTL_MAGIC, 18, unsigned)
+#define AUDIO_SET_INCALL _IOW(AUDIO_IOCTL_MAGIC, 19, struct msm_voicerec_mode)
+#define AUDIO_GET_NUM_SND_DEVICE _IOR(AUDIO_IOCTL_MAGIC, 20, unsigned)
+#define AUDIO_GET_SND_DEVICES _IOWR(AUDIO_IOCTL_MAGIC, 21, \
+ struct msm_snd_device_list)
+#define AUDIO_ENABLE_SND_DEVICE _IOW(AUDIO_IOCTL_MAGIC, 22, unsigned)
+#define AUDIO_DISABLE_SND_DEVICE _IOW(AUDIO_IOCTL_MAGIC, 23, unsigned)
+#define AUDIO_ROUTE_STREAM _IOW(AUDIO_IOCTL_MAGIC, 24, \
+ struct msm_audio_route_config)
+#define AUDIO_GET_PCM_CONFIG _IOR(AUDIO_IOCTL_MAGIC, 30, unsigned)
+#define AUDIO_SET_PCM_CONFIG _IOW(AUDIO_IOCTL_MAGIC, 31, unsigned)
+#define AUDIO_SWITCH_DEVICE _IOW(AUDIO_IOCTL_MAGIC, 32, unsigned)
+#define AUDIO_SET_MUTE _IOW(AUDIO_IOCTL_MAGIC, 33, unsigned)
+#define AUDIO_UPDATE_ACDB _IOW(AUDIO_IOCTL_MAGIC, 34, unsigned)
+#define AUDIO_START_VOICE _IOW(AUDIO_IOCTL_MAGIC, 35, unsigned)
+#define AUDIO_STOP_VOICE _IOW(AUDIO_IOCTL_MAGIC, 36, unsigned)
+#define AUDIO_REINIT_ACDB _IOW(AUDIO_IOCTL_MAGIC, 39, unsigned)
+#define AUDIO_OUTPORT_FLUSH _IOW(AUDIO_IOCTL_MAGIC, 40, unsigned short)
+#define AUDIO_SET_ERR_THRESHOLD_VALUE _IOW(AUDIO_IOCTL_MAGIC, 41, \
+ unsigned short)
+#define AUDIO_GET_BITSTREAM_ERROR_INFO _IOR(AUDIO_IOCTL_MAGIC, 42, \
+ struct msm_audio_bitstream_error_info)
+
+#define AUDIO_SET_SRS_TRUMEDIA_PARAM _IOW(AUDIO_IOCTL_MAGIC, 43, unsigned)
+
+/* Qualcomm extensions */
+#define AUDIO_SET_STREAM_CONFIG _IOW(AUDIO_IOCTL_MAGIC, 80, \
+ struct msm_audio_stream_config)
+#define AUDIO_GET_STREAM_CONFIG _IOR(AUDIO_IOCTL_MAGIC, 81, \
+ struct msm_audio_stream_config)
+#define AUDIO_GET_SESSION_ID _IOR(AUDIO_IOCTL_MAGIC, 82, unsigned short)
+#define AUDIO_GET_STREAM_INFO _IOR(AUDIO_IOCTL_MAGIC, 83, \
+ struct msm_audio_bitstream_info)
+#define AUDIO_SET_PAN _IOW(AUDIO_IOCTL_MAGIC, 84, unsigned)
+#define AUDIO_SET_QCONCERT_PLUS _IOW(AUDIO_IOCTL_MAGIC, 85, unsigned)
+#define AUDIO_SET_MBADRC _IOW(AUDIO_IOCTL_MAGIC, 86, unsigned)
+#define AUDIO_SET_VOLUME_PATH _IOW(AUDIO_IOCTL_MAGIC, 87, \
+ struct msm_vol_info)
+#define AUDIO_SET_MAX_VOL_ALL _IOW(AUDIO_IOCTL_MAGIC, 88, unsigned)
+#define AUDIO_ENABLE_AUDPRE _IOW(AUDIO_IOCTL_MAGIC, 89, unsigned)
+#define AUDIO_SET_AGC _IOW(AUDIO_IOCTL_MAGIC, 90, unsigned)
+#define AUDIO_SET_NS _IOW(AUDIO_IOCTL_MAGIC, 91, unsigned)
+#define AUDIO_SET_TX_IIR _IOW(AUDIO_IOCTL_MAGIC, 92, unsigned)
+#define AUDIO_GET_BUF_CFG _IOW(AUDIO_IOCTL_MAGIC, 93, \
+ struct msm_audio_buf_cfg)
+#define AUDIO_SET_BUF_CFG _IOW(AUDIO_IOCTL_MAGIC, 94, \
+ struct msm_audio_buf_cfg)
+#define AUDIO_SET_ACDB_BLK _IOW(AUDIO_IOCTL_MAGIC, 95, \
+ struct msm_acdb_cmd_device)
+#define AUDIO_GET_ACDB_BLK _IOW(AUDIO_IOCTL_MAGIC, 96, \
+ struct msm_acdb_cmd_device)
+
+#define AUDIO_REGISTER_ION _IOW(AUDIO_IOCTL_MAGIC, 97, unsigned)
+#define AUDIO_DEREGISTER_ION _IOW(AUDIO_IOCTL_MAGIC, 98, unsigned)
+
+#define AUDIO_MAX_COMMON_IOCTL_NUM 100
+
+
+#define HANDSET_MIC 0x01
+#define HANDSET_SPKR 0x02
+#define HEADSET_MIC 0x03
+#define HEADSET_SPKR_MONO 0x04
+#define HEADSET_SPKR_STEREO 0x05
+#define SPKR_PHONE_MIC 0x06
+#define SPKR_PHONE_MONO 0x07
+#define SPKR_PHONE_STEREO 0x08
+#define BT_SCO_MIC 0x09
+#define BT_SCO_SPKR 0x0A
+#define BT_A2DP_SPKR 0x0B
+#define TTY_HEADSET_MIC 0x0C
+#define TTY_HEADSET_SPKR 0x0D
+
+/* Default devices are not supported in a */
+/* device switching context. Only supported */
+/* for stream devices. */
+/* DO NOT USE */
+#define DEFAULT_TX 0x0E
+#define DEFAULT_RX 0x0F
+
+#define BT_A2DP_TX 0x10
+
+#define HEADSET_MONO_PLUS_SPKR_MONO_RX 0x11
+#define HEADSET_MONO_PLUS_SPKR_STEREO_RX 0x12
+#define HEADSET_STEREO_PLUS_SPKR_MONO_RX 0x13
+#define HEADSET_STEREO_PLUS_SPKR_STEREO_RX 0x14
+
+#define I2S_RX 0x20
+#define I2S_TX 0x21
+
+#define ADRC_ENABLE 0x0001
+#define EQ_ENABLE 0x0002
+#define IIR_ENABLE 0x0004
+#define QCONCERT_PLUS_ENABLE 0x0008
+#define MBADRC_ENABLE 0x0010
+#define SRS_ENABLE 0x0020
+#define SRS_DISABLE 0x0040
+
+#define AGC_ENABLE 0x0001
+#define NS_ENABLE 0x0002
+#define TX_IIR_ENABLE 0x0004
+#define FLUENCE_ENABLE 0x0008
+
+#define VOC_REC_UPLINK 0x00
+#define VOC_REC_DOWNLINK 0x01
+#define VOC_REC_BOTH 0x02
+
+struct msm_audio_config {
+ uint32_t buffer_size;
+ uint32_t buffer_count;
+ uint32_t channel_count;
+ uint32_t sample_rate;
+ uint32_t type;
+ uint32_t meta_field;
+ uint32_t bits;
+ uint32_t unused[3];
+};
+
+struct msm_audio_stream_config {
+ uint32_t buffer_size;
+ uint32_t buffer_count;
+};
+
+struct msm_audio_buf_cfg{
+ uint32_t meta_info_enable;
+ uint32_t frames_per_buf;
+};
+
+struct msm_audio_stats {
+ uint32_t byte_count;
+ uint32_t sample_count;
+ uint32_t unused[2];
+};
+
+struct msm_audio_ion_info {
+ int fd;
+ void *vaddr;
+};
+
+struct msm_audio_pmem_info {
+ int fd;
+ void *vaddr;
+};
+
+struct msm_audio_aio_buf {
+ void *buf_addr;
+ uint32_t buf_len;
+ uint32_t data_len;
+ void *private_data;
+ unsigned short mfield_sz; /*only useful for data has meta field */
+};
+
+/* Audio routing */
+
+#define SND_IOCTL_MAGIC 's'
+
+#define SND_MUTE_UNMUTED 0
+#define SND_MUTE_MUTED 1
+
+struct msm_mute_info {
+ uint32_t mute;
+ uint32_t path;
+};
+
+struct msm_vol_info {
+ uint32_t vol;
+ uint32_t path;
+};
+
+struct msm_voicerec_mode {
+ uint32_t rec_mode;
+};
+
+struct msm_snd_device_config {
+ uint32_t device;
+ uint32_t ear_mute;
+ uint32_t mic_mute;
+};
+
+#define SND_SET_DEVICE _IOW(SND_IOCTL_MAGIC, 2, struct msm_snd_device_config *)
+
+#define SND_METHOD_VOICE 0
+
+struct msm_snd_volume_config {
+ uint32_t device;
+ uint32_t method;
+ uint32_t volume;
+};
+
+#define SND_SET_VOLUME _IOW(SND_IOCTL_MAGIC, 3, struct msm_snd_volume_config *)
+
+/* Returns the number of SND endpoints supported. */
+
+#define SND_GET_NUM_ENDPOINTS _IOR(SND_IOCTL_MAGIC, 4, unsigned *)
+
+struct msm_snd_endpoint {
+ int id; /* input and output */
+ char name[64]; /* output only */
+};
+
+/* Takes an index between 0 and one less than the number returned by
+ * SND_GET_NUM_ENDPOINTS, and returns the SND index and name of a
+ * SND endpoint. On input, the .id field contains the number of the
+ * endpoint, and on exit it contains the SND index, while .name contains
+ * the description of the endpoint.
+ */
+
+#define SND_GET_ENDPOINT _IOWR(SND_IOCTL_MAGIC, 5, struct msm_snd_endpoint *)
+
+
+#define SND_AVC_CTL _IOW(SND_IOCTL_MAGIC, 6, unsigned *)
+#define SND_AGC_CTL _IOW(SND_IOCTL_MAGIC, 7, unsigned *)
+
+struct msm_audio_pcm_config {
+ uint32_t pcm_feedback; /* 0 - disable > 0 - enable */
+ uint32_t buffer_count; /* Number of buffers to allocate */
+ uint32_t buffer_size; /* Size of buffer for capturing of
+ PCM samples */
+};
+
+#define AUDIO_EVENT_SUSPEND 0
+#define AUDIO_EVENT_RESUME 1
+#define AUDIO_EVENT_WRITE_DONE 2
+#define AUDIO_EVENT_READ_DONE 3
+#define AUDIO_EVENT_STREAM_INFO 4
+#define AUDIO_EVENT_BITSTREAM_ERROR_INFO 5
+
+#define AUDIO_CODEC_TYPE_MP3 0
+#define AUDIO_CODEC_TYPE_AAC 1
+
+struct msm_audio_bitstream_info {
+ uint32_t codec_type;
+ uint32_t chan_info;
+ uint32_t sample_rate;
+ uint32_t bit_stream_info;
+ uint32_t bit_rate;
+ uint32_t unused[3];
+};
+
+struct msm_audio_bitstream_error_info {
+ uint32_t dec_id;
+ uint32_t err_msg_indicator;
+ uint32_t err_type;
+};
+
+union msm_audio_event_payload {
+ struct msm_audio_aio_buf aio_buf;
+ struct msm_audio_bitstream_info stream_info;
+ struct msm_audio_bitstream_error_info error_info;
+ int reserved;
+};
+
+struct msm_audio_event {
+ int event_type;
+ int timeout_ms;
+ union msm_audio_event_payload event_payload;
+};
+
+#define MSM_SNDDEV_CAP_RX 0x1
+#define MSM_SNDDEV_CAP_TX 0x2
+#define MSM_SNDDEV_CAP_VOICE 0x4
+
+struct msm_snd_device_info {
+ uint32_t dev_id;
+ uint32_t dev_cap; /* bitmask describe capability of device */
+ char dev_name[64];
+};
+
+struct msm_snd_device_list {
+ uint32_t num_dev; /* Indicate number of device info to be retrieved */
+ struct msm_snd_device_info *list;
+};
+
+struct msm_dtmf_config {
+ uint16_t path;
+ uint16_t dtmf_hi;
+ uint16_t dtmf_low;
+ uint16_t duration;
+ uint16_t tx_gain;
+ uint16_t rx_gain;
+ uint16_t mixing;
+};
+
+#define AUDIO_ROUTE_STREAM_VOICE_RX 0
+#define AUDIO_ROUTE_STREAM_VOICE_TX 1
+#define AUDIO_ROUTE_STREAM_PLAYBACK 2
+#define AUDIO_ROUTE_STREAM_REC 3
+
+struct msm_audio_route_config {
+ uint32_t stream_type;
+ uint32_t stream_id;
+ uint32_t dev_id;
+};
+
+#define AUDIO_MAX_EQ_BANDS 12
+
+struct msm_audio_eq_band {
+ uint16_t band_idx; /* The band index, 0 .. 11 */
+ uint32_t filter_type; /* Filter band type */
+ uint32_t center_freq_hz; /* Filter band center frequency */
+ uint32_t filter_gain; /* Filter band initial gain (dB) */
+ /* Range is +12 dB to -12 dB with 1dB increments. */
+ uint32_t q_factor;
+} __attribute__ ((packed));
+
+struct msm_audio_eq_stream_config {
+ uint32_t enable; /* Number of consecutive bands specified */
+ uint32_t num_bands;
+ struct msm_audio_eq_band eq_bands[AUDIO_MAX_EQ_BANDS];
+} __attribute__ ((packed));
+
+struct msm_acdb_cmd_device {
+ uint32_t command_id;
+ uint32_t device_id;
+ uint32_t network_id;
+ uint32_t sample_rate_id; /* Actual sample rate value */
+ uint32_t interface_id; /* See interface id's above */
+ uint32_t algorithm_block_id; /* See enumerations above */
+ uint32_t total_bytes; /* Length in bytes used by buffer */
+ uint32_t *phys_buf; /* Physical Address of data */
+};
+
+
+#endif
diff --git a/include/linux/msm_audio_acdb.h b/include/linux/msm_audio_acdb.h
new file mode 100644
index 0000000000000..e7f06b53ac84b
--- /dev/null
+++ b/include/linux/msm_audio_acdb.h
@@ -0,0 +1,81 @@
+#ifndef __MSM_AUDIO_ACDB_H
+#define __MSM_AUDIO_ACDB_H
+
+#include <linux/msm_audio.h>
+
+#define AUDIO_SET_VOCPROC_CAL _IOW(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_COMMON_IOCTL_NUM+0), unsigned)
+#define AUDIO_SET_VOCPROC_STREAM_CAL _IOW(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_COMMON_IOCTL_NUM+1), unsigned)
+#define AUDIO_SET_VOCPROC_VOL_CAL _IOW(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_COMMON_IOCTL_NUM+2), unsigned)
+#define AUDIO_SET_AUDPROC_RX_CAL _IOW(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_COMMON_IOCTL_NUM+3), unsigned)
+#define AUDIO_SET_AUDPROC_RX_STREAM_CAL _IOW(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_COMMON_IOCTL_NUM+4), unsigned)
+#define AUDIO_SET_AUDPROC_RX_VOL_CAL _IOW(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_COMMON_IOCTL_NUM+5), unsigned)
+#define AUDIO_SET_AUDPROC_TX_CAL _IOW(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_COMMON_IOCTL_NUM+6), unsigned)
+#define AUDIO_SET_AUDPROC_TX_STREAM_CAL _IOW(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_COMMON_IOCTL_NUM+7), unsigned)
+#define AUDIO_SET_AUDPROC_TX_VOL_CAL _IOW(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_COMMON_IOCTL_NUM+8), unsigned)
+#define AUDIO_SET_SIDETONE_CAL _IOW(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_COMMON_IOCTL_NUM+9), unsigned)
+#define AUDIO_SET_ANC_CAL _IOW(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_COMMON_IOCTL_NUM+10), unsigned)
+#define AUDIO_SET_VOICE_RX_TOPOLOGY _IOW(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_COMMON_IOCTL_NUM+11), unsigned)
+#define AUDIO_SET_VOICE_TX_TOPOLOGY _IOW(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_COMMON_IOCTL_NUM+12), unsigned)
+#define AUDIO_SET_ADM_RX_TOPOLOGY _IOW(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_COMMON_IOCTL_NUM+13), unsigned)
+#define AUDIO_SET_ADM_TX_TOPOLOGY _IOW(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_COMMON_IOCTL_NUM+14), unsigned)
+#define AUDIO_SET_ASM_TOPOLOGY _IOW(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_COMMON_IOCTL_NUM+15), unsigned)
+#define AUDIO_SET_AFE_TX_CAL _IOW(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_COMMON_IOCTL_NUM+16), unsigned)
+#define AUDIO_SET_AFE_RX_CAL _IOW(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_COMMON_IOCTL_NUM+17), unsigned)
+
+
+#define AUDIO_MAX_ACDB_IOCTL (AUDIO_MAX_COMMON_IOCTL_NUM+30)
+
+/* ACDB structures */
+struct cal_block {
+ uint32_t cal_size; /* Size of Cal Data */
+ uint32_t cal_offset; /* offset pointer to Cal Data */
+};
+
+struct sidetone_cal {
+ uint16_t enable;
+ uint16_t gain;
+};
+
+/* For Real-Time Audio Calibration */
+#define AUDIO_GET_RTAC_ADM_INFO _IOR(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_ACDB_IOCTL+1), unsigned)
+#define AUDIO_GET_RTAC_VOICE_INFO _IOR(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_ACDB_IOCTL+2), unsigned)
+#define AUDIO_GET_RTAC_ADM_CAL _IOWR(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_ACDB_IOCTL+3), unsigned)
+#define AUDIO_SET_RTAC_ADM_CAL _IOWR(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_ACDB_IOCTL+4), unsigned)
+#define AUDIO_GET_RTAC_ASM_CAL _IOWR(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_ACDB_IOCTL+5), unsigned)
+#define AUDIO_SET_RTAC_ASM_CAL _IOWR(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_ACDB_IOCTL+6), unsigned)
+#define AUDIO_GET_RTAC_CVS_CAL _IOWR(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_ACDB_IOCTL+7), unsigned)
+#define AUDIO_SET_RTAC_CVS_CAL _IOWR(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_ACDB_IOCTL+8), unsigned)
+#define AUDIO_GET_RTAC_CVP_CAL _IOWR(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_ACDB_IOCTL+9), unsigned)
+#define AUDIO_SET_RTAC_CVP_CAL _IOWR(AUDIO_IOCTL_MAGIC, \
+ (AUDIO_MAX_ACDB_IOCTL+10), unsigned)
+
+#define AUDIO_MAX_RTAC_IOCTL (AUDIO_MAX_ACDB_IOCTL+20)
+
+#endif /* __MSM_AUDIO_ACDB_H */
diff --git a/include/linux/msm_bus_rules.h b/include/linux/msm_bus_rules.h
new file mode 100644
index 0000000000000..a045223dc7a88
--- /dev/null
+++ b/include/linux/msm_bus_rules.h
@@ -0,0 +1,77 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_RULES_H
+#define _ARCH_ARM_MACH_MSM_BUS_RULES_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/notifier.h>
+#include <dt-bindings/soc/msm-bus-rule-ops.h>
+
+#define MAX_NODES (5)
+
+struct rule_update_path_info {
+ u32 id;
+ u64 ab;
+ u64 ib;
+ u64 clk;
+ struct list_head link;
+};
+
+struct rule_apply_rcm_info {
+ u32 id;
+ u64 lim_bw;
+ int throttle;
+ bool after_clk_commit;
+ struct list_head link;
+};
+
+struct bus_rule_type {
+ int num_src;
+ int *src_id;
+ int src_field;
+ int op;
+ u64 thresh;
+ int num_dst;
+ int *dst_node;
+ u64 dst_bw;
+ int mode;
+ void *client_data;
+};
+
+#if (defined(CONFIG_BUS_TOPOLOGY_ADHOC))
+void msm_rule_register(int num_rules, struct bus_rule_type *rule,
+ struct notifier_block *nb);
+void msm_rule_unregister(int num_rules, struct bus_rule_type *rule,
+ struct notifier_block *nb);
+void print_rules_buf(char *buf, int count);
+bool msm_rule_are_rules_registered(void);
+#else
+static inline void msm_rule_register(int num_rules, struct bus_rule_type *rule,
+ struct notifier_block *nb)
+{
+}
+static inline void msm_rule_unregister(int num_rules,
+ struct bus_rule_type *rule,
+ struct notifier_block *nb)
+{
+}
+static inline void print_rules_buf(char *buf, int count)
+{
+}
+static inline bool msm_rule_are_rules_registered(void)
+{
+ return false;
+}
+#endif /* defined(CONFIG_BUS_TOPOLOGY_ADHOC) */
+#endif /* _ARCH_ARM_MACH_MSM_BUS_RULES_H */
diff --git a/include/linux/msm_iommu_domains.h b/include/linux/msm_iommu_domains.h
new file mode 100644
index 0000000000000..811d7737ddb5d
--- /dev/null
+++ b/include/linux/msm_iommu_domains.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_MSM_IOMMU_DOMAINS_H
+#define _LINUX_MSM_IOMMU_DOMAINS_H
+
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/genalloc.h>
+#include <linux/rbtree.h>
+#include <linux/dma-buf.h>
+
+#define MSM_IOMMU_DOMAIN_SECURE 0x1
+
+enum {
+ VIDEO_DOMAIN,
+ CAMERA_DOMAIN,
+ DISPLAY_READ_DOMAIN,
+ DISPLAY_WRITE_DOMAIN,
+ ROTATOR_SRC_DOMAIN,
+ ROTATOR_DST_DOMAIN,
+ MAX_DOMAINS
+};
+
+enum {
+ VIDEO_FIRMWARE_POOL,
+ VIDEO_MAIN_POOL,
+ GEN_POOL,
+};
+
+struct mem_pool {
+ struct mutex pool_mutex;
+ struct gen_pool *gpool;
+ phys_addr_t paddr;
+ unsigned long size;
+ unsigned long free;
+ unsigned int id;
+};
+
+struct msm_iommu_domain {
+ /* iommu domain to map in */
+ struct iommu_domain *domain;
+ /* total number of allocations from this domain */
+ atomic_t allocation_cnt;
+ /* number of iova pools */
+ int npools;
+ /*
+ * array of gen_pools for allocating iovas.
+ * behavior is undefined if these overlap
+ */
+ struct mem_pool *iova_pools;
+};
+
+struct msm_iommu_domain_name {
+ char *name;
+ int domain;
+};
+
+struct iommu_domains_pdata {
+ struct msm_iommu_domain *domains;
+ int ndomains;
+ struct msm_iommu_domain_name *domain_names;
+ int nnames;
+ unsigned int domain_alloc_flags;
+};
+
+struct msm_iova_partition {
+ unsigned long start;
+ unsigned long size;
+};
+
+struct msm_iova_layout {
+ struct msm_iova_partition *partitions;
+ int npartitions;
+ const char *client_name;
+ unsigned int domain_flags;
+ unsigned int is_secure;
+};
+
+#if defined(CONFIG_QCOM_IOMMU)
+extern void msm_iommu_set_client_name(struct iommu_domain *domain,
+ char const *name);
+extern struct iommu_domain *msm_get_iommu_domain(int domain_num);
+extern int msm_allocate_iova_address(unsigned int iommu_domain,
+ unsigned int partition_no,
+ unsigned long size,
+ unsigned long align,
+ unsigned long *iova);
+
+extern void msm_free_iova_address(unsigned long iova,
+ unsigned int iommu_domain,
+ unsigned int partition_no,
+ unsigned long size);
+
+extern int msm_use_iommu(void);
+
+extern int msm_iommu_map_extra(struct iommu_domain *domain,
+ unsigned long start_iova,
+ phys_addr_t phys_addr,
+ unsigned long size,
+ unsigned long page_size,
+ int cached);
+
+extern void msm_iommu_unmap_extra(struct iommu_domain *domain,
+ unsigned long start_iova,
+ unsigned long size,
+ unsigned long page_size);
+
+extern int msm_iommu_map_contig_buffer(phys_addr_t phys,
+ unsigned int domain_no,
+ unsigned int partition_no,
+ unsigned long size,
+ unsigned long align,
+ unsigned long cached,
+ dma_addr_t *iova_val);
+
+extern void msm_iommu_unmap_contig_buffer(dma_addr_t iova,
+ unsigned int domain_no,
+ unsigned int partition_no,
+ unsigned long size);
+
+extern int msm_register_domain(struct msm_iova_layout *layout);
+extern int msm_unregister_domain(struct iommu_domain *domain);
+
+int msm_map_dma_buf(struct dma_buf *dma_buf, struct sg_table *table,
+ int domain_num, int partition_num, unsigned long align,
+ unsigned long iova_length, unsigned long *iova,
+ unsigned long *buffer_size,
+ unsigned long flags, unsigned long iommu_flags);
+
+void msm_unmap_dma_buf(struct sg_table *table, int domain_num,
+ int partition_num);
+#else
+static inline void msm_iommu_set_client_name(struct iommu_domain *domain,
+ char const *name)
+{
+}
+
+static inline struct iommu_domain *msm_get_iommu_domain(int subsys_id)
+{
+ return NULL;
+}
+
+static inline int msm_allocate_iova_address(unsigned int iommu_domain,
+ unsigned int partition_no,
+ unsigned long size,
+ unsigned long align,
+ unsigned long *iova)
+{
+ return -ENOMEM;
+}
+
+static inline void msm_free_iova_address(unsigned long iova,
+ unsigned int iommu_domain,
+ unsigned int partition_no,
+ unsigned long size) { }
+
+static inline int msm_use_iommu(void)
+{
+ return 0;
+}
+
+static inline int msm_iommu_map_extra(struct iommu_domain *domain,
+ unsigned long start_iova,
+ phys_addr_t phys_addr,
+ unsigned long size,
+ unsigned long page_size,
+ int cached)
+{
+ return -ENODEV;
+}
+
+static inline void msm_iommu_unmap_extra(struct iommu_domain *domain,
+ unsigned long start_iova,
+ unsigned long size,
+ unsigned long page_size)
+{
+}
+
+static inline int msm_iommu_map_contig_buffer(phys_addr_t phys,
+ unsigned int domain_no,
+ unsigned int partition_no,
+ unsigned long size,
+ unsigned long align,
+ unsigned long cached,
+ dma_addr_t *iova_val)
+{
+ *iova_val = phys;
+ return 0;
+}
+
+static inline void msm_iommu_unmap_contig_buffer(dma_addr_t iova,
+ unsigned int domain_no,
+ unsigned int partition_no,
+ unsigned long size)
+{
+}
+
+static inline int msm_register_domain(struct msm_iova_layout *layout)
+{
+ return -ENODEV;
+}
+
+static inline int msm_unregister_domain(struct iommu_domain *domain)
+{
+ return -ENODEV;
+}
+
+static inline int msm_map_dma_buf(struct dma_buf *dma_buf,
+ struct sg_table *table,
+ int domain_num, int partition_num,
+ unsigned long align,
+ unsigned long iova_length,
+ unsigned long *iova,
+ unsigned long *buffer_size,
+ unsigned long flags,
+ unsigned long iommu_flags)
+{
+ return -ENODEV;
+}
+
+static inline void msm_unmap_dma_buf(struct sg_table *table, int domain_num,
+ int partition_num)
+{
+
+}
+
+#endif /* CONFIG_QCOM_IOMMU */
+#endif /* _LINUX_MSM_IOMMU_DOMAINS_H */
\ No newline at end of file
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index a4fcc90b0f208..cd93416d762ed 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -14,6 +14,10 @@
struct nvmem_device;
struct nvmem_cell_info;
+typedef int (*nvmem_reg_read_t)(void *priv, unsigned int offset,
+ void *val, size_t bytes);
+typedef int (*nvmem_reg_write_t)(void *priv, unsigned int offset,
+ void *val, size_t bytes);
struct nvmem_config {
struct device *dev;
@@ -24,6 +28,12 @@ struct nvmem_config {
int ncells;
bool read_only;
bool root_only;
+ nvmem_reg_read_t reg_read;
+ nvmem_reg_write_t reg_write;
+ int size;
+ int word_size;
+ int stride;
+ void *priv;
/* To be only used by old driver/misc/eeprom drivers */
bool compat;
struct device *base_dev;
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index cc7dd687a89dd..4b8cb7da6b4a2 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -55,7 +55,9 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
return of_node_get(cpu_dev->of_node);
}
-void of_dma_configure(struct device *dev, struct device_node *np);
+void of_dma_configure_masks(struct device *dev, struct device_node *np);
+int of_dma_configure_ops(struct device *dev, struct device_node *np);
+void of_dma_deconfigure(struct device *dev);
#else /* CONFIG_OF */
static inline int of_driver_match_device(struct device *dev,
@@ -98,7 +100,15 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
{
return NULL;
}
-static inline void of_dma_configure(struct device *dev, struct device_node *np)
+static inline void of_dma_configure_masks(struct device *dev,
+ struct device_node *np)
+{}
+static inline int of_dma_configure_ops(struct device *dev,
+ struct device_node *np)
+{
+ return 0;
+}
+static inline void of_dma_deconfigure(struct device *dev)
{}
#endif /* CONFIG_OF */
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index f6e9e85164e8b..070f8835780bb 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -17,6 +17,8 @@ int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin);
int of_pci_parse_bus_range(struct device_node *node, struct resource *res);
int of_get_pci_domain_nr(struct device_node *node);
void of_pci_check_probe_only(void);
+int of_pci_map_rid(struct device_node *np, const char *map_name, u32 rid_in,
+ struct device_node **target, u32 *rid_out);
#else
static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
{
@@ -52,6 +54,12 @@ of_get_pci_domain_nr(struct device_node *node)
return -1;
}
+static inline int of_pci_map_rid(struct device_node *np, const char *map_name,
+ u32 rid_in, struct device_node **target, u32 *rid_out)
+{
+ return -EINVAL;
+}
+
static inline void of_pci_check_probe_only(void) { }
#endif
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index cccaf4a29e9f0..9e274e572588c 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -22,6 +22,7 @@ struct device;
enum dev_pm_opp_event {
OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
+ OPP_EVENT_ADJUST_VOLTAGE,
};
#if defined(CONFIG_PM_OPP)
@@ -29,6 +30,7 @@ enum dev_pm_opp_event {
unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp);
unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp);
+struct regulator *dev_pm_opp_get_regulator(struct device *dev);
bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
@@ -52,6 +54,9 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq,
unsigned long u_volt);
void dev_pm_opp_remove(struct device *dev, unsigned long freq);
+int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
+ unsigned long u_volt);
+
int dev_pm_opp_enable(struct device *dev, unsigned long freq);
int dev_pm_opp_disable(struct device *dev, unsigned long freq);
@@ -134,6 +139,12 @@ static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
}
+static inline int dev_pm_opp_adjust_voltage(struct device *dev,
+ unsigned long freq, unsigned long u_volt)
+{
+ return 0;
+}
+
static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
return 0;
diff --git a/include/linux/qcom_iommu.h b/include/linux/qcom_iommu.h
new file mode 100644
index 0000000000000..54f47b378ed66
--- /dev/null
+++ b/include/linux/qcom_iommu.h
@@ -0,0 +1,388 @@
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_IOMMU_H
+#define MSM_IOMMU_H
+
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/list.h>
+#include <linux/regulator/consumer.h>
+#include <soc/qcom/socinfo.h>
+
+/* Private pgprot flag */
+#define IOMMU_PRIV 16
+
+extern pgprot_t pgprot_kernel;
+extern struct bus_type msm_iommu_sec_bus_type;
+extern struct iommu_access_ops iommu_access_ops_v0;
+extern struct iommu_access_ops iommu_access_ops_v1;
+
+/* Domain attributes */
+#define MSM_IOMMU_DOMAIN_PT_CACHEABLE 0x1
+#define MSM_IOMMU_DOMAIN_PT_SECURE 0x2
+
+/* Mask for the cache policy attribute */
+#define MSM_IOMMU_CP_MASK 0x03
+
+#define DOMAIN_ATTR_QCOM_COHERENT_HTW_DISABLE DOMAIN_ATTR_MAX
+
+/* Maximum number of Machine IDs that we are allowing to be mapped to the same
+ * context bank. The number of MIDs mapped to the same CB does not affect
+ * performance, but there is a practical limit on how many distinct MIDs may
+ * be present. These mappings are typically determined at design time and are
+ * not expected to change at run time.
+ */
+#define MAX_NUM_MIDS 32
+
+/* Maximum number of SMT entries allowed by the system */
+#define MAX_NUM_SMR 128
+
+#define MAX_NUM_BFB_REGS 32
+
+/**
+ * struct msm_iommu_dev - a single IOMMU hardware instance
+ * name Human-readable name given to this IOMMU HW instance
+ * ncb Number of context banks present on this IOMMU HW instance
+ */
+struct msm_iommu_dev {
+ const char *name;
+ int ncb;
+ int ttbr_split;
+};
+
+/**
+ * struct msm_iommu_ctx_dev - an IOMMU context bank instance
+ * name Human-readable name given to this context bank
+ * num Index of this context bank within the hardware
+ * mids List of Machine IDs that are to be mapped into this context
+ * bank, terminated by -1. The MID is a set of signals on the
+ * AXI bus that identifies the function associated with a specific
+ * memory request. (See ARM spec).
+ */
+struct msm_iommu_ctx_dev {
+ const char *name;
+ int num;
+ int mids[MAX_NUM_MIDS];
+};
+
+/**
+ * struct msm_iommu_bfb_settings - a set of IOMMU BFB tuning parameters
+ * regs An array of register offsets to configure
+ * data Values to write to corresponding registers
+ * length Number of valid entries in the offset/val arrays
+ */
+struct msm_iommu_bfb_settings {
+ unsigned int regs[MAX_NUM_BFB_REGS];
+ unsigned int data[MAX_NUM_BFB_REGS];
+ int length;
+};
+
+/**
+ * struct msm_iommu_drvdata - A single IOMMU hardware instance
+ * @base: IOMMU config port base address (VA)
+ * @glb_base: IOMMU config port base address for global register space (VA)
+ * @phys_base: IOMMU physical base address.
+ * @ncb The number of contexts on this IOMMU
+ * @irq: Interrupt number
+ * @core: The bus clock for this IOMMU hardware instance
+ * @iface: The clock for the IOMMU bus interconnect
+ * @name: Human-readable name of this IOMMU device
+ * @bfb_settings: Optional BFB performance tuning parameters
+ * @dev: Struct device this hardware instance is tied to
+ * @list: List head to link all iommus together
+ * @halt_enabled: Set to 1 if IOMMU halt is supported in the IOMMU, 0 otherwise.
+ * @ctx_attach_count: Count of how many context are attached.
+ * @bus_client : Bus client needed to vote for bus bandwidth.
+ * @needs_rem_spinlock : 1 if remote spinlock is needed, 0 otherwise
+ * @powered_on: Powered status of the IOMMU. 0 means powered off.
+ *
+ * A msm_iommu_drvdata holds the global driver data about a single piece
+ * of an IOMMU hardware instance.
+ */
+struct msm_iommu_drvdata {
+ void __iomem *base;
+ phys_addr_t phys_base;
+ void __iomem *glb_base;
+ void __iomem *cb_base;
+ void __iomem *smmu_local_base;
+ int ncb;
+ int ttbr_split;
+ struct clk *core;
+ struct clk *iface;
+ const char *name;
+ struct msm_iommu_bfb_settings *bfb_settings;
+ int sec_id;
+ struct device *dev;
+ struct list_head list;
+ int halt_enabled;
+ unsigned int ctx_attach_count;
+ unsigned int bus_client;
+ int needs_rem_spinlock;
+ int powered_on;
+ unsigned int model;
+};
+
+/**
+ * struct iommu_access_ops - Callbacks for accessing IOMMU
+ * @iommu_power_on: Turn on power to unit
+ * @iommu_power_off: Turn off power to unit
+ * @iommu_bus_vote: Vote for bus bandwidth
+ * @iommu_clk_on: Turn on clks to unit
+ * @iommu_clk_off: Turn off clks to unit
+ * @iommu_lock_initialize: Initialize the remote lock
+ * @iommu_lock_acquire: Acquire any locks needed
+ * @iommu_lock_release: Release locks needed
+ */
+struct iommu_access_ops {
+ int (*iommu_power_on)(struct msm_iommu_drvdata *);
+ void (*iommu_power_off)(struct msm_iommu_drvdata *);
+ int (*iommu_bus_vote)(struct msm_iommu_drvdata *drvdata,
+ unsigned int vote);
+ int (*iommu_clk_on)(struct msm_iommu_drvdata *);
+ void (*iommu_clk_off)(struct msm_iommu_drvdata *);
+ void * (*iommu_lock_initialize)(void);
+ void (*iommu_lock_acquire)(unsigned int need_extra_lock);
+ void (*iommu_lock_release)(unsigned int need_extra_lock);
+};
+
+void msm_iommu_add_drv(struct msm_iommu_drvdata *drv);
+void msm_iommu_remove_drv(struct msm_iommu_drvdata *drv);
+void program_iommu_bfb_settings(void __iomem *base,
+ const struct msm_iommu_bfb_settings *bfb_settings);
+void iommu_halt(const struct msm_iommu_drvdata *iommu_drvdata);
+void iommu_resume(const struct msm_iommu_drvdata *iommu_drvdata);
+
+/**
+ * struct msm_iommu_ctx_drvdata - an IOMMU context bank instance
+ * @num: Hardware context number of this context
+ * @pdev: Platform device associated wit this HW instance
+ * @attached_elm: List element for domains to track which devices are
+ * attached to them
+ * @attached_domain Domain currently attached to this context (if any)
+ * @name Human-readable name of this context device
+ * @sids List of Stream IDs mapped to this context
+ * @nsid Number of Stream IDs mapped to this context
+ * @secure_context true if this is a secure context programmed by
+ the secure environment, false otherwise
+ * @asid ASID used with this context.
+ * @attach_count Number of time this context has been attached.
+ *
+ * A msm_iommu_ctx_drvdata holds the driver data for a single context bank
+ * within each IOMMU hardware instance
+ */
+struct msm_iommu_ctx_drvdata {
+ int num;
+ struct platform_device *pdev;
+ struct list_head attached_elm;
+ struct iommu_domain *attached_domain;
+ const char *name;
+ u32 sids[MAX_NUM_SMR];
+ unsigned int nsid;
+ unsigned int secure_context;
+ int asid;
+ int attach_count;
+ u32 sid_mask[MAX_NUM_SMR];
+ unsigned int n_sid_mask;
+};
+
+enum dump_reg {
+ DUMP_REG_FIRST,
+ DUMP_REG_FAR0 = DUMP_REG_FIRST,
+ DUMP_REG_FAR1,
+ DUMP_REG_PAR0,
+ DUMP_REG_PAR1,
+ DUMP_REG_FSR,
+ DUMP_REG_FSYNR0,
+ DUMP_REG_FSYNR1,
+ DUMP_REG_TTBR0_0,
+ DUMP_REG_TTBR0_1,
+ DUMP_REG_TTBR1_0,
+ DUMP_REG_TTBR1_1,
+ DUMP_REG_SCTLR,
+ DUMP_REG_ACTLR,
+ DUMP_REG_PRRR,
+ DUMP_REG_MAIR0 = DUMP_REG_PRRR,
+ DUMP_REG_NMRR,
+ DUMP_REG_MAIR1 = DUMP_REG_NMRR,
+ DUMP_REG_CBAR_N,
+ DUMP_REG_CBFRSYNRA_N,
+ MAX_DUMP_REGS,
+};
+
+enum dump_reg_type {
+ DRT_CTX_REG,
+ DRT_GLOBAL_REG,
+ DRT_GLOBAL_REG_N,
+};
+
+enum model_id {
+ QSMMUv1 = 1,
+ QSMMUv2,
+ MMU_500 = 500,
+ MAX_MODEL,
+};
+
+struct dump_regs_tbl_entry {
+ /*
+ * To keep things context-bank-agnostic, we only store the
+ * register offset in `reg_offset'
+ */
+ unsigned int reg_offset;
+ const char *name;
+ int must_be_present;
+ enum dump_reg_type dump_reg_type;
+};
+extern struct dump_regs_tbl_entry dump_regs_tbl[MAX_DUMP_REGS];
+
+#define COMBINE_DUMP_REG(upper, lower) (((u64) upper << 32) | lower)
+
+struct msm_iommu_context_reg {
+ uint32_t val;
+ bool valid;
+};
+
+void print_ctx_regs(struct msm_iommu_context_reg regs[]);
+
+/*
+ * Interrupt handler for the IOMMU context fault interrupt. Hooking the
+ * interrupt is not supported in the API yet, but this will print an error
+ * message and dump useful IOMMU registers.
+ */
+irqreturn_t msm_iommu_global_fault_handler(int irq, void *dev_id);
+irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id);
+irqreturn_t msm_iommu_fault_handler_v2(int irq, void *dev_id);
+irqreturn_t msm_iommu_secure_fault_handler_v2(int irq, void *dev_id);
+
+enum {
+ PROC_APPS,
+ PROC_GPU,
+ PROC_MAX
+};
+
+/* Expose structure to allow kgsl iommu driver to use the same structure to
+ * communicate to GPU the addresses of the flag and turn variables.
+ */
+struct remote_iommu_petersons_spinlock {
+ uint32_t flag[PROC_MAX];
+ uint32_t turn;
+};
+
+#ifdef CONFIG_QCOM_IOMMU_V1
+void *msm_iommu_lock_initialize(void);
+void msm_iommu_mutex_lock(void);
+void msm_iommu_mutex_unlock(void);
+void msm_set_iommu_access_ops(struct iommu_access_ops *ops);
+struct iommu_access_ops *msm_get_iommu_access_ops(void);
+#else
+static inline void *msm_iommu_lock_initialize(void)
+{
+ return NULL;
+}
+static inline void msm_iommu_mutex_lock(void) { }
+static inline void msm_iommu_mutex_unlock(void) { }
+static inline void msm_set_iommu_access_ops(struct iommu_access_ops *ops)
+{
+
+}
+static inline struct iommu_access_ops *msm_get_iommu_access_ops(void)
+{
+ return NULL;
+}
+#endif
+
+#ifdef CONFIG_QCOM_IOMMU_V1
+/*
+ * Look up an IOMMU context device by its context name. NULL if none found.
+ * Useful for testing and drivers that do not yet fully have IOMMU stuff in
+ * their platform devices.
+ */
+struct device *msm_iommu_get_ctx(const char *ctx_name);
+#else
+static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
+{
+ return NULL;
+}
+#endif
+
+/*
+ * Function to program the global registers of an IOMMU securely.
+ * This should only be called on IOMMUs for which kernel programming
+ * of global registers is not possible
+ */
+void msm_iommu_sec_set_access_ops(struct iommu_access_ops *access_ops);
+int msm_iommu_sec_program_iommu(struct msm_iommu_drvdata *drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata);
+int is_vfe_secure(void);
+
+#ifdef CONFIG_MSM_IOMMU_V0
+static inline int msm_soc_version_supports_iommu_v0(void)
+{
+ static int soc_supports_v0 = -1;
+#ifdef CONFIG_OF
+ struct device_node *node;
+#endif
+
+ if (soc_supports_v0 != -1)
+ return soc_supports_v0;
+
+#ifdef CONFIG_OF
+ node = of_find_compatible_node(NULL, NULL, "qcom,msm-smmu-v0");
+ if (node) {
+ soc_supports_v0 = 1;
+ of_node_put(node);
+ return 1;
+ }
+#endif
+ if (cpu_is_msm8960() &&
+ SOCINFO_VERSION_MAJOR(socinfo_get_version()) < 2) {
+ soc_supports_v0 = 0;
+ return 0;
+ }
+
+ if (cpu_is_msm8x60() &&
+ (SOCINFO_VERSION_MAJOR(socinfo_get_version()) != 2 ||
+ SOCINFO_VERSION_MINOR(socinfo_get_version()) < 1)) {
+ soc_supports_v0 = 0;
+ return 0;
+ }
+
+ soc_supports_v0 = 1;
+ return 1;
+}
+#else
+static inline int msm_soc_version_supports_iommu_v0(void)
+{
+ return 0;
+}
+#endif
+
+int msm_iommu_get_scm_call_avail(void);
+void msm_iommu_check_scm_call_avail(void);
+
+u32 msm_iommu_get_mair0(void);
+u32 msm_iommu_get_mair1(void);
+u32 msm_iommu_get_prrr(void);
+u32 msm_iommu_get_nmrr(void);
+
+/* events for notifiers passed to msm_iommu_register_notify */
+#define TLB_SYNC_TIMEOUT 1
+
+#ifdef CONFIG_QCOM_IOMMU_V1
+void msm_iommu_register_notify(struct notifier_block *nb);
+#else
+static inline void msm_iommu_register_notify(struct notifier_block *nb)
+{
+}
+#endif
+
+#endif
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index 9e12000914b34..62d2712dfc738 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -29,6 +29,13 @@ extern bool qcom_scm_hdcp_available(void);
extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
u32 *resp);
+extern bool qcom_scm_pas_supported(u32 peripheral);
+extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size);
+extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size);
+extern int qcom_scm_pas_auth_and_reset(u32 peripheral);
+extern int qcom_scm_restart_proc(u32 pid, int restart, u32 *resp);
+extern int qcom_scm_pas_shutdown(u32 peripheral);
+
#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0
#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1
@@ -38,4 +45,28 @@ extern void qcom_scm_cpu_power_down(u32 flags);
extern u32 qcom_scm_get_version(void);
+extern int qcom_scm_pil_init_image_cmd(u32 proc, u64 image_addr);
+extern int qcom_scm_pil_mem_setup_cmd(u32 proc, u64 start_addr, u32 len);
+extern int qcom_scm_pil_auth_and_reset_cmd(u32 proc);
+extern int qcom_scm_pil_shutdown_cmd(u32 proc);
+
+extern int qcom_scm_iommu_dump_fault_regs(u32 id, u32 context, u64 addr,
+ u32 len);
+extern int qcom_scm_iommu_set_cp_pool_size(u32 size, u32 spare);
+extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, int psize[2]);
+extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
+extern int qcom_scm_iommu_secure_map(u64 list, u32 list_size, u32 size,
+ u32 id, u32 ctx_id, u64 va,
+ u32 info_size, u32 flags);
+extern int qcom_scm_iommu_secure_unmap(u32 id, u32 ctx_id, u64 va,
+ u32 size, u32 flags);
+
+extern int qcom_scm_is_call_available(u32 svc_id, u32 cmd_id);
+extern int qcom_scm_get_feat_version(u32 feat);
+extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
+
+extern int qcom_scm_set_video_state(u32 state, u32 spare);
+extern int qcom_scm_mem_protect_video_var(u32 start, u32 size,
+ u32 nonpixel_start,
+ u32 nonpixel_size);
#endif
diff --git a/include/linux/regulator/qcom_smd-regulator.h b/include/linux/regulator/qcom_smd-regulator.h
new file mode 100644
index 0000000000000..16029448d6b62
--- /dev/null
+++ b/include/linux/regulator/qcom_smd-regulator.h
@@ -0,0 +1,30 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __QCOM_SMD_REGULATOR_H_
+#define __QCOM_SMD_REGULATOR_H_
+
+#if IS_ENABLED(CONFIG_REGULATOR_QCOM_SMD_RPM)
+int qcom_rpm_set_floor(struct regulator *regulator, int floor);
+int qcom_rpm_set_corner(struct regulator *regulator, int corner);
+#else
+static inline int qcom_rpm_set_floor(struct regulator *regulator, int floor)
+{
+ return -EINVAL;
+}
+
+static inline int qcom_rpm_set_corner(struct regulator *regulator, int corner)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 9c4e1384f6360..1c457a8dd5a6a 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -365,6 +365,8 @@ enum rproc_state {
/**
* enum rproc_crash_type - remote processor crash types
* @RPROC_MMUFAULT: iommu fault
+ * @RPROC_WATCHDOG: watchdog bite
+ * @RPROC_FATAL_ERROR: fatal error
*
* Each element of the enum is used as an array index. So that, the value of
* the elements should be always something sane.
@@ -373,6 +375,8 @@ enum rproc_state {
*/
enum rproc_crash_type {
RPROC_MMUFAULT,
+ RPROC_WATCHDOG,
+ RPROC_FATAL_ERROR,
};
/**
diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h
index 2a53dcaeeeed4..ebdabd669d932 100644
--- a/include/linux/soc/qcom/smd-rpm.h
+++ b/include/linux/soc/qcom/smd-rpm.h
@@ -26,6 +26,10 @@ struct qcom_smd_rpm;
#define QCOM_SMD_RPM_SMPB 0x62706d73
#define QCOM_SMD_RPM_SPDM 0x63707362
#define QCOM_SMD_RPM_VSA 0x00617376
+#define QCOM_SMD_RPM_MMAXI_CLK 0x69786d6d
+#define QCOM_SMD_RPM_IPA_CLK 0x617069
+#define QCOM_SMD_RPM_CE_CLK 0x6563
+#define QCOM_SMD_RPM_AGGR_CLK 0x72676761
int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
int state,
diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h
index d0cb6d189a0a0..19a08d8b2e468 100644
--- a/include/linux/soc/qcom/smd.h
+++ b/include/linux/soc/qcom/smd.h
@@ -7,6 +7,7 @@
struct qcom_smd;
struct qcom_smd_channel;
struct qcom_smd_lookup;
+struct qcom_smd_device;
/**
* struct qcom_smd_id - struct used for matching a smd device
@@ -26,6 +27,8 @@ struct qcom_smd_device {
struct qcom_smd_channel *channel;
};
+typedef int (*qcom_smd_cb_t)(void *, const void *, size_t);
+
/**
* struct qcom_smd_driver - smd driver struct
* @driver: underlying device driver
@@ -42,16 +45,24 @@ struct qcom_smd_driver {
int (*probe)(struct qcom_smd_device *dev);
void (*remove)(struct qcom_smd_device *dev);
- int (*callback)(struct qcom_smd_device *, const void *, size_t);
+ qcom_smd_cb_t callback;
};
int qcom_smd_driver_register(struct qcom_smd_driver *drv);
void qcom_smd_driver_unregister(struct qcom_smd_driver *drv);
+void qcom_ipc_bus_register(struct bus_type *bus);
+
+void *qcom_smd_get_drvdata(void *ch);
+void qcom_smd_set_drvdata(void *ch, void *data);
-#define module_qcom_smd_driver(__smd_driver) \
- module_driver(__smd_driver, qcom_smd_driver_register, \
+#define module_qcom_smd_driver(__ipc_driver) \
+ module_driver(__ipc_driver, qcom_smd_driver_register, \
qcom_smd_driver_unregister)
int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len);
+struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *channel,
+ const char *name,
+ qcom_smd_cb_t cb);
+
#endif
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 017fced60242e..1b49495eedf16 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -17,7 +17,7 @@ extern int swiotlb_force;
* must be a power of 2. What is the appropriate value ?
* The complexity of {map,unmap}_single is linearly dependent on this value.
*/
-#define IO_TLB_SEGSIZE 128
+#define IO_TLB_SEGSIZE 256
/*
* log of the size of each IO TLB slab. The number of slabs is command line
diff --git a/include/soc/qcom/glink.h b/include/soc/qcom/glink.h
new file mode 100644
index 0000000000000..f67cedb8b3199
--- /dev/null
+++ b/include/soc/qcom/glink.h
@@ -0,0 +1,443 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _SOC_QCOM_GLINK_H_
+#define _SOC_QCOM_GLINK_H_
+
+#include <linux/types.h>
+
+/* Maximum size (including null) for channel, edge, or transport names */
+#define GLINK_NAME_SIZE 32
+
+/* Maximum packet size for TX and RX */
+#define GLINK_MAX_PKT_SIZE SZ_1M
+
+/**
+ * G-Link Port State Notification Values
+ */
+enum {
+ GLINK_CONNECTED,
+ GLINK_LOCAL_DISCONNECTED,
+ GLINK_REMOTE_DISCONNECTED,
+};
+
+/**
+ * G-Link Open Options
+ *
+ * Used to define the glink_open_config::options field which is passed into
+ * glink_open().
+ */
+enum {
+ GLINK_OPT_INITIAL_XPORT = BIT(0),
+ GLINK_OPT_RX_INTENT_NOTIF = BIT(1),
+};
+
+/**
+ * Open configuration.
+ *
+ * priv: Private data passed into user callbacks
+ * options: Open option flags
+ * rx_intent_req_timeout_ms: Timeout for requesting an RX intent, in
+ * milliseconds; if set to 0, timeout is infinite
+ * notify_rx: Receive notification function (required)
+ * notify_tx_done: Transmit-done notification function (required)
+ * notify_state: State-change notification (required)
+ * notify_rx_intent_req: Receive intent request (optional)
+ * notify_rxv: Receive notification function for vector buffers
+ * (required if notify_rx is not provided)
+ * notify_sig: Signal-change notification (optional)
+ * notify_rx_tracer_pkt: Receive notification for tracer packet
+ * notify_remote_rx_intent: Receive notification for remote-queued RX intent
+ *
+ * This structure is passed into the glink_open() call to setup
+ * configuration handles. All unused fields should be set to 0.
+ *
+ * The structure is copied internally before the call to glink_open() returns.
+ */
+struct glink_open_config {
+ void *priv;
+ uint32_t options;
+
+ const char *transport;
+ const char *edge;
+ const char *name;
+ unsigned int rx_intent_req_timeout_ms;
+
+ int (*notify_rx)(void *handle, const void *ptr, size_t size);
+ void (*notify_tx_done)(void *handle, const void *priv,
+ const void *pkt_priv, const void *ptr);
+ void (*notify_state)(void *handle, const void *priv, unsigned event);
+ bool (*notify_rx_intent_req)(void *handle, const void *priv,
+ size_t req_size);
+ void (*notify_rxv)(void *handle, const void *priv, const void *pkt_priv,
+ void *iovec, size_t size,
+ void * (*vbuf_provider)(void *iovec, size_t offset,
+ size_t *size),
+ void * (*pbuf_provider)(void *iovec, size_t offset,
+ size_t *size));
+ void (*notify_rx_sigs)(void *handle, const void *priv,
+ uint32_t old_sigs, uint32_t new_sigs);
+ void (*notify_rx_abort)(void *handle, const void *priv,
+ const void *pkt_priv);
+ void (*notify_tx_abort)(void *handle, const void *priv,
+ const void *pkt_priv);
+ void (*notify_rx_tracer_pkt)(void *handle, const void *priv,
+ const void *pkt_priv, const void *ptr, size_t size);
+ void (*notify_remote_rx_intent)(void *handle, const void *priv,
+ size_t size);
+};
+
+enum glink_link_state {
+ GLINK_LINK_STATE_UP,
+ GLINK_LINK_STATE_DOWN,
+};
+
+/**
+ * Data structure containing information during Link State callback
+ * transport: String identifying the transport.
+ * edge: String identifying the edge.
+ * link_state: Link state (UP/DOWN).
+ */
+struct glink_link_state_cb_info {
+ const char *transport;
+ const char *edge;
+ enum glink_link_state link_state;
+};
+
+/**
+ * Data structure containing information for link state registration
+ * transport: String identifying the transport.
+ * edge: String identifying the edge.
+ * glink_link_state_notif_cb: Callback function used to pass the event.
+ */
+struct glink_link_info {
+ const char *transport;
+ const char *edge;
+ void (*glink_link_state_notif_cb)(
+ struct glink_link_state_cb_info *cb_info,
+ void *priv);
+};
+
+enum tx_flags {
+ GLINK_TX_REQ_INTENT = 0x1,
+ GLINK_TX_SINGLE_THREADED = 0x2,
+ GLINK_TX_TRACER_PKT = 0x4,
+ GLINK_TX_ATOMIC = 0x8,
+};
+
+#if IS_ENABLED(CONFIG_MSM_GLINK)
+/**
+ * Open GLINK channel.
+ *
+ * @cfg_ptr: Open configuration structure (the structure is copied before
+ * glink_open returns). All unused fields should be zero-filled.
+ *
+ * This should not be called from link state callback context by clients.
+ * It is recommended that client should invoke this function from their own
+ * thread.
+ *
+ * Return: Pointer to channel on success, PTR_ERR() with standard Linux
+ * error code on failure.
+ */
+void *glink_open(const struct glink_open_config *cfg_ptr);
+
+/**
+ * glink_close() - Close a previously opened channel.
+ *
+ * @handle: handle to close
+ *
+ * Once the closing process has been completed, the GLINK_LOCAL_DISCONNECTED
+ * state event will be sent and the channel can be reopened.
+ *
+ * Return: 0 on success; -EINVAL for invalid handle, -EBUSY if close is
+ * already in progress, standard Linux Error code otherwise.
+ */
+int glink_close(void *handle);
+
+/**
+ * glink_tx() - Transmit packet.
+ *
+ * @handle: handle returned by glink_open()
+ * @pkt_priv: opaque data value that will be returned to client with
+ * notify_tx_done notification
+ * @data: pointer to the data
+ * @size: size of data
+ * @tx_flags: Flags to specify transmit specific options
+ *
+ * Return: -EINVAL for invalid handle; -EBUSY if channel isn't ready for
+ * transmit operation (not fully opened); -EAGAIN if remote side
+ * has not provided a receive intent that is big enough.
+ */
+int glink_tx(void *handle, void *pkt_priv, void *data, size_t size,
+ uint32_t tx_flags);
+
+/**
+ * glink_queue_rx_intent() - Register an intent to receive data.
+ *
+ * @handle: handle returned by glink_open()
+ * @pkt_priv: opaque data type that is returned when a packet is received
+ * size: maximum size of data to receive
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_queue_rx_intent(void *handle, const void *pkt_priv, size_t size);
+
+/**
+ * glink_rx_intent_exists() - Check if an intent of size exists.
+ *
+ * @handle: handle returned by glink_open()
+ * @size: size of an intent to check or 0 for any intent
+ *
+ * Return: true if an intent with size greater than or equal to the
+ * requested size exists, false otherwise
+ */
+bool glink_rx_intent_exists(void *handle, size_t size);
+
+/**
+ * glink_rx_done() - Return receive buffer to remote side.
+ *
+ * @handle: handle returned by glink_open()
+ * @ptr: data pointer provided in the notify_rx() call
+ * @reuse: if true, receive intent is re-used
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_rx_done(void *handle, const void *ptr, bool reuse);
+
+/**
+ * glink_txv() - Transmit a packet in vector form.
+ *
+ * @handle: handle returned by glink_open()
+ * @pkt_priv: opaque data value that will be returned to client with
+ * notify_tx_done notification
+ * @iovec: pointer to the vector (must remain valid until notify_tx_done
+ * notification)
+ * @size: size of data/vector
+ * @vbuf_provider: Client provided helper function to iterate the vector
+ * in physical address space
+ * @pbuf_provider: Client provided helper function to iterate the vector
+ * in virtual address space
+ * @tx_flags: Flags to specify transmit specific options
+ *
+ * Return: -EINVAL for invalid handle; -EBUSY if channel isn't ready for
+ * transmit operation (not fully opened); -EAGAIN if remote side has
+ * not provided a receive intent that is big enough.
+ */
+int glink_txv(void *handle, void *pkt_priv,
+ void *iovec, size_t size,
+ void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size),
+ void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size),
+ uint32_t tx_flags);
+
+/**
+ * glink_sigs_set() - Set the local signals for the GLINK channel
+ *
+ * @handle: handle returned by glink_open()
+ * @sigs: modified signal value
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_sigs_set(void *handle, uint32_t sigs);
+
+/**
+ * glink_sigs_local_get() - Get the local signals for the GLINK channel
+ *
+ * handle: handle returned by glink_open()
+ * sigs: Pointer to hold the signals
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_sigs_local_get(void *handle, uint32_t *sigs);
+
+/**
+ * glink_sigs_remote_get() - Get the Remote signals for the GLINK channel
+ *
+ * handle: handle returned by glink_open()
+ * sigs: Pointer to hold the signals
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_sigs_remote_get(void *handle, uint32_t *sigs);
+
+/**
+ * glink_register_link_state_cb() - Register for link state notification
+ * @link_info: Data structure containing the link identification and callback.
+ * @priv: Private information to be passed with the callback.
+ *
+ * This function is used to register a notifier to receive the updates about a
+ * link's/transport's state. This notifier needs to be registered first before
+ * an attempt to open a channel.
+ *
+ * Return: a reference to the notifier handle.
+ */
+void *glink_register_link_state_cb(struct glink_link_info *link_info,
+ void *priv);
+
+/**
+ * glink_unregister_link_state_cb() - Unregister the link state notification
+ * notif_handle: Handle to be unregistered.
+ *
+ * This function is used to unregister a notifier to stop receiving the updates
+ * about a link's/transport's state.
+ */
+void glink_unregister_link_state_cb(void *notif_handle);
+
+/**
+ * glink_qos_latency() - Register the latency QoS requirement
+ * @handle: Channel handle in which the latency is required.
+ * @latency_us: Latency requirement in units of micro-seconds.
+ * @pkt_size: Worst case packet size for which the latency is required.
+ *
+ * This function is used to register the latency requirement for a channel
+ * and ensures that the latency requirement for this channel is met without
+ * impacting the existing latency requirements of other channels.
+ *
+ * Return: 0 if QoS request is achievable, standard Linux error codes on error
+ */
+int glink_qos_latency(void *handle, unsigned long latency_us, size_t pkt_size);
+
+/**
+ * glink_qos_cancel() - Cancel or unregister the QoS request
+ * @handle: Channel handle for which the QoS request is cancelled.
+ *
+ * This function is used to cancel/unregister the QoS requests for a channel.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_qos_cancel(void *handle);
+
+/**
+ * glink_qos_start() - Start of the transmission requiring QoS
+ * @handle: Channel handle in which the transmit activity is performed.
+ *
+ * This function is called by the clients to indicate G-Link regarding the
+ * start of the transmission which requires a certain QoS. The clients
+ * must account for the QoS ramp time to ensure meeting the QoS.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_qos_start(void *handle);
+
+/**
+ * glink_qos_get_ramp_time() - Get the QoS ramp time
+ * @handle: Channel handle for which the QoS ramp time is required.
+ * @pkt_size: Worst case packet size.
+ *
+ * This function is called by the clients to obtain the ramp time required
+ * to meet the QoS requirements.
+ *
+ * Return: QoS ramp time is returned in units of micro-seconds
+ */
+unsigned long glink_qos_get_ramp_time(void *handle, size_t pkt_size);
+
+void *qcom_glink_get_drvdata(void *ch);
+void qcom_glink_set_drvdata(void *ch, void *data);
+int qcom_glink_driver_register(void *drv);
+void qcom_glink_driver_unregister(void *drv);
+#else /* CONFIG_MSM_GLINK */
+static inline void *glink_open(const struct glink_open_config *cfg_ptr)
+{
+ return NULL;
+}
+
+static inline int glink_close(void *handle)
+{
+ return -ENODEV;
+}
+
+static inline int glink_tx(void *handle, void *pkt_priv, void *data,
+ size_t size, uint32_t tx_flags)
+{
+ return -ENODEV;
+}
+
+static inline int glink_queue_rx_intent(void *handle, const void *pkt_priv,
+ size_t size)
+{
+ return -ENODEV;
+}
+
+/* Stub: with MSM_GLINK disabled no intent can exist, so report false
+ * (returning -ENODEV here would convert to `true` and mislead callers).
+ */
+static inline bool glink_rx_intent_exists(void *handle, size_t size)
+{
+	return false;
+}
+
+static inline int glink_rx_done(void *handle, const void *ptr, bool reuse)
+{
+ return -ENODEV;
+}
+
+static inline int glink_txv(void *handle, void *pkt_priv,
+ void *iovec, size_t size,
+ void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size),
+ void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size),
+ uint32_t tx_flags)
+{
+ return -ENODEV;
+}
+
+static inline int glink_sigs_set(void *handle, uint32_t sigs)
+{
+ return -ENODEV;
+}
+
+static inline int glink_sigs_local_get(void *handle, uint32_t *sigs)
+{
+ return -ENODEV;
+}
+
+static inline int glink_sigs_remote_get(void *handle, uint32_t *sigs)
+{
+ return -ENODEV;
+}
+
+static inline void *glink_register_link_state_cb(
+ struct glink_link_info *link_info, void *priv)
+{
+ return NULL;
+}
+
+static inline void glink_unregister_link_state_cb(void *notif_handle)
+{
+}
+
+static inline int glink_qos_latency(void *handle, unsigned long latency_us,
+ size_t pkt_size)
+{
+ return -ENODEV;
+}
+
+static inline int glink_qos_cancel(void *handle)
+{
+ return -ENODEV;
+}
+
+static inline int glink_qos_start(void *handle)
+{
+ return -ENODEV;
+}
+
+static inline unsigned long glink_qos_get_ramp_time(void *handle,
+ size_t pkt_size)
+{
+ return 0;
+}
+
+/* Stubs must be static inline: non-static definitions in a header would
+ * produce multiple-definition link errors in every including TU. The
+ * register stub also has to return a value (falling off the end of a
+ * non-void function is UB when the result is used); -ENODEV matches the
+ * other disabled-GLINK stubs above.
+ */
+static inline void *qcom_glink_get_drvdata(void *ch) { return NULL; }
+static inline void qcom_glink_set_drvdata(void *ch, void *data) {}
+static inline int qcom_glink_driver_register(void *drv) { return -ENODEV; }
+static inline void qcom_glink_driver_unregister(void *drv) {}
+#endif /* CONFIG_MSM_GLINK */
+#endif /* _SOC_QCOM_GLINK_H_ */
diff --git a/include/soc/qcom/glink_rpm_xprt.h b/include/soc/qcom/glink_rpm_xprt.h
new file mode 100644
index 0000000000000..8dfd43783e574
--- /dev/null
+++ b/include/soc/qcom/glink_rpm_xprt.h
@@ -0,0 +1,78 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _SOC_QCOM_GLINK_RPM_XPRT_H_
+#define _SOC_QCOM_GLINK_RPM_XPRT_H_
+
+#include <linux/types.h>
+
+#ifdef CONFIG_MSM_GLINK
+
+/**
+ * glink_rpm_rx_poll() - Poll and receive any available events
+ * @handle: Channel handle in which this operation is performed.
+ *
+ * This function is used to poll and receive events and packets while the
+ * receive interrupt from RPM is disabled.
+ *
+ * Note that even if a return value > 0 is returned indicating that some events
+ * were processed, clients should only use the notification functions passed
+ * into glink_open() to determine if an entire packet has been received since
+ * some events may be internal details that are not visible to clients.
+ *
+ * Return: 0 for no packets available; > 0 for events available; standard
+ * Linux error codes on failure.
+ */
+int glink_rpm_rx_poll(void *handle);
+
+/**
+ * glink_rpm_mask_rx_interrupt() - Mask or unmask the RPM receive interrupt
+ * @handle: Channel handle in which this operation is performed.
+ * @mask: Flag to mask or unmask the interrupt.
+ * @pstruct: Pointer to any platform specific data.
+ *
+ * This function is used to mask or unmask the receive interrupt from RPM.
+ * "mask" set to true indicates masking the interrupt and when set to false
+ * indicates unmasking the interrupt.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int glink_rpm_mask_rx_interrupt(void *handle, bool mask, void *pstruct);
+
+/**
+ * glink_wait_link_down() - Return whether read/write indices in FIFO are all 0.
+ * @handle: Channel handle in which this operation is performed.
+ *
+ * This function returns the status of the read/write indices in the FIFO.
+ *
+ * Return: 1 if the indices are all 0, 0 otherwise.
+ */
+int glink_wait_link_down(void *handle);
+
+#else
+static inline int glink_rpm_rx_poll(void *handle)
+{
+ return -ENODEV;
+}
+
+static inline int glink_rpm_mask_rx_interrupt(void *handle, bool mask,
+ void *pstruct)
+{
+ return -ENODEV;
+}
+static inline int glink_wait_link_down(void *handle)
+{
+ return -ENODEV;
+}
+
+#endif /* CONFIG_MSM_GLINK */
+
+#endif /* _SOC_QCOM_GLINK_RPM_XPRT_H_ */
diff --git a/include/soc/qcom/rpm-notifier.h b/include/soc/qcom/rpm-notifier.h
new file mode 100644
index 0000000000000..ea6d95e313a8d
--- /dev/null
+++ b/include/soc/qcom/rpm-notifier.h
@@ -0,0 +1,63 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __ARCH_ARM_MACH_MSM_RPM_NOTIF_H
+#define __ARCH_ARM_MACH_MSM_RPM_NOTIF_H
+
+struct msm_rpm_notifier_data {
+ uint32_t rsc_type;
+ uint32_t rsc_id;
+ uint32_t key;
+ uint32_t size;
+ uint8_t *value;
+};
+/**
+ * msm_rpm_register_notifier - Register for sleep set notifications
+ *
+ * @nb - notifier block to register
+ *
+ * return 0 on success, errno on failure.
+ */
+int msm_rpm_register_notifier(struct notifier_block *nb);
+
+/**
+ * msm_rpm_unregister_notifier - Unregister previously registered notifications
+ *
+ * @nb - notifier block to unregister
+ *
+ * return 0 on success, errno on failure.
+ */
+int msm_rpm_unregister_notifier(struct notifier_block *nb);
+
+/**
+ * msm_rpm_enter_sleep - Notify RPM driver to prepare for entering sleep
+ *
+ * @print - flag to enable printing the contents of the sleep buffer.
+ * @cpumask - cpumask of next wakeup cpu
+ *
+ * return 0 on success errno on failure.
+ */
+int msm_rpm_enter_sleep(bool print, const struct cpumask *cpumask);
+
+/**
+ * msm_rpm_exit_sleep - Notify RPM driver about resuming from power collapse
+ */
+void msm_rpm_exit_sleep(void);
+
+/**
+ * msm_rpm_waiting_for_ack - Indicate if there is RPM message
+ * pending acknowledgement.
+ * returns true for pending messages and false otherwise
+ */
+bool msm_rpm_waiting_for_ack(void);
+
+#endif /*__ARCH_ARM_MACH_MSM_RPM_NOTIF_H */
diff --git a/include/soc/qcom/rpm-smd.h b/include/soc/qcom/rpm-smd.h
new file mode 100644
index 0000000000000..020ffe3082847
--- /dev/null
+++ b/include/soc/qcom/rpm-smd.h
@@ -0,0 +1,309 @@
+/* Copyright (c) 2012, 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_RPM_SMD_H
+#define __ARCH_ARM_MACH_MSM_RPM_SMD_H
+
+/**
+ * enum msm_rpm_set - RPM enumerations for sleep/active set
+ * %MSM_RPM_CTX_ACTIVE_SET: Set resource parameters for active mode.
+ * %MSM_RPM_CTX_SLEEP_SET: Set resource parameters for sleep.
+ */
+enum msm_rpm_set {
+ MSM_RPM_CTX_ACTIVE_SET,
+ MSM_RPM_CTX_SLEEP_SET,
+};
+
+struct msm_rpm_request;
+
+struct msm_rpm_kvp {
+ uint32_t key;
+ uint32_t length;
+ uint8_t *data;
+};
+#ifdef CONFIG_MSM_RPM_SMD
+/**
+ * msm_rpm_create_request() - Creates a parent element to identify the
+ * resource on the RPM, that stores the KVPs for different fields modified
+ * for a hardware resource
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @num_elements: number of KVPs pairs associated with the resource
+ *
+ * returns pointer to a msm_rpm_request on success, NULL on error
+ */
+struct msm_rpm_request *msm_rpm_create_request(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements);
+
+/**
+ * msm_rpm_create_request_noirq() - Creates a parent element to identify the
+ * resource on the RPM, that stores the KVPs for different fields modified
+ * for a hardware resource. This function is similar to msm_rpm_create_request
+ * except that it has to be called with interrupts masked.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @num_elements: number of KVPs pairs associated with the resource
+ *
+ * returns pointer to a msm_rpm_request on success, NULL on error
+ */
+struct msm_rpm_request *msm_rpm_create_request_noirq(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements);
+
+/**
+ * msm_rpm_add_kvp_data() - Adds a Key value pair to an existing RPM resource.
+ *
+ * @handle: RPM resource handle to which the data should be appended
+ * @key: unsigned integer identify the parameter modified
+ * @data: byte array that contains the value corresponding to key.
+ * @size: size of data in bytes.
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int size);
+
+/**
+ * msm_rpm_add_kvp_data_noirq() - Adds a Key value pair to an existing RPM
+ * resource. This function is similar to msm_rpm_add_kvp_data except that it
+ * has to be called with interrupts masked.
+ *
+ * @handle: RPM resource handle to which the data should be appended
+ * @key: unsigned integer identify the parameter modified
+ * @data: byte array that contains the value corresponding to key.
+ * @size: size of data in bytes.
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int size);
+
+/** msm_rpm_free_request() - clean up the RPM request handle created with
+ * msm_rpm_create_request
+ *
+ * @handle: RPM resource handle to be cleared.
+ */
+
+void msm_rpm_free_request(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_send_request() - Send the RPM messages using SMD. The function
+ * assigns a message id before sending the data out to the RPM. RPM hardware
+ * uses the message id to acknowledge the messages.
+ *
+ * @handle: pointer to the msm_rpm_request for the resource being modified.
+ *
+ * returns non-zero message id on success and zero on a failed transaction.
+ * The drivers use message id to wait for ACK from RPM.
+ */
+int msm_rpm_send_request(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_send_request_noack() - Send the RPM messages using SMD. The function
+ * assigns a message id before sending the data out to the RPM. RPM hardware
+ * uses the message id to acknowledge the messages, but this API does not wait
+ * on the ACK for this message id and it does not add the message id to the wait
+ * list.
+ *
+ * @handle: pointer to the msm_rpm_request for the resource being modified.
+ *
+ * returns NULL on success and PTR_ERR on a failed transaction.
+ */
+void *msm_rpm_send_request_noack(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_send_request_noirq() - Send the RPM messages using SMD. The
+ * function assigns a message id before sending the data out to the RPM.
+ * RPM hardware uses the message id to acknowledge the messages. This function
+ * is similar to msm_rpm_send_request except that it has to be called with
+ * interrupts masked.
+ *
+ * @handle: pointer to the msm_rpm_request for the resource being modified.
+ *
+ * returns non-zero message id on success and zero on a failed transaction.
+ * The drivers use message id to wait for ACK from RPM.
+ */
+int msm_rpm_send_request_noirq(struct msm_rpm_request *handle);
+
+/**
+ * msm_rpm_wait_for_ack() - A blocking call that waits for acknowledgment of
+ * a message from RPM.
+ *
+ * @msg_id: the return from msm_rpm_send_request
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_wait_for_ack(uint32_t msg_id);
+
+/**
+ * msm_rpm_wait_for_ack_noirq() - A blocking call that waits for acknowledgment
+ * of a message from RPM. This function is similar to msm_rpm_wait_for_ack
+ * except that it has to be called with interrupts masked.
+ *
+ * @msg_id: the return from msm_rpm_send_request
+ *
+ * returns 0 on success or errno
+ */
+int msm_rpm_wait_for_ack_noirq(uint32_t msg_id);
+
+/**
+ * msm_rpm_send_message() -Wrapper function for clients to send data given an
+ * array of key value pairs.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @kvp: array of KVP data.
+ * @nelem: number of KVPs pairs associated with the message.
+ *
+ * returns 0 on success and errno on failure.
+ */
+int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
+
+/**
+ * msm_rpm_send_message_noack() -Wrapper function for clients to send data
+ * given an array of key value pairs without waiting for ack.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @kvp: array of KVP data.
+ * @nelem: number of KVPs pairs associated with the message.
+ *
+ * returns NULL on success and PTR_ERR(errno) on failure.
+ */
+void *msm_rpm_send_message_noack(enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
+
+/**
+ * msm_rpm_send_message_noirq() -Wrapper function for clients to send data
+ * given an array of key value pairs. This function is similar to the
+ * msm_rpm_send_message() except that it has to be called with interrupts
+ * disabled. Clients should choose the irq version when possible for system
+ * performance.
+ *
+ * @set: if the device is setting the active/sleep set parameter
+ * for the resource
+ * @rsc_type: unsigned 32 bit integer that identifies the type of the resource
+ * @rsc_id: unsigned 32 bit that uniquely identifies a resource within a type
+ * @kvp: array of KVP data.
+ * @nelem: number of KVPs pairs associated with the message.
+ *
+ * returns 0 on success and errno on failure.
+ */
+int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems);
+
+/**
+ * msm_rpm_driver_init() - Initialization function that registers for a
+ * rpm platform driver.
+ *
+ * returns 0 on success.
+ */
+int __init msm_rpm_driver_init(void);
+
+#else
+
+static inline struct msm_rpm_request *msm_rpm_create_request(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements)
+{
+ return NULL;
+}
+
+static inline struct msm_rpm_request *msm_rpm_create_request_noirq(
+ enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, int num_elements)
+{
+ return NULL;
+
+}
+static inline uint32_t msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
+ uint32_t key, const uint8_t *data, int count)
+{
+ return 0;
+}
+static inline uint32_t msm_rpm_add_kvp_data_noirq(
+ struct msm_rpm_request *handle, uint32_t key,
+ const uint8_t *data, int count)
+{
+ return 0;
+}
+
+static inline void msm_rpm_free_request(struct msm_rpm_request *handle)
+{
+ return;
+}
+
+static inline int msm_rpm_send_request(struct msm_rpm_request *handle)
+{
+ return 0;
+}
+
+static inline int msm_rpm_send_request_noirq(struct msm_rpm_request *handle)
+{
+ return 0;
+
+}
+
+static inline void *msm_rpm_send_request_noack(struct msm_rpm_request *handle)
+{
+ return NULL;
+}
+
+static inline int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
+ uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
+{
+ return 0;
+}
+
+static inline int msm_rpm_send_message_noirq(enum msm_rpm_set set,
+ uint32_t rsc_type, uint32_t rsc_id, struct msm_rpm_kvp *kvp,
+ int nelems)
+{
+ return 0;
+}
+
+static inline void *msm_rpm_send_message_noack(enum msm_rpm_set set,
+ uint32_t rsc_type, uint32_t rsc_id, struct msm_rpm_kvp *kvp,
+ int nelems)
+{
+ return NULL;
+}
+
+static inline int msm_rpm_wait_for_ack(uint32_t msg_id)
+{
+ return 0;
+
+}
+static inline int msm_rpm_wait_for_ack_noirq(uint32_t msg_id)
+{
+ return 0;
+}
+
+static inline int __init msm_rpm_driver_init(void)
+{
+ return 0;
+}
+#endif
+#endif /*__ARCH_ARM_MACH_MSM_RPM_SMD_H*/
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
new file mode 100644
index 0000000000000..29715506ed463
--- /dev/null
+++ b/include/soc/qcom/socinfo.h
@@ -0,0 +1,610 @@
+/* Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_SOCINFO_H_
+#define _ARCH_ARM_MACH_MSM_SOCINFO_H_
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/of_fdt.h>
+#include <linux/of.h>
+
+#include <asm/cputype.h>
+/*
+ * SOC version type with major number in the upper 16 bits and minor
+ * number in the lower 16 bits. For example:
+ * 1.0 -> 0x00010000
+ * 2.3 -> 0x00020003
+ */
+#define SOCINFO_VERSION_MAJOR(ver) ((ver & 0xffff0000) >> 16)
+#define SOCINFO_VERSION_MINOR(ver) (ver & 0x0000ffff)
+
+#ifdef CONFIG_OF
+#define of_board_is_cdp() of_machine_is_compatible("qcom,cdp")
+#define of_board_is_sim() of_machine_is_compatible("qcom,sim")
+#define of_board_is_rumi() of_machine_is_compatible("qcom,rumi")
+#define of_board_is_fluid() of_machine_is_compatible("qcom,fluid")
+#define of_board_is_liquid() of_machine_is_compatible("qcom,liquid")
+#define of_board_is_dragonboard() \
+ of_machine_is_compatible("qcom,dragonboard")
+#define of_board_is_cdp() of_machine_is_compatible("qcom,cdp")
+#define of_board_is_mtp() of_machine_is_compatible("qcom,mtp")
+#define of_board_is_qrd() of_machine_is_compatible("qcom,qrd")
+#define of_board_is_xpm() of_machine_is_compatible("qcom,xpm")
+#define of_board_is_skuf() of_machine_is_compatible("qcom,skuf")
+#define of_board_is_sbc() of_machine_is_compatible("qcom,sbc")
+
+#define machine_is_msm8974() of_machine_is_compatible("qcom,msm8974")
+#define machine_is_msm9625() of_machine_is_compatible("qcom,msm9625")
+#define machine_is_msm8610() of_machine_is_compatible("qcom,msm8610")
+#define machine_is_msm8226() of_machine_is_compatible("qcom,msm8226")
+#define machine_is_apq8074() of_machine_is_compatible("qcom,apq8074")
+#define machine_is_msm8926() of_machine_is_compatible("qcom,msm8926")
+
+#define early_machine_is_msm8610() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8610")
+#define early_machine_is_msmferrum() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmferrum")
+#define early_machine_is_msm8916() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8916")
+#define early_machine_is_msm8936() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8936")
+#define early_machine_is_msm8939() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8939")
+#define early_machine_is_apq8084() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,apq8084")
+#define early_machine_is_mdm9630() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,mdm9630")
+#define early_machine_is_msmzirc() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmzirc")
+#define early_machine_is_fsm9900() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,fsm9900")
+#define early_machine_is_msm8994() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8994")
+#define early_machine_is_msm8992() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8992")
+#define early_machine_is_fsm9010() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,fsm9010")
+#else
+#define of_board_is_sim() 0
+#define of_board_is_rumi() 0
+#define of_board_is_fluid() 0
+#define of_board_is_liquid() 0
+#define of_board_is_dragonboard() 0
+#define of_board_is_cdp() 0
+#define of_board_is_mtp() 0
+#define of_board_is_qrd() 0
+#define of_board_is_xpm() 0
+#define of_board_is_skuf() 0
+#define of_board_is_sbc() 0
+
+#define machine_is_msm8974() 0
+#define machine_is_msm9625() 0
+#define machine_is_msm8610() 0
+#define machine_is_msm8226() 0
+#define machine_is_apq8074() 0
+#define machine_is_msm8926() 0
+
+#define early_machine_is_msm8610() 0
+#define early_machine_is_msmferrum() 0
+#define early_machine_is_msm8916() 0
+#define early_machine_is_msm8936() 0
+#define early_machine_is_msm8939() 0
+#define early_machine_is_apq8084() 0
+#define early_machine_is_mdm9630() 0
+#define early_machine_is_fsm9900() 0
+#define early_machine_is_fsm9010() 0
+#endif
+
+#define PLATFORM_SUBTYPE_MDM 1
+#define PLATFORM_SUBTYPE_INTERPOSERV3 2
+#define PLATFORM_SUBTYPE_SGLTE 6
+
+enum msm_cpu {
+ MSM_CPU_UNKNOWN = 0,
+ MSM_CPU_7X01,
+ MSM_CPU_7X25,
+ MSM_CPU_7X27,
+ MSM_CPU_8X50,
+ MSM_CPU_8X50A,
+ MSM_CPU_7X30,
+ MSM_CPU_8X55,
+ MSM_CPU_8X60,
+ MSM_CPU_8960,
+ MSM_CPU_8960AB,
+ MSM_CPU_7X27A,
+ FSM_CPU_9XXX,
+ MSM_CPU_7X25A,
+ MSM_CPU_7X25AA,
+ MSM_CPU_7X25AB,
+ MSM_CPU_8064,
+ MSM_CPU_8064AB,
+ MSM_CPU_8064AA,
+ MSM_CPU_8930,
+ MSM_CPU_8930AA,
+ MSM_CPU_8930AB,
+ MSM_CPU_7X27AA,
+ MSM_CPU_9615,
+ MSM_CPU_8974,
+ MSM_CPU_8974PRO_AA,
+ MSM_CPU_8974PRO_AB,
+ MSM_CPU_8974PRO_AC,
+ MSM_CPU_8627,
+ MSM_CPU_8625,
+ MSM_CPU_9625,
+ MSM_CPU_FERRUM,
+ MSM_CPU_8916,
+ MSM_CPU_8936,
+ MSM_CPU_8939,
+ MSM_CPU_8226,
+ MSM_CPU_8610,
+ MSM_CPU_8625Q,
+ MSM_CPU_8084,
+ MSM_CPU_9630,
+ FSM_CPU_9900,
+ MSM_CPU_ZIRC,
+ MSM_CPU_8994,
+ MSM_CPU_8992,
+ FSM_CPU_9010,
+};
+
+struct msm_soc_info {
+ enum msm_cpu generic_soc_type;
+ char *soc_id_string;
+};
+
+enum pmic_model {
+ PMIC_MODEL_PM8058 = 13,
+ PMIC_MODEL_PM8028 = 14,
+ PMIC_MODEL_PM8901 = 15,
+ PMIC_MODEL_PM8027 = 16,
+ PMIC_MODEL_ISL_9519 = 17,
+ PMIC_MODEL_PM8921 = 18,
+ PMIC_MODEL_PM8018 = 19,
+ PMIC_MODEL_PM8015 = 20,
+ PMIC_MODEL_PM8014 = 21,
+ PMIC_MODEL_PM8821 = 22,
+ PMIC_MODEL_PM8038 = 23,
+ PMIC_MODEL_PM8922 = 24,
+ PMIC_MODEL_PM8917 = 25,
+ PMIC_MODEL_UNKNOWN = 0xFFFFFFFF
+};
+
+enum msm_cpu socinfo_get_msm_cpu(void);
+uint32_t socinfo_get_id(void);
+uint32_t socinfo_get_version(void);
+uint32_t socinfo_get_raw_id(void);
+char *socinfo_get_build_id(void);
+uint32_t socinfo_get_platform_type(void);
+uint32_t socinfo_get_platform_subtype(void);
+uint32_t socinfo_get_platform_version(void);
+enum pmic_model socinfo_get_pmic_model(void);
+uint32_t socinfo_get_pmic_die_revision(void);
+int __init socinfo_init(void) __must_check;
+const int read_msm_cpu_type(void);
+const int get_core_count(void);
+const int cpu_is_krait(void);
+const int cpu_is_krait_v1(void);
+const int cpu_is_krait_v2(void);
+const int cpu_is_krait_v3(void);
+
+static inline int cpu_is_msm7x01(void)
+{
+#ifdef CONFIG_ARCH_MSM7X01A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X01;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x25(void)
+{
+#ifdef CONFIG_ARCH_MSM7X25
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X25;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x27(void)
+{
+#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A)
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X27;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x27a(void)
+{
+#ifdef CONFIG_ARCH_MSM7X27A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X27A;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x27aa(void)
+{
+#ifdef CONFIG_ARCH_MSM7X27A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X27AA;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x25a(void)
+{
+#ifdef CONFIG_ARCH_MSM7X27A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X25A;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x25aa(void)
+{
+#ifdef CONFIG_ARCH_MSM7X27A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X25AA;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x25ab(void)
+{
+#ifdef CONFIG_ARCH_MSM7X27A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X25AB;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x30(void)
+{
+#ifdef CONFIG_ARCH_MSM7X30
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X30;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_qsd8x50(void)
+{
+#ifdef CONFIG_ARCH_QSD8X50
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8X50;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8x55(void)
+{
+#ifdef CONFIG_ARCH_MSM7X30
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8X55;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8x60(void)
+{
+#ifdef CONFIG_ARCH_MSM8X60
+ return read_msm_cpu_type() == MSM_CPU_8X60;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8960(void)
+{
+ return 0;
+}
+
+static inline int cpu_is_msm8960ab(void)
+{
+ return 0;
+}
+
+static inline int cpu_is_apq8064(void)
+{
+#ifdef CONFIG_ARCH_APQ8064
+ return read_msm_cpu_type() == MSM_CPU_8064;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_apq8064ab(void)
+{
+#ifdef CONFIG_ARCH_APQ8064
+ return read_msm_cpu_type() == MSM_CPU_8064AB;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_apq8064aa(void)
+{
+#ifdef CONFIG_ARCH_APQ8064
+ return read_msm_cpu_type() == MSM_CPU_8064AA;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8930(void)
+{
+#ifdef CONFIG_ARCH_MSM8930
+ return read_msm_cpu_type() == MSM_CPU_8930;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8930aa(void)
+{
+#ifdef CONFIG_ARCH_MSM8930
+ return read_msm_cpu_type() == MSM_CPU_8930AA;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8930ab(void)
+{
+#ifdef CONFIG_ARCH_MSM8930
+ return read_msm_cpu_type() == MSM_CPU_8930AB;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8627(void)
+{
+/* 8930 and 8627 will share the same CONFIG_ARCH type unless otherwise needed */
+#ifdef CONFIG_ARCH_MSM8930
+ return read_msm_cpu_type() == MSM_CPU_8627;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_fsm9xxx(void)
+{
+#ifdef CONFIG_ARCH_FSM9XXX
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == FSM_CPU_9XXX;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm9615(void)
+{
+#ifdef CONFIG_ARCH_MSM9615
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_9615;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8625(void)
+{
+#ifdef CONFIG_ARCH_MSM8625
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8625;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8974(void)
+{
+#ifdef CONFIG_ARCH_MSM8974
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8974;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8974pro_aa(void)
+{
+#ifdef CONFIG_ARCH_MSM8974
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8974PRO_AA;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8974pro_ab(void)
+{
+#ifdef CONFIG_ARCH_MSM8974
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8974PRO_AB;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8974pro_ac(void)
+{
+#ifdef CONFIG_ARCH_MSM8974
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8974PRO_AC;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msmferrum(void)
+{
+#ifdef CONFIG_ARCH_MSMFERRUM
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_FERRUM;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8916(void)
+{
+#ifdef CONFIG_ARCH_MSM8916
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8916;
+#else
+ return 0;
+#endif
+
+}
+
+static inline int cpu_is_msm8936(void)
+{
+#ifdef CONFIG_ARCH_MSM8916
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8936;
+#else
+ return 0;
+#endif
+
+}
+
+static inline int cpu_is_msm8939(void)
+{
+#ifdef CONFIG_ARCH_MSM8916
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8939;
+#else
+ return 0;
+#endif
+
+}
+
+static inline int cpu_is_msm8226(void)
+{
+#ifdef CONFIG_ARCH_MSM8226
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8226;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8610(void)
+{
+#ifdef CONFIG_ARCH_MSM8610
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8610;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8625q(void)
+{
+#ifdef CONFIG_ARCH_MSM8625
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8625Q;
+#else
+ return 0;
+#endif
+}
+
+static inline int soc_class_is_msm8960(void)
+{
+ return cpu_is_msm8960() || cpu_is_msm8960ab();
+}
+
+static inline int soc_class_is_apq8064(void)
+{
+ return cpu_is_apq8064() || cpu_is_apq8064ab() || cpu_is_apq8064aa();
+}
+
+static inline int soc_class_is_msm8930(void)
+{
+ return cpu_is_msm8930() || cpu_is_msm8930aa() || cpu_is_msm8930ab() ||
+ cpu_is_msm8627();
+}
+
+static inline int soc_class_is_msm8974(void)
+{
+ return cpu_is_msm8974() || cpu_is_msm8974pro_aa() ||
+ cpu_is_msm8974pro_ab() || cpu_is_msm8974pro_ac();
+}
+
+#endif
diff --git a/include/soc/qcom/tracer_pkt.h b/include/soc/qcom/tracer_pkt.h
new file mode 100644
index 0000000000000..2657b79b1ed6f
--- /dev/null
+++ b/include/soc/qcom/tracer_pkt.h
@@ -0,0 +1,130 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _TRACER_PKT_H_
+#define _TRACER_PKT_H_
+
+#include <linux/err.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_TRACER_PKT
+
+/**
+ * tracer_pkt_init() - initialize the tracer packet
+ * @data: Pointer to the buffer to be initialized with a tracer
+ * packet.
+ * @data_len: Length of the buffer.
+ * @client_event_cfg: Client-specific event configuration mask.
+ * @glink_event_cfg: G-Link-specific event configuration mask.
+ * @pkt_priv: Private/Cookie information to be added to the tracer
+ * packet.
+ * @pkt_priv_len: Length of the private data.
+ *
+ * This function is used to initialize a buffer with the tracer packet header.
+ * The tracer packet header includes the data as passed by the elements in the
+ * parameters.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int tracer_pkt_init(void *data, size_t data_len,
+ uint16_t client_event_cfg, uint32_t glink_event_cfg,
+ void *pkt_priv, size_t pkt_priv_len);
+
+/**
+ * tracer_pkt_set_event_cfg() - set the event configuration mask in the tracer
+ * packet
+ * @data: Pointer to the buffer to be initialized with event
+ * configuration mask.
+ * @client_event_cfg: Client-specific event configuration mask.
+ * @glink_event_cfg: G-Link-specific event configuration mask.
+ *
+ * This function is used to initialize a buffer with the event configuration
+ * mask as passed by the elements in the parameters.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int tracer_pkt_set_event_cfg(void *data, uint16_t client_event_cfg,
+ uint32_t glink_event_cfg);
+
+/**
+ * tracer_pkt_log_event() - log an event specific to the tracer packet
+ * @data: Pointer to the buffer containing tracer packet.
+ * @event_id: Event ID to be logged.
+ *
+ * This function is used to log an event specific to the tracer packet.
+ * The event is logged either into the tracer packet itself or a different
+ * tracing mechanism as configured.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int tracer_pkt_log_event(void *data, uint32_t event_id);
+
+/**
+ * tracer_pkt_calc_hex_dump_size() - calculate the hex dump size of a tracer
+ * packet
+ * @data: Pointer to the buffer containing tracer packet.
+ * @data_len: Length of the tracer packet buffer.
+ *
+ * This function is used to calculate the length of the buffer required to
+ * hold the hex dump of the tracer packet.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+size_t tracer_pkt_calc_hex_dump_size(void *data, size_t data_len);
+
+/**
+ * tracer_pkt_hex_dump() - hex dump the tracer packet into a buffer
+ * @buf: Buffer to contain the hex dump of the tracer packet.
+ * @buf_len: Length of the hex dump buffer.
+ * @data: Buffer containing the tracer packet.
+ * @data_len: Length of the buffer containing the tracer packet.
+ *
+ * This function is used to dump the contents of the tracer packet into
+ * a buffer in a specific hexadecimal format. The hex dump buffer can then
+ * be dumped through debugfs.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int tracer_pkt_hex_dump(void *buf, size_t buf_len, void *data, size_t data_len);
+
+#else
+
+static inline int tracer_pkt_init(void *data, size_t data_len,
+ uint16_t client_event_cfg, uint32_t glink_event_cfg,
+ void *pkt_priv, size_t pkt_priv_len)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int tracer_pkt_set_event_cfg(void *data, /* match declared API */
+		uint16_t client_event_cfg, uint32_t glink_event_cfg)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int tracer_pkt_log_event(void *data, uint32_t event_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline size_t tracer_pkt_calc_hex_dump_size(void *data, size_t data_len)
+{
+	return -EOPNOTSUPP; /* NOTE(review): negative errno wraps to a huge size_t; callers cannot detect failure — confirm intended */
+}
+
+static inline int tracer_pkt_hex_dump(void *buf, size_t buf_len, void *data,
+ size_t data_len)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_TRACER_PKT */
+#endif /* _TRACER_PKT_H_ */
diff --git a/include/sound/apr_audio.h b/include/sound/apr_audio.h
new file mode 100644
index 0000000000000..d3c8a4f23dae8
--- /dev/null
+++ b/include/sound/apr_audio.h
@@ -0,0 +1,1675 @@
+/*
+ *
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _APR_AUDIO_H_
+#define _APR_AUDIO_H_
+
+/* ASM opcodes without APR payloads*/
+#include <sound/qdsp6v2/apr.h>
+
+/*
+ * Audio Front End (AFE)
+ */
+
+/* Port ID. Update afe_get_port_index when a new port is added here. */
+#define PRIMARY_I2S_RX 0 /* index = 0 */
+#define PRIMARY_I2S_TX 1 /* index = 1 */
+#define PCM_RX 2 /* index = 2 */
+#define PCM_TX 3 /* index = 3 */
+#define SECONDARY_I2S_RX 4 /* index = 4 */
+#define SECONDARY_I2S_TX 5 /* index = 5 */
+#define MI2S_RX 6 /* index = 6 */
+#define MI2S_TX 7 /* index = 7 */
+#define HDMI_RX 8 /* index = 8 */
+#define RSVD_2 9 /* index = 9 */
+#define RSVD_3 10 /* index = 10 */
+#define DIGI_MIC_TX 11 /* index = 11 */
+#define VOICE_RECORD_RX 0x8003 /* index = 12 */
+#define VOICE_RECORD_TX 0x8004 /* index = 13 */
+#define VOICE_PLAYBACK_TX 0x8005 /* index = 14 */
+
+/* Slimbus Multi channel port id pool */
+#define SLIMBUS_0_RX 0x4000 /* index = 15 */
+#define SLIMBUS_0_TX 0x4001 /* index = 16 */
+#define SLIMBUS_1_RX 0x4002 /* index = 17 */
+#define SLIMBUS_1_TX 0x4003 /* index = 18 */
+#define SLIMBUS_2_RX 0x4004
+#define SLIMBUS_2_TX 0x4005
+#define SLIMBUS_3_RX 0x4006
+#define SLIMBUS_3_TX 0x4007
+#define SLIMBUS_4_RX 0x4008
+#define SLIMBUS_4_TX 0x4009 /* index = 24 */
+
+#define INT_BT_SCO_RX 0x3000 /* index = 25 */
+#define INT_BT_SCO_TX 0x3001 /* index = 26 */
+#define INT_BT_A2DP_RX 0x3002 /* index = 27 */
+#define INT_FM_RX 0x3004 /* index = 28 */
+#define INT_FM_TX 0x3005 /* index = 29 */
+#define RT_PROXY_PORT_001_RX 0x2000 /* index = 30 */
+#define RT_PROXY_PORT_001_TX 0x2001 /* index = 31 */
+#define SECONDARY_PCM_RX 12 /* index = 32 */
+#define SECONDARY_PCM_TX 13 /* index = 33 */
+
+
+#define AFE_PORT_INVALID 0xFFFF
+#define SLIMBUS_EXTPROC_RX AFE_PORT_INVALID
+
+#define AFE_PORT_CMD_START 0x000100ca
+
+#define AFE_EVENT_RTPORT_START 0
+#define AFE_EVENT_RTPORT_STOP 1
+#define AFE_EVENT_RTPORT_LOW_WM 2
+#define AFE_EVENT_RTPORT_HI_WM 3
+
+struct afe_port_start_command {
+ struct apr_hdr hdr;
+ u16 port_id;
+ u16 gain; /* Q13 */
+ u32 sample_rate; /* 8 , 16, 48khz */
+} __attribute__ ((packed));
+
+#define AFE_PORT_CMD_STOP 0x000100cb
+struct afe_port_stop_command {
+ struct apr_hdr hdr;
+ u16 port_id;
+ u16 reserved;
+} __attribute__ ((packed));
+
+#define AFE_PORT_CMD_APPLY_GAIN 0x000100cc
+struct afe_port_gain_command {
+ struct apr_hdr hdr;
+ u16 port_id;
+ u16 gain;/* Q13 */
+} __attribute__ ((packed));
+
+#define AFE_PORT_CMD_SIDETONE_CTL 0x000100cd
+struct afe_port_sidetone_command {
+ struct apr_hdr hdr;
+ u16 rx_port_id; /* Primary i2s tx = 1 */
+ /* PCM tx = 3 */
+ /* Secondary i2s tx = 5 */
+ /* Mi2s tx = 7 */
+ /* Digital mic tx = 11 */
+ u16 tx_port_id; /* Primary i2s rx = 0 */
+ /* PCM rx = 2 */
+ /* Secondary i2s rx = 4 */
+ /* Mi2S rx = 6 */
+ /* HDMI rx = 8 */
+ u16 gain; /* Q13 */
+ u16 enable; /* 1 = enable, 0 = disable */
+} __attribute__ ((packed));
+
+#define AFE_PORT_CMD_LOOPBACK 0x000100ce
+struct afe_loopback_command {
+ struct apr_hdr hdr;
+ u16 tx_port_id; /* Primary i2s rx = 0 */
+ /* PCM rx = 2 */
+ /* Secondary i2s rx = 4 */
+ /* Mi2S rx = 6 */
+ /* HDMI rx = 8 */
+ u16 rx_port_id; /* Primary i2s tx = 1 */
+ /* PCM tx = 3 */
+ /* Secondary i2s tx = 5 */
+ /* Mi2s tx = 7 */
+ /* Digital mic tx = 11 */
+ u16 mode; /* Default -1, DSP will convert
+ the tx to rx format */
+ u16 enable; /* 1 = enable, 0 = disable */
+} __attribute__ ((packed));
+
+#define AFE_PSEUDOPORT_CMD_START 0x000100cf
+struct afe_pseudoport_start_command {
+ struct apr_hdr hdr;
+ u16 port_id; /* Pseudo Port 1 = 0x8000 */
+ /* Pseudo Port 2 = 0x8001 */
+ /* Pseudo Port 3 = 0x8002 */
+ u16 timing; /* FTRT = 0 , AVTimer = 1, */
+} __attribute__ ((packed));
+
+#define AFE_PSEUDOPORT_CMD_STOP 0x000100d0
+struct afe_pseudoport_stop_command {
+ struct apr_hdr hdr;
+ u16 port_id; /* Pseudo Port 1 = 0x8000 */
+ /* Pseudo Port 2 = 0x8001 */
+ /* Pseudo Port 3 = 0x8002 */
+ u16 reserved;
+} __attribute__ ((packed));
+
+#define AFE_CMD_GET_ACTIVE_PORTS 0x000100d1
+
+
+#define AFE_CMD_GET_ACTIVE_HANDLES_FOR_PORT 0x000100d2
+struct afe_get_active_handles_command {
+ struct apr_hdr hdr;
+ u16 port_id;
+ u16 reserved;
+} __attribute__ ((packed));
+
+#define AFE_PCM_CFG_MODE_PCM 0x0
+#define AFE_PCM_CFG_MODE_AUX 0x1
+#define AFE_PCM_CFG_SYNC_EXT 0x0
+#define AFE_PCM_CFG_SYNC_INT 0x1
+#define AFE_PCM_CFG_FRM_8BPF 0x0
+#define AFE_PCM_CFG_FRM_16BPF 0x1
+#define AFE_PCM_CFG_FRM_32BPF 0x2
+#define AFE_PCM_CFG_FRM_64BPF 0x3
+#define AFE_PCM_CFG_FRM_128BPF 0x4
+#define AFE_PCM_CFG_FRM_256BPF 0x5
+#define AFE_PCM_CFG_QUANT_ALAW_NOPAD 0x0
+#define AFE_PCM_CFG_QUANT_MULAW_NOPAD 0x1
+#define AFE_PCM_CFG_QUANT_LINEAR_NOPAD 0x2
+#define AFE_PCM_CFG_QUANT_ALAW_PAD 0x3
+#define AFE_PCM_CFG_QUANT_MULAW_PAD 0x4
+#define AFE_PCM_CFG_QUANT_LINEAR_PAD 0x5
+#define AFE_PCM_CFG_CDATAOE_MASTER 0x0
+#define AFE_PCM_CFG_CDATAOE_SHARE 0x1
+
+struct afe_port_pcm_cfg {
+ u16 mode; /* PCM (short sync) = 0, AUXPCM (long sync) = 1 */
+ u16 sync; /* external = 0 , internal = 1 */
+ u16 frame; /* 8 bpf = 0 */
+ /* 16 bpf = 1 */
+ /* 32 bpf = 2 */
+ /* 64 bpf = 3 */
+ /* 128 bpf = 4 */
+ /* 256 bpf = 5 */
+ u16 quant;
+ u16 slot; /* Slot for PCM stream , 0 - 31 */
+ u16 data; /* 0, PCM block is the only master */
+ /* 1, PCM block is shares to driver data out signal */
+ /* other master */
+ u16 reserved;
+} __attribute__ ((packed));
+
+enum {
+ AFE_I2S_SD0 = 1,
+ AFE_I2S_SD1,
+ AFE_I2S_SD2,
+ AFE_I2S_SD3,
+ AFE_I2S_QUAD01,
+ AFE_I2S_QUAD23,
+ AFE_I2S_6CHS,
+ AFE_I2S_8CHS,
+};
+
+#define AFE_MI2S_MONO 0
+#define AFE_MI2S_STEREO 3
+#define AFE_MI2S_4CHANNELS 4
+#define AFE_MI2S_6CHANNELS 6
+#define AFE_MI2S_8CHANNELS 8
+
+struct afe_port_mi2s_cfg {
+ u16 bitwidth; /* 16,24,32 */
+ u16 line; /* Called ChannelMode in documentation */
+ /* i2s_sd0 = 1 */
+ /* i2s_sd1 = 2 */
+ /* i2s_sd2 = 3 */
+ /* i2s_sd3 = 4 */
+ /* i2s_quad01 = 5 */
+ /* i2s_quad23 = 6 */
+ /* i2s_6chs = 7 */
+ /* i2s_8chs = 8 */
+ u16 channel; /* Called MonoStereo in documentation */
+ /* i2s mono = 0 */
+ /* i2s mono right = 1 */
+ /* i2s mono left = 2 */
+ /* i2s stereo = 3 */
+ u16 ws; /* 0, word select signal from external source */
+ /* 1, word select signal from internal source */
+ u16 format; /* don't touch this field if it is not for */
+ /* AFE_PORT_CMD_I2S_CONFIG opcode */
+} __attribute__ ((packed));
+
+struct afe_port_hdmi_cfg {
+ u16 bitwidth; /* 16,24,32 */
+ u16 channel_mode; /* HDMI Stereo = 0 */
+ /* HDMI_3Point1 (4-ch) = 1 */
+ /* HDMI_5Point1 (6-ch) = 2 */
+ /* HDMI_6Point1 (8-ch) = 3 */
+ u16 data_type; /* HDMI_Linear = 0 */
+ /* HDMI_non_Linear = 1 */
+} __attribute__ ((packed));
+
+
+struct afe_port_hdmi_multi_ch_cfg {
+ u16 data_type; /* HDMI_Linear = 0 */
+ /* HDMI_non_Linear = 1 */
+ u16 channel_allocation; /* The default is 0 (Stereo) */
+ u16 reserved; /* must be set to 0 */
+} __packed;
+
+
+/* Slimbus Device Ids */
+#define AFE_SLIMBUS_DEVICE_1 0x0
+#define AFE_SLIMBUS_DEVICE_2 0x1
+#define AFE_PORT_MAX_AUDIO_CHAN_CNT 16
+
+struct afe_port_slimbus_cfg {
+ u16 slimbus_dev_id; /* SLIMBUS Device id.*/
+
+ u16 slave_dev_pgd_la; /* Slave ported generic device
+ * logical address.
+ */
+ u16 slave_dev_intfdev_la; /* Slave interface device logical
+ * address.
+ */
+ u16 bit_width; /** bit width of the samples, 16, 24.*/
+
+ u16 data_format; /** data format.*/
+
+ u16 num_channels; /** Number of channels.*/
+
+ /** Slave port mapping for respective channels.*/
+ u16 slave_port_mapping[AFE_PORT_MAX_AUDIO_CHAN_CNT];
+
+ u16 reserved;
+} __packed;
+
+struct afe_port_slimbus_sch_cfg {
+ u16 slimbus_dev_id; /* SLIMBUS Device id.*/
+ u16 bit_width; /** bit width of the samples, 16, 24.*/
+ u16 data_format; /** data format.*/
+ u16 num_channels; /** Number of channels.*/
+ u16 reserved;
+ /** Slave channel mapping for respective channels.*/
+ u8 slave_ch_mapping[8];
+} __packed;
+
+struct afe_port_rtproxy_cfg {
+ u16 bitwidth; /* 16,24,32 */
+ u16 interleaved; /* interleaved = 1 */
+ /* Noninterleaved = 0 */
+ u16 frame_sz; /* 5ms buffers = 160bytes */
+ u16 jitter; /* 10ms of jitter = 320 */
+ u16 lw_mark; /* Low watermark in bytes for triggering event*/
+ u16 hw_mark; /* High watermark bytes for triggering event*/
+ u16 rsvd;
+ int num_ch; /* 1 to 8 */
+} __packed;
+
+#define AFE_PORT_AUDIO_IF_CONFIG 0x000100d3
+#define AFE_PORT_AUDIO_SLIM_SCH_CONFIG 0x000100e4
+#define AFE_PORT_MULTI_CHAN_HDMI_AUDIO_IF_CONFIG 0x000100D9
+#define AFE_PORT_CMD_I2S_CONFIG 0x000100E7
+
+union afe_port_config {
+ struct afe_port_pcm_cfg pcm;
+ struct afe_port_mi2s_cfg mi2s;
+ struct afe_port_hdmi_cfg hdmi;
+ struct afe_port_hdmi_multi_ch_cfg hdmi_multi_ch;
+ struct afe_port_slimbus_cfg slimbus;
+ struct afe_port_slimbus_sch_cfg slim_sch;
+ struct afe_port_rtproxy_cfg rtproxy;
+} __attribute__((packed));
+
+struct afe_audioif_config_command {
+ struct apr_hdr hdr;
+ u16 port_id;
+ union afe_port_config port;
+} __attribute__ ((packed));
+
+#define AFE_TEST_CODEC_LOOPBACK_CTL 0x000100d5
+struct afe_codec_loopback_command {
+ u16 port_inf; /* Primary i2s = 0 */
+ /* PCM = 2 */
+ /* Secondary i2s = 4 */
+ /* Mi2s = 6 */
+ u16 enable; /* 0, disable. 1, enable */
+} __attribute__ ((packed));
+
+
+#define AFE_PARAM_ID_SIDETONE_GAIN 0x00010300
+struct afe_param_sidetone_gain {
+ u16 gain;
+ u16 reserved;
+} __attribute__ ((packed));
+
+#define AFE_PARAM_ID_SAMPLING_RATE 0x00010301
+struct afe_param_sampling_rate {
+ u32 sampling_rate;
+} __attribute__ ((packed));
+
+
+#define AFE_PARAM_ID_CHANNELS 0x00010302
+struct afe_param_channels {
+ u16 channels;
+ u16 reserved;
+} __attribute__ ((packed));
+
+
+#define AFE_PARAM_ID_LOOPBACK_GAIN 0x00010303
+struct afe_param_loopback_gain {
+ u16 gain;
+ u16 reserved;
+} __attribute__ ((packed));
+
+/* Parameter ID used to configure and enable/disable the loopback path. The
+ * difference with respect to the existing API, AFE_PORT_CMD_LOOPBACK, is that
+ * it allows Rx port to be configured as source port in loopback path. Port-id
+ * in AFE_PORT_CMD_SET_PARAM cmd is the source port which can be Tx or Rx port.
+ * In addition, we can configure the type of routing mode to handle different
+ * use cases.
+*/
+enum {
+ /* Regular loopback from source to destination port */
+ LB_MODE_DEFAULT = 1,
+ /* Sidetone feed from Tx source to Rx destination port */
+ LB_MODE_SIDETONE,
+ /* Echo canceller reference, voice + audio + DTMF */
+ LB_MODE_EC_REF_VOICE_AUDIO,
+ /* Echo canceller reference, voice alone */
+ LB_MODE_EC_REF_VOICE
+};
+
+#define AFE_PARAM_ID_LOOPBACK_CONFIG 0x0001020B
+#define AFE_API_VERSION_LOOPBACK_CONFIG 0x1
+struct afe_param_loopback_cfg {
+ /* Minor version used for tracking the version of the configuration
+ * interface.
+ */
+ uint32_t loopback_cfg_minor_version;
+
+ /* Destination Port Id. */
+ uint16_t dst_port_id;
+
+ /* Specifies data path type from src to dest port. Supported values:
+ * LB_MODE_DEFAULT
+ * LB_MODE_SIDETONE
+ * LB_MODE_EC_REF_VOICE_AUDIO
+ * LB_MODE_EC_REF_VOICE
+ */
+ uint16_t routing_mode;
+
+ /* Specifies whether to enable (1) or disable (0) an AFE loopback. */
+ uint16_t enable;
+
+ /* Reserved for 32-bit alignment. This field must be set to 0. */
+ uint16_t reserved;
+} __packed;
+
+#define AFE_MODULE_ID_PORT_INFO 0x00010200
+/* Module ID for the loopback-related parameters. */
+#define AFE_MODULE_LOOPBACK 0x00010205
+struct afe_param_payload_base {
+ u32 module_id;
+ u32 param_id;
+ u16 param_size;
+ u16 reserved;
+} __packed;
+
+struct afe_param_payload {
+ struct afe_param_payload_base base;
+ union {
+ struct afe_param_sidetone_gain sidetone_gain;
+ struct afe_param_sampling_rate sampling_rate;
+ struct afe_param_channels channels;
+ struct afe_param_loopback_gain loopback_gain;
+ struct afe_param_loopback_cfg loopback_cfg;
+ } __attribute__((packed)) param;
+} __attribute__ ((packed));
+
+#define AFE_PORT_CMD_SET_PARAM 0x000100dc
+
+struct afe_port_cmd_set_param {
+ struct apr_hdr hdr;
+ u16 port_id;
+ u16 payload_size;
+ u32 payload_address;
+ struct afe_param_payload payload;
+} __attribute__ ((packed));
+
+struct afe_port_cmd_set_param_no_payload {
+ struct apr_hdr hdr;
+ u16 port_id;
+ u16 payload_size;
+ u32 payload_address;
+} __packed;
+
+#define AFE_EVENT_GET_ACTIVE_PORTS 0x00010100
+struct afe_get_active_ports_rsp {
+ u16 num_ports;
+ u16 port_id;
+} __attribute__ ((packed));
+
+
+#define AFE_EVENT_GET_ACTIVE_HANDLES 0x00010102
+struct afe_get_active_handles_rsp {
+ u16 port_id;
+ u16 num_handles;
+ u16 mode; /* 0, voice rx */
+ /* 1, voice tx */
+ /* 2, audio rx */
+ /* 3, audio tx */
+ u16 handle;
+} __attribute__ ((packed));
+
+#define AFE_SERVICE_CMD_MEMORY_MAP 0x000100DE
+struct afe_cmd_memory_map {
+ struct apr_hdr hdr;
+ u32 phy_addr;
+ u32 mem_sz;
+ u16 mem_id;
+ u16 rsvd;
+} __packed;
+
+#define AFE_SERVICE_CMD_MEMORY_UNMAP 0x000100DF
+struct afe_cmd_memory_unmap {
+ struct apr_hdr hdr;
+ u32 phy_addr;
+} __packed;
+
+#define AFE_SERVICE_CMD_REG_RTPORT 0x000100E0
+struct afe_cmd_reg_rtport {
+ struct apr_hdr hdr;
+ u16 port_id;
+ u16 rsvd;
+} __packed;
+
+#define AFE_SERVICE_CMD_UNREG_RTPORT 0x000100E1
+struct afe_cmd_unreg_rtport {
+ struct apr_hdr hdr;
+ u16 port_id;
+ u16 rsvd;
+} __packed;
+
+#define AFE_SERVICE_CMD_RTPORT_WR 0x000100E2
+struct afe_cmd_rtport_wr {
+ struct apr_hdr hdr;
+ u16 port_id;
+ u16 rsvd;
+ u32 buf_addr;
+ u32 bytes_avail;
+} __packed;
+
+#define AFE_SERVICE_CMD_RTPORT_RD 0x000100E3
+struct afe_cmd_rtport_rd {
+ struct apr_hdr hdr;
+ u16 port_id;
+ u16 rsvd;
+ u32 buf_addr;
+ u32 bytes_avail;
+} __packed;
+
+#define AFE_EVENT_RT_PROXY_PORT_STATUS 0x00010105
+
+#define ADM_MAX_COPPS 5
+
+#define ADM_SERVICE_CMD_GET_COPP_HANDLES 0x00010300
+struct adm_get_copp_handles_command {
+ struct apr_hdr hdr;
+} __attribute__ ((packed));
+
+#define ADM_CMD_MATRIX_MAP_ROUTINGS 0x00010301
+struct adm_routings_session {
+ u16 id;
+ u16 num_copps;
+ u16 copp_id[ADM_MAX_COPPS+1]; /*Padding if numCopps is odd */
+} __packed;
+
+struct adm_routings_command {
+ struct apr_hdr hdr;
+ u32 path; /* 0 = Rx, 1 = Tx */
+ u32 num_sessions;
+ struct adm_routings_session session[8];
+} __attribute__ ((packed));
+
+
+#define ADM_CMD_MATRIX_RAMP_GAINS 0x00010302
+struct adm_ramp_gain {
+ struct apr_hdr hdr;
+ u16 session_id;
+ u16 copp_id;
+ u16 initial_gain;
+ u16 gain_increment;
+ u16 ramp_duration;
+ u16 reserved;
+} __attribute__ ((packed));
+
+struct adm_ramp_gains_command {
+ struct apr_hdr hdr;
+ u32 id;
+ u32 num_gains;
+ struct adm_ramp_gain gains[ADM_MAX_COPPS];
+} __attribute__ ((packed));
+
+
+#define ADM_CMD_COPP_OPEN 0x00010304
+struct adm_copp_open_command {
+ struct apr_hdr hdr;
+ u16 flags;
+ u16 mode; /* 1-RX, 2-Live TX, 3-Non Live TX */
+ u16 endpoint_id1;
+ u16 endpoint_id2;
+ u32 topology_id;
+ u16 channel_config;
+ u16 reserved;
+ u32 rate;
+} __attribute__ ((packed));
+
+#define ADM_CMD_COPP_CLOSE 0x00010305
+
+#define ADM_CMD_MULTI_CHANNEL_COPP_OPEN 0x00010310
+#define ADM_CMD_MULTI_CHANNEL_COPP_OPEN_V3 0x00010333
+struct adm_multi_ch_copp_open_command {
+ struct apr_hdr hdr;
+ u16 flags;
+ u16 mode; /* 1-RX, 2-Live TX, 3-Non Live TX */
+ u16 endpoint_id1;
+ u16 endpoint_id2;
+ u32 topology_id;
+ u16 channel_config;
+ u16 reserved;
+ u32 rate;
+ u8 dev_channel_mapping[8];
+} __packed;
+#define ADM_CMD_MEMORY_MAP 0x00010C30
+struct adm_cmd_memory_map{
+ struct apr_hdr hdr;
+ u32 buf_add;
+ u32 buf_size;
+ u16 mempool_id;
+ u16 reserved;
+} __attribute__((packed));
+
+#define ADM_CMD_MEMORY_UNMAP 0x00010C31
+struct adm_cmd_memory_unmap{
+ struct apr_hdr hdr;
+ u32 buf_add;
+} __attribute__((packed));
+
+#define ADM_CMD_MEMORY_MAP_REGIONS 0x00010C47
+struct adm_memory_map_regions{
+ u32 phys;
+ u32 buf_size;
+} __attribute__((packed));
+
+struct adm_cmd_memory_map_regions{
+ struct apr_hdr hdr;
+ u16 mempool_id;
+ u16 nregions;
+} __attribute__((packed));
+
+#define ADM_CMD_MEMORY_UNMAP_REGIONS 0x00010C48
+struct adm_memory_unmap_regions{
+ u32 phys;
+} __attribute__((packed));
+
+struct adm_cmd_memory_unmap_regions{
+ struct apr_hdr hdr;
+ u16 nregions;
+ u16 reserved;
+} __attribute__((packed));
+
+#define DEFAULT_COPP_TOPOLOGY 0x00010be3
+#define DEFAULT_POPP_TOPOLOGY 0x00010be4
+#define VPM_TX_SM_ECNS_COPP_TOPOLOGY 0x00010F71
+#define VPM_TX_DM_FLUENCE_COPP_TOPOLOGY 0x00010F72
+#define VPM_TX_QMIC_FLUENCE_COPP_TOPOLOGY 0x00010F75
+
+#define LOWLATENCY_POPP_TOPOLOGY 0x00010C68
+#define LOWLATENCY_COPP_TOPOLOGY 0x00010312
+#define PCM_BITS_PER_SAMPLE 16
+
+#define ASM_OPEN_WRITE_PERF_MODE_BIT (1<<28)
+#define ASM_OPEN_READ_PERF_MODE_BIT (1<<29)
+#define ADM_MULTI_CH_COPP_OPEN_PERF_MODE_BIT (1<<13)
+
+/* SRS TRUMEDIA GUIDS */
+/* topology */
+#define SRS_TRUMEDIA_TOPOLOGY_ID 0x00010D90
+/* module */
+#define SRS_TRUMEDIA_MODULE_ID 0x10005010
+/* parameters */
+#define SRS_TRUMEDIA_PARAMS 0x10005011
+#define SRS_TRUMEDIA_PARAMS_WOWHD 0x10005012
+#define SRS_TRUMEDIA_PARAMS_CSHP 0x10005013
+#define SRS_TRUMEDIA_PARAMS_HPF 0x10005014
+#define SRS_TRUMEDIA_PARAMS_PEQ 0x10005015
+#define SRS_TRUMEDIA_PARAMS_HL 0x10005016
+
+#define ASM_MAX_EQ_BANDS 12
+
+struct asm_eq_band {
+ u32 band_idx; /* The band index, 0 .. 11 */
+ u32 filter_type; /* Filter band type */
+ u32 center_freq_hz; /* Filter band center frequency */
+ u32 filter_gain; /* Filter band initial gain (dB) */
+ /* Range is +12 dB to -12 dB with 1dB increments. */
+ u32 q_factor;
+} __attribute__ ((packed));
+
+struct asm_equalizer_params {
+ u32 enable;
+ u32 num_bands;
+ struct asm_eq_band eq_bands[ASM_MAX_EQ_BANDS];
+} __attribute__ ((packed));
+
+struct asm_master_gain_params {
+ u16 master_gain;
+ u16 padding;
+} __attribute__ ((packed));
+
+struct asm_lrchannel_gain_params {
+ u16 left_gain;
+ u16 right_gain;
+} __attribute__ ((packed));
+
+struct asm_mute_params {
+ u32 muteflag;
+} __attribute__ ((packed));
+
+struct asm_softvolume_params {
+ u32 period;
+ u32 step;
+ u32 rampingcurve;
+} __attribute__ ((packed));
+
+struct asm_softpause_params {
+ u32 enable;
+ u32 period;
+ u32 step;
+ u32 rampingcurve;
+} __packed;
+
+struct asm_pp_param_data_hdr {
+ u32 module_id;
+ u32 param_id;
+ u16 param_size;
+ u16 reserved;
+} __attribute__ ((packed));
+
+struct asm_pp_params_command {
+ struct apr_hdr hdr;
+ u32 *payload;
+ u32 payload_size;
+ struct asm_pp_param_data_hdr params;
+} __attribute__ ((packed));
+
+#define EQUALIZER_MODULE_ID 0x00010c27
+#define EQUALIZER_PARAM_ID 0x00010c28
+
+#define VOLUME_CONTROL_MODULE_ID 0x00010bfe
+#define MASTER_GAIN_PARAM_ID 0x00010bff
+#define L_R_CHANNEL_GAIN_PARAM_ID 0x00010c00
+#define MUTE_CONFIG_PARAM_ID 0x00010c01
+#define SOFT_PAUSE_PARAM_ID 0x00010D6A
+#define SOFT_VOLUME_PARAM_ID 0x00010C29
+
+#define IIR_FILTER_ENABLE_PARAM_ID 0x00010c03
+#define IIR_FILTER_PREGAIN_PARAM_ID 0x00010c04
+#define IIR_FILTER_CONFIG_PARAM_ID 0x00010c05
+
+#define MBADRC_MODULE_ID 0x00010c06
+#define MBADRC_ENABLE_PARAM_ID 0x00010c07
+#define MBADRC_CONFIG_PARAM_ID 0x00010c08
+
+
+#define ADM_CMD_SET_PARAMS 0x00010306
+#define ADM_CMD_GET_PARAMS 0x0001030B
+#define ADM_CMDRSP_GET_PARAMS 0x0001030C
+struct adm_set_params_command {
+ struct apr_hdr hdr;
+ u32 payload;
+ u32 payload_size;
+} __attribute__ ((packed));
+
+
+#define ADM_CMD_TAP_COPP_PCM 0x00010307
+struct adm_tap_copp_pcm_command {
+ struct apr_hdr hdr;
+} __attribute__ ((packed));
+
+
+/* QDSP6 to Client messages
+*/
+#define ADM_SERVICE_CMDRSP_GET_COPP_HANDLES 0x00010308
+struct adm_get_copp_handles_respond {
+ struct apr_hdr hdr;
+ u32 handles;
+ u32 copp_id;
+} __attribute__ ((packed));
+
+#define ADM_CMDRSP_COPP_OPEN 0x0001030A
+struct adm_copp_open_respond {
+ u32 status;
+ u16 copp_id;
+ u16 reserved;
+} __attribute__ ((packed));
+
+#define ADM_CMDRSP_MULTI_CHANNEL_COPP_OPEN 0x00010311
+#define ADM_CMDRSP_MULTI_CHANNEL_COPP_OPEN_V3 0x00010334
+
+
+#define ASM_STREAM_PRIORITY_NORMAL 0
+#define ASM_STREAM_PRIORITY_LOW 1
+#define ASM_STREAM_PRIORITY_HIGH 2
+#define ASM_STREAM_PRIORITY_RESERVED 3
+
+#define ASM_END_POINT_DEVICE_MATRIX 0
+#define ASM_END_POINT_STREAM 1
+
+#define AAC_ENC_MODE_AAC_LC 0x02
+#define AAC_ENC_MODE_AAC_P 0x05
+#define AAC_ENC_MODE_EAAC_P 0x1D
+
+#define ASM_STREAM_CMD_CLOSE 0x00010BCD
+#define ASM_STREAM_CMD_FLUSH 0x00010BCE
+#define ASM_STREAM_CMD_SET_PP_PARAMS 0x00010BCF
+#define ASM_STREAM_CMD_GET_PP_PARAMS 0x00010BD0
+#define ASM_STREAM_CMDRSP_GET_PP_PARAMS 0x00010BD1
+#define ASM_SESSION_CMD_PAUSE 0x00010BD3
+#define ASM_SESSION_CMD_GET_SESSION_TIME 0x00010BD4
+#define ASM_DATA_CMD_EOS 0x00010BDB
+#define ASM_DATA_EVENT_EOS 0x00010BDD
+
+#define ASM_SERVICE_CMD_GET_STREAM_HANDLES 0x00010C0B
+#define ASM_STREAM_CMD_FLUSH_READBUFS 0x00010C09
+
+#define ASM_SESSION_EVENT_RX_UNDERFLOW 0x00010C17
+#define ASM_SESSION_EVENT_TX_OVERFLOW 0x00010C18
+#define ASM_SERVICE_CMD_GET_WALLCLOCK_TIME 0x00010C19
+#define ASM_DATA_CMDRSP_EOS 0x00010C1C
+
+/* ASM Data structures */
+
+/* common declarations */
+struct asm_pcm_cfg {
+ u16 ch_cfg;
+ u16 bits_per_sample;
+ u32 sample_rate;
+ u16 is_signed;
+ u16 interleaved;
+};
+
+#define PCM_CHANNEL_NULL 0
+
+/* Front left channel. */
+#define PCM_CHANNEL_FL 1
+
+/* Front right channel. */
+#define PCM_CHANNEL_FR 2
+
+/* Front center channel. */
+#define PCM_CHANNEL_FC 3
+
+/* Left surround channel.*/
+#define PCM_CHANNEL_LS 4
+
+/* Right surround channel.*/
+#define PCM_CHANNEL_RS 5
+
+/* Low frequency effect channel. */
+#define PCM_CHANNEL_LFE 6
+
+/* Center surround channel; Rear center channel. */
+#define PCM_CHANNEL_CS 7
+
+/* Left back channel; Rear left channel. */
+#define PCM_CHANNEL_LB 8
+
+/* Right back channel; Rear right channel. */
+#define PCM_CHANNEL_RB 9
+
+/* Top surround channel. */
+#define PCM_CHANNEL_TS 10
+
+/* Center vertical height channel.*/
+#define PCM_CHANNEL_CVH 11
+
+/* Mono surround channel.*/
+#define PCM_CHANNEL_MS 12
+
+/* Front left of center. */
+#define PCM_CHANNEL_FLC 13
+
+/* Front right of center. */
+#define PCM_CHANNEL_FRC 14
+
+/* Rear left of center. */
+#define PCM_CHANNEL_RLC 15
+
+/* Rear right of center. */
+#define PCM_CHANNEL_RRC 16
+
+#define PCM_FORMAT_MAX_NUM_CHANNEL 8
+
+/* Maximum number of channels supported
+ * in ASM_ENCDEC_DEC_CHAN_MAP command
+ */
+#define MAX_CHAN_MAP_CHANNELS 16
+/*
+ * Multiple-channel PCM decoder format block structure used in the
+ * #ASM_STREAM_CMD_OPEN_WRITE command.
+ * The data must be in little-endian format.
+ */
+struct asm_multi_channel_pcm_fmt_blk {
+
+ u16 num_channels; /*
+ * Number of channels.
+ * Supported values:1 to 8
+ */
+
+ u16 bits_per_sample; /*
+ * Number of bits per sample per channel.
+ * Supported values: 16, 24 When used for
+ * playback, the client must send 24-bit
+ * samples packed in 32-bit words. The
+ * 24-bit samples must be placed in the most
+ * significant 24 bits of the 32-bit word. When
+ * used for recording, the aDSP sends 24-bit
+ * samples packed in 32-bit words. The 24-bit
+ * samples are placed in the most significant
+ * 24 bits of the 32-bit word.
+ */
+
+ u32 sample_rate; /*
+ * Number of samples per second
+ * (in Hertz). Supported values:
+ * 2000 to 48000
+ */
+
+ u16 is_signed; /*
+ * Flag that indicates the samples
+ * are signed (1).
+ */
+
+ u16 is_interleaved; /*
+ * Flag that indicates whether the channels are
+ * de-interleaved (0) or interleaved (1).
+ * Interleaved format means corresponding
+ * samples from the left and right channels are
+ * interleaved within the buffer.
+ * De-interleaved format means samples from
+ * each channel are contiguous in the buffer.
+ * The samples from one channel immediately
+ * follow those of the previous channel.
+ */
+
+ u8 channel_mapping[8]; /*
+ * Supported values:
+ * PCM_CHANNEL_NULL, PCM_CHANNEL_FL,
+ * PCM_CHANNEL_FR, PCM_CHANNEL_FC,
+ * PCM_CHANNEL_LS, PCM_CHANNEL_RS,
+ * PCM_CHANNEL_LFE, PCM_CHANNEL_CS,
+ * PCM_CHANNEL_LB, PCM_CHANNEL_RB,
+ * PCM_CHANNEL_TS, PCM_CHANNEL_CVH,
+ * PCM_CHANNEL_MS, PCM_CHANNEL_FLC,
+ * PCM_CHANNEL_FRC, PCM_CHANNEL_RLC,
+ * PCM_CHANNEL_RRC.
+ * Channel[i] mapping describes channel I. Each
+ * element i of the array describes channel I
+ * inside the buffer where I < num_channels.
+ * An unused channel is set to zero.
+ */
+};
+
+struct asm_adpcm_cfg {
+ u16 ch_cfg;
+ u16 bits_per_sample;
+ u32 sample_rate;
+ u32 block_size;
+};
+
+struct asm_yadpcm_cfg {
+ u16 ch_cfg;
+ u16 bits_per_sample;
+ u32 sample_rate;
+};
+
+struct asm_midi_cfg {
+ u32 nMode;
+};
+
+struct asm_wma_cfg {
+ u16 format_tag;
+ u16 ch_cfg;
+ u32 sample_rate;
+ u32 avg_bytes_per_sec;
+ u16 block_align;
+ u16 valid_bits_per_sample;
+ u32 ch_mask;
+ u16 encode_opt;
+ u16 adv_encode_opt;
+ u32 adv_encode_opt2;
+ u32 drc_peak_ref;
+ u32 drc_peak_target;
+ u32 drc_ave_ref;
+ u32 drc_ave_target;
+};
+
+struct asm_wmapro_cfg {
+ u16 format_tag;
+ u16 ch_cfg;
+ u32 sample_rate;
+ u32 avg_bytes_per_sec;
+ u16 block_align;
+ u16 valid_bits_per_sample;
+ u32 ch_mask;
+ u16 encode_opt;
+ u16 adv_encode_opt;
+ u32 adv_encode_opt2;
+ u32 drc_peak_ref;
+ u32 drc_peak_target;
+ u32 drc_ave_ref;
+ u32 drc_ave_target;
+};
+
+struct asm_aac_cfg {
+ u16 format;
+ u16 aot;
+ u16 ep_config;
+ u16 section_data_resilience;
+ u16 scalefactor_data_resilience;
+ u16 spectral_data_resilience;
+ u16 ch_cfg;
+ u16 reserved;
+ u32 sample_rate;
+};
+
+struct asm_amrwbplus_cfg {
+ u32 size_bytes;
+ u32 version;
+ u32 num_channels;
+ u32 amr_band_mode;
+ u32 amr_dtx_mode;
+ u32 amr_frame_fmt;
+ u32 amr_lsf_idx;
+};
+
+struct asm_flac_cfg {
+ u16 stream_info_present;
+ u16 min_blk_size;
+ u16 max_blk_size;
+ u16 ch_cfg;
+ u16 sample_size;
+ u16 sample_rate;
+ u16 md5_sum;
+ u32 ext_sample_rate;
+ u32 min_frame_size;
+ u32 max_frame_size;
+};
+
+struct asm_vorbis_cfg {
+ u32 ch_cfg;
+ u32 bit_rate;
+ u32 min_bit_rate;
+ u32 max_bit_rate;
+ u16 bit_depth_pcm_sample;
+ u16 bit_stream_format;
+};
+
+struct asm_aac_read_cfg {
+ u32 bitrate;
+ u32 enc_mode;
+ u16 format;
+ u16 ch_cfg;
+ u32 sample_rate;
+};
+
+struct asm_amrnb_read_cfg {
+ u16 mode;
+ u16 dtx_mode;
+};
+
+struct asm_amrwb_read_cfg {
+ u16 mode;
+ u16 dtx_mode;
+};
+
+struct asm_evrc_read_cfg {
+ u16 max_rate;
+ u16 min_rate;
+ u16 rate_modulation_cmd;
+ u16 reserved;
+};
+
+struct asm_qcelp13_read_cfg {
+ u16 max_rate;
+ u16 min_rate;
+ u16 reduced_rate_level;
+ u16 rate_modulation_cmd;
+};
+
+struct asm_sbc_read_cfg {
+ u32 subband;
+ u32 block_len;
+ u32 ch_mode;
+ u32 alloc_method;
+ u32 bit_rate;
+ u32 sample_rate;
+};
+
+struct asm_sbc_bitrate {
+ u32 bitrate;
+};
+
+struct asm_immed_decode {
+ u32 mode;
+};
+
+struct asm_sbr_ps {
+ u32 enable;
+};
+
+struct asm_dual_mono {
+ u16 sce_left;
+ u16 sce_right;
+};
+
+struct asm_dec_chan_map {
+ u32 num_channels; /* Number of decoder output
+ * channels. A value of 0
+ * indicates native channel
+ * mapping, which is valid
+ * only for NT mode. This
+ * means the output of the
+ * decoder is to be preserved
+ * as is.
+ */
+
+ u8 channel_mapping[MAX_CHAN_MAP_CHANNELS];/* Channel array of size
+ * num_channels. It can grow
+ * till MAX_CHAN_MAP_CHANNELS.
+ * Channel[i] mapping
+ * describes channel I inside
+ * the decoder output buffer.
+ * Valid channel mapping
+ * values are to be present at
+ * the beginning of the array.
+ * All remaining elements of
+ * the array are to be filled
+ * with PCM_CHANNEL_NULL.
+ */
+};
+
+struct asm_encode_cfg_blk {
+ u32 frames_per_buf;
+ u32 format_id;
+ u32 cfg_size;
+ union {
+ struct asm_pcm_cfg pcm;
+ struct asm_aac_read_cfg aac;
+ struct asm_amrnb_read_cfg amrnb;
+ struct asm_evrc_read_cfg evrc;
+ struct asm_qcelp13_read_cfg qcelp13;
+ struct asm_sbc_read_cfg sbc;
+ struct asm_amrwb_read_cfg amrwb;
+ struct asm_multi_channel_pcm_fmt_blk mpcm;
+ } __attribute__((packed)) cfg;
+};
+
+struct asm_frame_meta_info {
+ u32 offset_to_frame;
+ u32 frame_size;
+ u32 encoded_pcm_samples;
+ u32 msw_ts;
+ u32 lsw_ts;
+ u32 nflags;
+};
+
+/* Stream level commands */
+#define ASM_STREAM_CMD_OPEN_READ 0x00010BCB
+#define ASM_STREAM_CMD_OPEN_READ_V2_1 0x00010DB2
+struct asm_stream_cmd_open_read {
+ struct apr_hdr hdr;
+ u32 uMode;
+ u32 src_endpoint;
+ u32 pre_proc_top;
+ u32 format;
+} __attribute__((packed));
+
+struct asm_stream_cmd_open_read_v2_1 {
+ struct apr_hdr hdr;
+ u32 uMode;
+ u32 src_endpoint;
+ u32 pre_proc_top;
+ u32 format;
+ u16 bits_per_sample;
+ u16 reserved;
+} __packed;
+
+/* Supported formats */
+#define LINEAR_PCM 0x00010BE5
+#define DTMF 0x00010BE6
+#define ADPCM 0x00010BE7
+#define YADPCM 0x00010BE8
+#define MP3 0x00010BE9
+#define MPEG4_AAC 0x00010BEA
+#define AMRNB_FS 0x00010BEB
+#define AMRWB_FS 0x00010BEC
+#define V13K_FS 0x00010BED
+#define EVRC_FS 0x00010BEE
+#define EVRCB_FS 0x00010BEF
+#define EVRCWB_FS 0x00010BF0
+#define MIDI 0x00010BF1
+#define SBC 0x00010BF2
+#define WMA_V10PRO 0x00010BF3
+#define WMA_V9 0x00010BF4
+#define AMR_WB_PLUS 0x00010BF5
+#define AC3_DECODER 0x00010BF6
+#define EAC3_DECODER 0x00010C3C
+#define DTS 0x00010D88
+#define DTS_LBR 0x00010DBB
+#define ATRAC 0x00010D89
+#define MAT 0x00010D8A
+#define G711_ALAW_FS 0x00010BF7
+#define G711_MLAW_FS 0x00010BF8
+#define G711_PCM_FS 0x00010BF9
+#define MPEG4_MULTI_AAC 0x00010D86
+#define US_POINT_EPOS_FORMAT 0x00012310
+#define US_RAW_FORMAT 0x0001127C
+#define MULTI_CHANNEL_PCM 0x00010C66
+
+#define ASM_ENCDEC_SBCRATE 0x00010C13
+#define ASM_ENCDEC_IMMDIATE_DECODE 0x00010C14
+#define ASM_ENCDEC_CFG_BLK 0x00010C2C
+
+#define ASM_ENCDEC_SBCRATE 0x00010C13
+#define ASM_ENCDEC_IMMDIATE_DECODE 0x00010C14
+#define ASM_ENCDEC_CFG_BLK 0x00010C2C
+
+#define ASM_STREAM_CMD_OPEN_READ_COMPRESSED 0x00010D95
+struct asm_stream_cmd_open_read_compressed {
+ struct apr_hdr hdr;
+ u32 uMode;
+ u32 frame_per_buf;
+} __packed;
+
+#define ASM_STREAM_CMD_OPEN_WRITE 0x00010BCA
+#define ASM_STREAM_CMD_OPEN_WRITE_V2_1 0x00010DB1
+struct asm_stream_cmd_open_write {
+ struct apr_hdr hdr;
+ u32 uMode;
+ u16 sink_endpoint;
+ u16 stream_handle;
+ u32 post_proc_top;
+ u32 format;
+} __attribute__((packed));
+
+#define IEC_61937_MASK 0x00000001
+#define IEC_60958_MASK 0x00000002
+
+#define ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED 0x00010D84
+struct asm_stream_cmd_open_write_compressed {
+ struct apr_hdr hdr;
+ u32 flags;
+ u32 format;
+} __packed;
+
+#define ASM_STREAM_CMD_OPEN_READWRITE 0x00010BCC
+
+struct asm_stream_cmd_open_read_write {
+ struct apr_hdr hdr;
+ u32 uMode;
+ u32 post_proc_top;
+ u32 write_format;
+ u32 read_format;
+} __attribute__((packed));
+
+#define ASM_STREAM_CMD_OPEN_LOOPBACK 0x00010D6E
+struct asm_stream_cmd_open_loopback {
+ struct apr_hdr hdr;
+ u32 mode_flags;
+/* Mode flags.
+ * Bit 0-31: reserved; client should set these bits to 0
+ */
+ u16 src_endpointype;
+ /* Endpoint type. 0 = Tx Matrix */
+ u16 sink_endpointype;
+ /* Endpoint type. 0 = Rx Matrix */
+ u32 postprocopo_id;
+/* Postprocessor topology ID. Specifies the topology of
+ * postprocessing algorithms.
+ */
+} __packed;
+
+#define ADM_CMD_CONNECT_AFE_PORT 0x00010320
+#define ADM_CMD_DISCONNECT_AFE_PORT 0x00010321
+
+struct adm_cmd_connect_afe_port {
+ struct apr_hdr hdr;
+ u8 mode; /*mode represent the interface is for RX or TX*/
+ u8 session_id; /*ASM session ID*/
+ u16 afe_port_id;
+} __packed;
+
+#define ADM_CMD_CONNECT_AFE_PORT_V2 0x00010332
+
+struct adm_cmd_connect_afe_port_v2 {
+ struct apr_hdr hdr;
+ u8 mode; /*mode represent the interface is for RX or TX*/
+ u8 session_id; /*ASM session ID*/
+ u16 afe_port_id;
+ u32 num_channels;
+ u32 sampleing_rate;
+} __packed;
+
+#define ASM_STREAM_CMD_SET_ENCDEC_PARAM 0x00010C10
+#define ASM_STREAM_CMD_GET_ENCDEC_PARAM 0x00010C11
+#define ASM_ENCDEC_CFG_BLK_ID 0x00010C2C
+#define ASM_ENABLE_SBR_PS 0x00010C63
+#define ASM_CONFIGURE_DUAL_MONO 0x00010C64
+struct asm_stream_cmd_encdec_cfg_blk{
+ struct apr_hdr hdr;
+ u32 param_id;
+ u32 param_size;
+ struct asm_encode_cfg_blk enc_blk;
+} __attribute__((packed));
+
+struct asm_stream_cmd_encdec_sbc_bitrate{
+ struct apr_hdr hdr;
+ u32 param_id;
+ struct asm_sbc_bitrate sbc_bitrate;
+} __attribute__((packed));
+
+struct asm_stream_cmd_encdec_immed_decode{
+ struct apr_hdr hdr;
+ u32 param_id;
+ u32 param_size;
+ struct asm_immed_decode dec;
+} __attribute__((packed));
+
+struct asm_stream_cmd_encdec_sbr{
+ struct apr_hdr hdr;
+ u32 param_id;
+ u32 param_size;
+ struct asm_sbr_ps sbr_ps;
+} __attribute__((packed));
+
+struct asm_stream_cmd_encdec_dualmono {
+ struct apr_hdr hdr;
+ u32 param_id;
+ u32 param_size;
+ struct asm_dual_mono channel_map;
+} __packed;
+
+#define ASM_PARAM_ID_AAC_STEREO_MIX_COEFF_SELECTION_FLAG 0x00010DD8
+
+/* Structure for AAC decoder stereo coefficient setting. */
+
+struct asm_aac_stereo_mix_coeff_selection_param {
+ struct apr_hdr hdr;
+ u32 param_id;
+ u32 param_size;
+ u32 aac_stereo_mix_coeff_flag;
+} __packed;
+
+#define ASM_ENCDEC_DEC_CHAN_MAP 0x00010D82
+struct asm_stream_cmd_encdec_channelmap {
+ struct apr_hdr hdr;
+ u32 param_id;
+ u32 param_size;
+ struct asm_dec_chan_map chan_map;
+} __packed;
+
+#define ASM_STREAM_CMD_ADJUST_SAMPLES                            0x00010C0A
+struct asm_stream_cmd_adjust_samples{
+ struct apr_hdr hdr;
+ u16 nsamples;
+ u16 reserved;
+} __attribute__((packed));
+
+#define ASM_STREAM_CMD_TAP_POPP_PCM 0x00010BF9
+struct asm_stream_cmd_tap_popp_pcm{
+ struct apr_hdr hdr;
+ u16 enable;
+ u16 reserved;
+ u32 module_id;
+} __attribute__((packed));
+
+/* Session Level commands */
+#define ASM_SESSION_CMD_MEMORY_MAP 0x00010C32
+struct asm_stream_cmd_memory_map{
+ struct apr_hdr hdr;
+ u32 buf_add;
+ u32 buf_size;
+ u16 mempool_id;
+ u16 reserved;
+} __attribute__((packed));
+
+#define ASM_SESSION_CMD_MEMORY_UNMAP 0x00010C33
+struct asm_stream_cmd_memory_unmap{
+ struct apr_hdr hdr;
+ u32 buf_add;
+} __attribute__((packed));
+
+#define ASM_SESSION_CMD_MEMORY_MAP_REGIONS 0x00010C45
+struct asm_memory_map_regions{
+ u32 phys;
+ u32 buf_size;
+} __attribute__((packed));
+
+struct asm_stream_cmd_memory_map_regions{
+ struct apr_hdr hdr;
+ u16 mempool_id;
+ u16 nregions;
+} __attribute__((packed));
+
+#define ASM_SESSION_CMD_MEMORY_UNMAP_REGIONS 0x00010C46
+struct asm_memory_unmap_regions{
+ u32 phys;
+} __attribute__((packed));
+
+struct asm_stream_cmd_memory_unmap_regions{
+ struct apr_hdr hdr;
+ u16 nregions;
+ u16 reserved;
+} __attribute__((packed));
+
+#define ASM_SESSION_CMD_RUN 0x00010BD2
+struct asm_stream_cmd_run{
+ struct apr_hdr hdr;
+ u32 flags;
+ u32 msw_ts;
+ u32 lsw_ts;
+} __attribute__((packed));
+
+/* Session level events */
+#define ASM_SESSION_CMD_REGISTER_FOR_RX_UNDERFLOW_EVENTS 0x00010BD5
+struct asm_stream_cmd_reg_rx_underflow_event{
+ struct apr_hdr hdr;
+ u16 enable;
+ u16 reserved;
+} __attribute__((packed));
+
+#define ASM_SESSION_CMD_REGISTER_FOR_TX_OVERFLOW_EVENTS 0x00010BD6
+struct asm_stream_cmd_reg_tx_overflow_event{
+ struct apr_hdr hdr;
+ u16 enable;
+ u16 reserved;
+} __attribute__((packed));
+
+/* Data Path commands */
+#define ASM_DATA_CMD_WRITE 0x00010BD9
+struct asm_stream_cmd_write{
+ struct apr_hdr hdr;
+ u32 buf_add;
+ u32 avail_bytes;
+ u32 uid;
+ u32 msw_ts;
+ u32 lsw_ts;
+ u32 uflags;
+} __attribute__((packed));
+
+#define ASM_DATA_CMD_READ 0x00010BDA
+struct asm_stream_cmd_read{
+ struct apr_hdr hdr;
+ u32 buf_add;
+ u32 buf_size;
+ u32 uid;
+} __attribute__((packed));
+
+#define ASM_DATA_CMD_READ_COMPRESSED 0x00010DBF
+struct asm_stream_cmd_read_compressed {
+ struct apr_hdr hdr;
+ u32 buf_add;
+ u32 buf_size;
+ u32 uid;
+} __packed;
+
+#define ASM_DATA_CMD_MEDIA_FORMAT_UPDATE 0x00010BDC
+#define ASM_DATA_EVENT_ENC_SR_CM_NOTIFY 0x00010BDE
+struct asm_stream_media_format_update{
+ struct apr_hdr hdr;
+ u32 format;
+ u32 cfg_size;
+ union {
+ struct asm_pcm_cfg pcm_cfg;
+ struct asm_adpcm_cfg adpcm_cfg;
+ struct asm_yadpcm_cfg yadpcm_cfg;
+ struct asm_midi_cfg midi_cfg;
+ struct asm_wma_cfg wma_cfg;
+ struct asm_wmapro_cfg wmapro_cfg;
+ struct asm_aac_cfg aac_cfg;
+ struct asm_flac_cfg flac_cfg;
+ struct asm_vorbis_cfg vorbis_cfg;
+ struct asm_multi_channel_pcm_fmt_blk multi_ch_pcm_cfg;
+ struct asm_amrwbplus_cfg amrwbplus_cfg;
+ } __attribute__((packed)) write_cfg;
+} __attribute__((packed));
+
+
+/* Command Responses */
+#define ASM_STREAM_CMDRSP_GET_ENCDEC_PARAM 0x00010C12
+struct asm_stream_cmdrsp_get_readwrite_param{
+ struct apr_hdr hdr;
+ u32 status;
+ u32 param_id;
+ u16 param_size;
+ u16 padding;
+ union {
+ struct asm_sbc_bitrate sbc_bitrate;
+ struct asm_immed_decode aac_dec;
+ } __attribute__((packed)) read_write_cfg;
+} __attribute__((packed));
+
+
+#define ASM_SESSION_CMDRSP_GET_SESSION_TIME 0x00010BD8
+struct asm_stream_cmdrsp_get_session_time{
+ struct apr_hdr hdr;
+ u32 status;
+ u32 msw_ts;
+ u32 lsw_ts;
+} __attribute__((packed));
+
+#define ASM_DATA_EVENT_WRITE_DONE 0x00010BDF
+struct asm_data_event_write_done{
+ u32 buf_add;
+ u32 status;
+} __attribute__((packed));
+
+#define ASM_DATA_EVENT_READ_DONE 0x00010BE0
+struct asm_data_event_read_done{
+ u32 status;
+ u32 buffer_add;
+ u32 enc_frame_size;
+ u32 offset;
+ u32 msw_ts;
+ u32 lsw_ts;
+ u32 flags;
+ u32 num_frames;
+ u32 id;
+} __attribute__((packed));
+
+#define ASM_DATA_EVENT_READ_COMPRESSED_DONE 0x00010DC0
+struct asm_data_event_read_compressed_done {
+ u32 status;
+ u32 buffer_add;
+ u32 enc_frame_size;
+ u32 offset;
+ u32 msw_ts;
+ u32 lsw_ts;
+ u32 flags;
+ u32 num_frames;
+ u32 id;
+} __packed;
+
+#define ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY 0x00010C65
+struct asm_data_event_sr_cm_change_notify {
+ u32 sample_rate;
+ u16 no_of_channels;
+ u16 reserved;
+ u8 channel_map[8];
+} __packed;
+
+/* service level events */
+
+#define ASM_SERVICE_CMDRSP_GET_STREAM_HANDLES 0x00010C1B
+struct asm_svc_cmdrsp_get_strm_handles{
+ struct apr_hdr hdr;
+ u32 num_handles;
+ u32 stream_handles;
+} __attribute__((packed));
+
+
+#define ASM_SERVICE_CMDRSP_GET_WALLCLOCK_TIME 0x00010C1A
+struct asm_svc_cmdrsp_get_wallclock_time{
+ struct apr_hdr hdr;
+ u32 status;
+ u32 msw_ts;
+ u32 lsw_ts;
+} __attribute__((packed));
+
+/*
+ * Error code
+*/
+#define ADSP_EOK 0x00000000 /* Success / completed / no errors. */
+#define ADSP_EFAILED 0x00000001 /* General failure. */
+#define ADSP_EBADPARAM 0x00000002 /* Bad operation parameter(s). */
+#define ADSP_EUNSUPPORTED 0x00000003 /* Unsupported routine/operation. */
+#define ADSP_EVERSION 0x00000004 /* Unsupported version. */
+#define ADSP_EUNEXPECTED 0x00000005 /* Unexpected problem encountered. */
+#define ADSP_EPANIC 0x00000006 /* Unhandled problem occurred. */
+#define ADSP_ENORESOURCE 0x00000007 /* Unable to allocate resource(s). */
+#define ADSP_EHANDLE 0x00000008 /* Invalid handle. */
+#define ADSP_EALREADY 0x00000009 /* Operation is already processed. */
+#define ADSP_ENOTREADY 0x0000000A /* Operation not ready to be processed*/
+#define ADSP_EPENDING 0x0000000B /* Operation is pending completion*/
+#define ADSP_EBUSY 0x0000000C /* Operation could not be accepted or
+ processed. */
+#define ADSP_EABORTED 0x0000000D /* Operation aborted due to an error. */
+#define ADSP_EPREEMPTED 0x0000000E /* Operation preempted by higher priority*/
+#define ADSP_ECONTINUE 0x0000000F /* Operation requests intervention
+ to complete. */
+#define ADSP_EIMMEDIATE 0x00000010 /* Operation requests immediate
+ intervention to complete. */
+#define ADSP_ENOTIMPL 0x00000011 /* Operation is not implemented. */
+#define ADSP_ENEEDMORE 0x00000012 /* Operation needs more data or resources*/
+
+/* SRS TRUMEDIA start */
+#define SRS_ID_GLOBAL 0x00000001
+#define SRS_ID_WOWHD 0x00000002
+#define SRS_ID_CSHP 0x00000003
+#define SRS_ID_HPF 0x00000004
+#define SRS_ID_PEQ 0x00000005
+#define SRS_ID_HL 0x00000006
+
+#define SRS_CMD_UPLOAD 0x7FFF0000
+#define SRS_PARAM_INDEX_MASK 0x80000000
+#define SRS_PARAM_OFFSET_MASK 0x3FFF0000
+#define SRS_PARAM_VALUE_MASK 0x0000FFFF
+
+struct srs_trumedia_params_GLOBAL {
+ uint8_t v1;
+ uint8_t v2;
+ uint8_t v3;
+ uint8_t v4;
+ uint8_t v5;
+ uint8_t v6;
+ uint8_t v7;
+ uint8_t v8;
+} __packed;
+
+struct srs_trumedia_params_WOWHD {
+ uint32_t v1;
+ uint16_t v2;
+ uint16_t v3;
+ uint16_t v4;
+ uint16_t v5;
+ uint16_t v6;
+ uint16_t v7;
+ uint16_t v8;
+ uint16_t v____A1;
+ uint32_t v9;
+ uint16_t v10;
+ uint16_t v11;
+ uint32_t v12[16];
+} __packed;
+
+struct srs_trumedia_params_CSHP {
+ uint32_t v1;
+ uint16_t v2;
+ uint16_t v3;
+ uint16_t v4;
+ uint16_t v5;
+ uint16_t v6;
+ uint16_t v____A1;
+ uint32_t v7;
+ uint16_t v8;
+ uint16_t v9;
+ uint32_t v10[16];
+} __packed;
+
+struct srs_trumedia_params_HPF {
+ uint32_t v1;
+ uint32_t v2[26];
+} __packed;
+
+struct srs_trumedia_params_PEQ {
+ uint32_t v1;
+ uint16_t v2;
+ uint16_t v3;
+ uint16_t v4;
+ uint16_t v____A1;
+ uint32_t v5[26];
+ uint32_t v6[26];
+} __packed;
+
+struct srs_trumedia_params_HL {
+ uint16_t v1;
+ uint16_t v2;
+ uint16_t v3;
+ uint16_t v____A1;
+ int32_t v4;
+ uint32_t v5;
+ uint16_t v6;
+ uint16_t v____A2;
+ uint32_t v7;
+} __packed;
+
+struct srs_trumedia_params {
+ struct srs_trumedia_params_GLOBAL global;
+ struct srs_trumedia_params_WOWHD wowhd;
+ struct srs_trumedia_params_CSHP cshp;
+ struct srs_trumedia_params_HPF hpf;
+ struct srs_trumedia_params_PEQ peq;
+ struct srs_trumedia_params_HL hl;
+} __packed;
+int srs_trumedia_open(int port_id, int srs_tech_id, void *srs_params);
+/* SRS TruMedia end */
+
+/* SRS Studio Sound 3D start */
+#define SRS_ID_SS3D_GLOBAL 0x00000001
+#define SRS_ID_SS3D_CTRL 0x00000002
+#define SRS_ID_SS3D_FILTER 0x00000003
+
+struct srs_SS3D_params_GLOBAL {
+ uint8_t v1;
+ uint8_t v2;
+ uint8_t v3;
+ uint8_t v4;
+ uint8_t v5;
+ uint8_t v6;
+ uint8_t v7;
+ uint8_t v8;
+} __packed;
+
+struct srs_SS3D_ctrl_params {
+ uint8_t v[236];
+} __packed;
+
+struct srs_SS3D_filter_params {
+ uint8_t v[28 + 2752];
+} __packed;
+
+struct srs_SS3D_params {
+ struct srs_SS3D_params_GLOBAL global;
+ struct srs_SS3D_ctrl_params ss3d;
+ struct srs_SS3D_filter_params ss3d_f;
+} __packed;
+
+int srs_ss3d_open(int port_id, int srs_tech_id, void *srs_params);
+/* SRS Studio Sound 3D end */
+#endif /*_APR_AUDIO_H_*/
diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h
new file mode 100644
index 0000000000000..15fe70f7caad1
--- /dev/null
+++ b/include/sound/hdmi-codec.h
@@ -0,0 +1,104 @@
+/*
+ * hdmi-codec.h - HDMI Codec driver API
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Author: Jyri Sarha <jsarha@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __HDMI_CODEC_H__
+#define __HDMI_CODEC_H__
+
+#include <linux/hdmi.h>
+#include <drm/drm_edid.h>
+#include <sound/asoundef.h>
+#include <uapi/sound/asound.h>
+
+/*
+ * Protocol between ASoC cpu-dai and HDMI-encoder
+ */
+struct hdmi_codec_daifmt {
+ enum {
+ HDMI_I2S,
+ HDMI_RIGHT_J,
+ HDMI_LEFT_J,
+ HDMI_DSP_A,
+ HDMI_DSP_B,
+ HDMI_AC97,
+ HDMI_SPDIF,
+ } fmt;
+ int bit_clk_inv:1;
+ int frame_clk_inv:1;
+ int bit_clk_master:1;
+ int frame_clk_master:1;
+};
+
+/*
+ * HDMI audio parameters
+ */
+struct hdmi_codec_params {
+ struct hdmi_audio_infoframe cea;
+ struct snd_aes_iec958 iec;
+ int sample_rate;
+ int sample_width;
+ int channels;
+};
+
+struct hdmi_codec_ops {
+ /*
+ * Called when ASoC starts an audio stream setup. The call
+	 * provides an audio abort callback for stopping an ongoing
+ * stream from video side driver if the HDMI audio becomes
+ * unavailable.
+ * Optional
+ */
+ int (*audio_startup)(struct device *dev,
+ void (*abort_cb)(struct device *dev));
+
+ /*
+ * Configures HDMI-encoder for audio stream.
+ * Mandatory
+ */
+ int (*hw_params)(struct device *dev,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms);
+
+ /*
+ * Shuts down the audio stream.
+ * Mandatory
+ */
+ void (*audio_shutdown)(struct device *dev);
+
+ /*
+ * Mute/unmute HDMI audio stream.
+ * Optional
+ */
+ int (*digital_mute)(struct device *dev, bool enable);
+
+ /*
+ * Provides EDID-Like-Data from connected HDMI device.
+ * Optional
+ */
+ int (*get_eld)(struct device *dev, uint8_t *buf, size_t len);
+};
+
+/* HDMI codec initialization data */
+struct hdmi_codec_pdata {
+ const struct hdmi_codec_ops *ops;
+ uint i2s:1;
+ uint spdif:1;
+ int max_i2s_channels;
+};
+
+#define HDMI_CODEC_DRV_NAME "hdmi-audio-codec"
+
+#endif /* __HDMI_CODEC_H__ */
diff --git a/include/sound/msm-dai-q6.h b/include/sound/msm-dai-q6.h
new file mode 100644
index 0000000000000..a39d3dc08d008
--- /dev/null
+++ b/include/sound/msm-dai-q6.h
@@ -0,0 +1,45 @@
+/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_DAI_Q6_PDATA_H__
+
+#define __MSM_DAI_Q6_PDATA_H__
+
+#define MSM_MI2S_SD0 (1 << 0)
+#define MSM_MI2S_SD1 (1 << 1)
+#define MSM_MI2S_SD2 (1 << 2)
+#define MSM_MI2S_SD3 (1 << 3)
+#define MSM_MI2S_CAP_RX 0
+#define MSM_MI2S_CAP_TX 1
+
+struct msm_dai_auxpcm_config {
+ u16 mode;
+ u16 sync;
+ u16 frame;
+ u16 quant;
+ u16 slot;
+ u16 data;
+ int pcm_clk_rate;
+};
+
+struct msm_mi2s_pdata {
+ u16 rx_sd_lines;
+ u16 tx_sd_lines;
+};
+
+struct msm_dai_auxpcm_pdata {
+ const char *clk;
+ struct msm_dai_auxpcm_config mode_8k;
+ struct msm_dai_auxpcm_config mode_16k;
+};
+
+#endif
diff --git a/include/sound/msm_hdmi_audio.h b/include/sound/msm_hdmi_audio.h
new file mode 100644
index 0000000000000..8ada49f82234e
--- /dev/null
+++ b/include/sound/msm_hdmi_audio.h
@@ -0,0 +1,54 @@
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_HDMI_AUDIO_H
+#define __MSM_HDMI_AUDIO_H
+
+/* Supported HDMI Audio channels */
+#define MSM_HDMI_AUDIO_CHANNEL_2 0
+#define MSM_HDMI_AUDIO_CHANNEL_4 1
+#define MSM_HDMI_AUDIO_CHANNEL_6 2
+#define MSM_HDMI_AUDIO_CHANNEL_8 3
+
+#define TRUE 1
+#define FALSE 0
+
+enum hdmi_supported_sample_rates {
+ HDMI_SAMPLE_RATE_32KHZ,
+ HDMI_SAMPLE_RATE_44_1KHZ,
+ HDMI_SAMPLE_RATE_48KHZ,
+ HDMI_SAMPLE_RATE_88_2KHZ,
+ HDMI_SAMPLE_RATE_96KHZ,
+ HDMI_SAMPLE_RATE_176_4KHZ,
+ HDMI_SAMPLE_RATE_192KHZ
+};
+
+int hdmi_audio_enable(bool on, u32 fifo_water_mark);
+int hdmi_audio_packet_enable(bool on);
+int hdmi_msm_audio_get_sample_rate(void);
+
+#if defined(CONFIG_FB_MSM_HDMI_MSM_PANEL) || defined(CONFIG_DRM_MSM)
+int hdmi_msm_audio_info_setup(bool enabled, u32 num_of_channels,
+ u32 channel_allocation, u32 level_shift, bool down_mix);
+void hdmi_msm_audio_sample_rate_reset(int rate);
+#else
+static inline int hdmi_msm_audio_info_setup(bool enabled,
+ u32 num_of_channels, u32 channel_allocation, u32 level_shift,
+ bool down_mix)
+{
+ return 0;
+}
+static inline void hdmi_msm_audio_sample_rate_reset(int rate)
+{
+}
+#endif
+#endif /* __MSM_HDMI_AUDIO_H*/
diff --git a/include/sound/pcm_iec958.h b/include/sound/pcm_iec958.h
index 0eed397aca8e8..36f023acb201e 100644
--- a/include/sound/pcm_iec958.h
+++ b/include/sound/pcm_iec958.h
@@ -6,4 +6,6 @@
int snd_pcm_create_iec958_consumer(struct snd_pcm_runtime *runtime, u8 *cs,
size_t len);
+int snd_pcm_create_iec958_consumer_hw_params(struct snd_pcm_hw_params *params,
+ u8 *cs, size_t len);
#endif
diff --git a/include/sound/q6adm.h b/include/sound/q6adm.h
new file mode 100644
index 0000000000000..cb3273f5119b0
--- /dev/null
+++ b/include/sound/q6adm.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __Q6_ADM_H__
+#define __Q6_ADM_H__
+#include <sound/q6afe.h>
+
+#define ADM_PATH_PLAYBACK 0x1
+#define ADM_PATH_LIVE_REC 0x2
+#define ADM_PATH_NONLIVE_REC 0x3
+
+/* multiple copp per stream. */
+struct route_payload {
+ unsigned int copp_ids[AFE_MAX_PORTS];
+ unsigned short num_copps;
+ unsigned int session_id;
+};
+
+int adm_open(int port, int path, int rate, int mode, int topology);
+
+int adm_multi_ch_copp_open(int port, int path, int rate, int mode,
+ int topology, int perfmode);
+
+int adm_memory_map_regions(uint32_t *buf_add, uint32_t mempool_id,
+ uint32_t *bufsz, uint32_t bufcnt);
+
+int adm_memory_unmap_regions(uint32_t *buf_add, uint32_t *bufsz,
+ uint32_t bufcnt);
+
+int adm_close(int port);
+
+int adm_matrix_map(int session_id, int path, int num_copps,
+ unsigned int *port_id, int copp_id);
+
+int adm_connect_afe_port(int mode, int session_id, int port_id);
+int adm_disconnect_afe_port(int mode, int session_id, int port_id);
+
+void adm_ec_ref_rx_id(int port_id);
+
+int adm_get_copp_id(int port_id);
+
+#endif /* __Q6_ADM_H__ */
diff --git a/include/sound/q6afe.h b/include/sound/q6afe.h
new file mode 100644
index 0000000000000..1b7a79093c1b9
--- /dev/null
+++ b/include/sound/q6afe.h
@@ -0,0 +1,110 @@
+/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __Q6AFE_H__
+#define __Q6AFE_H__
+#include <sound/apr_audio.h>
+
+#define MSM_AFE_MONO 0
+#define MSM_AFE_MONO_RIGHT 1
+#define MSM_AFE_MONO_LEFT 2
+#define MSM_AFE_STEREO 3
+#define MSM_AFE_4CHANNELS 4
+#define MSM_AFE_6CHANNELS 6
+#define MSM_AFE_8CHANNELS 8
+
+#define MSM_AFE_I2S_FORMAT_LPCM 0
+#define MSM_AFE_I2S_FORMAT_COMPR 1
+#define MSM_AFE_I2S_FORMAT_IEC60958_LPCM 2
+#define MSM_AFE_I2S_FORMAT_IEC60958_COMPR 3
+
+#define MSM_AFE_PORT_TYPE_RX 0
+#define MSM_AFE_PORT_TYPE_TX 1
+
+#define RT_PROXY_DAI_001_RX 0xE0
+#define RT_PROXY_DAI_001_TX 0xF0
+#define RT_PROXY_DAI_002_RX 0xF1
+#define RT_PROXY_DAI_002_TX 0xE1
+#define VIRTUAL_ID_TO_PORTID(val) ((val & 0xF) | 0x2000)
+
+enum {
+ IDX_PRIMARY_I2S_RX = 0,
+ IDX_PRIMARY_I2S_TX = 1,
+ IDX_PCM_RX = 2,
+ IDX_PCM_TX = 3,
+ IDX_SECONDARY_I2S_RX = 4,
+ IDX_SECONDARY_I2S_TX = 5,
+ IDX_MI2S_RX = 6,
+ IDX_MI2S_TX = 7,
+ IDX_HDMI_RX = 8,
+ IDX_RSVD_2 = 9,
+ IDX_RSVD_3 = 10,
+ IDX_DIGI_MIC_TX = 11,
+ IDX_VOICE_RECORD_RX = 12,
+ IDX_VOICE_RECORD_TX = 13,
+ IDX_VOICE_PLAYBACK_TX = 14,
+ IDX_SLIMBUS_0_RX = 15,
+ IDX_SLIMBUS_0_TX = 16,
+ IDX_SLIMBUS_1_RX = 17,
+ IDX_SLIMBUS_1_TX = 18,
+ IDX_SLIMBUS_2_RX = 19,
+ IDX_SLIMBUS_2_TX = 20,
+ IDX_SLIMBUS_3_RX = 21,
+ IDX_SLIMBUS_3_TX = 22,
+ IDX_SLIMBUS_4_RX = 23,
+ IDX_SLIMBUS_4_TX = 24,
+ IDX_INT_BT_SCO_RX = 25,
+ IDX_INT_BT_SCO_TX = 26,
+ IDX_INT_BT_A2DP_RX = 27,
+ IDX_INT_FM_RX = 28,
+ IDX_INT_FM_TX = 29,
+ IDX_RT_PROXY_PORT_001_RX = 30,
+ IDX_RT_PROXY_PORT_001_TX = 31,
+ IDX_SECONDARY_PCM_RX = 32,
+ IDX_SECONDARY_PCM_TX = 33,
+ AFE_MAX_PORTS
+};
+
+int afe_open(u16 port_id, union afe_port_config *afe_config, int rate);
+int afe_close(int port_id);
+int afe_loopback(u16 enable, u16 rx_port, u16 tx_port);
+int afe_loopback_cfg(u16 enable, u16 dst_port, u16 src_port, u16 mode);
+int afe_sidetone(u16 tx_port_id, u16 rx_port_id, u16 enable, uint16_t gain);
+int afe_loopback_gain(u16 port_id, u16 volume);
+int afe_validate_port(u16 port_id);
+int afe_get_port_index(u16 port_id);
+int afe_start_pseudo_port(u16 port_id);
+int afe_stop_pseudo_port(u16 port_id);
+int afe_cmd_memory_map(u32 dma_addr_p, u32 dma_buf_sz);
+int afe_cmd_memory_map_nowait(u32 dma_addr_p, u32 dma_buf_sz);
+int afe_cmd_memory_unmap(u32 dma_addr_p);
+int afe_cmd_memory_unmap_nowait(u32 dma_addr_p);
+
+int afe_register_get_events(u16 port_id,
+ void (*cb) (uint32_t opcode,
+ uint32_t token, uint32_t *payload, void *priv),
+ void *private_data);
+int afe_unregister_get_events(u16 port_id);
+int afe_rt_proxy_port_write(u32 buf_addr_p, int bytes);
+int afe_rt_proxy_port_read(u32 buf_addr_p, int bytes);
+int afe_port_start(u16 port_id, union afe_port_config *afe_config, u32 rate);
+int afe_port_stop_nowait(int port_id);
+int afe_apply_gain(u16 port_id, u16 gain);
+int afe_q6_interface_prepare(void);
+int afe_get_port_type(u16 port_id);
+/* if port_id is virtual, convert to physical..
+ * if port_id is already physical, return physical
+ */
+int afe_convert_virtual_to_portid(u16 port_id);
+
+int afe_pseudo_port_start_nowait(u16 port_id);
+int afe_pseudo_port_stop_nowait(u16 port_id);
+#endif /* __Q6AFE_H__ */
diff --git a/include/sound/q6asm.h b/include/sound/q6asm.h
new file mode 100644
index 0000000000000..139b501a35688
--- /dev/null
+++ b/include/sound/q6asm.h
@@ -0,0 +1,334 @@
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __Q6_ASM_H__
+#define __Q6_ASM_H__
+
+#include <sound/qdsp6v2/apr.h>
+#include <sound/apr_audio.h>
+
+#define IN 0x000
+#define OUT 0x001
+#define CH_MODE_MONO 0x001
+#define CH_MODE_STEREO 0x002
+
+#define FORMAT_LINEAR_PCM 0x0000
+#define FORMAT_DTMF 0x0001
+#define FORMAT_ADPCM 0x0002
+#define FORMAT_YADPCM 0x0003
+#define FORMAT_MP3 0x0004
+#define FORMAT_MPEG4_AAC 0x0005
+#define FORMAT_AMRNB 0x0006
+#define FORMAT_AMRWB 0x0007
+#define FORMAT_V13K 0x0008
+#define FORMAT_EVRC 0x0009
+#define FORMAT_EVRCB 0x000a
+#define FORMAT_EVRCWB 0x000b
+#define FORMAT_MIDI 0x000c
+#define FORMAT_SBC 0x000d
+#define FORMAT_WMA_V10PRO 0x000e
+#define FORMAT_WMA_V9 0x000f
+#define FORMAT_AMR_WB_PLUS 0x0010
+#define FORMAT_MPEG4_MULTI_AAC 0x0011
+#define FORMAT_MULTI_CHANNEL_LINEAR_PCM 0x0012
+#define FORMAT_AC3 0x0013
+#define FORMAT_DTS 0x0014
+#define FORMAT_EAC3 0x0015
+#define FORMAT_ATRAC 0x0016
+#define FORMAT_MAT 0x0017
+#define FORMAT_AAC 0x0018
+#define FORMAT_DTS_LBR 0x0019
+
+#define ENCDEC_SBCBITRATE 0x0001
+#define ENCDEC_IMMEDIATE_DECODE 0x0002
+#define ENCDEC_CFG_BLK 0x0003
+
+#define CMD_PAUSE 0x0001
+#define CMD_FLUSH 0x0002
+#define CMD_EOS 0x0003
+#define CMD_CLOSE 0x0004
+#define CMD_OUT_FLUSH 0x0005
+
+/* bit 0:1 represents priority of stream */
+#define STREAM_PRIORITY_NORMAL 0x0000
+#define STREAM_PRIORITY_LOW 0x0001
+#define STREAM_PRIORITY_HIGH 0x0002
+
+/* bit 4 represents META enable of encoded data buffer */
+#define BUFFER_META_ENABLE 0x0010
+
+/* Enable Sample_Rate/Channel_Mode notification event from Decoder */
+#define SR_CM_NOTIFY_ENABLE 0x0004
+
+#define TUN_WRITE_IO_MODE 0x0008 /* tunnel read write mode */
+#define TUN_READ_IO_MODE 0x0004 /* tunnel read write mode */
+#define ASYNC_IO_MODE 0x0002
+#define SYNC_IO_MODE 0x0001
+#define NO_TIMESTAMP 0xFF00
+#define SET_TIMESTAMP 0x0000
+
+#define SOFT_PAUSE_ENABLE 1
+#define SOFT_PAUSE_DISABLE 0
+
+#define SESSION_MAX 0x08
+
+#define SOFT_PAUSE_PERIOD 30 /* ramp up/down for 30ms */
+#define SOFT_PAUSE_STEP_LINEAR 0 /* Step value 0ms or 0us */
+#define SOFT_PAUSE_STEP 0 /* Step value 0ms or 0us */
+enum {
+ SOFT_PAUSE_CURVE_LINEAR = 0,
+ SOFT_PAUSE_CURVE_EXP,
+ SOFT_PAUSE_CURVE_LOG,
+};
+
+#define SOFT_VOLUME_PERIOD 30 /* ramp up/down for 30ms */
+#define SOFT_VOLUME_STEP_LINEAR 0 /* Step value 0ms or 0us */
+#define SOFT_VOLUME_STEP 0 /* Step value 0ms or 0us */
+enum {
+ SOFT_VOLUME_CURVE_LINEAR = 0,
+ SOFT_VOLUME_CURVE_EXP,
+ SOFT_VOLUME_CURVE_LOG,
+};
+
+typedef void (*app_cb)(uint32_t opcode, uint32_t token,
+ uint32_t *payload, void *priv);
+
+struct audio_buffer {
+ dma_addr_t phys;
+ void *data;
+ uint32_t used;
+ uint32_t size;/* size of buffer */
+ uint32_t actual_size; /* actual number of bytes read by DSP */
+ void *mem_buffer;
+};
+
+struct audio_aio_write_param {
+ unsigned long paddr;
+ uint32_t uid;
+ uint32_t len;
+ uint32_t msw_ts;
+ uint32_t lsw_ts;
+ uint32_t flags;
+};
+
+struct audio_aio_read_param {
+ unsigned long paddr;
+ uint32_t len;
+ uint32_t uid;
+};
+
+struct audio_port_data {
+ struct audio_buffer *buf;
+ uint32_t max_buf_cnt;
+ uint32_t dsp_buf;
+ uint32_t cpu_buf;
+ /* read or write locks */
+ struct mutex lock;
+ spinlock_t dsp_lock;
+};
+
+struct audio_client {
+ int session;
+ /* idx:1 out port, 0: in port*/
+ struct audio_port_data port[2];
+
+ struct apr_svc *apr;
+ struct mutex cmd_lock;
+
+ atomic_t cmd_state;
+ atomic_t cmd_close_state;
+ atomic_t time_flag;
+ atomic_t nowait_cmd_cnt;
+ wait_queue_head_t cmd_wait;
+ wait_queue_head_t time_wait;
+
+ app_cb cb;
+ void *priv;
+ uint32_t io_mode;
+ uint64_t time_stamp;
+ atomic_t cmd_response;
+ bool perf_mode;
+};
+
+void q6asm_audio_client_free(struct audio_client *ac);
+
+struct audio_client *q6asm_audio_client_alloc(app_cb cb, void *priv);
+
+struct audio_client *q6asm_get_audio_client(int session_id);
+
+int q6asm_audio_client_buf_alloc(unsigned int dir/* 1:Out,0:In */,
+ struct audio_client *ac,
+ unsigned int bufsz,
+ unsigned int bufcnt);
+int q6asm_audio_client_buf_alloc_contiguous(unsigned int dir
+ /* 1:Out,0:In */,
+ struct audio_client *ac,
+ unsigned int bufsz,
+ unsigned int bufcnt);
+
+int q6asm_audio_client_buf_free_contiguous(unsigned int dir,
+ struct audio_client *ac);
+
+int q6asm_open_read(struct audio_client *ac, uint32_t format);
+int q6asm_open_read_v2_1(struct audio_client *ac, uint32_t format);
+
+int q6asm_open_read_compressed(struct audio_client *ac,
+ uint32_t frames_per_buffer, uint32_t meta_data_mode);
+
+int q6asm_open_write(struct audio_client *ac, uint32_t format);
+
+int q6asm_open_write_compressed(struct audio_client *ac, uint32_t format);
+
+int q6asm_open_read_write(struct audio_client *ac,
+ uint32_t rd_format,
+ uint32_t wr_format);
+
+int q6asm_open_loopack(struct audio_client *ac);
+
+int q6asm_write(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
+ uint32_t lsw_ts, uint32_t flags);
+int q6asm_write_nolock(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
+ uint32_t lsw_ts, uint32_t flags);
+
+int q6asm_async_write(struct audio_client *ac,
+ struct audio_aio_write_param *param);
+
+int q6asm_async_read(struct audio_client *ac,
+ struct audio_aio_read_param *param);
+
+int q6asm_async_read_compressed(struct audio_client *ac,
+ struct audio_aio_read_param *param);
+
+int q6asm_read(struct audio_client *ac);
+int q6asm_read_nolock(struct audio_client *ac);
+
+int q6asm_memory_map(struct audio_client *ac, uint32_t buf_add,
+ int dir, uint32_t bufsz, uint32_t bufcnt);
+
+int q6asm_memory_unmap(struct audio_client *ac, uint32_t buf_add,
+ int dir);
+
+int q6asm_run(struct audio_client *ac, uint32_t flags,
+ uint32_t msw_ts, uint32_t lsw_ts);
+
+int q6asm_run_nowait(struct audio_client *ac, uint32_t flags,
+ uint32_t msw_ts, uint32_t lsw_ts);
+
+int q6asm_reg_tx_overflow(struct audio_client *ac, uint16_t enable);
+
+int q6asm_cmd(struct audio_client *ac, int cmd);
+
+int q6asm_cmd_nowait(struct audio_client *ac, int cmd);
+
+void *q6asm_is_cpu_buf_avail(int dir, struct audio_client *ac,
+ uint32_t *size, uint32_t *idx);
+
+void *q6asm_is_cpu_buf_avail_nolock(int dir, struct audio_client *ac,
+ uint32_t *size, uint32_t *idx);
+
+int q6asm_is_dsp_buf_avail(int dir, struct audio_client *ac);
+
+/* File format specific configurations to be added below */
+
+int q6asm_enc_cfg_blk_aac(struct audio_client *ac,
+ uint32_t frames_per_buf,
+ uint32_t sample_rate, uint32_t channels,
+ uint32_t bit_rate,
+ uint32_t mode, uint32_t format);
+
+int q6asm_enc_cfg_blk_pcm(struct audio_client *ac,
+ uint32_t rate, uint32_t channels);
+
+int q6asm_enc_cfg_blk_pcm_native(struct audio_client *ac,
+ uint32_t rate, uint32_t channels);
+
+int q6asm_enc_cfg_blk_multi_ch_pcm(struct audio_client *ac,
+ uint32_t rate, uint32_t channels);
+
+int q6asm_enable_sbrps(struct audio_client *ac,
+ uint32_t sbr_ps);
+
+int q6asm_cfg_dual_mono_aac(struct audio_client *ac,
+ uint16_t sce_left, uint16_t sce_right);
+
+int q6asm_cfg_aac_sel_mix_coef(struct audio_client *ac, uint32_t mix_coeff);
+
+int q6asm_set_encdec_chan_map(struct audio_client *ac,
+ uint32_t num_channels);
+
+int q6asm_enc_cfg_blk_qcelp(struct audio_client *ac, uint32_t frames_per_buf,
+ uint16_t min_rate, uint16_t max_rate,
+ uint16_t reduced_rate_level, uint16_t rate_modulation_cmd);
+
+int q6asm_enc_cfg_blk_evrc(struct audio_client *ac, uint32_t frames_per_buf,
+ uint16_t min_rate, uint16_t max_rate,
+ uint16_t rate_modulation_cmd);
+
+int q6asm_enc_cfg_blk_amrnb(struct audio_client *ac, uint32_t frames_per_buf,
+ uint16_t band_mode, uint16_t dtx_enable);
+
+int q6asm_enc_cfg_blk_amrwb(struct audio_client *ac, uint32_t frames_per_buf,
+ uint16_t band_mode, uint16_t dtx_enable);
+
+int q6asm_media_format_block_pcm(struct audio_client *ac,
+ uint32_t rate, uint32_t channels);
+
+int q6asm_media_format_block_multi_ch_pcm(struct audio_client *ac,
+ uint32_t rate, uint32_t channels);
+
+int q6asm_media_format_block_aac(struct audio_client *ac,
+ struct asm_aac_cfg *cfg);
+
+int q6asm_media_format_block_amrwbplus(struct audio_client *ac,
+ struct asm_amrwbplus_cfg *cfg);
+
+int q6asm_media_format_block_multi_aac(struct audio_client *ac,
+ struct asm_aac_cfg *cfg);
+
+int q6asm_media_format_block_wma(struct audio_client *ac,
+ void *cfg);
+
+int q6asm_media_format_block_wmapro(struct audio_client *ac,
+ void *cfg);
+
+/* PP specific */
+int q6asm_equalizer(struct audio_client *ac, void *eq);
+
+/* Send Volume Command */
+int q6asm_set_volume(struct audio_client *ac, int volume);
+
+/* Set SoftPause Params */
+int q6asm_set_softpause(struct audio_client *ac,
+ struct asm_softpause_params *param);
+
+/* Set Softvolume Params */
+int q6asm_set_softvolume(struct audio_client *ac,
+ struct asm_softvolume_params *param);
+
+/* Send left-right channel gain */
+int q6asm_set_lrgain(struct audio_client *ac, int left_gain, int right_gain);
+
+/* Enable Mute/unmute flag */
+int q6asm_set_mute(struct audio_client *ac, int muteflag);
+
+int q6asm_get_session_time(struct audio_client *ac, uint64_t *tstamp);
+
+/* Client can set the IO mode to either AIO/SIO mode */
+int q6asm_set_io_mode(struct audio_client *ac, uint32_t mode);
+
+/* Get Service ID for APR communication */
+int q6asm_get_apr_service_id(int session_id);
+
+/* Send a common format block command that carries
+ * no codec-specific payload. */
+int q6asm_media_format_block(struct audio_client *ac, uint32_t format);
+
+#endif /* __Q6_ASM_H__ */
diff --git a/include/sound/qdsp6v2/apr.h b/include/sound/qdsp6v2/apr.h
new file mode 100644
index 0000000000000..9473989caff39
--- /dev/null
+++ b/include/sound/qdsp6v2/apr.h
@@ -0,0 +1,169 @@
+/* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __APR_H_
+#define __APR_H_
+
+#include <linux/mutex.h>
+
+enum apr_subsys_state {
+ APR_SUBSYS_DOWN,
+ APR_SUBSYS_UP,
+ APR_SUBSYS_LOADED,
+};
+
+struct apr_q6 {
+// void *pil;
+ struct rproc *rproc;
+ atomic_t q6_state;
+ atomic_t modem_state;
+ struct mutex lock;
+};
+
+struct apr_hdr {
+ uint16_t hdr_field;
+ uint16_t pkt_size;
+ uint8_t src_svc;
+ uint8_t src_domain;
+ uint16_t src_port;
+ uint8_t dest_svc;
+ uint8_t dest_domain;
+ uint16_t dest_port;
+ uint32_t token;
+ uint32_t opcode;
+};
+
+#define APR_HDR_LEN(hdr_len) ((hdr_len)/4)
+#define APR_PKT_SIZE(hdr_len, payload_len) ((hdr_len) + (payload_len))
+#define APR_HDR_FIELD(msg_type, hdr_len, ver)\
+ (((msg_type & 0x3) << 8) | ((hdr_len & 0xF) << 4) | (ver & 0xF))
+
+#define APR_HDR_SIZE sizeof(struct apr_hdr)
+
+/* Version */
+#define APR_PKT_VER 0x0
+
+/* Command and Response Types */
+#define APR_MSG_TYPE_EVENT 0x0
+#define APR_MSG_TYPE_CMD_RSP 0x1
+#define APR_MSG_TYPE_SEQ_CMD 0x2
+#define APR_MSG_TYPE_NSEQ_CMD 0x3
+#define APR_MSG_TYPE_MAX 0x04
+
+/* APR Basic Response Message */
+#define APR_BASIC_RSP_RESULT 0x000110E8
+#define APR_RSP_ACCEPTED 0x000100BE
+
+/* Domain IDs */
+#define APR_DOMAIN_SIM 0x1
+#define APR_DOMAIN_PC 0x2
+#define APR_DOMAIN_MODEM 0x3
+#define APR_DOMAIN_ADSP 0x4
+#define APR_DOMAIN_APPS 0x5
+#define APR_DOMAIN_MAX 0x6
+
+/* ADSP service IDs */
+#define APR_SVC_TEST_CLIENT 0x2
+#define APR_SVC_ADSP_CORE 0x3
+#define APR_SVC_AFE 0x4
+#define APR_SVC_VSM 0x5
+#define APR_SVC_VPM 0x6
+#define APR_SVC_ASM 0x7
+#define APR_SVC_ADM 0x8
+#define APR_SVC_ADSP_MVM 0x09
+#define APR_SVC_ADSP_CVS 0x0A
+#define APR_SVC_ADSP_CVP 0x0B
+#define APR_SVC_USM 0x0C
+#define APR_SVC_MAX 0x0D
+
+/* Modem Service IDs */
+#define APR_SVC_MVS 0x3
+#define APR_SVC_MVM 0x4
+#define APR_SVC_CVS 0x5
+#define APR_SVC_CVP 0x6
+#define APR_SVC_SRD 0x7
+
+/* APR Port IDs */
+#define APR_MAX_PORTS 0x40
+
+#define APR_NAME_MAX 0x40
+
+#define RESET_EVENTS 0xFFFFFFFF
+
+#define LPASS_RESTART_EVENT 0x1000
+#define LPASS_RESTART_READY 0x1001
+
+struct apr_client_data {
+ uint16_t reset_event;
+ uint16_t reset_proc;
+ uint16_t payload_size;
+ uint16_t hdr_len;
+ uint16_t msg_type;
+ uint16_t src;
+ uint16_t dest_svc;
+ uint16_t src_port;
+ uint16_t dest_port;
+ uint32_t token;
+ uint32_t opcode;
+ void *payload;
+};
+
+typedef int32_t (*apr_fn)(struct apr_client_data *data, void *priv);
+
+struct apr_svc {
+ uint16_t id;
+ uint16_t dest_id;
+ uint16_t client_id;
+ uint8_t rvd;
+ uint8_t port_cnt;
+ uint8_t svc_cnt;
+ uint8_t need_reset;
+ apr_fn port_fn[APR_MAX_PORTS];
+ void *port_priv[APR_MAX_PORTS];
+ apr_fn fn;
+ void *priv;
+ struct mutex m_lock;
+ spinlock_t w_lock;
+};
+
+struct apr_client {
+ uint8_t id;
+ uint8_t svc_cnt;
+ uint8_t rvd;
+ struct mutex m_lock;
+ struct apr_svc_ch_dev *handle;
+ struct apr_svc svc[APR_SVC_MAX];
+};
+
+int apr_load_adsp_image(void);
+struct apr_client *apr_get_client(int dest_id, int client_id);
+int apr_wait_for_device_up(int dest_id);
+int apr_get_svc(const char *svc_name, int dest_id, int *client_id,
+ int *svc_idx, int *svc_id);
+void apr_cb_func(void *buf, int len, void *priv);
+struct apr_svc *apr_register(char *dest, char *svc_name, apr_fn svc_fn,
+ uint32_t src_port, void *priv);
+inline int apr_fill_hdr(void *handle, uint32_t *buf, uint16_t src_port,
+ uint16_t msg_type, uint16_t dest_port,
+ uint32_t token, uint32_t opcode, uint16_t len);
+
+int apr_send_pkt(void *handle, uint32_t *buf);
+int apr_deregister(void *handle);
+void change_q6_state(int state);
+void q6audio_dsp_not_responding(void);
+void apr_reset(void *handle);
+enum apr_subsys_state apr_get_modem_state(void);
+void apr_set_modem_state(enum apr_subsys_state state);
+enum apr_subsys_state apr_get_q6_state(void);
+int apr_set_q6_state(enum apr_subsys_state state);
+void apr_set_subsys_state(void);
+#endif
diff --git a/include/sound/qdsp6v2/apr_tal.h b/include/sound/qdsp6v2/apr_tal.h
new file mode 100644
index 0000000000000..69170b98629ea
--- /dev/null
+++ b/include/sound/qdsp6v2/apr_tal.h
@@ -0,0 +1,52 @@
+/* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __APR_TAL_H_
+#define __APR_TAL_H_
+
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/uaccess.h>
+
+/* APR Client IDs */
+#define APR_CLIENT_AUDIO 0x0
+#define APR_CLIENT_VOICE 0x1
+#define APR_CLIENT_MAX 0x2
+
+#define APR_DL_SMD 0
+#define APR_DL_MAX 1
+
+#define APR_DEST_MODEM 0
+#define APR_DEST_QDSP6 1
+#define APR_DEST_MAX 2
+
+#define APR_MAX_BUF 8192
+
+#define APR_OPEN_TIMEOUT_MS 5000
+
+typedef void (*apr_svc_cb_fn)(void *buf, int len, void *priv);
+struct apr_svc_ch_dev *apr_tal_open(uint32_t svc, uint32_t dest,
+ uint32_t dl, apr_svc_cb_fn func, void *priv);
+int apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data, int len);
+int apr_tal_close(struct apr_svc_ch_dev *apr_ch);
+struct apr_svc_ch_dev {
+ struct qcom_smd_channel *ch;
+ spinlock_t lock;
+ apr_svc_cb_fn func;
+ char data[APR_MAX_BUF];
+ wait_queue_head_t wait;
+ void *priv;
+ wait_queue_head_t dest;
+ uint32_t dest_state;
+};
+
+#endif
diff --git a/include/sound/qdsp6v2/audio_acdb.h b/include/sound/qdsp6v2/audio_acdb.h
new file mode 100644
index 0000000000000..9af653c76e6c8
--- /dev/null
+++ b/include/sound/qdsp6v2/audio_acdb.h
@@ -0,0 +1,61 @@
+/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _AUDIO_ACDB_H
+#define _AUDIO_ACDB_H
+
+#include <linux/msm_audio_acdb.h>
+#include <sound/q6adm.h>
+enum {
+ RX_CAL,
+ TX_CAL,
+ MAX_AUDPROC_TYPES
+};
+
+struct acdb_cal_block {
+ uint32_t cal_size;
+ uint32_t cal_kvaddr;
+ uint32_t cal_paddr;
+};
+
+struct acdb_atomic_cal_block {
+ atomic_t cal_size;
+ atomic_t cal_kvaddr;
+ atomic_t cal_paddr;
+};
+
+struct acdb_cal_data {
+ uint32_t num_cal_blocks;
+ struct acdb_atomic_cal_block *cal_blocks;
+};
+
+uint32_t get_voice_rx_topology(void);
+uint32_t get_voice_tx_topology(void);
+uint32_t get_adm_rx_topology(void);
+uint32_t get_adm_tx_topology(void);
+uint32_t get_asm_topology(void);
+void get_all_voice_cal(struct acdb_cal_block *cal_block);
+void get_all_cvp_cal(struct acdb_cal_block *cal_block);
+void get_all_vocproc_cal(struct acdb_cal_block *cal_block);
+void get_all_vocstrm_cal(struct acdb_cal_block *cal_block);
+void get_all_vocvol_cal(struct acdb_cal_block *cal_block);
+void get_anc_cal(struct acdb_cal_block *cal_block);
+void get_afe_cal(int32_t path, struct acdb_cal_block *cal_block);
+void get_audproc_cal(int32_t path, struct acdb_cal_block *cal_block);
+void get_audstrm_cal(int32_t path, struct acdb_cal_block *cal_block);
+void get_audvol_cal(int32_t path, struct acdb_cal_block *cal_block);
+void get_vocproc_cal(struct acdb_cal_data *cal_data);
+void get_vocstrm_cal(struct acdb_cal_data *cal_data);
+void get_vocvol_cal(struct acdb_cal_data *cal_data);
+void get_sidetone_cal(struct sidetone_cal *cal_data);
+
+#endif
diff --git a/include/sound/qdsp6v2/audio_def.h b/include/sound/qdsp6v2/audio_def.h
new file mode 100644
index 0000000000000..35a4d5c2f7947
--- /dev/null
+++ b/include/sound/qdsp6v2/audio_def.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2009,2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _MACH_QDSP5_V2_AUDIO_DEF_H
+#define _MACH_QDSP5_V2_AUDIO_DEF_H
+
+/* Define sound device capability */
+#define SNDDEV_CAP_RX 0x1 /* RX direction */
+#define SNDDEV_CAP_TX 0x2 /* TX direction */
+#define SNDDEV_CAP_VOICE 0x4 /* Support voice call */
+#define SNDDEV_CAP_PLAYBACK 0x8 /* Support playback */
+#define SNDDEV_CAP_FM 0x10 /* Support FM radio */
+#define SNDDEV_CAP_TTY 0x20 /* Support TTY */
+#define SNDDEV_CAP_ANC 0x40 /* Support ANC */
+#define SNDDEV_CAP_LB 0x80 /* Loopback */
+#define VOC_NB_INDEX 0
+#define VOC_WB_INDEX 1
+#define VOC_RX_VOL_ARRAY_NUM 2
+
+/* Device volume types. In the current design only one of these is supported. */
+#define SNDDEV_DEV_VOL_DIGITAL 0x1 /* Codec Digital volume control */
+#define SNDDEV_DEV_VOL_ANALOG 0x2 /* Codec Analog volume control */
+
+#define SIDE_TONE_MASK 0x01
+
+#endif /* _MACH_QDSP5_V2_AUDIO_DEF_H */
diff --git a/include/sound/qdsp6v2/audio_dev_ctl.h b/include/sound/qdsp6v2/audio_dev_ctl.h
new file mode 100644
index 0000000000000..403dc6eccdafd
--- /dev/null
+++ b/include/sound/qdsp6v2/audio_dev_ctl.h
@@ -0,0 +1,221 @@
+/* Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __MACH_QDSP6_V2_SNDDEV_H
+#define __MACH_QDSP6_V2_SNDDEV_H
+#include <sound/qdsp6v2/audio_def.h>
+#include <sound/q6afe.h>
+
+#define AUDIO_DEV_CTL_MAX_DEV 64
+#define DIR_TX 2
+#define DIR_RX 1
+
+#define DEVICE_IGNORE 0xffff
+#define COPP_IGNORE 0xffffffff
+#define SESSION_IGNORE 0x0UL
+
+/* 8 concurrent sessions with Q6 are possible; session 0 is
+ reserved in the DSP */
+#define MAX_SESSIONS 0x09
+
+/* This represents Maximum bit needed for representing sessions
+ per clients, MAX_BIT_PER_CLIENT >= MAX_SESSIONS */
+#define MAX_BIT_PER_CLIENT 16
+
+#define VOICE_STATE_INVALID 0x0
+#define VOICE_STATE_INCALL 0x1
+#define VOICE_STATE_OFFCALL 0x2
+#define ONE_TO_MANY 1
+#define MANY_TO_ONE 2
+
+struct msm_snddev_info {
+ const char *name;
+ u32 capability;
+ u32 copp_id;
+ u32 acdb_id;
+ u32 dev_volume;
+ struct msm_snddev_ops {
+ int (*open)(struct msm_snddev_info *);
+ int (*close)(struct msm_snddev_info *);
+ int (*set_freq)(struct msm_snddev_info *, u32);
+ int (*enable_sidetone)(struct msm_snddev_info *, u32, uint16_t);
+ int (*set_device_volume)(struct msm_snddev_info *, u32);
+ int (*enable_anc)(struct msm_snddev_info *, u32);
+ } dev_ops;
+ u8 opened;
+ void *private_data;
+ bool state;
+ u32 sample_rate;
+ u32 channel_mode;
+ u32 set_sample_rate;
+ u64 sessions;
+ int usage_count;
+ s32 max_voc_rx_vol[VOC_RX_VOL_ARRAY_NUM]; /* [0] is for NB,[1] for WB */
+ s32 min_voc_rx_vol[VOC_RX_VOL_ARRAY_NUM];
+};
+
+struct msm_volume {
+ int volume; /* Volume parameter, in % Scale */
+ int pan;
+};
+
+extern struct msm_volume msm_vol_ctl;
+
+void msm_snddev_register(struct msm_snddev_info *);
+void msm_snddev_unregister(struct msm_snddev_info *);
+int msm_snddev_devcount(void);
+int msm_snddev_query(int dev_id);
+unsigned short msm_snddev_route_dec(int popp_id);
+unsigned short msm_snddev_route_enc(int enc_id);
+
+int msm_snddev_set_dec(int popp_id, int copp_id, int set,
+ int rate, int channel_mode);
+int msm_snddev_set_enc(int popp_id, int copp_id, int set,
+ int rate, int channel_mode);
+
+int msm_snddev_is_set(int popp_id, int copp_id);
+int msm_get_voc_route(u32 *rx_id, u32 *tx_id);
+int msm_set_voc_route(struct msm_snddev_info *dev_info, int stream_type,
+ int dev_id);
+int msm_snddev_enable_sidetone(u32 dev_id, u32 enable, uint16_t gain);
+
+int msm_set_copp_id(int session_id, int copp_id);
+
+int msm_clear_copp_id(int session_id, int copp_id);
+
+int msm_clear_session_id(int session_id);
+
+int msm_reset_all_device(void);
+
+int reset_device(void);
+
+int msm_clear_all_session(void);
+
+struct msm_snddev_info *audio_dev_ctrl_find_dev(u32 dev_id);
+
+void msm_release_voc_thread(void);
+
+int snddev_voice_set_volume(int vol, int path);
+
+struct auddev_evt_voc_devinfo {
+ u32 dev_type; /* Rx or Tx */
+ u32 acdb_dev_id; /* acdb id of device */
+ u32 dev_sample; /* Sample rate of device */
+ s32 max_rx_vol[VOC_RX_VOL_ARRAY_NUM]; /* unit is mb (millibel),
+ [0] is for NB, other for WB */
+ s32 min_rx_vol[VOC_RX_VOL_ARRAY_NUM]; /* unit is mb */
+ u32 dev_id; /* registered device id */
+ u32 dev_port_id;
+};
+
+struct auddev_evt_audcal_info {
+ u32 dev_id;
+ u32 acdb_id;
+ u32 sample_rate;
+ u32 dev_type;
+ u32 sessions;
+};
+
+union msm_vol_mute {
+ int vol;
+ bool mute;
+};
+
+struct auddev_evt_voc_mute_info {
+ u32 dev_type;
+ u32 acdb_dev_id;
+ u32 voice_session_id;
+ union msm_vol_mute dev_vm_val;
+};
+
+struct auddev_evt_freq_info {
+ u32 dev_type;
+ u32 acdb_dev_id;
+ u32 sample_rate;
+};
+
+union auddev_evt_data {
+ struct auddev_evt_voc_devinfo voc_devinfo;
+ struct auddev_evt_voc_mute_info voc_vm_info;
+ struct auddev_evt_freq_info freq_info;
+ u32 routing_id;
+ s32 session_vol;
+ s32 voice_state;
+ struct auddev_evt_audcal_info audcal_info;
+ u32 voice_session_id;
+};
+
+struct message_header {
+ uint32_t id;
+ uint32_t data_len;
+};
+
+#define AUDDEV_EVT_DEV_CHG_VOICE 0x01 /* device change event */
+#define AUDDEV_EVT_DEV_RDY 0x02 /* device ready event */
+#define AUDDEV_EVT_DEV_RLS 0x04 /* device released event */
+#define AUDDEV_EVT_REL_PENDING 0x08 /* device release pending */
+#define AUDDEV_EVT_DEVICE_VOL_MUTE_CHG 0x10 /* device volume changed */
+#define AUDDEV_EVT_START_VOICE 0x20 /* voice call start */
+#define AUDDEV_EVT_END_VOICE 0x40 /* voice call end */
+#define AUDDEV_EVT_STREAM_VOL_CHG 0x80 /* device volume changed */
+#define AUDDEV_EVT_FREQ_CHG 0x100 /* Change in freq */
+#define AUDDEV_EVT_VOICE_STATE_CHG 0x200 /* Change in voice state */
+
+#define AUDDEV_CLNT_VOC 0x1 /*Vocoder clients*/
+#define AUDDEV_CLNT_DEC 0x2 /*Decoder clients*/
+#define AUDDEV_CLNT_ENC 0x3 /* Encoder clients */
+#define AUDDEV_CLNT_AUDIOCAL 0x4 /* AudioCalibration client */
+
+#define AUDIO_DEV_CTL_MAX_LISTNER 20 /* Max Listeners Supported */
+
+struct msm_snd_evt_listner {
+ uint32_t evt_id;
+ uint32_t clnt_type;
+ uint32_t clnt_id;
+ void *private_data;
+ void (*auddev_evt_listener)(u32 evt_id,
+ union auddev_evt_data *evt_payload,
+ void *private_data);
+ struct msm_snd_evt_listner *cb_next;
+ struct msm_snd_evt_listner *cb_prev;
+};
+
+struct event_listner {
+ struct msm_snd_evt_listner *cb;
+ u32 num_listner;
+ int state; /* Call state */ /* TODO: remove this if not required */
+};
+
+extern struct event_listner event;
+int auddev_register_evt_listner(u32 evt_id, u32 clnt_type, u32 clnt_id,
+ void (*listner)(u32 evt_id,
+ union auddev_evt_data *evt_payload,
+ void *private_data),
+ void *private_data);
+int auddev_unregister_evt_listner(u32 clnt_type, u32 clnt_id);
+void mixer_post_event(u32 evt_id, u32 dev_id);
+void broadcast_event(u32 evt_id, u32 dev_id, u64 session_id);
+int auddev_cfg_tx_copp_topology(int session_id, int cfg);
+int msm_snddev_request_freq(int *freq, u32 session_id,
+ u32 capability, u32 clnt_type);
+int msm_snddev_withdraw_freq(u32 session_id,
+ u32 capability, u32 clnt_type);
+int msm_device_is_voice(int dev_id);
+int msm_get_voc_freq(int *tx_freq, int *rx_freq);
+int msm_snddev_get_enc_freq(int session_id);
+int msm_set_voice_vol(int dir, s32 volume, u32 session_id);
+int msm_set_voice_mute(int dir, int mute, u32 session_id);
+int msm_get_voice_state(void);
+int msm_enable_incall_recording(int popp_id, int rec_mode, int rate,
+ int channel_mode);
+int msm_disable_incall_recording(uint32_t popp_id, uint32_t rec_mode);
+#endif
diff --git a/include/sound/qdsp6v2/dsp_debug.h b/include/sound/qdsp6v2/dsp_debug.h
new file mode 100644
index 0000000000000..bc1cd9ec8743f
--- /dev/null
+++ b/include/sound/qdsp6v2/dsp_debug.h
@@ -0,0 +1,22 @@
+/* Copyright (c) 2010, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __DSP_DEBUG_H_
+#define __DSP_DEBUG_H_
+
+typedef int (*dsp_state_cb)(int state);
+int dsp_debug_register(dsp_state_cb ptr);
+
+#define DSP_STATE_CRASHED 0x0
+#define DSP_STATE_CRASH_DUMP_DONE 0x1
+
+#endif
diff --git a/include/sound/qdsp6v2/q6voice.h b/include/sound/qdsp6v2/q6voice.h
new file mode 100644
index 0000000000000..7165998e2efae
--- /dev/null
+++ b/include/sound/qdsp6v2/q6voice.h
@@ -0,0 +1,778 @@
+/* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __QDSP6VOICE_H__
+#define __QDSP6VOICE_H__
+
+#include <mach/qdsp6v2/apr.h>
+
+/* Device Event */
+#define DEV_CHANGE_READY 0x1
+
+#define VOICE_CALL_START 0x1
+#define VOICE_CALL_END 0
+
+#define VOICE_DEV_ENABLED 0x1
+#define VOICE_DEV_DISABLED 0
+
+#define MAX_VOC_PKT_SIZE 642
+
+#define SESSION_NAME_LEN 20
+
+struct voice_header {
+ uint32_t id;
+ uint32_t data_len;
+};
+
+struct voice_init {
+ struct voice_header hdr;
+ void *cb_handle;
+};
+
+
+/* Device information payload structure */
+
+struct device_data {
+ uint32_t dev_acdb_id;
+ uint32_t volume; /* in percentage */
+ uint32_t mute;
+ uint32_t sample;
+ uint32_t enabled;
+ uint32_t dev_id;
+ uint32_t dev_port_id;
+};
+
+enum {
+ VOC_INIT = 0,
+ VOC_RUN,
+ VOC_CHANGE,
+ VOC_RELEASE,
+};
+
+/* TO MVM commands */
+#define VSS_IMVM_CMD_CREATE_PASSIVE_CONTROL_SESSION 0x000110FF
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_IMVM_CMD_CREATE_FULL_CONTROL_SESSION 0x000110FE
+/* Create a new full control MVM session. */
+
+#define APRV2_IBASIC_CMD_DESTROY_SESSION 0x0001003C
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_IMVM_CMD_ATTACH_STREAM 0x0001123C
+/* Attach a stream to the MVM. */
+
+#define VSS_IMVM_CMD_DETACH_STREAM 0x0001123D
+/* Detach a stream from the MVM. */
+
+#define VSS_IMVM_CMD_ATTACH_VOCPROC 0x0001123E
+/* Attach a vocproc to the MVM. The MVM will symmetrically connect this vocproc
+ * to all the streams currently attached to it.
+ */
+
+#define VSS_IMVM_CMD_DETACH_VOCPROC 0x0001123F
+/* Detach a vocproc from the MVM. The MVM will symmetrically disconnect this
+ * vocproc from all the streams to which it is currently attached.
+ */
+
+#define VSS_IMVM_CMD_START_VOICE 0x00011190
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_IMVM_CMD_STOP_VOICE 0x00011192
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_ISTREAM_CMD_ATTACH_VOCPROC 0x000110F8
+/**< Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_ISTREAM_CMD_DETACH_VOCPROC 0x000110F9
+/**< Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+
+#define VSS_ISTREAM_CMD_SET_TTY_MODE 0x00011196
+/**< Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_ICOMMON_CMD_SET_NETWORK 0x0001119C
+/* Set the network type. */
+
+#define VSS_ICOMMON_CMD_SET_VOICE_TIMING 0x000111E0
+/* Set the voice timing parameters. */
+
+struct vss_imvm_cmd_create_control_session_t {
+ char name[SESSION_NAME_LEN];
+ /*
+ * A variable-sized stream name.
+ *
+ * The stream name size is the payload size minus the size of the other
+ * fields.
+ */
+} __packed;
+
+struct vss_istream_cmd_set_tty_mode_t {
+ uint32_t mode;
+ /**<
+ * TTY mode.
+ *
+ * 0 : TTY disabled
+ * 1 : HCO
+ * 2 : VCO
+ * 3 : FULL
+ */
+} __attribute__((packed));
+
+struct vss_istream_cmd_attach_vocproc_t {
+ uint16_t handle;
+ /**< Handle of vocproc being attached. */
+} __attribute__((packed));
+
+struct vss_istream_cmd_detach_vocproc_t {
+ uint16_t handle;
+ /**< Handle of vocproc being detached. */
+} __attribute__((packed));
+
+struct vss_imvm_cmd_attach_stream_t {
+ uint16_t handle;
+ /* The stream handle to attach. */
+} __attribute__((packed));
+
+struct vss_imvm_cmd_detach_stream_t {
+ uint16_t handle;
+ /* The stream handle to detach. */
+} __attribute__((packed));
+
+struct vss_icommon_cmd_set_network_t {
+ uint32_t network_id;
+ /* Network ID. (Refer to VSS_NETWORK_ID_XXX). */
+} __attribute__((packed));
+
+struct vss_icommon_cmd_set_voice_timing_t {
+ uint16_t mode;
+ /*
+ * The vocoder frame synchronization mode.
+ *
+ * 0 : No frame sync.
+ * 1 : Hard VFR (20ms Vocoder Frame Reference interrupt).
+ */
+ uint16_t enc_offset;
+ /*
+ * The offset in microseconds from the VFR to deliver a Tx vocoder
+ * packet. The offset should be less than 20000us.
+ */
+ uint16_t dec_req_offset;
+ /*
+ * The offset in microseconds from the VFR to request for an Rx vocoder
+ * packet. The offset should be less than 20000us.
+ */
+ uint16_t dec_offset;
+ /*
+ * The offset in microseconds from the VFR to indicate the deadline to
+ * receive an Rx vocoder packet. The offset should be less than 20000us.
+ * Rx vocoder packets received after this deadline are not guaranteed to
+ * be processed.
+ */
+} __attribute__((packed));
+
+struct mvm_attach_vocproc_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_cmd_attach_vocproc_t mvm_attach_cvp_handle;
+} __attribute__((packed));
+
+struct mvm_detach_vocproc_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_cmd_detach_vocproc_t mvm_detach_cvp_handle;
+} __attribute__((packed));
+
+struct mvm_create_ctl_session_cmd {
+ struct apr_hdr hdr;
+ struct vss_imvm_cmd_create_control_session_t mvm_session;
+} __packed;
+
+struct mvm_set_tty_mode_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_cmd_set_tty_mode_t tty_mode;
+} __attribute__((packed));
+
+struct mvm_attach_stream_cmd {
+ struct apr_hdr hdr;
+ struct vss_imvm_cmd_attach_stream_t attach_stream;
+} __attribute__((packed));
+
+struct mvm_detach_stream_cmd {
+ struct apr_hdr hdr;
+ struct vss_imvm_cmd_detach_stream_t detach_stream;
+} __attribute__((packed));
+
+struct mvm_set_network_cmd {
+ struct apr_hdr hdr;
+ struct vss_icommon_cmd_set_network_t network;
+} __attribute__((packed));
+
+struct mvm_set_voice_timing_cmd {
+ struct apr_hdr hdr;
+ struct vss_icommon_cmd_set_voice_timing_t timing;
+} __attribute__((packed));
+
+/* TO CVS commands */
+#define VSS_ISTREAM_CMD_CREATE_PASSIVE_CONTROL_SESSION 0x00011140
+/**< Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_ISTREAM_CMD_CREATE_FULL_CONTROL_SESSION 0x000110F7
+/* Create a new full control stream session. */
+
+#define APRV2_IBASIC_CMD_DESTROY_SESSION 0x0001003C
+
+#define VSS_ISTREAM_CMD_CACHE_CALIBRATION_DATA 0x000110FB
+
+#define VSS_ISTREAM_CMD_SET_MUTE 0x00011022
+
+#define VSS_ISTREAM_CMD_SET_MEDIA_TYPE 0x00011186
+/* Set media type on the stream. */
+
+#define VSS_ISTREAM_EVT_SEND_ENC_BUFFER 0x00011015
+/* Event sent by the stream to its client to provide an encoded packet. */
+
+#define VSS_ISTREAM_EVT_REQUEST_DEC_BUFFER 0x00011017
+/* Event sent by the stream to its client requesting for a decoder packet.
+ * The client should respond with a VSS_ISTREAM_EVT_SEND_DEC_BUFFER event.
+ */
+
+#define VSS_ISTREAM_EVT_SEND_DEC_BUFFER 0x00011016
+/* Event sent by the client to the stream in response to a
+ * VSS_ISTREAM_EVT_REQUEST_DEC_BUFFER event, providing a decoder packet.
+ */
+
+#define VSS_ISTREAM_CMD_VOC_AMR_SET_ENC_RATE 0x0001113E
+/* Set AMR encoder rate. */
+
+#define VSS_ISTREAM_CMD_VOC_AMRWB_SET_ENC_RATE 0x0001113F
+/* Set AMR-WB encoder rate. */
+
+#define VSS_ISTREAM_CMD_CDMA_SET_ENC_MINMAX_RATE 0x00011019
+/* Set encoder minimum and maximum rate. */
+
+#define VSS_ISTREAM_CMD_SET_ENC_DTX_MODE 0x0001101D
+/* Set encoder DTX mode. */
+
+#define VSS_ISTREAM_CMD_START_RECORD 0x00011236
+/* Start in-call conversation recording. */
+
+#define VSS_ISTREAM_CMD_STOP_RECORD 0x00011237
+/* Stop in-call conversation recording. */
+
+#define VSS_ISTREAM_CMD_START_PLAYBACK 0x00011238
+/* Start in-call music delivery on the Tx voice path. */
+
+#define VSS_ISTREAM_CMD_STOP_PLAYBACK 0x00011239
+/* Stop the in-call music delivery on the Tx voice path. */
+
+struct vss_istream_cmd_create_passive_control_session_t {
+ char name[SESSION_NAME_LEN];
+ /**<
+ * A variable-sized stream name.
+ *
+ * The stream name size is the payload size minus the size of the other
+ * fields.
+ */
+} __attribute__((packed));
+
+struct vss_istream_cmd_set_mute_t {
+ uint16_t direction;
+ /**<
+ * 0 : TX only
+ * 1 : RX only
+ * 2 : TX and RX
+ */
+ uint16_t mute_flag;
+ /**<
+ * Mute, un-mute.
+ *
+ * 0 : Silence disable
+ * 1 : Silence enable
+ * 2 : CNG enable. Applicable to TX only. If set on RX behavior
+ * will be the same as 1
+ */
+} __attribute__((packed));
+
+struct vss_istream_cmd_create_full_control_session_t {
+ uint16_t direction;
+ /*
+ * Stream direction.
+ *
+ * 0 : TX only
+ * 1 : RX only
+ * 2 : TX and RX
+ * 3 : TX and RX loopback
+ */
+ uint32_t enc_media_type;
+ /* Tx vocoder type. (Refer to VSS_MEDIA_ID_XXX). */
+ uint32_t dec_media_type;
+ /* Rx vocoder type. (Refer to VSS_MEDIA_ID_XXX). */
+ uint32_t network_id;
+ /* Network ID. (Refer to VSS_NETWORK_ID_XXX). */
+ char name[SESSION_NAME_LEN];
+ /*
+ * A variable-sized stream name.
+ *
+ * The stream name size is the payload size minus the size of the other
+ * fields.
+ */
+} __attribute__((packed));
+
+struct vss_istream_cmd_set_media_type_t {
+ uint32_t rx_media_id;
+ /* Set the Rx vocoder type. (Refer to VSS_MEDIA_ID_XXX). */
+ uint32_t tx_media_id;
+ /* Set the Tx vocoder type. (Refer to VSS_MEDIA_ID_XXX). */
+} __attribute__((packed));
+
+struct vss_istream_evt_send_enc_buffer_t {
+ uint32_t media_id;
+ /* Media ID of the packet. */
+ uint8_t packet_data[MAX_VOC_PKT_SIZE];
+ /* Packet data buffer. */
+} __attribute__((packed));
+
+struct vss_istream_evt_send_dec_buffer_t {
+ uint32_t media_id;
+ /* Media ID of the packet. */
+ uint8_t packet_data[MAX_VOC_PKT_SIZE];
+ /* Packet data. */
+} __attribute__((packed));
+
+struct vss_istream_cmd_voc_amr_set_enc_rate_t {
+ uint32_t mode;
+ /* Set the AMR encoder rate.
+ *
+ * 0x00000000 : 4.75 kbps
+ * 0x00000001 : 5.15 kbps
+ * 0x00000002 : 5.90 kbps
+ * 0x00000003 : 6.70 kbps
+ * 0x00000004 : 7.40 kbps
+ * 0x00000005 : 7.95 kbps
+ * 0x00000006 : 10.2 kbps
+ * 0x00000007 : 12.2 kbps
+ */
+} __attribute__((packed));
+
+struct vss_istream_cmd_voc_amrwb_set_enc_rate_t {
+ uint32_t mode;
+ /* Set the AMR-WB encoder rate.
+ *
+ * 0x00000000 : 6.60 kbps
+ * 0x00000001 : 8.85 kbps
+ * 0x00000002 : 12.65 kbps
+ * 0x00000003 : 14.25 kbps
+ * 0x00000004 : 15.85 kbps
+ * 0x00000005 : 18.25 kbps
+ * 0x00000006 : 19.85 kbps
+ * 0x00000007 : 23.05 kbps
+ * 0x00000008 : 23.85 kbps
+ */
+} __attribute__((packed));
+
+struct vss_istream_cmd_cdma_set_enc_minmax_rate_t {
+ uint16_t min_rate;
+ /* Set the lower bound encoder rate.
+ *
+ * 0x0000 : Blank frame
+ * 0x0001 : Eighth rate
+ * 0x0002 : Quarter rate
+ * 0x0003 : Half rate
+ * 0x0004 : Full rate
+ */
+ uint16_t max_rate;
+ /* Set the upper bound encoder rate.
+ *
+ * 0x0000 : Blank frame
+ * 0x0001 : Eighth rate
+ * 0x0002 : Quarter rate
+ * 0x0003 : Half rate
+ * 0x0004 : Full rate
+ */
+} __attribute__((packed));
+
+struct vss_istream_cmd_set_enc_dtx_mode_t {
+ uint32_t enable;
+ /* Toggle DTX on or off.
+ *
+ * 0 : Disables DTX
+ * 1 : Enables DTX
+ */
+} __attribute__((packed));
+
+#define VSS_TAP_POINT_NONE 0x00010F78
+/* Indicates no tapping for specified path. */
+
+#define VSS_TAP_POINT_STREAM_END 0x00010F79
+/* Indicates that specified path should be tapped at the end of the stream. */
+
+struct vss_istream_cmd_start_record_t {
+ uint32_t rx_tap_point;
+ /* Tap point to use on the Rx path. Supported values are:
+ * VSS_TAP_POINT_NONE : Do not record Rx path.
+ * VSS_TAP_POINT_STREAM_END : Rx tap point is at the end of the stream.
+ */
+ uint32_t tx_tap_point;
+ /* Tap point to use on the Tx path. Supported values are:
+ * VSS_TAP_POINT_NONE : Do not record Tx path.
+ * VSS_TAP_POINT_STREAM_END : Tx tap point is at the end of the stream.
+ */
+} __attribute__((packed));
+
+struct cvs_create_passive_ctl_session_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_cmd_create_passive_control_session_t cvs_session;
+} __attribute__((packed));
+
+struct cvs_create_full_ctl_session_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_cmd_create_full_control_session_t cvs_session;
+} __attribute__((packed));
+
+struct cvs_destroy_session_cmd {
+ struct apr_hdr hdr;
+} __attribute__((packed));
+
+struct cvs_cache_calibration_data_cmd {
+ struct apr_hdr hdr;
+} __attribute__ ((packed));
+
+struct cvs_set_mute_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_cmd_set_mute_t cvs_set_mute;
+} __attribute__((packed));
+
+struct cvs_set_media_type_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_cmd_set_media_type_t media_type;
+} __attribute__((packed));
+
+struct cvs_send_dec_buf_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_evt_send_dec_buffer_t dec_buf;
+} __attribute__((packed));
+
+struct cvs_set_amr_enc_rate_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_cmd_voc_amr_set_enc_rate_t amr_rate;
+} __attribute__((packed));
+
+struct cvs_set_amrwb_enc_rate_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_cmd_voc_amrwb_set_enc_rate_t amrwb_rate;
+} __attribute__((packed));
+
+struct cvs_set_cdma_enc_minmax_rate_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_cmd_cdma_set_enc_minmax_rate_t cdma_rate;
+} __attribute__((packed));
+
+struct cvs_set_enc_dtx_mode_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_cmd_set_enc_dtx_mode_t dtx_mode;
+} __attribute__((packed));
+
+struct cvs_start_record_cmd {
+ struct apr_hdr hdr;
+ struct vss_istream_cmd_start_record_t rec_mode;
+} __attribute__((packed));
+
+/* TO CVP commands */
+
+#define VSS_IVOCPROC_CMD_CREATE_FULL_CONTROL_SESSION 0x000100C3
+/**< Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define APRV2_IBASIC_CMD_DESTROY_SESSION 0x0001003C
+
+#define VSS_IVOCPROC_CMD_SET_DEVICE 0x000100C4
+
+#define VSS_IVOCPROC_CMD_CACHE_CALIBRATION_DATA 0x000110E3
+
+#define VSS_IVOCPROC_CMD_CACHE_VOLUME_CALIBRATION_TABLE 0x000110E4
+
+#define VSS_IVOCPROC_CMD_SET_VP3_DATA 0x000110EB
+
+#define VSS_IVOCPROC_CMD_SET_RX_VOLUME_INDEX 0x000110EE
+
+#define VSS_IVOCPROC_CMD_ENABLE 0x000100C6
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_IVOCPROC_CMD_DISABLE 0x000110E1
+/**< No payload. Wait for APRV2_IBASIC_RSP_RESULT response. */
+
+#define VSS_IVOCPROC_TOPOLOGY_ID_NONE 0x00010F70
+#define VSS_IVOCPROC_TOPOLOGY_ID_TX_SM_ECNS 0x00010F71
+#define VSS_IVOCPROC_TOPOLOGY_ID_TX_DM_FLUENCE 0x00010F72
+
+#define VSS_IVOCPROC_TOPOLOGY_ID_RX_DEFAULT 0x00010F77
+
+/* Network IDs */
+#define VSS_NETWORK_ID_DEFAULT 0x00010037
+#define VSS_NETWORK_ID_VOIP_NB 0x00011240
+#define VSS_NETWORK_ID_VOIP_WB 0x00011241
+#define VSS_NETWORK_ID_VOIP_WV 0x00011242
+
+/* Media types */
+#define VSS_MEDIA_ID_13K_MODEM 0x00010FC1
+/* Qcelp vocoder modem format */
+#define VSS_MEDIA_ID_EVRC_MODEM 0x00010FC2
+/* 80-VF690-47 CDMA enhanced variable rate vocoder modem format. */
+#define VSS_MEDIA_ID_4GV_NB_MODEM 0x00010FC3
+/* 4GV Narrowband modem format */
+#define VSS_MEDIA_ID_4GV_WB_MODEM 0x00010FC4
+/* 4GV Wideband modem format */
+#define VSS_MEDIA_ID_AMR_NB_MODEM 0x00010FC6
+/* 80-VF690-47 UMTS AMR-NB vocoder modem format. */
+#define VSS_MEDIA_ID_AMR_WB_MODEM 0x00010FC7
+/* 80-VF690-47 UMTS AMR-WB vocoder modem format. */
+#define VSS_MEDIA_ID_EFR_MODEM 0x00010FC8
+/* EFR modem format */
+#define VSS_MEDIA_ID_FR_MODEM 0x00010FC9
+/* FR modem format */
+#define VSS_MEDIA_ID_HR_MODEM 0x00010FCA
+/* HR modem format */
+#define VSS_MEDIA_ID_PCM_NB 0x00010FCB
+/* Linear PCM (16-bit, little-endian). */
+#define VSS_MEDIA_ID_PCM_WB 0x00010FCC
+/* Linear wideband PCM vocoder modem format (16 bits, little endian). */
+#define VSS_MEDIA_ID_G711_ALAW 0x00010FCD
+/* G.711 a-law (contains two 10ms vocoder frames). */
+#define VSS_MEDIA_ID_G711_MULAW 0x00010FCE
+/* G.711 mu-law (contains two 10ms vocoder frames). */
+#define VSS_MEDIA_ID_G729 0x00010FD0
+/* G.729AB (contains two 10ms vocoder frames). */
+
+#define VOICE_CMD_SET_PARAM 0x00011006
+#define VOICE_CMD_GET_PARAM 0x00011007
+#define VOICE_EVT_GET_PARAM_ACK 0x00011008
+
+struct vss_ivocproc_cmd_create_full_control_session_t {
+ uint16_t direction;
+ /*
+ * Stream direction.
+ * 0 : TX only
+ * 1 : RX only
+ * 2 : TX and RX
+ */
+ uint32_t tx_port_id;
+ /*
+ * TX device port ID which vocproc will connect to. If not supplying a
+ * port ID set to VSS_IVOCPROC_PORT_ID_NONE.
+ */
+ uint32_t tx_topology_id;
+ /*
+ * Tx leg topology ID. If not supplying a topology ID set to
+ * VSS_IVOCPROC_TOPOLOGY_ID_NONE.
+ */
+ uint32_t rx_port_id;
+ /*
+ * RX device port ID which vocproc will connect to. If not supplying a
+ * port ID set to VSS_IVOCPROC_PORT_ID_NONE.
+ */
+ uint32_t rx_topology_id;
+ /*
+ * Rx leg topology ID. If not supplying a topology ID set to
+ * VSS_IVOCPROC_TOPOLOGY_ID_NONE.
+ */
+ int32_t network_id;
+ /*
+ * Network ID. (Refer to VSS_NETWORK_ID_XXX). If not supplying a network
+ * ID set to VSS_NETWORK_ID_DEFAULT.
+ */
+} __attribute__((packed));
+
+struct vss_ivocproc_cmd_set_device_t {
+ uint32_t tx_port_id;
+ /**<
+ * TX device port ID which vocproc will connect to.
+ * VSS_IVOCPROC_PORT_ID_NONE means vocproc will not connect to any port.
+ */
+ uint32_t tx_topology_id;
+ /**<
+ * TX leg topology ID.
+ * VSS_IVOCPROC_TOPOLOGY_ID_NONE means vocproc does not contain any
+ * pre/post-processing blocks and is pass-through.
+ */
+ int32_t rx_port_id;
+ /**<
+ * RX device port ID which vocproc will connect to.
+ * VSS_IVOCPROC_PORT_ID_NONE means vocproc will not connect to any port.
+ */
+ uint32_t rx_topology_id;
+ /**<
+ * RX leg topology ID.
+ * VSS_IVOCPROC_TOPOLOGY_ID_NONE means vocproc does not contain any
+ * pre/post-processing blocks and is pass-through.
+ */
+} __attribute__((packed));
+
+struct vss_ivocproc_cmd_set_volume_index_t {
+ uint16_t vol_index;
+ /**<
+ * Volume index utilized by the vocproc to index into the volume table
+ * provided in VSS_IVOCPROC_CMD_CACHE_VOLUME_CALIBRATION_TABLE and set
+ * volume on the VDSP.
+ */
+} __attribute__((packed));
+
+struct cvp_create_full_ctl_session_cmd {
+ struct apr_hdr hdr;
+ struct vss_ivocproc_cmd_create_full_control_session_t cvp_session;
+} __attribute__ ((packed));
+
+struct cvp_command {
+ struct apr_hdr hdr;
+} __attribute__((packed));
+
+struct cvp_set_device_cmd {
+ struct apr_hdr hdr;
+ struct vss_ivocproc_cmd_set_device_t cvp_set_device;
+} __attribute__ ((packed));
+
+struct cvp_cache_calibration_data_cmd {
+ struct apr_hdr hdr;
+} __attribute__((packed));
+
+struct cvp_cache_volume_calibration_table_cmd {
+ struct apr_hdr hdr;
+} __attribute__((packed));
+
+struct cvp_set_vp3_data_cmd {
+ struct apr_hdr hdr;
+} __attribute__((packed));
+
+struct cvp_set_rx_volume_index_cmd {
+ struct apr_hdr hdr;
+ struct vss_ivocproc_cmd_set_volume_index_t cvp_set_vol_idx;
+} __attribute__((packed));
+
+/* CB for up-link packets. */
+typedef void (*ul_cb_fn)(uint8_t *voc_pkt,
+ uint32_t pkt_len,
+ void *private_data);
+
+/* CB for down-link packets. */
+typedef void (*dl_cb_fn)(uint8_t *voc_pkt,
+ uint32_t *pkt_len,
+ void *private_data);
+
+struct q_min_max_rate {
+ uint32_t min_rate;
+ uint32_t max_rate;
+};
+
+struct mvs_driver_info {
+ uint32_t media_type;
+ uint32_t rate;
+ uint32_t network_type;
+ uint32_t dtx_mode;
+ struct q_min_max_rate q_min_max_rate;
+ ul_cb_fn ul_cb;
+ dl_cb_fn dl_cb;
+ void *private_data;
+};
+
+struct incall_rec_info {
+ uint32_t pending;
+ uint32_t rec_mode;
+};
+
+struct incall_music_info {
+ uint32_t pending;
+ uint32_t playing;
+};
+
+struct voice_data {
+ int voc_state;/*INIT, CHANGE, RELEASE, RUN */
+
+ wait_queue_head_t mvm_wait;
+ wait_queue_head_t cvs_wait;
+ wait_queue_head_t cvp_wait;
+
+ /* cache the values related to Rx and Tx */
+ struct device_data dev_rx;
+ struct device_data dev_tx;
+
+ /* call status */
+ int v_call_status; /* Start or End */
+
+ u32 mvm_state;
+ u32 cvs_state;
+ u32 cvp_state;
+
+ /* Handle to MVM */
+ u16 mvm_handle;
+ /* Handle to CVS */
+ u16 cvs_handle;
+ /* Handle to CVP */
+ u16 cvp_handle;
+
+ struct mutex lock;
+
+ struct incall_rec_info rec_info;
+
+ struct incall_music_info music_info;
+
+ u16 session_id;
+};
+
+#define MAX_VOC_SESSIONS 2
+#define SESSION_ID_BASE 0xFFF0
+
+struct common_data {
+ uint32_t voc_path;
+ uint32_t adsp_version;
+ uint32_t device_events;
+
+ /* These default values are for all devices */
+ uint32_t default_mute_val;
+ uint32_t default_vol_val;
+ uint32_t default_sample_val;
+
+ /* APR to MVM in the modem */
+ void *apr_mvm;
+ /* APR to CVS in the modem */
+ void *apr_cvs;
+ /* APR to CVP in the modem */
+ void *apr_cvp;
+
+ /* APR to MVM in the Q6 */
+ void *apr_q6_mvm;
+ /* APR to CVS in the Q6 */
+ void *apr_q6_cvs;
+ /* APR to CVP in the Q6 */
+ void *apr_q6_cvp;
+
+ struct mutex common_lock;
+
+ struct mvs_driver_info mvs_info;
+
+ struct voice_data voice[MAX_VOC_SESSIONS];
+};
+
+int voice_set_voc_path_full(uint32_t set);
+
+void voice_register_mvs_cb(ul_cb_fn ul_cb,
+ dl_cb_fn dl_cb,
+ void *private_data);
+
+void voice_config_vocoder(uint32_t media_type,
+ uint32_t rate,
+ uint32_t network_type,
+ uint32_t dtx_mode,
+ struct q_min_max_rate q_min_max_rate);
+
+int voice_start_record(uint32_t rec_mode, uint32_t set);
+
+int voice_start_playback(uint32_t set);
+
+u16 voice_get_session_id(const char *name);
+#endif
diff --git a/include/sound/qdsp6v2/rtac.h b/include/sound/qdsp6v2/rtac.h
new file mode 100644
index 0000000000000..07be428f14d39
--- /dev/null
+++ b/include/sound/qdsp6v2/rtac.h
@@ -0,0 +1,39 @@
+/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __RTAC_H__
+#define __RTAC_H__
+
+/* Voice Modes */
+#define RTAC_CVP 0
+#define RTAC_CVS 1
+#define RTAC_VOICE_MODES 2
+
+void rtac_add_adm_device(u32 port_id, u32 copp_id, u32 path_id, u32 popp_id);
+void rtac_remove_adm_device(u32 port_id);
+void rtac_remove_popp_from_adm_devices(u32 popp_id);
+void rtac_add_voice(u32 cvs_handle, u32 cvp_handle, u32 rx_afe_port,
+ u32 tx_afe_port, u32 session_id);
+void rtac_remove_voice(u32 cvs_handle);
+void rtac_set_adm_handle(void *handle);
+bool rtac_make_adm_callback(uint32_t *payload, u32 payload_size);
+void rtac_copy_adm_payload_to_user(void *payload, u32 payload_size);
+void rtac_set_asm_handle(u32 session_id, void *handle);
+bool rtac_make_asm_callback(u32 session_id, uint32_t *payload,
+ u32 payload_size);
+void rtac_copy_asm_payload_to_user(void *payload, u32 payload_size);
+void rtac_set_voice_handle(u32 mode, void *handle);
+bool rtac_make_voice_callback(u32 mode, uint32_t *payload, u32 payload_size);
+void rtac_copy_voice_payload_to_user(void *payload, u32 payload_size);
+
+#endif
diff --git a/include/trace/events/trace_msm_bus.h b/include/trace/events/trace_msm_bus.h
new file mode 100644
index 0000000000000..5615d257a0b6f
--- /dev/null
+++ b/include/trace/events/trace_msm_bus.h
@@ -0,0 +1,146 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM msm_bus
+
+#if !defined(_TRACE_MSM_BUS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MSM_BUS_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(bus_update_request,
+
+ TP_PROTO(int sec, int nsec, const char *name, int src, int dest,
+ unsigned long long ab, unsigned long long ib),
+
+ TP_ARGS(sec, nsec, name, src, dest, ab, ib),
+
+ TP_STRUCT__entry(
+ __field(int, sec)
+ __field(int, nsec)
+ __string(name, name)
+ __field(int, src)
+ __field(int, dest)
+ __field(u64, ab)
+ __field(u64, ib)
+ ),
+
+ TP_fast_assign(
+ __entry->sec = sec;
+ __entry->nsec = nsec;
+ __assign_str(name, name);
+ __entry->src = src;
+ __entry->dest = dest;
+ __entry->ab = ab;
+ __entry->ib = ib;
+ ),
+
+ TP_printk("time= %d.%d name=%s src=%d dest=%d ab=%llu ib=%llu",
+ __entry->sec,
+ __entry->nsec,
+ __get_str(name),
+ __entry->src,
+ __entry->dest,
+ (unsigned long long)__entry->ab,
+ (unsigned long long)__entry->ib)
+);
+
+TRACE_EVENT(bus_update_request_end,
+
+ TP_PROTO(const char *name),
+
+ TP_ARGS(name),
+
+ TP_STRUCT__entry(
+ __string(name, name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, name);
+ ),
+
+ TP_printk("client-name=%s", __get_str(name))
+);
+
+TRACE_EVENT(bus_bimc_config_limiter,
+
+ TP_PROTO(int mas_id, unsigned long long cur_lim_bw),
+
+ TP_ARGS(mas_id, cur_lim_bw),
+
+ TP_STRUCT__entry(
+ __field(int, mas_id)
+ __field(u64, cur_lim_bw)
+ ),
+
+ TP_fast_assign(
+ __entry->mas_id = mas_id;
+ __entry->cur_lim_bw = cur_lim_bw;
+ ),
+
+ TP_printk("Master=%d cur_lim_bw=%llu",
+ __entry->mas_id,
+ (unsigned long long)__entry->cur_lim_bw)
+);
+
+TRACE_EVENT(bus_avail_bw,
+
+ TP_PROTO(unsigned long long cur_bimc_bw, unsigned long long cur_mdp_bw),
+
+ TP_ARGS(cur_bimc_bw, cur_mdp_bw),
+
+ TP_STRUCT__entry(
+ __field(u64, cur_bimc_bw)
+ __field(u64, cur_mdp_bw)
+ ),
+
+ TP_fast_assign(
+ __entry->cur_bimc_bw = cur_bimc_bw;
+ __entry->cur_mdp_bw = cur_mdp_bw;
+ ),
+
+ TP_printk("cur_bimc_bw = %llu cur_mdp_bw = %llu",
+ (unsigned long long)__entry->cur_bimc_bw,
+ (unsigned long long)__entry->cur_mdp_bw)
+);
+
+TRACE_EVENT(bus_bke_params,
+
+ TP_PROTO(u32 gc, u32 gp, u32 thl, u32 thm, u32 thh),
+
+ TP_ARGS(gc, gp, thl, thm, thh),
+
+ TP_STRUCT__entry(
+ __field(u32, gc)
+ __field(u32, gp)
+ __field(u32, thl)
+ __field(u32, thm)
+ __field(u32, thh)
+ ),
+
+ TP_fast_assign(
+ __entry->gc = gc;
+ __entry->gp = gp;
+ __entry->thl = thl;
+ __entry->thm = thm;
+ __entry->thh = thh;
+ ),
+
+ TP_printk("BKE Params GC=0x%x GP=0x%x THL=0x%x THM=0x%x THH=0x%x",
+ __entry->gc, __entry->gp, __entry->thl, __entry->thm,
+ __entry->thh)
+);
+
+#endif
+#define TRACE_INCLUDE_FILE trace_msm_bus
+#include <trace/define_trace.h>
diff --git a/kernel/configs/distro.config b/kernel/configs/distro.config
new file mode 100644
index 0000000000000..7a5be25356fd2
--- /dev/null
+++ b/kernel/configs/distro.config
@@ -0,0 +1,234 @@
+# USB camera
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_USB_M5602=m
+CONFIG_USB_STV06XX=m
+CONFIG_USB_GL860=m
+CONFIG_USB_GSPCA_BENQ=m
+CONFIG_USB_GSPCA_CONEX=m
+CONFIG_USB_GSPCA_CPIA1=m
+CONFIG_USB_GSPCA_DTCS033=m
+CONFIG_USB_GSPCA_ETOMS=m
+CONFIG_USB_GSPCA_FINEPIX=m
+CONFIG_USB_GSPCA_JEILINJ=m
+CONFIG_USB_GSPCA_JL2005BCD=m
+CONFIG_USB_GSPCA_KINECT=m
+CONFIG_USB_GSPCA_KONICA=m
+CONFIG_USB_GSPCA_MARS=m
+CONFIG_USB_GSPCA_MR97310A=m
+CONFIG_USB_GSPCA_NW80X=m
+CONFIG_USB_GSPCA_OV519=m
+CONFIG_USB_GSPCA_OV534=m
+CONFIG_USB_GSPCA_OV534_9=m
+CONFIG_USB_GSPCA_PAC207=m
+CONFIG_USB_GSPCA_PAC7302=m
+CONFIG_USB_GSPCA_PAC7311=m
+CONFIG_USB_GSPCA_SE401=m
+CONFIG_USB_GSPCA_SN9C2028=m
+CONFIG_USB_GSPCA_SN9C20X=m
+CONFIG_USB_GSPCA_SONIXB=m
+CONFIG_USB_GSPCA_SONIXJ=m
+CONFIG_USB_GSPCA_SPCA500=m
+CONFIG_USB_GSPCA_SPCA501=m
+CONFIG_USB_GSPCA_SPCA505=m
+CONFIG_USB_GSPCA_SPCA506=m
+CONFIG_USB_GSPCA_SPCA508=m
+CONFIG_USB_GSPCA_SPCA561=m
+CONFIG_USB_GSPCA_SPCA1528=m
+CONFIG_USB_GSPCA_SQ905=m
+CONFIG_USB_GSPCA_SQ905C=m
+CONFIG_USB_GSPCA_SQ930X=m
+CONFIG_USB_GSPCA_STK014=m
+CONFIG_USB_GSPCA_STK1135=m
+CONFIG_USB_GSPCA_STV0680=m
+CONFIG_USB_GSPCA_SUNPLUS=m
+CONFIG_USB_GSPCA_T613=m
+CONFIG_USB_GSPCA_TOPRO=m
+CONFIG_USB_GSPCA_TOUPTEK=m
+CONFIG_USB_GSPCA_TV8532=m
+CONFIG_USB_GSPCA_VC032X=m
+CONFIG_USB_GSPCA_VICAM=m
+CONFIG_USB_GSPCA_XIRLINK_CIT=m
+CONFIG_USB_GSPCA_ZC3XX=m
+CONFIG_USB_PWC=m
+CONFIG_USB_ZR364XX=m
+CONFIG_USB_STKWEBCAM=m
+CONFIG_USB_S2255=m
+
+# USB serial
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_CONSOLE=y
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_SIMPLE=m
+CONFIG_USB_SERIAL_AIRCABLE=m
+CONFIG_USB_SERIAL_ARK3116=m
+CONFIG_USB_SERIAL_BELKIN=m
+CONFIG_USB_SERIAL_CH341=m
+CONFIG_USB_SERIAL_WHITEHEAT=m
+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
+CONFIG_USB_SERIAL_CP210X=m
+CONFIG_USB_SERIAL_CYPRESS_M8=m
+CONFIG_USB_SERIAL_EMPEG=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_VISOR=m
+CONFIG_USB_SERIAL_IPAQ=m
+CONFIG_USB_SERIAL_IR=m
+CONFIG_USB_SERIAL_EDGEPORT=m
+CONFIG_USB_SERIAL_EDGEPORT_TI=m
+CONFIG_USB_SERIAL_F81232=m
+CONFIG_USB_SERIAL_GARMIN=m
+CONFIG_USB_SERIAL_IPW=m
+CONFIG_USB_SERIAL_IUU=m
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
+CONFIG_USB_SERIAL_KLSI=m
+CONFIG_USB_SERIAL_KOBIL_SCT=m
+CONFIG_USB_SERIAL_MCT_U232=m
+CONFIG_USB_SERIAL_METRO=m
+CONFIG_USB_SERIAL_MOS7720=m
+CONFIG_USB_SERIAL_MOS7840=m
+CONFIG_USB_SERIAL_MXUPORT=m
+CONFIG_USB_SERIAL_NAVMAN=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_OTI6858=m
+CONFIG_USB_SERIAL_QCAUX=m
+CONFIG_USB_SERIAL_QUALCOMM=m
+CONFIG_USB_SERIAL_SPCP8X5=m
+CONFIG_USB_SERIAL_SAFE=m
+CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+CONFIG_USB_SERIAL_SYMBOL=m
+CONFIG_USB_SERIAL_TI=m
+CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_XIRCOM=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_SERIAL_OMNINET=m
+CONFIG_USB_SERIAL_OPTICON=m
+CONFIG_USB_SERIAL_XSENS_MT=m
+CONFIG_USB_SERIAL_WISHBONE=m
+CONFIG_USB_SERIAL_SSU100=m
+CONFIG_USB_SERIAL_QT2=m
+
+# USB gadget
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_DEBUG_FILES=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=m
+CONFIG_USB_ZERO=m
+CONFIG_USB_ETH=m
+CONFIG_USB_MASS_STORAGE=m
+CONFIG_USB_G_SERIAL=m
+CONFIG_NEW_LEDS=y
+
+# USB Eth
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+CONFIG_USB_NET_SR9700=m
+CONFIG_USB_NET_SR9800=m
+CONFIG_USB_NET_SMSC75XX=m
+CONFIG_USB_NET_SMSC95XX=m
+CONFIG_USB_NET_MCS7830=m
+
+# LEDs
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_ONESHOT=y
+CONFIG_LEDS_TRIGGER_GPIO=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_MAC80211_LEDS=y
+
+# systemd
+CONFIG_IPV6=y
+CONFIG_NAMESPACES=y
+CONFIG_NET_NS=y
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+CONFIG_DEVTMPFS=y
+CONFIG_CGROUPS=y
+CONFIG_INOTIFY_USER=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EPOLL=y
+CONFIG_NET=y
+CONFIG_SYSFS=y
+CONFIG_PROC_FS=y
+CONFIG_FHANDLE=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+CONFIG_BLK_CGROUP=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_DEBUG=y
+
+# NFS server
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V3_ACL=y
+
+# HID
+CONFIG_HID_APPLE=y
+CONFIG_HID_LOGITECH=m
+CONFIG_HID_MAGICMOUSE=m
+CONFIG_HID_MICROSOFT=m
+CONFIG_HID_PLANTRONICS=m
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_USB_COMPOSITE=m
+CONFIG_HID_MULTITOUCH=m
+
+#misc
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_PRINTK_TIME=y
+CONFIG_STACKTRACE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_PROVE_LOCKING=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_SND_USB=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_RFKILL=y
+CONFIG_BINFMT_MISC=m
+CONFIG_DYNAMIC_DEBUG=y
+
+# PPP
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+
+# input
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_PM8941_PWRKEY=y
diff --git a/sound/core/pcm_iec958.c b/sound/core/pcm_iec958.c
index 36b2d7aca1bdc..c9f8b6641f7bc 100644
--- a/sound/core/pcm_iec958.c
+++ b/sound/core/pcm_iec958.c
@@ -9,30 +9,18 @@
#include <linux/types.h>
#include <sound/asoundef.h>
#include <sound/pcm.h>
+#include <sound/pcm_params.h>
#include <sound/pcm_iec958.h>
-/**
- * snd_pcm_create_iec958_consumer - create consumer format IEC958 channel status
- * @runtime: pcm runtime structure with ->rate filled in
- * @cs: channel status buffer, at least four bytes
- * @len: length of channel status buffer
- *
- * Create the consumer format channel status data in @cs of maximum size
- * @len corresponding to the parameters of the PCM runtime @runtime.
- *
- * Drivers may wish to tweak the contents of the buffer after creation.
- *
- * Returns: length of buffer, or negative error code if something failed.
- */
-int snd_pcm_create_iec958_consumer(struct snd_pcm_runtime *runtime, u8 *cs,
- size_t len)
+static int create_iec958_consumer(uint rate, uint sample_width,
+ u8 *cs, size_t len)
{
unsigned int fs, ws;
if (len < 4)
return -EINVAL;
- switch (runtime->rate) {
+ switch (rate) {
case 32000:
fs = IEC958_AES3_CON_FS_32000;
break;
@@ -59,7 +47,7 @@ int snd_pcm_create_iec958_consumer(struct snd_pcm_runtime *runtime, u8 *cs,
}
if (len > 4) {
- switch (snd_pcm_format_width(runtime->format)) {
+ switch (sample_width) {
case 16:
ws = IEC958_AES4_CON_WORDLEN_20_16;
break;
@@ -92,4 +80,34 @@ int snd_pcm_create_iec958_consumer(struct snd_pcm_runtime *runtime, u8 *cs,
return len;
}
+
+/**
+ * snd_pcm_create_iec958_consumer - create consumer format IEC958 channel status
+ * @runtime: pcm runtime structure with ->rate filled in
+ * @cs: channel status buffer, at least four bytes
+ * @len: length of channel status buffer
+ *
+ * Create the consumer format channel status data in @cs of maximum size
+ * @len corresponding to the parameters of the PCM runtime @runtime.
+ *
+ * Drivers may wish to tweak the contents of the buffer after creation.
+ *
+ * Returns: length of buffer, or negative error code if something failed.
+ */
+int snd_pcm_create_iec958_consumer(struct snd_pcm_runtime *runtime, u8 *cs,
+ size_t len)
+{
+ return create_iec958_consumer(runtime->rate,
+ snd_pcm_format_width(runtime->format),
+ cs, len);
+}
EXPORT_SYMBOL(snd_pcm_create_iec958_consumer);
+
+
+int snd_pcm_create_iec958_consumer_hw_params(struct snd_pcm_hw_params *params,
+ u8 *cs, size_t len)
+{
+ return create_iec958_consumer(params_rate(params), params_width(params),
+ cs, len);
+}
+EXPORT_SYMBOL(snd_pcm_create_iec958_consumer_hw_params);
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 7ef3a0c16478d..042ee398b04d1 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -88,6 +88,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_MC13783 if MFD_MC13XXX
select SND_SOC_ML26124 if I2C
select SND_SOC_NAU8825 if I2C
+ select SND_SOC_HDMI_CODEC
select SND_SOC_PCM1681 if I2C
select SND_SOC_PCM179X_I2C if I2C
select SND_SOC_PCM179X_SPI if SPI_MASTER
@@ -477,6 +478,11 @@ config SND_SOC_BT_SCO
config SND_SOC_DMIC
tristate
+config SND_SOC_HDMI_CODEC
+ tristate
+ select SND_PCM_ELD
+ select SND_PCM_IEC958
+
config SND_SOC_ES8328
tristate "Everest Semi ES8328 CODEC"
@@ -534,6 +540,10 @@ config SND_SOC_MAX98926
config SND_SOC_MAX9850
tristate
+config SND_SOC_MSM8x16_WCD
+ tristate "Qualcomm MSM8x16 WCD"
+ depends on SPMI && MFD_SYSCON
+
config SND_SOC_PCM1681
tristate "Texas Instruments PCM1681 CODEC"
depends on I2C
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 185a712a7fe76..33a4c43d61151 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -81,6 +81,8 @@ snd-soc-max9850-objs := max9850.o
snd-soc-mc13783-objs := mc13783.o
snd-soc-ml26124-objs := ml26124.o
snd-soc-nau8825-objs := nau8825.o
+snd-soc-hdmi-codec-objs := hdmi-codec.o
+snd-soc-msm8x16-objs := msm8x16-wcd.o msm8x16-wcd-tables.o
snd-soc-pcm1681-objs := pcm1681.o
snd-soc-pcm179x-codec-objs := pcm179x.o
snd-soc-pcm179x-i2c-objs := pcm179x-i2c.o
@@ -199,7 +201,6 @@ snd-soc-wm9705-objs := wm9705.o
snd-soc-wm9712-objs := wm9712.o
snd-soc-wm9713-objs := wm9713.o
snd-soc-wm-hubs-objs := wm_hubs.o
-
# Amp
snd-soc-max9877-objs := max9877.o
snd-soc-tpa6130a2-objs := tpa6130a2.o
@@ -290,6 +291,8 @@ obj-$(CONFIG_SND_SOC_MAX9850) += snd-soc-max9850.o
obj-$(CONFIG_SND_SOC_MC13783) += snd-soc-mc13783.o
obj-$(CONFIG_SND_SOC_ML26124) += snd-soc-ml26124.o
obj-$(CONFIG_SND_SOC_NAU8825) += snd-soc-nau8825.o
+obj-$(CONFIG_SND_SOC_HDMI_CODEC) += snd-soc-hdmi-codec.o
+obj-$(CONFIG_SND_SOC_MSM8x16_WCD) +=snd-soc-msm8x16.o
obj-$(CONFIG_SND_SOC_PCM1681) += snd-soc-pcm1681.o
obj-$(CONFIG_SND_SOC_PCM179X) += snd-soc-pcm179x-codec.o
obj-$(CONFIG_SND_SOC_PCM179X_I2C) += snd-soc-pcm179x-i2c.o
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
new file mode 100644
index 0000000000000..687332df61f90
--- /dev/null
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -0,0 +1,404 @@
+/*
+ * ALSA SoC codec for HDMI encoder drivers
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha <jsarha@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/string.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/pcm_drm_eld.h>
+#include <sound/hdmi-codec.h>
+#include <sound/pcm_iec958.h>
+
+#include <drm/drm_crtc.h> /* This is only to get MAX_ELD_BYTES */
+
+struct hdmi_codec_priv {
+ struct hdmi_codec_pdata hcd;
+ struct snd_soc_dai_driver *daidrv;
+ struct hdmi_codec_daifmt daifmt[2];
+ struct mutex current_stream_lock;
+ struct snd_pcm_substream *current_stream;
+ struct snd_pcm_hw_constraint_list ratec;
+ uint8_t eld[MAX_ELD_BYTES];
+};
+
+static const struct snd_soc_dapm_widget hdmi_widgets[] = {
+ SND_SOC_DAPM_OUTPUT("TX"),
+};
+
+static const struct snd_soc_dapm_route hdmi_routes[] = {
+ { "TX", NULL, "Playback" },
+};
+
+enum {
+ DAI_ID_I2S = 0,
+ DAI_ID_SPDIF,
+};
+
+static void hdmi_codec_abort(struct device *dev)
+{
+ struct hdmi_codec_priv *hcp = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ mutex_lock(&hcp->current_stream_lock);
+ if (hcp->current_stream && hcp->current_stream->runtime &&
+ snd_pcm_running(hcp->current_stream)) {
+ dev_info(dev, "HDMI audio playback aborted\n");
+ snd_pcm_stream_lock_irq(hcp->current_stream);
+ snd_pcm_stop(hcp->current_stream, SNDRV_PCM_STATE_DISCONNECTED);
+ snd_pcm_stream_unlock_irq(hcp->current_stream);
+ }
+ mutex_unlock(&hcp->current_stream_lock);
+}
+
+static int hdmi_codec_new_stream(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
+ int ret = 0;
+
+ mutex_lock(&hcp->current_stream_lock);
+ if (!hcp->current_stream) {
+ hcp->current_stream = substream;
+ } else if (hcp->current_stream != substream) {
+ dev_err(dai->dev, "Only one simultaneous stream supported!\n");
+ ret = -EINVAL;
+ }
+ mutex_unlock(&hcp->current_stream_lock);
+
+ return ret;
+}
+
+static int hdmi_codec_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
+ int ret = 0;
+
+ dev_dbg(dai->dev, "%s()\n", __func__);
+
+ ret = hdmi_codec_new_stream(substream, dai);
+ if (ret)
+ return ret;
+
+ if (hcp->hcd.ops->audio_startup) {
+ ret = hcp->hcd.ops->audio_startup(dai->dev->parent,
+ hdmi_codec_abort);
+ if (ret) {
+ mutex_lock(&hcp->current_stream_lock);
+ hcp->current_stream = NULL;
+ mutex_unlock(&hcp->current_stream_lock);
+ return ret;
+ }
+ }
+
+ if (hcp->hcd.ops->get_eld) {
+ ret = hcp->hcd.ops->get_eld(dai->dev->parent, hcp->eld,
+ sizeof(hcp->eld));
+
+ if (!ret) {
+ ret = snd_pcm_hw_constraint_eld(substream->runtime,
+ hcp->eld);
+ if (ret)
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static void hdmi_codec_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
+
+ dev_dbg(dai->dev, "%s()\n", __func__);
+
+ WARN_ON(hcp->current_stream != substream);
+
+ hcp->hcd.ops->audio_shutdown(dai->dev->parent);
+
+ mutex_lock(&hcp->current_stream_lock);
+ hcp->current_stream = NULL;
+ mutex_unlock(&hcp->current_stream_lock);
+}
+
+static int hdmi_codec_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
+ struct hdmi_codec_params hp = {
+ .iec = {
+ .status = { 0 },
+ .subcode = { 0 },
+ .pad = 0,
+ .dig_subframe = { 0 },
+ }
+ };
+ int ret;
+
+ dev_dbg(dai->dev, "%s() width %d rate %d channels %d\n", __func__,
+ params_width(params), params_rate(params),
+ params_channels(params));
+
+ ret = snd_pcm_create_iec958_consumer_hw_params(params, hp.iec.status,
+ sizeof(hp.iec.status));
+ if (ret < 0) {
+ dev_err(dai->dev, "Creating IEC958 channel status failed %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = hdmi_codec_new_stream(substream, dai);
+ if (ret)
+ return ret;
+
+ hdmi_audio_infoframe_init(&hp.cea);
+ hp.cea.channels = params_channels(params);
+ hp.cea.coding_type = HDMI_AUDIO_CODING_TYPE_STREAM;
+ hp.cea.sample_size = HDMI_AUDIO_SAMPLE_SIZE_STREAM;
+ hp.cea.sample_frequency = HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM;
+
+ hp.sample_width = params_width(params);
+ hp.sample_rate = params_rate(params);
+ hp.channels = params_channels(params);
+
+ return hcp->hcd.ops->hw_params(dai->dev->parent, &hcp->daifmt[dai->id],
+ &hp);
+}
+
+static int hdmi_codec_set_fmt(struct snd_soc_dai *dai,
+ unsigned int fmt)
+{
+ struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
+ struct hdmi_codec_daifmt cf = { 0 };
+ int ret = 0;
+
+ dev_dbg(dai->dev, "%s()\n", __func__);
+
+ if (dai->id == DAI_ID_SPDIF) {
+ cf.fmt = HDMI_SPDIF;
+ } else {
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ cf.bit_clk_master = 1;
+ cf.frame_clk_master = 1;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFM:
+ cf.frame_clk_master = 1;
+ break;
+ case SND_SOC_DAIFMT_CBM_CFS:
+ cf.bit_clk_master = 1;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ cf.frame_clk_inv = 1;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ cf.bit_clk_inv = 1;
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ cf.frame_clk_inv = 1;
+ cf.bit_clk_inv = 1;
+ break;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ cf.fmt = HDMI_I2S;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ cf.fmt = HDMI_DSP_A;
+ break;
+ case SND_SOC_DAIFMT_DSP_B:
+ cf.fmt = HDMI_DSP_B;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ cf.fmt = HDMI_RIGHT_J;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ cf.fmt = HDMI_LEFT_J;
+ break;
+ case SND_SOC_DAIFMT_AC97:
+ cf.fmt = HDMI_AC97;
+ break;
+ default:
+ dev_err(dai->dev, "Invalid DAI interface format\n");
+ return -EINVAL;
+ }
+ }
+
+ hcp->daifmt[dai->id] = cf;
+
+ return ret;
+}
+
+static int hdmi_codec_digital_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai);
+
+ dev_dbg(dai->dev, "%s()\n", __func__);
+
+ if (hcp->hcd.ops->digital_mute)
+ return hcp->hcd.ops->digital_mute(dai->dev->parent, mute);
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops hdmi_dai_ops = {
+ .startup = hdmi_codec_startup,
+ .shutdown = hdmi_codec_shutdown,
+ .hw_params = hdmi_codec_hw_params,
+ .set_fmt = hdmi_codec_set_fmt,
+ .digital_mute = hdmi_codec_digital_mute,
+};
+
+
+#define HDMI_RATES (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |\
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 |\
+ SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_176400 |\
+ SNDRV_PCM_RATE_192000)
+
+#define SPDIF_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE |\
+ SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S20_3BE |\
+ SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_3BE |\
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S24_BE)
+
+#define I2S_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE |\
+ SNDRV_PCM_FMTBIT_S18_3LE | SNDRV_PCM_FMTBIT_S18_3BE |\
+ SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S20_3BE |\
+ SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_3BE |\
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S24_BE |\
+ SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S32_BE)
+
+static struct snd_soc_dai_driver hdmi_i2s_dai = {
+ .name = "i2s-hifi",
+ .id = DAI_ID_I2S,
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 8,
+ .rates = HDMI_RATES,
+ .formats = I2S_FORMATS,
+ .sig_bits = 24,
+ },
+ .ops = &hdmi_dai_ops,
+};
+
+static const struct snd_soc_dai_driver hdmi_spdif_dai = {
+ .name = "spdif-hifi",
+ .id = DAI_ID_SPDIF,
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = HDMI_RATES,
+ .formats = SPDIF_FORMATS,
+ },
+ .ops = &hdmi_dai_ops,
+};
+
+static struct snd_soc_codec_driver hdmi_codec = {
+ .dapm_widgets = hdmi_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(hdmi_widgets),
+ .dapm_routes = hdmi_routes,
+ .num_dapm_routes = ARRAY_SIZE(hdmi_routes),
+};
+
+static int hdmi_codec_probe(struct platform_device *pdev)
+{
+ struct hdmi_codec_pdata *hcd = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct hdmi_codec_priv *hcp;
+ int dai_count, i = 0;
+ int ret;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ if (!hcd) {
+ dev_err(dev, "%s: No platform data\n", __func__);
+ return -EINVAL;
+ }
+
+ dai_count = hcd->i2s + hcd->spdif;
+ if (dai_count < 1 || !hcd->ops || !hcd->ops->hw_params ||
+ !hcd->ops->audio_shutdown) {
+ dev_err(dev, "%s: Invalid parameters\n", __func__);
+ return -EINVAL;
+ }
+
+ hcp = devm_kzalloc(dev, sizeof(*hcp), GFP_KERNEL);
+ if (!hcp)
+ return -ENOMEM;
+
+ hcp->hcd = *hcd;
+ mutex_init(&hcp->current_stream_lock);
+
+ hcp->daidrv = devm_kzalloc(dev, dai_count * sizeof(*hcp->daidrv),
+ GFP_KERNEL);
+ if (!hcp->daidrv)
+ return -ENOMEM;
+
+ if (hcd->i2s) {
+ hcp->daidrv[i] = hdmi_i2s_dai;
+ hcp->daidrv[i].playback.channels_max =
+ hcd->max_i2s_channels;
+ i++;
+ }
+
+ if (hcd->spdif)
+ hcp->daidrv[i] = hdmi_spdif_dai;
+
+ ret = snd_soc_register_codec(dev, &hdmi_codec, hcp->daidrv,
+ dai_count);
+ if (ret) {
+ dev_err(dev, "%s: snd_soc_register_codec() failed (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ dev_set_drvdata(dev, hcp);
+ return 0;
+}
+
+static int hdmi_codec_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_codec(&pdev->dev);
+ return 0;
+}
+
+static struct platform_driver hdmi_codec_driver = {
+ .driver = {
+ .name = HDMI_CODEC_DRV_NAME,
+ },
+ .probe = hdmi_codec_probe,
+ .remove = hdmi_codec_remove,
+};
+
+module_platform_driver(hdmi_codec_driver);
+
+MODULE_AUTHOR("Jyri Sarha <jsarha@ti.com>");
+MODULE_DESCRIPTION("HDMI Audio Codec Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" HDMI_CODEC_DRV_NAME);
diff --git a/sound/soc/codecs/msm8x16-wcd-tables.c b/sound/soc/codecs/msm8x16-wcd-tables.c
new file mode 100644
index 0000000000000..c6b1fd62bacc1
--- /dev/null
+++ b/sound/soc/codecs/msm8x16-wcd-tables.c
@@ -0,0 +1,742 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8x16_wcd_registers.h"
+#include "msm8x16-wcd.h"
+
+const u8 msm8x16_wcd_reg_readable[MSM8X16_WCD_CACHE_SIZE] = {
+ [MSM8X16_WCD_A_DIGITAL_REVISION1] = 1,
+ [MSM8X16_WCD_A_DIGITAL_REVISION2] = 1,
+ [MSM8X16_WCD_A_DIGITAL_PERPH_TYPE] = 1,
+ [MSM8X16_WCD_A_DIGITAL_PERPH_SUBTYPE] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_RT_STS] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_SET_TYPE] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_POLARITY_HIGH] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_POLARITY_LOW] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_EN_SET] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_EN_CLR] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_LATCHED_STS] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_PENDING_STS] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_MID_SEL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_PRIORITY] = 1,
+ [MSM8X16_WCD_A_DIGITAL_GPIO_MODE] = 1,
+ [MSM8X16_WCD_A_DIGITAL_PIN_CTL_OE] = 1,
+ [MSM8X16_WCD_A_DIGITAL_PIN_CTL_DATA] = 1,
+ [MSM8X16_WCD_A_DIGITAL_PIN_STATUS] = 1,
+ [MSM8X16_WCD_A_DIGITAL_HDRIVE_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_RST_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_TOP_CLK_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_ANA_CLK_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_TX1_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_TX2_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_HPHR_DAC_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX1_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX2_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX3_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX_LB_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL1] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL2] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL3] = 1,
+ [MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA0] = 1,
+ [MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA1] = 1,
+ [MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA2] = 1,
+ [MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA3] = 1,
+ [MSM8X16_WCD_A_DIGITAL_DIG_DEBUG_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_SPARE_0] = 1,
+ [MSM8X16_WCD_A_DIGITAL_SPARE_1] = 1,
+ [MSM8X16_WCD_A_DIGITAL_SPARE_2] = 1,
+ [MSM8X16_WCD_A_ANALOG_REVISION1] = 1,
+ [MSM8X16_WCD_A_ANALOG_REVISION2] = 1,
+ [MSM8X16_WCD_A_ANALOG_REVISION3] = 1,
+ [MSM8X16_WCD_A_ANALOG_REVISION4] = 1,
+ [MSM8X16_WCD_A_ANALOG_PERPH_TYPE] = 1,
+ [MSM8X16_WCD_A_ANALOG_PERPH_SUBTYPE] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_RT_STS] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_SET_TYPE] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_POLARITY_HIGH] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_POLARITY_LOW] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_EN_SET] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_EN_CLR] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_LATCHED_STS] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_PENDING_STS] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_MID_SEL] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_PRIORITY] = 1,
+ [MSM8X16_WCD_A_ANALOG_MICB_1_EN] = 1,
+ [MSM8X16_WCD_A_ANALOG_MICB_1_VAL] = 1,
+ [MSM8X16_WCD_A_ANALOG_MICB_1_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_MICB_1_INT_RBIAS] = 1,
+ [MSM8X16_WCD_A_ANALOG_MICB_2_EN] = 1,
+ [MSM8X16_WCD_A_ANALOG_MBHC_DET_CTL_1] = 1,
+ [MSM8X16_WCD_A_ANALOG_MBHC_DET_CTL_2] = 1,
+ [MSM8X16_WCD_A_ANALOG_MBHC_FSM_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_MBHC_DBNC_TIMER] = 1,
+ [MSM8X16_WCD_A_ANALOG_MBHC_BTN0_ZDETL_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_MBHC_BTN1_ZDETM_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_MBHC_BTN2_ZDETH_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_MBHC_BTN3_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_MBHC_BTN4_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_MBHC_BTN_RESULT] = 1,
+ [MSM8X16_WCD_A_ANALOG_MBHC_ZDET_ELECT_RESULT] = 1,
+ [MSM8X16_WCD_A_ANALOG_TX_1_EN] = 1,
+ [MSM8X16_WCD_A_ANALOG_TX_2_EN] = 1,
+ [MSM8X16_WCD_A_ANALOG_TX_1_2_TEST_CTL_1] = 1,
+ [MSM8X16_WCD_A_ANALOG_TX_1_2_TEST_CTL_2] = 1,
+ [MSM8X16_WCD_A_ANALOG_TX_1_2_ATEST_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_TX_1_2_OPAMP_BIAS] = 1,
+ [MSM8X16_WCD_A_ANALOG_TX_1_2_TXFE_CLKDIV] = 1,
+ [MSM8X16_WCD_A_ANALOG_TX_3_EN] = 1,
+ [MSM8X16_WCD_A_ANALOG_NCP_EN] = 1,
+ [MSM8X16_WCD_A_ANALOG_NCP_CLK] = 1,
+ [MSM8X16_WCD_A_ANALOG_NCP_DEGLITCH] = 1,
+ [MSM8X16_WCD_A_ANALOG_NCP_FBCTRL] = 1,
+ [MSM8X16_WCD_A_ANALOG_NCP_BIAS] = 1,
+ [MSM8X16_WCD_A_ANALOG_NCP_VCTRL] = 1,
+ [MSM8X16_WCD_A_ANALOG_NCP_TEST] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_CLOCK_DIVIDER] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_COM_OCP_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_COM_OCP_COUNT] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_COM_BIAS_DAC] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_PA] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_LDO_OCP] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_CNP] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_EN] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_WG_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_WG_TIME] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_L_TEST] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_L_PA_DAC_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_R_TEST] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_R_PA_DAC_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_EAR_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_ATEST] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_STATUS] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_EAR_STATUS] = 1,
+ [MSM8X16_WCD_A_ANALOG_SPKR_DAC_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_SPKR_DRV_CLIP_DET] = 1,
+ [MSM8X16_WCD_A_ANALOG_SPKR_DRV_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_SPKR_ANA_BIAS_SET] = 1,
+ [MSM8X16_WCD_A_ANALOG_SPKR_OCP_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_SPKR_PWRSTG_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_SPKR_DRV_MISC] = 1,
+ [MSM8X16_WCD_A_ANALOG_SPKR_DRV_DBG] = 1,
+ [MSM8X16_WCD_A_ANALOG_CURRENT_LIMIT] = 1,
+ [MSM8X16_WCD_A_ANALOG_OUTPUT_VOLTAGE] = 1,
+ [MSM8X16_WCD_A_ANALOG_BYPASS_MODE] = 1,
+ [MSM8X16_WCD_A_ANALOG_BOOST_EN_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_SLOPE_COMP_IP_ZERO] = 1,
+ [MSM8X16_WCD_A_ANALOG_RDSON_MAX_DUTY_CYCLE] = 1,
+ [MSM8X16_WCD_A_ANALOG_BOOST_TEST1_1] = 1,
+ [MSM8X16_WCD_A_ANALOG_BOOST_TEST_2] = 1,
+ [MSM8X16_WCD_A_ANALOG_SPKR_SAR_STATUS] = 1,
+ [MSM8X16_WCD_A_ANALOG_SPKR_DRV_STATUS] = 1,
+ [MSM8X16_WCD_A_ANALOG_PBUS_ADD_CSR] = 1,
+ [MSM8X16_WCD_A_ANALOG_PBUS_ADD_SEL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_RX_RESET_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_TX_RESET_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_DMIC_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_RX_I2S_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_TX_I2S_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_OTHR_RESET_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_TX_CLK_EN_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_OTHR_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_RX_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_MCLK_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_PDM_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_SD_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX1_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX2_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX3_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX1_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX2_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX3_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX1_B3_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX2_B3_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX3_B3_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX1_B4_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX2_B4_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX3_B4_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX1_B5_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX2_B5_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX3_B5_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX1_B6_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX2_B6_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX3_B6_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX1_VOL_CTL_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX2_VOL_CTL_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX3_VOL_CTL_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX1_VOL_CTL_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX2_VOL_CTL_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX3_VOL_CTL_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_TOP_GAIN_UPDATE] = 1,
+ [MSM8X16_WCD_A_CDC_TOP_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_DEBUG_DESER1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_DEBUG_DESER2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_DEBUG_B1_CTL_CFG] = 1,
+ [MSM8X16_WCD_A_CDC_DEBUG_B2_CTL_CFG] = 1,
+ [MSM8X16_WCD_A_CDC_DEBUG_B3_CTL_CFG] = 1,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B3_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B3_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B4_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B4_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B5_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B5_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B6_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B6_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B7_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B7_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B8_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B8_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_TIMER_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_TIMER_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR1_COEF_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR2_COEF_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR1_COEF_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR2_COEF_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_RX1_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_RX1_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_RX1_B3_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_RX2_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_RX2_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_RX2_B3_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_RX3_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_RX3_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_TX_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_EQ1_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_EQ1_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_EQ1_B3_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_EQ1_B4_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_EQ2_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_EQ2_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_EQ2_B3_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_EQ2_B4_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_TX_I2S_SD1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_TX1_VOL_CTL_TIMER] = 1,
+ [MSM8X16_WCD_A_CDC_TX2_VOL_CTL_TIMER] = 1,
+ [MSM8X16_WCD_A_CDC_TX1_VOL_CTL_GAIN] = 1,
+ [MSM8X16_WCD_A_CDC_TX2_VOL_CTL_GAIN] = 1,
+ [MSM8X16_WCD_A_CDC_TX1_VOL_CTL_CFG] = 1,
+ [MSM8X16_WCD_A_CDC_TX2_VOL_CTL_CFG] = 1,
+ [MSM8X16_WCD_A_CDC_TX1_MUX_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_TX2_MUX_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_TX1_CLK_FS_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_TX2_CLK_FS_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_TX1_DMIC_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_TX2_DMIC_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_MASTER_BIAS_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_LATCHED_CLR] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_LATCHED_CLR] = 1,
+ [MSM8X16_WCD_A_ANALOG_NCP_CLIM_ADDR] = 1,
+ [MSM8X16_WCD_A_DIGITAL_SEC_ACCESS] = 1,
+ [MSM8X16_WCD_A_DIGITAL_PERPH_RESET_CTL3] = 1,
+ [MSM8X16_WCD_A_ANALOG_SEC_ACCESS] = 1,
+};
+
+const u8 msm8x16_wcd_reg_readonly[MSM8X16_WCD_CACHE_SIZE] = {
+ [MSM8X16_WCD_A_DIGITAL_REVISION1] = 1,
+ [MSM8X16_WCD_A_DIGITAL_REVISION2] = 1,
+ [MSM8X16_WCD_A_DIGITAL_PERPH_TYPE] = 1,
+ [MSM8X16_WCD_A_DIGITAL_PERPH_SUBTYPE] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_RT_STS] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_SET_TYPE] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_POLARITY_HIGH] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_POLARITY_LOW] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_LATCHED_STS] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_PENDING_STS] = 1,
+ [MSM8X16_WCD_A_DIGITAL_PIN_STATUS] = 1,
+ [MSM8X16_WCD_A_ANALOG_REVISION1] = 1,
+ [MSM8X16_WCD_A_ANALOG_REVISION2] = 1,
+ [MSM8X16_WCD_A_ANALOG_REVISION3] = 1,
+ [MSM8X16_WCD_A_ANALOG_REVISION4] = 1,
+ [MSM8X16_WCD_A_ANALOG_PERPH_TYPE] = 1,
+ [MSM8X16_WCD_A_ANALOG_PERPH_SUBTYPE] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_RT_STS] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_SET_TYPE] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_POLARITY_HIGH] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_POLARITY_LOW] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_LATCHED_STS] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_PENDING_STS] = 1,
+ [MSM8X16_WCD_A_ANALOG_MBHC_BTN_RESULT] = 1,
+ [MSM8X16_WCD_A_ANALOG_MBHC_ZDET_ELECT_RESULT] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_STATUS] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_EAR_STATUS] = 1,
+ [MSM8X16_WCD_A_ANALOG_SPKR_SAR_STATUS] = 1,
+ [MSM8X16_WCD_A_ANALOG_SPKR_DRV_STATUS] = 1,
+ [MSM8X16_WCD_A_CDC_RX1_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX2_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX3_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX1_B6_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX2_B6_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX3_B6_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_TX1_VOL_CTL_CFG] = 1,
+ [MSM8X16_WCD_A_CDC_TX2_VOL_CTL_CFG] = 1,
+ [MSM8X16_WCD_A_CDC_IIR1_COEF_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR2_COEF_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_MCLK_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_PDM_CTL] = 1,
+};
+
+u8 msm8x16_wcd_reset_reg_defaults[MSM8X16_WCD_CACHE_SIZE] = {
+ [MSM8X16_WCD_A_DIGITAL_REVISION1] =
+ MSM8X16_WCD_A_DIGITAL_REVISION1__POR,
+ [MSM8X16_WCD_A_DIGITAL_REVISION2] =
+ MSM8X16_WCD_A_DIGITAL_REVISION2__POR,
+ [MSM8X16_WCD_A_DIGITAL_PERPH_TYPE] =
+ MSM8X16_WCD_A_DIGITAL_PERPH_TYPE__POR,
+ [MSM8X16_WCD_A_DIGITAL_PERPH_SUBTYPE] =
+ MSM8X16_WCD_A_DIGITAL_PERPH_SUBTYPE__POR,
+ [MSM8X16_WCD_A_DIGITAL_INT_RT_STS] =
+ MSM8X16_WCD_A_DIGITAL_INT_RT_STS__POR,
+ [MSM8X16_WCD_A_DIGITAL_INT_SET_TYPE] =
+ MSM8X16_WCD_A_DIGITAL_INT_SET_TYPE__POR,
+ [MSM8X16_WCD_A_DIGITAL_INT_POLARITY_HIGH] =
+ MSM8X16_WCD_A_DIGITAL_INT_POLARITY_HIGH__POR,
+ [MSM8X16_WCD_A_DIGITAL_INT_POLARITY_LOW] =
+ MSM8X16_WCD_A_DIGITAL_INT_POLARITY_LOW__POR,
+ [MSM8X16_WCD_A_DIGITAL_INT_LATCHED_CLR] =
+ MSM8X16_WCD_A_DIGITAL_INT_LATCHED_CLR__POR,
+ [MSM8X16_WCD_A_DIGITAL_INT_EN_SET] =
+ MSM8X16_WCD_A_DIGITAL_INT_EN_SET__POR,
+ [MSM8X16_WCD_A_DIGITAL_INT_EN_CLR] =
+ MSM8X16_WCD_A_DIGITAL_INT_EN_CLR__POR,
+ [MSM8X16_WCD_A_DIGITAL_INT_LATCHED_STS] =
+ MSM8X16_WCD_A_DIGITAL_INT_LATCHED_STS__POR,
+ [MSM8X16_WCD_A_DIGITAL_INT_PENDING_STS] =
+ MSM8X16_WCD_A_DIGITAL_INT_PENDING_STS__POR,
+ [MSM8X16_WCD_A_DIGITAL_INT_MID_SEL] =
+ MSM8X16_WCD_A_DIGITAL_INT_MID_SEL__POR,
+ [MSM8X16_WCD_A_DIGITAL_INT_PRIORITY] =
+ MSM8X16_WCD_A_DIGITAL_INT_PRIORITY__POR,
+ [MSM8X16_WCD_A_DIGITAL_GPIO_MODE] =
+ MSM8X16_WCD_A_DIGITAL_GPIO_MODE__POR,
+ [MSM8X16_WCD_A_DIGITAL_PIN_CTL_OE] =
+ MSM8X16_WCD_A_DIGITAL_PIN_CTL_OE__POR,
+ [MSM8X16_WCD_A_DIGITAL_PIN_CTL_DATA] =
+ MSM8X16_WCD_A_DIGITAL_PIN_CTL_DATA__POR,
+ [MSM8X16_WCD_A_DIGITAL_PIN_STATUS] =
+ MSM8X16_WCD_A_DIGITAL_PIN_STATUS__POR,
+ [MSM8X16_WCD_A_DIGITAL_HDRIVE_CTL] =
+ MSM8X16_WCD_A_DIGITAL_HDRIVE_CTL__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_RST_CTL] =
+ MSM8X16_WCD_A_DIGITAL_CDC_RST_CTL__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_TOP_CLK_CTL] =
+ MSM8X16_WCD_A_DIGITAL_CDC_TOP_CLK_CTL__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_ANA_CLK_CTL] =
+ MSM8X16_WCD_A_DIGITAL_CDC_ANA_CLK_CTL__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL] =
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_TX1_CTL] =
+ MSM8X16_WCD_A_DIGITAL_CDC_CONN_TX1_CTL__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_TX2_CTL] =
+ MSM8X16_WCD_A_DIGITAL_CDC_CONN_TX2_CTL__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_HPHR_DAC_CTL] =
+ MSM8X16_WCD_A_DIGITAL_CDC_CONN_HPHR_DAC_CTL__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX1_CTL] =
+ MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX1_CTL__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX2_CTL] =
+ MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX2_CTL__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX3_CTL] =
+ MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX3_CTL__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX_LB_CTL] =
+ MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX_LB_CTL__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL1] =
+ MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL1__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL2] =
+ MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL2__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL3] =
+ MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL3__POR,
+ [MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA0] =
+ MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA0__POR,
+ [MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA1] =
+ MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA1__POR,
+ [MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA2] =
+ MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA2__POR,
+ [MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA3] =
+ MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA3__POR,
+ [MSM8X16_WCD_A_DIGITAL_SPARE_0] =
+ MSM8X16_WCD_A_DIGITAL_SPARE_0__POR,
+ [MSM8X16_WCD_A_DIGITAL_SPARE_1] =
+ MSM8X16_WCD_A_DIGITAL_SPARE_1__POR,
+ [MSM8X16_WCD_A_DIGITAL_SPARE_2] =
+ MSM8X16_WCD_A_DIGITAL_SPARE_2__POR,
+ [MSM8X16_WCD_A_ANALOG_REVISION1] =
+ MSM8X16_WCD_A_ANALOG_REVISION1__POR,
+ [MSM8X16_WCD_A_ANALOG_REVISION2] =
+ MSM8X16_WCD_A_ANALOG_REVISION2__POR,
+ [MSM8X16_WCD_A_ANALOG_REVISION3] =
+ MSM8X16_WCD_A_ANALOG_REVISION3__POR,
+ [MSM8X16_WCD_A_ANALOG_REVISION4] =
+ MSM8X16_WCD_A_ANALOG_REVISION4__POR,
+ [MSM8X16_WCD_A_ANALOG_PERPH_TYPE] =
+ MSM8X16_WCD_A_ANALOG_PERPH_TYPE__POR,
+ [MSM8X16_WCD_A_ANALOG_PERPH_SUBTYPE] =
+ MSM8X16_WCD_A_ANALOG_PERPH_SUBTYPE__POR,
+ [MSM8X16_WCD_A_ANALOG_INT_RT_STS] =
+ MSM8X16_WCD_A_ANALOG_INT_RT_STS__POR,
+ [MSM8X16_WCD_A_ANALOG_INT_SET_TYPE] =
+ MSM8X16_WCD_A_ANALOG_INT_SET_TYPE__POR,
+ [MSM8X16_WCD_A_ANALOG_INT_POLARITY_HIGH] =
+ MSM8X16_WCD_A_ANALOG_INT_POLARITY_HIGH__POR,
+ [MSM8X16_WCD_A_ANALOG_INT_POLARITY_LOW] =
+ MSM8X16_WCD_A_ANALOG_INT_POLARITY_LOW__POR,
+ [MSM8X16_WCD_A_ANALOG_INT_LATCHED_CLR] =
+ MSM8X16_WCD_A_ANALOG_INT_LATCHED_CLR__POR,
+ [MSM8X16_WCD_A_ANALOG_INT_EN_SET] =
+ MSM8X16_WCD_A_ANALOG_INT_EN_SET__POR,
+ [MSM8X16_WCD_A_ANALOG_INT_EN_CLR] =
+ MSM8X16_WCD_A_ANALOG_INT_EN_CLR__POR,
+ [MSM8X16_WCD_A_ANALOG_INT_LATCHED_STS] =
+ MSM8X16_WCD_A_ANALOG_INT_LATCHED_STS__POR,
+ [MSM8X16_WCD_A_ANALOG_INT_PENDING_STS] =
+ MSM8X16_WCD_A_ANALOG_INT_PENDING_STS__POR,
+ [MSM8X16_WCD_A_ANALOG_INT_MID_SEL] =
+ MSM8X16_WCD_A_ANALOG_INT_MID_SEL__POR,
+ [MSM8X16_WCD_A_ANALOG_INT_PRIORITY] =
+ MSM8X16_WCD_A_ANALOG_INT_PRIORITY__POR,
+ [MSM8X16_WCD_A_ANALOG_MICB_1_EN] =
+ MSM8X16_WCD_A_ANALOG_MICB_1_EN__POR,
+ [MSM8X16_WCD_A_ANALOG_MICB_1_VAL] =
+ MSM8X16_WCD_A_ANALOG_MICB_1_VAL__POR,
+ [MSM8X16_WCD_A_ANALOG_MICB_1_CTL] =
+ MSM8X16_WCD_A_ANALOG_MICB_1_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_MICB_1_INT_RBIAS] =
+ MSM8X16_WCD_A_ANALOG_MICB_1_INT_RBIAS__POR,
+ [MSM8X16_WCD_A_ANALOG_MICB_2_EN] =
+ MSM8X16_WCD_A_ANALOG_MICB_2_EN__POR,
+ [MSM8X16_WCD_A_ANALOG_MBHC_DET_CTL_1] =
+ MSM8X16_WCD_A_ANALOG_MBHC_DET_CTL_1__POR,
+ [MSM8X16_WCD_A_ANALOG_MBHC_DET_CTL_2] =
+ MSM8X16_WCD_A_ANALOG_MBHC_DET_CTL_2__POR,
+ [MSM8X16_WCD_A_ANALOG_MBHC_FSM_CTL] =
+ MSM8X16_WCD_A_ANALOG_MBHC_FSM_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_MBHC_DBNC_TIMER] =
+ MSM8X16_WCD_A_ANALOG_MBHC_DBNC_TIMER__POR,
+ [MSM8X16_WCD_A_ANALOG_MBHC_BTN0_ZDETL_CTL] =
+ MSM8X16_WCD_A_ANALOG_MBHC_BTN0_ZDETL_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_MBHC_BTN1_ZDETM_CTL] =
+ MSM8X16_WCD_A_ANALOG_MBHC_BTN1_ZDETM_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_MBHC_BTN2_ZDETH_CTL] =
+ MSM8X16_WCD_A_ANALOG_MBHC_BTN2_ZDETH_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_MBHC_BTN3_CTL] =
+ MSM8X16_WCD_A_ANALOG_MBHC_BTN3_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_MBHC_BTN4_CTL] =
+ MSM8X16_WCD_A_ANALOG_MBHC_BTN4_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_MBHC_BTN_RESULT] =
+ MSM8X16_WCD_A_ANALOG_MBHC_BTN_RESULT__POR,
+ [MSM8X16_WCD_A_ANALOG_MBHC_ZDET_ELECT_RESULT] =
+ MSM8X16_WCD_A_ANALOG_MBHC_ZDET_ELECT_RESULT__POR,
+ [MSM8X16_WCD_A_ANALOG_TX_1_EN] =
+ MSM8X16_WCD_A_ANALOG_TX_1_EN__POR,
+ [MSM8X16_WCD_A_ANALOG_TX_2_EN] =
+ MSM8X16_WCD_A_ANALOG_TX_2_EN__POR,
+ [MSM8X16_WCD_A_ANALOG_TX_1_2_TEST_CTL_1] =
+ MSM8X16_WCD_A_ANALOG_TX_1_2_TEST_CTL_1__POR,
+ [MSM8X16_WCD_A_ANALOG_TX_1_2_TEST_CTL_2] =
+ MSM8X16_WCD_A_ANALOG_TX_1_2_TEST_CTL_2__POR,
+ [MSM8X16_WCD_A_ANALOG_TX_1_2_ATEST_CTL] =
+ MSM8X16_WCD_A_ANALOG_TX_1_2_ATEST_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_TX_1_2_OPAMP_BIAS] =
+ MSM8X16_WCD_A_ANALOG_TX_1_2_OPAMP_BIAS__POR,
+ [MSM8X16_WCD_A_ANALOG_TX_1_2_TXFE_CLKDIV] =
+ MSM8X16_WCD_A_ANALOG_TX_1_2_TXFE_CLKDIV__POR,
+ [MSM8X16_WCD_A_ANALOG_TX_3_EN] =
+ MSM8X16_WCD_A_ANALOG_TX_3_EN__POR,
+ [MSM8X16_WCD_A_ANALOG_NCP_EN] =
+ MSM8X16_WCD_A_ANALOG_NCP_EN__POR,
+ [MSM8X16_WCD_A_ANALOG_NCP_CLK] =
+ MSM8X16_WCD_A_ANALOG_NCP_CLK__POR,
+ [MSM8X16_WCD_A_ANALOG_NCP_DEGLITCH] =
+ MSM8X16_WCD_A_ANALOG_NCP_DEGLITCH__POR,
+ [MSM8X16_WCD_A_ANALOG_NCP_FBCTRL] =
+ MSM8X16_WCD_A_ANALOG_NCP_FBCTRL__POR,
+ [MSM8X16_WCD_A_ANALOG_NCP_BIAS] =
+ MSM8X16_WCD_A_ANALOG_NCP_BIAS__POR,
+ [MSM8X16_WCD_A_ANALOG_NCP_VCTRL] =
+ MSM8X16_WCD_A_ANALOG_NCP_VCTRL__POR,
+ [MSM8X16_WCD_A_ANALOG_NCP_TEST] =
+ MSM8X16_WCD_A_ANALOG_NCP_TEST__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_CLOCK_DIVIDER] =
+ MSM8X16_WCD_A_ANALOG_RX_CLOCK_DIVIDER__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_COM_OCP_CTL] =
+ MSM8X16_WCD_A_ANALOG_RX_COM_OCP_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_COM_OCP_COUNT] =
+ MSM8X16_WCD_A_ANALOG_RX_COM_OCP_COUNT__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_COM_BIAS_DAC] =
+ MSM8X16_WCD_A_ANALOG_RX_COM_BIAS_DAC__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_PA] =
+ MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_PA__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_LDO_OCP] =
+ MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_LDO_OCP__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_CNP] =
+ MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_CNP__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_EN] =
+ MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_EN__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_WG_CTL] =
+ MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_WG_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_WG_TIME] =
+ MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_WG_TIME__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_L_TEST] =
+ MSM8X16_WCD_A_ANALOG_RX_HPH_L_TEST__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_L_PA_DAC_CTL] =
+ MSM8X16_WCD_A_ANALOG_RX_HPH_L_PA_DAC_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_R_TEST] =
+ MSM8X16_WCD_A_ANALOG_RX_HPH_R_TEST__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_R_PA_DAC_CTL] =
+ MSM8X16_WCD_A_ANALOG_RX_HPH_R_PA_DAC_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_EAR_CTL] =
+ MSM8X16_WCD_A_ANALOG_RX_EAR_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_ATEST] =
+ MSM8X16_WCD_A_ANALOG_RX_ATEST__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_STATUS] =
+ MSM8X16_WCD_A_ANALOG_RX_HPH_STATUS__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_EAR_STATUS] =
+ MSM8X16_WCD_A_ANALOG_RX_EAR_STATUS__POR,
+ [MSM8X16_WCD_A_ANALOG_SPKR_DAC_CTL] =
+ MSM8X16_WCD_A_ANALOG_SPKR_DAC_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_SPKR_DRV_CLIP_DET] =
+ MSM8X16_WCD_A_ANALOG_SPKR_DRV_CLIP_DET__POR,
+ [MSM8X16_WCD_A_ANALOG_SPKR_DRV_CTL] =
+ MSM8X16_WCD_A_ANALOG_SPKR_DRV_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_SPKR_ANA_BIAS_SET] =
+ MSM8X16_WCD_A_ANALOG_SPKR_ANA_BIAS_SET__POR,
+ [MSM8X16_WCD_A_ANALOG_SPKR_OCP_CTL] =
+ MSM8X16_WCD_A_ANALOG_SPKR_OCP_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_SPKR_PWRSTG_CTL] =
+ MSM8X16_WCD_A_ANALOG_SPKR_PWRSTG_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_SPKR_DRV_MISC] =
+ MSM8X16_WCD_A_ANALOG_SPKR_DRV_MISC__POR,
+ [MSM8X16_WCD_A_ANALOG_SPKR_DRV_DBG] =
+ MSM8X16_WCD_A_ANALOG_SPKR_DRV_DBG__POR,
+ [MSM8X16_WCD_A_ANALOG_CURRENT_LIMIT] =
+ MSM8X16_WCD_A_ANALOG_CURRENT_LIMIT__POR,
+ [MSM8X16_WCD_A_ANALOG_OUTPUT_VOLTAGE] =
+ MSM8X16_WCD_A_ANALOG_OUTPUT_VOLTAGE__POR,
+ [MSM8X16_WCD_A_ANALOG_BYPASS_MODE] =
+ MSM8X16_WCD_A_ANALOG_BYPASS_MODE__POR,
+ [MSM8X16_WCD_A_ANALOG_BOOST_EN_CTL] =
+ MSM8X16_WCD_A_ANALOG_BOOST_EN_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_SLOPE_COMP_IP_ZERO] =
+ MSM8X16_WCD_A_ANALOG_SLOPE_COMP_IP_ZERO__POR,
+ [MSM8X16_WCD_A_ANALOG_RDSON_MAX_DUTY_CYCLE] =
+ MSM8X16_WCD_A_ANALOG_RDSON_MAX_DUTY_CYCLE__POR,
+ [MSM8X16_WCD_A_ANALOG_BOOST_TEST1_1] =
+ MSM8X16_WCD_A_ANALOG_BOOST_TEST1_1__POR,
+ [MSM8X16_WCD_A_ANALOG_BOOST_TEST_2] =
+ MSM8X16_WCD_A_ANALOG_BOOST_TEST_2__POR,
+ [MSM8X16_WCD_A_ANALOG_SPKR_SAR_STATUS] =
+ MSM8X16_WCD_A_ANALOG_SPKR_SAR_STATUS__POR,
+ [MSM8X16_WCD_A_ANALOG_SPKR_DRV_STATUS] =
+ MSM8X16_WCD_A_ANALOG_SPKR_DRV_STATUS__POR,
+ [MSM8X16_WCD_A_ANALOG_PBUS_ADD_CSR] =
+ MSM8X16_WCD_A_ANALOG_PBUS_ADD_CSR__POR,
+ [MSM8X16_WCD_A_ANALOG_PBUS_ADD_SEL] =
+ MSM8X16_WCD_A_ANALOG_PBUS_ADD_SEL__POR,
+ [MSM8X16_WCD_A_CDC_CLK_RX_RESET_CTL] =
+ MSM8X16_WCD_A_CDC_CLK_RX_RESET_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CLK_TX_RESET_B1_CTL] =
+ MSM8X16_WCD_A_CDC_CLK_TX_RESET_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CLK_DMIC_B1_CTL] =
+ MSM8X16_WCD_A_CDC_CLK_DMIC_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CLK_RX_I2S_CTL] =
+ MSM8X16_WCD_A_CDC_CLK_RX_I2S_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CLK_TX_I2S_CTL] =
+ MSM8X16_WCD_A_CDC_CLK_TX_I2S_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CLK_OTHR_RESET_B1_CTL] =
+ MSM8X16_WCD_A_CDC_CLK_OTHR_RESET_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CLK_TX_CLK_EN_B1_CTL] =
+ MSM8X16_WCD_A_CDC_CLK_TX_CLK_EN_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CLK_OTHR_CTL] =
+ MSM8X16_WCD_A_CDC_CLK_OTHR_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CLK_RX_B1_CTL] =
+ MSM8X16_WCD_A_CDC_CLK_RX_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CLK_MCLK_CTL] =
+ MSM8X16_WCD_A_CDC_CLK_MCLK_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CLK_PDM_CTL] =
+ MSM8X16_WCD_A_CDC_CLK_PDM_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CLK_SD_CTL] =
+ MSM8X16_WCD_A_CDC_CLK_SD_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX1_B1_CTL] =
+ MSM8X16_WCD_A_CDC_RX1_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX2_B1_CTL] =
+ MSM8X16_WCD_A_CDC_RX2_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX3_B1_CTL] =
+ MSM8X16_WCD_A_CDC_RX3_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX1_B2_CTL] =
+ MSM8X16_WCD_A_CDC_RX1_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX2_B2_CTL] =
+ MSM8X16_WCD_A_CDC_RX2_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX3_B2_CTL] =
+ MSM8X16_WCD_A_CDC_RX3_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX1_B3_CTL] =
+ MSM8X16_WCD_A_CDC_RX1_B3_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX2_B3_CTL] =
+ MSM8X16_WCD_A_CDC_RX2_B3_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX3_B3_CTL] =
+ MSM8X16_WCD_A_CDC_RX3_B3_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX1_B4_CTL] =
+ MSM8X16_WCD_A_CDC_RX1_B4_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX2_B4_CTL] =
+ MSM8X16_WCD_A_CDC_RX2_B4_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX3_B4_CTL] =
+ MSM8X16_WCD_A_CDC_RX3_B4_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX1_B5_CTL] =
+ MSM8X16_WCD_A_CDC_RX1_B5_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX2_B5_CTL] =
+ MSM8X16_WCD_A_CDC_RX2_B5_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX3_B5_CTL] =
+ MSM8X16_WCD_A_CDC_RX3_B5_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX1_B6_CTL] =
+ MSM8X16_WCD_A_CDC_RX1_B6_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX2_B6_CTL] =
+ MSM8X16_WCD_A_CDC_RX2_B6_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX3_B6_CTL] =
+ MSM8X16_WCD_A_CDC_RX3_B6_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX1_VOL_CTL_B1_CTL] =
+ MSM8X16_WCD_A_CDC_RX1_VOL_CTL_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX2_VOL_CTL_B1_CTL] =
+ MSM8X16_WCD_A_CDC_RX2_VOL_CTL_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX3_VOL_CTL_B1_CTL] =
+ MSM8X16_WCD_A_CDC_RX3_VOL_CTL_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX1_VOL_CTL_B2_CTL] =
+ MSM8X16_WCD_A_CDC_RX1_VOL_CTL_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX2_VOL_CTL_B2_CTL] =
+ MSM8X16_WCD_A_CDC_RX2_VOL_CTL_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX3_VOL_CTL_B2_CTL] =
+ MSM8X16_WCD_A_CDC_RX3_VOL_CTL_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_TOP_GAIN_UPDATE] =
+ MSM8X16_WCD_A_CDC_TOP_GAIN_UPDATE__POR,
+ [MSM8X16_WCD_A_CDC_TOP_CTL] =
+ MSM8X16_WCD_A_CDC_TOP_CTL__POR,
+ [MSM8X16_WCD_A_CDC_DEBUG_DESER1_CTL] =
+ MSM8X16_WCD_A_CDC_DEBUG_DESER1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_DEBUG_DESER2_CTL] =
+ MSM8X16_WCD_A_CDC_DEBUG_DESER2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_DEBUG_B1_CTL_CFG] =
+ MSM8X16_WCD_A_CDC_DEBUG_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_DEBUG_B2_CTL_CFG] =
+ MSM8X16_WCD_A_CDC_DEBUG_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_DEBUG_B3_CTL_CFG] =
+ MSM8X16_WCD_A_CDC_DEBUG_B3_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B1_CTL] =
+ MSM8X16_WCD_A_CDC_IIR1_GAIN_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B1_CTL] =
+ MSM8X16_WCD_A_CDC_IIR2_GAIN_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B2_CTL] =
+ MSM8X16_WCD_A_CDC_IIR1_GAIN_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B2_CTL] =
+ MSM8X16_WCD_A_CDC_IIR2_GAIN_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B3_CTL] =
+ MSM8X16_WCD_A_CDC_IIR1_GAIN_B3_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B3_CTL] =
+ MSM8X16_WCD_A_CDC_IIR2_GAIN_B3_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B4_CTL] =
+ MSM8X16_WCD_A_CDC_IIR1_GAIN_B4_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B4_CTL] =
+ MSM8X16_WCD_A_CDC_IIR2_GAIN_B4_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B5_CTL] =
+ MSM8X16_WCD_A_CDC_IIR1_GAIN_B5_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B5_CTL] =
+ MSM8X16_WCD_A_CDC_IIR2_GAIN_B5_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B6_CTL] =
+ MSM8X16_WCD_A_CDC_IIR1_GAIN_B6_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B6_CTL] =
+ MSM8X16_WCD_A_CDC_IIR2_GAIN_B6_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B7_CTL] =
+ MSM8X16_WCD_A_CDC_IIR1_GAIN_B7_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B7_CTL] =
+ MSM8X16_WCD_A_CDC_IIR2_GAIN_B7_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B8_CTL] =
+ MSM8X16_WCD_A_CDC_IIR1_GAIN_B8_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B8_CTL] =
+ MSM8X16_WCD_A_CDC_IIR2_GAIN_B8_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR1_CTL] =
+ MSM8X16_WCD_A_CDC_IIR1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR2_CTL] =
+ MSM8X16_WCD_A_CDC_IIR2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_TIMER_CTL] =
+ MSM8X16_WCD_A_CDC_IIR1_GAIN_TIMER_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_TIMER_CTL] =
+ MSM8X16_WCD_A_CDC_IIR2_GAIN_TIMER_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR1_COEF_B1_CTL] =
+ MSM8X16_WCD_A_CDC_IIR1_COEF_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR2_COEF_B1_CTL] =
+ MSM8X16_WCD_A_CDC_IIR2_COEF_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR1_COEF_B2_CTL] =
+ MSM8X16_WCD_A_CDC_IIR1_COEF_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR2_COEF_B2_CTL] =
+ MSM8X16_WCD_A_CDC_IIR2_COEF_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_RX1_B1_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_RX1_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_RX1_B2_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_RX1_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_RX1_B3_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_RX1_B3_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_RX2_B1_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_RX2_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_RX2_B2_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_RX2_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_RX2_B3_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_RX2_B3_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_RX3_B1_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_RX3_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_RX3_B2_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_RX3_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_TX_B1_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_TX_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_EQ1_B1_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_EQ1_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_EQ1_B2_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_EQ1_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_EQ1_B3_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_EQ1_B3_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_EQ1_B4_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_EQ1_B4_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_EQ2_B1_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_EQ2_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_EQ2_B2_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_EQ2_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_EQ2_B3_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_EQ2_B3_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_EQ2_B4_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_EQ2_B4_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_TX_I2S_SD1_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_TX_I2S_SD1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_TX1_VOL_CTL_TIMER] =
+ MSM8X16_WCD_A_CDC_TX1_VOL_CTL_TIMER__POR,
+ [MSM8X16_WCD_A_CDC_TX2_VOL_CTL_TIMER] =
+ MSM8X16_WCD_A_CDC_TX2_VOL_CTL_TIMER__POR,
+ [MSM8X16_WCD_A_CDC_TX1_VOL_CTL_GAIN] =
+ MSM8X16_WCD_A_CDC_TX1_VOL_CTL_GAIN__POR,
+ [MSM8X16_WCD_A_CDC_TX2_VOL_CTL_GAIN] =
+ MSM8X16_WCD_A_CDC_TX2_VOL_CTL_GAIN__POR,
+ [MSM8X16_WCD_A_CDC_TX1_VOL_CTL_CFG] =
+ MSM8X16_WCD_A_CDC_TX1_VOL_CTL_CFG__POR,
+ [MSM8X16_WCD_A_CDC_TX2_VOL_CTL_CFG] =
+ MSM8X16_WCD_A_CDC_TX2_VOL_CTL_CFG__POR,
+ [MSM8X16_WCD_A_CDC_TX1_MUX_CTL] =
+ MSM8X16_WCD_A_CDC_TX1_MUX_CTL__POR,
+ [MSM8X16_WCD_A_CDC_TX2_MUX_CTL] =
+ MSM8X16_WCD_A_CDC_TX2_MUX_CTL__POR,
+ [MSM8X16_WCD_A_CDC_TX1_CLK_FS_CTL] =
+ MSM8X16_WCD_A_CDC_TX1_CLK_FS_CTL__POR,
+ [MSM8X16_WCD_A_CDC_TX2_CLK_FS_CTL] =
+ MSM8X16_WCD_A_CDC_TX2_CLK_FS_CTL__POR,
+ [MSM8X16_WCD_A_CDC_TX1_DMIC_CTL] =
+ MSM8X16_WCD_A_CDC_TX1_DMIC_CTL__POR,
+ [MSM8X16_WCD_A_CDC_TX2_DMIC_CTL] =
+ MSM8X16_WCD_A_CDC_TX2_DMIC_CTL__POR,
+};
diff --git a/sound/soc/codecs/msm8x16-wcd.c b/sound/soc/codecs/msm8x16-wcd.c
new file mode 100644
index 0000000000000..0554f21d8f991
--- /dev/null
+++ b/sound/soc/codecs/msm8x16-wcd.c
@@ -0,0 +1,2266 @@
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/tlv.h>
+
+#include "msm8x16-wcd.h"
+#include "msm8x16_wcd_registers.h"
+
/* Supported PCM rates and sample formats for both codec DAIs. */
#define MSM8X16_WCD_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
		SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000)
#define MSM8X16_WCD_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
		SNDRV_PCM_FMTBIT_S24_LE)

/* PMIC revision codes, used to select register-default tables. */
#define TOMBAK_VERSION_1_0	0
#define TOMBAK_IS_1_0(ver) \
	((ver == TOMBAK_VERSION_1_0) ? 1 : 0)

/* Bits tracking which power amplifiers are currently disabled. */
#define HPHL_PA_DISABLE	(0x01 << 1)
#define HPHR_PA_DISABLE	(0x01 << 2)
#define EAR_PA_DISABLE	(0x01 << 3)
#define SPKR_PA_DISABLE	(0x01 << 4)

/* Micbias programming range (uV) and step size. */
#define MICBIAS_DEFAULT_VAL	1800000
#define MICBIAS_MIN_VAL	1600000
#define MICBIAS_STEP_SIZE	50000

/* Speaker boost programming range (mV) and step size. */
#define DEFAULT_BOOST_VOLTAGE	5000
#define MIN_BOOST_VOLTAGE	4000
#define MAX_BOOST_VOLTAGE	5550
#define BOOST_VOLTAGE_STEP	50

/*
 * Convert a voltage to a register step count relative to a minimum.
 *
 * Fix: the original expansion ended in a stray ';' (which breaks any
 * use of the macro inside a larger expression) and left its arguments
 * unparenthesized, so a low-precedence argument such as "2 * 50000"
 * would mis-associate with the division.
 */
#define VOLTAGE_CONVERTER(value, min_value, step_size) \
	(((value) - (min_value)) / (step_size))
+
+/* Indices of the codec DAIs registered below. */
+enum {
+	AIF1_PB = 0,
+	AIF1_CAP,
+	NUM_CODEC_DAIS,
+};
+
+/* RX-path digital volume-control register addresses, indexed by path. */
+static unsigned long rx_digital_gain_reg[] = {
+	MSM8X16_WCD_A_CDC_RX1_VOL_CTL_B2_CTL,
+	MSM8X16_WCD_A_CDC_RX2_VOL_CTL_B2_CTL,
+	MSM8X16_WCD_A_CDC_RX3_VOL_CTL_B2_CTL,
+};
+
+/* TX-path digital gain register addresses, indexed by decimator. */
+static unsigned long tx_digital_gain_reg[] = {
+	MSM8X16_WCD_A_CDC_TX1_VOL_CTL_GAIN,
+	MSM8X16_WCD_A_CDC_TX2_VOL_CTL_GAIN,
+};
+
+/* Per-device driver state for the msm8x16 codec. */
+struct wcd_chip {
+	struct regmap *analog_map;	/* analog regs, accessed at analog_base + reg */
+	struct regmap *digital_map;	/* digital codec regs, accessed at remapped offset */
+	unsigned int analog_base;	/* base of the analog register window */
+	u16 pmic_rev;		/* PMIC revision, read at codec probe */
+	u16 codec_version;	/* peripheral subtype, read at codec probe */
+	bool spk_boost_set;	/* speaker boost state cached for the kcontrol */
+	u32 mute_mask;
+	u32 rx_bias_count;
+	bool ear_pa_boost_set;
+	bool lb_mode;		/* loopback mode flag */
+	struct clk *mclk;	/* codec master clock (rate checked in clock enable) */
+
+	/* Supplies; voltages are programmed in msm8x16_wcd_codec_probe(). */
+	struct regulator *vddio;
+	struct regulator *vdd_pa;
+	struct regulator *vdd_px;
+	struct regulator *vdd_cp;
+	struct regulator *vdd_mic_bias;
+};
+
+/* Whether @reg must always be read from hardware (per generated table). */
+static int msm8x16_wcd_volatile(struct snd_soc_codec *codec, unsigned int reg)
+{
+	return msm8x16_wcd_reg_readonly[reg];
+}
+
+/* Whether @reg is readable (per generated table). */
+static int msm8x16_wcd_readable(struct snd_soc_codec *ssc, unsigned int reg)
+{
+	return msm8x16_wcd_reg_readable[reg];
+}
+
+static int __msm8x16_wcd_reg_write(struct snd_soc_codec *codec,
+ unsigned short reg, u8 val)
+{
+ int ret = -EINVAL;
+ struct wcd_chip *chip = dev_get_drvdata(codec->dev);
+
+ if (MSM8X16_WCD_IS_TOMBAK_REG(reg)) {
+ ret = regmap_write(chip->analog_map,
+ chip->analog_base + reg, val);
+ } else if (MSM8X16_WCD_IS_DIGITAL_REG(reg)) {
+ u32 temp = val & 0x000000FF;
+ u16 offset = (reg ^ 0x0200) & 0x0FFF;
+
+ ret = regmap_write(chip->digital_map, offset, temp);
+ }
+
+ return ret;
+}
+
+static int msm8x16_wcd_write(struct snd_soc_codec *codec, unsigned int reg,
+ unsigned int value)
+{
+ if (reg == SND_SOC_NOPM)
+ return 0;
+
+ BUG_ON(reg > MSM8X16_WCD_MAX_REGISTER);
+ if (!msm8x16_wcd_volatile(codec, reg))
+ msm8x16_wcd_reset_reg_defaults[reg] = value;
+
+ return __msm8x16_wcd_reg_write(codec, reg, (u8)value);
+}
+
+/*
+ * Read a codec register from the backing regmap: analog (Tombak)
+ * registers via ->analog_map at ->analog_base + reg, digital codec
+ * registers via ->digital_map at the bit-9-flipped offset.  Returns
+ * the 8-bit register value on success, a negative errno on failure.
+ */
+static int __msm8x16_wcd_reg_read(struct snd_soc_codec *codec,
+				unsigned short reg)
+{
+	int ret = -EINVAL;
+	u32 temp = 0;
+	struct wcd_chip *chip = dev_get_drvdata(codec->dev);
+
+	if (MSM8X16_WCD_IS_TOMBAK_REG(reg)) {
+		ret = regmap_read(chip->analog_map,
+				chip->analog_base + reg, &temp);
+	} else if (MSM8X16_WCD_IS_DIGITAL_REG(reg)) {
+		u32 val;
+		/* Digital window offsets differ from reg by bit 9. */
+		u16 offset = (reg ^ 0x0200) & 0x0FFF;
+
+		ret = regmap_read(chip->digital_map, offset, &val);
+		temp = (u8)val;
+	}
+
+	if (ret < 0) {
+		dev_err(codec->dev,
+			"%s: codec read failed for reg 0x%x\n",
+			__func__, reg);
+		return ret;
+	}
+
+	dev_dbg(codec->dev, "Read 0x%02x from 0x%x\n", temp, reg);
+
+	return temp;
+}
+
+static unsigned int msm8x16_wcd_read(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ unsigned int val;
+
+ if (reg == SND_SOC_NOPM)
+ return 0;
+
+ BUG_ON(reg > MSM8X16_WCD_MAX_REGISTER);
+
+ if (!msm8x16_wcd_volatile(codec, reg) &&
+ msm8x16_wcd_readable(codec, reg) &&
+ reg < codec->driver->reg_cache_size) {
+ return msm8x16_wcd_reset_reg_defaults[reg];
+ }
+
+ val = __msm8x16_wcd_reg_read(codec, reg);
+
+ return val;
+}
+
+/* Register defaults applied on Tombak v1.0 PMICs. */
+static const struct msm8x16_wcd_reg_mask_val msm8x16_wcd_reg_defaults[] = {
+	MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_ANALOG_SPKR_DAC_CTL, 0x03),
+	MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_ANALOG_CURRENT_LIMIT, 0x82),
+	MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_ANALOG_SPKR_OCP_CTL, 0xE1),
+};
+
+/* Register defaults applied on post-1.0 (2.0 and later) PMICs. */
+static const struct msm8x16_wcd_reg_mask_val msm8x16_wcd_reg_defaults_2_0[] = {
+	MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_DIGITAL_PERPH_RESET_CTL3, 0x0F),
+	MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_ANALOG_TX_1_2_OPAMP_BIAS, 0x4B),
+	MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_ANALOG_NCP_FBCTRL, 0x28),
+	MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_ANALOG_SPKR_DRV_CTL, 0x69),
+	MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_ANALOG_SPKR_DRV_DBG, 0x01),
+	MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_ANALOG_BOOST_EN_CTL, 0x5F),
+	MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_ANALOG_SLOPE_COMP_IP_ZERO, 0x88),
+	MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_ANALOG_PERPH_RESET_CTL3, 0x0F),
+	MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_ANALOG_CURRENT_LIMIT, 0x82),
+	MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_ANALOG_SPKR_DAC_CTL, 0x03),
+	MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_ANALOG_SPKR_OCP_CTL, 0xE1),
+};
+
+/* Pulse the digital then analog peripheral soft resets.  Always 0. */
+static int msm8x16_wcd_bringup(struct snd_soc_codec *codec)
+{
+	snd_soc_write(codec, MSM8X16_WCD_A_DIGITAL_PERPH_RESET_CTL4, 0x01);
+	snd_soc_write(codec, MSM8X16_WCD_A_ANALOG_PERPH_RESET_CTL4, 0x01);
+	return 0;
+}
+
+/* One-time register init table (mask/value updates applied at bring-up). */
+static const struct msm8x16_wcd_reg_mask_val
+	msm8x16_wcd_codec_reg_init_val[] = {
+
+	/* Initialize current threshold to 350MA
+	 * number of wait and run cycles to 4096
+	 */
+	{MSM8X16_WCD_A_ANALOG_RX_COM_OCP_CTL, 0xFF, 0xD1},
+	{MSM8X16_WCD_A_ANALOG_RX_COM_OCP_COUNT, 0xFF, 0xFF},
+};
+
+static void msm8x16_wcd_codec_init_reg(struct snd_soc_codec *codec)
+{
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(msm8x16_wcd_codec_reg_init_val); i++)
+ snd_soc_update_bits(codec,
+ msm8x16_wcd_codec_reg_init_val[i].reg,
+ msm8x16_wcd_codec_reg_init_val[i].mask,
+ msm8x16_wcd_codec_reg_init_val[i].val);
+}
+
+static void msm8x16_wcd_update_reg_defaults(struct snd_soc_codec *codec)
+{
+ u32 i;
+ struct wcd_chip *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
+
+ if (TOMBAK_IS_1_0(msm8x16_wcd->pmic_rev)) {
+ for (i = 0; i < ARRAY_SIZE(msm8x16_wcd_reg_defaults); i++)
+ snd_soc_write(codec, msm8x16_wcd_reg_defaults[i].reg,
+ msm8x16_wcd_reg_defaults[i].val);
+ } else {
+ for (i = 0; i < ARRAY_SIZE(msm8x16_wcd_reg_defaults_2_0); i++)
+ snd_soc_write(codec,
+ msm8x16_wcd_reg_defaults_2_0[i].reg,
+ msm8x16_wcd_reg_defaults_2_0[i].val);
+ }
+}
+
+/*
+ * Restore the codec after (re)power: pulse the soft resets, replay the
+ * cached defaults into every readable register, then apply the init
+ * table and the PMIC-revision specific defaults.  Always returns 0.
+ */
+static int msm8x16_wcd_device_up(struct snd_soc_codec *codec)
+{
+	u32 reg;
+
+	dev_dbg(codec->dev, "%s: device up!\n", __func__);
+	msm8x16_wcd_bringup(codec);
+
+	for (reg = 0; reg < ARRAY_SIZE(msm8x16_wcd_reset_reg_defaults); reg++)
+		if (msm8x16_wcd_reg_readable[reg])
+			msm8x16_wcd_write(codec,
+				reg, msm8x16_wcd_reset_reg_defaults[reg]);
+
+	/* delay is required to make sure sound card state updated */
+	usleep_range(5000, 5100);
+
+	msm8x16_wcd_codec_init_reg(codec);
+	msm8x16_wcd_update_reg_defaults(codec);
+
+	return 0;
+}
+
+/*
+ * Gate the codec clock chain on or off.  On enable: MCLK, PDM clocks,
+ * master bias, digital reset release, top-level clocks, then pick the
+ * MCLK divider for a 12.288 or 9.6 MHz master clock (any other rate
+ * leaves the divider bit untouched).  Disable only reverses the top
+ * clock and PDM steps.  Always returns 0.
+ */
+static int msm8x16_wcd_codec_enable_clock_block(struct snd_soc_codec *codec,
+					 int enable)
+{
+	struct wcd_chip *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
+	unsigned long mclk_rate;
+
+	if (enable) {
+		snd_soc_update_bits(codec,
+			MSM8X16_WCD_A_CDC_CLK_MCLK_CTL, 0x01, 0x01);
+		snd_soc_update_bits(codec,
+			MSM8X16_WCD_A_CDC_CLK_PDM_CTL, 0x03, 0x03);
+		snd_soc_update_bits(codec,
+			MSM8X16_WCD_A_ANALOG_MASTER_BIAS_CTL, 0x30, 0x30);
+		snd_soc_update_bits(codec,
+			MSM8X16_WCD_A_DIGITAL_CDC_RST_CTL, 0x80, 0x80);
+		snd_soc_update_bits(codec,
+			MSM8X16_WCD_A_DIGITAL_CDC_TOP_CLK_CTL, 0x0C, 0x0C);
+
+		mclk_rate = clk_get_rate(msm8x16_wcd->mclk);
+
+		if (mclk_rate == 12288000)
+			snd_soc_update_bits(codec,
+				MSM8X16_WCD_A_CDC_TOP_CTL, 0x01, 0x00);
+		else if (mclk_rate == 9600000)
+			snd_soc_update_bits(codec,
+				MSM8X16_WCD_A_CDC_TOP_CTL, 0x01, 0x01);
+	} else {
+		snd_soc_update_bits(codec,
+			MSM8X16_WCD_A_DIGITAL_CDC_TOP_CLK_CTL, 0x0C, 0x00);
+		snd_soc_update_bits(codec,
+			MSM8X16_WCD_A_CDC_CLK_PDM_CTL, 0x03, 0x00);
+
+	}
+	return 0;
+}
+
+#define MICBIAS_EXT_BYP_CAP 0x00
+#define MICBIAS_NO_EXT_BYP_CAP 0x01
+
+static void msm8x16_wcd_configure_cap(struct snd_soc_codec *codec,
+ bool micbias1, bool micbias2)
+{
+
+// struct msm8916_asoc_mach_data *pdata = NULL;
+//FIXME should come from DT
+ int micbias1_cap_mode = MICBIAS_EXT_BYP_CAP, micbias2_cap_mode = MICBIAS_NO_EXT_BYP_CAP;
+
+ //pdata = snd_soc_card_get_drvdata(codec->card);
+
+ pr_debug("\n %s: micbias1 %x micbias2 = %d\n", __func__, micbias1,
+ micbias2);
+ if (micbias1 && micbias2) {
+ if ((micbias1_cap_mode
+ == MICBIAS_EXT_BYP_CAP) ||
+ (micbias2_cap_mode
+ == MICBIAS_EXT_BYP_CAP))
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_MICB_1_EN,
+ 0x40, (MICBIAS_EXT_BYP_CAP << 6));
+ else
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_MICB_1_EN,
+ 0x40, (MICBIAS_NO_EXT_BYP_CAP << 6));
+ } else if (micbias2) {
+ snd_soc_update_bits(codec, MSM8X16_WCD_A_ANALOG_MICB_1_EN,
+ 0x40, (micbias2_cap_mode << 6));
+ } else if (micbias1) {
+ snd_soc_update_bits(codec, MSM8X16_WCD_A_ANALOG_MICB_1_EN,
+ 0x40, (micbias1_cap_mode << 6));
+ } else {
+ snd_soc_update_bits(codec, MSM8X16_WCD_A_ANALOG_MICB_1_EN,
+ 0x40, 0x00);
+ }
+}
+
+static int msm8x16_wcd_codec_probe(struct snd_soc_codec *codec)
+{
+ struct wcd_chip *chip = dev_get_drvdata(codec->dev);
+ int err;
+
+ snd_soc_codec_set_drvdata(codec, chip);
+ chip->pmic_rev = snd_soc_read(codec, MSM8X16_WCD_A_DIGITAL_REVISION1);
+ dev_info(codec->dev, "%s :PMIC REV: %d", __func__,
+ chip->pmic_rev);
+
+ chip->codec_version = snd_soc_read(codec,
+ MSM8X16_WCD_A_DIGITAL_PERPH_SUBTYPE);
+ dev_info(codec->dev, "%s :CODEC Version: %d", __func__,
+ chip->codec_version);
+
+ msm8x16_wcd_device_up(codec);
+
+ /* Set initial cap mode */
+ msm8x16_wcd_configure_cap(codec, false, false);
+
+ regulator_set_voltage(chip->vddio, 1800000, 1800000);
+ err = regulator_enable(chip->vddio);
+ if (err < 0) {
+ dev_err(codec->dev, "failed to enable VDD regulator\n");
+ return err;
+ }
+ regulator_set_voltage(chip->vdd_pa, 1800000, 2200000);
+ err = regulator_enable(chip->vdd_pa);
+ if (err < 0) {
+ dev_err(codec->dev, "failed to enable VDD regulator\n");
+ return err;
+ }
+
+ regulator_set_voltage(chip->vdd_mic_bias, 3075000, 3075000);
+ err = regulator_enable(chip->vdd_mic_bias);
+ if (err < 0) {
+ dev_err(codec->dev, "failed to enable micbias regulator\n");
+ return err;
+ }
+ msm8x16_wcd_codec_enable_clock_block(codec, 1);
+
+ return 0;
+}
+
+/* DAI startup hook: trace only, no hardware work yet. */
+static int msm8x16_wcd_startup(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	dev_dbg(dai->codec->dev, "%s(): substream = %s  stream = %d\n",
+		__func__,
+		substream->name, substream->stream);
+	return 0;
+}
+
+/* DAI shutdown hook: trace only, no hardware work yet. */
+static void msm8x16_wcd_shutdown(struct snd_pcm_substream *substream,
+		struct snd_soc_dai *dai)
+{
+	dev_dbg(dai->codec->dev,
+		"%s(): substream = %s  stream = %d\n", __func__,
+		substream->name, substream->stream);
+}
+
+/* Stub: RX interpolator rate programming not implemented yet (TODO). */
+static int msm8x16_wcd_set_interpolator_rate(struct snd_soc_dai *dai,
+	u8 rx_fs_rate_reg_val, u32 sample_rate)
+{
+	return 0;
+}
+
+/* Stub: TX decimator rate programming not implemented yet (TODO). */
+static int msm8x16_wcd_set_decimator_rate(struct snd_soc_dai *dai,
+	u8 tx_fs_rate_reg_val, u32 sample_rate)
+{
+
+	return 0;
+}
+
+static int msm8x16_wcd_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ u8 tx_fs_rate, rx_fs_rate;
+ int ret;
+
+ dev_err(dai->codec->dev,
+ "%s: dai_name = %s DAI-ID %x rate %d num_ch %d format %d\n",
+ __func__, dai->name, dai->id, params_rate(params),
+ params_channels(params), params_format(params));
+
+ switch (params_rate(params)) {
+ case 8000:
+ tx_fs_rate = 0x00;
+ rx_fs_rate = 0x00;
+ break;
+ case 16000:
+ tx_fs_rate = 0x01;
+ rx_fs_rate = 0x20;
+ break;
+ case 32000:
+ tx_fs_rate = 0x02;
+ rx_fs_rate = 0x40;
+ break;
+ case 48000:
+ tx_fs_rate = 0x03;
+ rx_fs_rate = 0x60;
+ break;
+ case 96000:
+ tx_fs_rate = 0x04;
+ rx_fs_rate = 0x80;
+ break;
+ case 192000:
+ tx_fs_rate = 0x05;
+ rx_fs_rate = 0xA0;
+ break;
+ default:
+ dev_err(dai->codec->dev,
+ "%s: Invalid sampling rate %d\n", __func__,
+ params_rate(params));
+ return -EINVAL;
+ }
+
+ switch (substream->stream) {
+ case SNDRV_PCM_STREAM_CAPTURE:
+ snd_soc_update_bits(dai->codec,
+ MSM8X16_WCD_A_CDC_CLK_TX_I2S_CTL, 0x07, tx_fs_rate);
+ ret = msm8x16_wcd_set_decimator_rate(dai, tx_fs_rate,
+ params_rate(params));
+ if (ret < 0) {
+ dev_err(dai->codec->dev,
+ "%s: set decimator rate failed %d\n", __func__,
+ ret);
+ return ret;
+ }
+ break;
+ case SNDRV_PCM_STREAM_PLAYBACK:
+ ret = msm8x16_wcd_set_interpolator_rate(dai, rx_fs_rate,
+ params_rate(params));
+ if (ret < 0) {
+ dev_err(dai->codec->dev,
+ "%s: set decimator rate failed %d\n", __func__,
+ ret);
+ return ret;
+ }
+ break;
+ default:
+ dev_err(dai->codec->dev,
+ "%s: Invalid stream type %d\n", __func__,
+ substream->stream);
+ return -EINVAL;
+ }
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ snd_soc_update_bits(dai->codec,
+ MSM8X16_WCD_A_CDC_CLK_TX_I2S_CTL, 0x20, 0x20);
+ snd_soc_update_bits(dai->codec,
+ MSM8X16_WCD_A_CDC_CLK_RX_I2S_CTL, 0x20, 0x20);
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ snd_soc_update_bits(dai->codec,
+ MSM8X16_WCD_A_CDC_CLK_TX_I2S_CTL, 0x20, 0x00);
+ snd_soc_update_bits(dai->codec,
+ MSM8X16_WCD_A_CDC_CLK_RX_I2S_CTL, 0x20, 0x00);
+ break;
+ default:
+ dev_err(dai->dev, "%s: wrong format selected\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Stub sysclk hook: nothing to program yet, trace only. */
+static int msm8x16_wcd_set_dai_sysclk(struct snd_soc_dai *dai,
+		int clk_id, unsigned int freq, int dir)
+{
+	dev_dbg(dai->codec->dev, "%s\n", __func__);
+	return 0;
+}
+
+/* Stub channel-map hook: trace only. */
+static int msm8x16_wcd_set_channel_map(struct snd_soc_dai *dai,
+				unsigned int tx_num, unsigned int *tx_slot,
+				unsigned int rx_num, unsigned int *rx_slot)
+
+{
+	dev_dbg(dai->codec->dev, "%s\n", __func__);
+	return 0;
+}
+
+/* Stub DAI format hook: trace only. */
+static int msm8x16_wcd_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+	dev_dbg(dai->codec->dev, "%s\n", __func__);
+
+	return 0;
+}
+
+static struct snd_soc_dai_ops msm8x16_wcd_dai_ops = {
+ .startup = msm8x16_wcd_startup,
+ .shutdown = msm8x16_wcd_shutdown,
+ .hw_params = msm8x16_wcd_hw_params,
+ .set_sysclk = msm8x16_wcd_set_dai_sysclk,
+ .set_fmt = msm8x16_wcd_set_dai_fmt,
+ .set_channel_map = msm8x16_wcd_set_channel_map,
+};
+
+/*
+ * Codec DAIs: one I2S playback link and one I2S capture link.
+ * NOTE(review): rate_max is 192 kHz although MSM8X16_WCD_RATES only
+ * advertises up to 48 kHz, and channels_max of 3 (playback) / 4
+ * (capture) is unusual for I2S — confirm against the machine driver.
+ */
+static struct snd_soc_dai_driver msm8x16_wcd_codec_dai[] = {
+	[0] = {
+		.name = "msm8x16_wcd_i2s_rx1",
+		.id = AIF1_PB,
+		.playback = {
+			.stream_name = "AIF1 Playback",
+			.rates = MSM8X16_WCD_RATES,
+			.formats = MSM8X16_WCD_FORMATS,
+			.rate_max = 192000,
+			.rate_min = 8000,
+			.channels_min = 1,
+			.channels_max = 3,
+		},
+		.ops = &msm8x16_wcd_dai_ops,
+	},
+	[1] = {
+		.name = "msm8x16_wcd_i2s_tx1",
+		.id = AIF1_CAP,
+		.capture = {
+			.stream_name = "AIF1 Capture",
+			.rates = MSM8X16_WCD_RATES,
+			.formats = MSM8X16_WCD_FORMATS,
+			.rate_max = 192000,
+			.rate_min = 8000,
+			.channels_min = 1,
+			.channels_max = 4,
+		},
+		.ops = &msm8x16_wcd_dai_ops,
+	},
+};
+
/*
 * ASoC codec remove hook: nothing to tear down yet (TODO).
 * Fix: dropped the stray ';' after the function body (it is a function
 * definition, not a struct initializer).
 */
static int msm8x16_wcd_codec_remove(struct snd_soc_codec *codec)
{
	/* TODO */
	return 0;
}
+
+static int msm8x16_wcd_spk_boost_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct wcd_chip *msm8x16_wcd = dev_get_drvdata(codec->dev);
+
+ if (msm8x16_wcd->spk_boost_set == false) {
+ ucontrol->value.integer.value[0] = 0;
+ } else if (msm8x16_wcd->spk_boost_set == true) {
+ ucontrol->value.integer.value[0] = 1;
+ } else {
+ dev_err(codec->dev, "%s: ERROR: Unsupported Speaker Boost = %d\n",
+ __func__, msm8x16_wcd->spk_boost_set);
+ return -EINVAL;
+ }
+
+ dev_dbg(codec->dev, "%s: msm8x16_wcd->spk_boost_set = %d\n", __func__,
+ msm8x16_wcd->spk_boost_set);
+ return 0;
+}
+
+static int msm8x16_wcd_spk_boost_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct wcd_chip *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
+
+ switch (ucontrol->value.integer.value[0]) {
+ case 0:
+ msm8x16_wcd->spk_boost_set = false;
+ break;
+ case 1:
+ msm8x16_wcd->spk_boost_set = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ dev_dbg(codec->dev, "%s: msm8x16_wcd->spk_boost_set = %d\n",
+ __func__, msm8x16_wcd->spk_boost_set);
+ return 0;
+}
+
+/* Texts shared by the HPHL/HPHR DAPM switch muxes. */
+static const char * const hph_text[] = {
+	"ZERO", "Switch",
+};
+
+static const struct soc_enum hph_enum =
+	SOC_ENUM_SINGLE(0, 0, ARRAY_SIZE(hph_text), hph_text);
+
+static const struct snd_kcontrol_new hphl_mux[] = {
+	SOC_DAPM_ENUM("HPHL", hph_enum)
+};
+
+static const struct snd_kcontrol_new hphr_mux[] = {
+	SOC_DAPM_ENUM("HPHR", hph_enum)
+};
+
+/* Speaker DAC on/off switch (bit 7 of SPKR_DAC_CTL). */
+static const struct snd_kcontrol_new spkr_switch[] = {
+	SOC_DAPM_SINGLE("Switch",
+		MSM8X16_WCD_A_ANALOG_SPKR_DAC_CTL, 7, 1, 0)
+};
+
+static void msm8x16_wcd_codec_enable_adc_block(struct snd_soc_codec *codec,
+ int enable)
+{
+ //struct msm8x16_wcd_priv *wcd8x16 = snd_soc_codec_get_drvdata(codec);
+
+ dev_dbg(codec->dev, "%s %d\n", __func__, enable);
+
+ if (enable) {
+ //wcd8x16->adc_count++;
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_ANA_CLK_CTL,
+ 0x20, 0x20);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL,
+ 0x10, 0x10);
+ } else {
+ //wcd8x16->adc_count--;
+ //if (!wcd8x16->adc_count) {
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL,
+ 0x10, 0x00);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_ANA_CLK_CTL,
+ 0x20, 0x0);
+ //}
+ }
+}
+
+/* Volume TLVs: digital in 1 dB steps, analog in 0.25 dB steps (muted at min). */
+static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
+static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
+
+/* Mux option texts for the routing enums below. */
+static const char * const rx_mix1_text[] = {
+	"ZERO", "IIR1", "IIR2", "RX1", "RX2", "RX3"
+};
+
+static const char * const rx_mix2_text[] = {
+	"ZERO", "IIR1", "IIR2"
+};
+
+static const char * const dec_mux_text[] = {
+	"ZERO", "ADC1", "ADC2", "ADC3", "DMIC1", "DMIC2"
+};
+
+static const char * const adc2_mux_text[] = {
+	"ZERO", "INP2", "INP3"
+};
+
+static const char * const rdac2_mux_text[] = {
+	"ZERO", "RX2", "RX1"
+};
+
+static const char * const iir_inp1_text[] = {
+	"ZERO", "DEC1", "DEC2", "RX1", "RX2", "RX3"
+};
+
+/* NOTE(review): identical to iir_inp1_text and not referenced by any
+ * enum in this file — looks like a leftover duplicate.
+ */
+static const char * const iir1_inp1_text[] = {
+	"ZERO", "DEC1", "DEC2", "RX1", "RX2", "RX3"
+};
+
+static const struct soc_enum adc2_enum =
+	SOC_ENUM_SINGLE(0, 0, ARRAY_SIZE(adc2_mux_text), adc2_mux_text);
+
+/* RX1 MIX1 */
+static const struct soc_enum rx_mix1_inp1_chain_enum =
+	SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_CONN_RX1_B1_CTL,
+		0, 6, rx_mix1_text);
+
+static const struct soc_enum rx_mix1_inp2_chain_enum =
+	SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_CONN_RX1_B1_CTL,
+		3, 6, rx_mix1_text);
+
+static const struct soc_enum rx_mix1_inp3_chain_enum =
+	SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_CONN_RX1_B2_CTL,
+		0, 6, rx_mix1_text);
+/* RX1 MIX2 */
+static const struct soc_enum rx_mix2_inp1_chain_enum =
+	SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_CONN_RX1_B3_CTL,
+		0, 3, rx_mix2_text);
+
+/* RX2 MIX1 */
+static const struct soc_enum rx2_mix1_inp1_chain_enum =
+	SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_CONN_RX2_B1_CTL,
+		0, 6, rx_mix1_text);
+
+static const struct soc_enum rx2_mix1_inp2_chain_enum =
+	SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_CONN_RX2_B1_CTL,
+		3, 6, rx_mix1_text);
+
+/* NOTE(review): INP3 uses the same register AND shift as INP1 above,
+ * unlike the RX1 chain where INP3 lives in ..._RX1_B2_CTL — suspected
+ * copy-paste; verify against the register spec (likely RX2_B2_CTL).
+ */
+static const struct soc_enum rx2_mix1_inp3_chain_enum =
+	SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_CONN_RX2_B1_CTL,
+		0, 6, rx_mix1_text);
+
+/* RX2 MIX2 */
+static const struct soc_enum rx2_mix2_inp1_chain_enum =
+	SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_CONN_RX2_B3_CTL,
+		0, 3, rx_mix2_text);
+
+/* RX3 MIX1 */
+static const struct soc_enum rx3_mix1_inp1_chain_enum =
+	SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_CONN_RX3_B1_CTL,
+		0, 6, rx_mix1_text);
+
+static const struct soc_enum rx3_mix1_inp2_chain_enum =
+	SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_CONN_RX3_B1_CTL,
+		3, 6, rx_mix1_text);
+
+/* DEC */
+static const struct soc_enum dec1_mux_enum =
+	SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_CONN_TX_B1_CTL,
+		0, 6, dec_mux_text);
+
+static const struct soc_enum dec2_mux_enum =
+	SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_CONN_TX_B1_CTL,
+		3, 6, dec_mux_text);
+
+static const struct soc_enum rdac2_mux_enum =
+	SOC_ENUM_SINGLE(MSM8X16_WCD_A_DIGITAL_CDC_CONN_HPHR_DAC_CTL,
+		0, 3, rdac2_mux_text);
+
+static const struct soc_enum iir1_inp1_mux_enum =
+	SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_CONN_EQ1_B1_CTL,
+		0, 6, iir_inp1_text);
+
+static const struct soc_enum iir2_inp1_mux_enum =
+	SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_CONN_EQ2_B1_CTL,
+		0, 6, iir_inp1_text);
+static const struct snd_kcontrol_new iir2_inp1_mux =
+	SOC_DAPM_ENUM("IIR2 INP1 Mux", iir2_inp1_mux_enum);
+
+/* NOTE(review): same register/shift as rx3_mix1_inp1_chain_enum —
+ * suspected copy-paste, compare the RX1 chain (INP3 in ..._B2_CTL).
+ */
+static const struct soc_enum rx3_mix1_inp3_chain_enum =
+	SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_CONN_RX3_B1_CTL,
+		0, 6, rx_mix1_text);
+static const struct snd_kcontrol_new rx_mix1_inp1_mux =
+	SOC_DAPM_ENUM("RX1 MIX1 INP1 Mux", rx_mix1_inp1_chain_enum);
+
+/* DAPM kcontrols wrapping the enums above. */
+static const struct snd_kcontrol_new dec1_mux =
+	SOC_DAPM_ENUM("DEC1 MUX Mux", dec1_mux_enum);
+
+static const struct snd_kcontrol_new dec2_mux =
+	SOC_DAPM_ENUM("DEC2 MUX Mux", dec2_mux_enum);
+
+static const struct snd_kcontrol_new rdac2_mux =
+	SOC_DAPM_ENUM("RDAC2 MUX Mux", rdac2_mux_enum);
+
+static const struct snd_kcontrol_new iir1_inp1_mux =
+	SOC_DAPM_ENUM("IIR1 INP1 Mux", iir1_inp1_mux_enum);
+
+static const struct snd_kcontrol_new rx_mix1_inp2_mux =
+	SOC_DAPM_ENUM("RX1 MIX1 INP2 Mux", rx_mix1_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx_mix1_inp3_mux =
+	SOC_DAPM_ENUM("RX1 MIX1 INP3 Mux", rx_mix1_inp3_chain_enum);
+
+static const struct snd_kcontrol_new rx2_mix1_inp1_mux =
+	SOC_DAPM_ENUM("RX2 MIX1 INP1 Mux", rx2_mix1_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx2_mix1_inp2_mux =
+	SOC_DAPM_ENUM("RX2 MIX1 INP2 Mux", rx2_mix1_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx2_mix1_inp3_mux =
+	SOC_DAPM_ENUM("RX2 MIX1 INP3 Mux", rx2_mix1_inp3_chain_enum);
+
+static const struct snd_kcontrol_new rx3_mix1_inp1_mux =
+	SOC_DAPM_ENUM("RX3 MIX1 INP1 Mux", rx3_mix1_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx3_mix1_inp2_mux =
+	SOC_DAPM_ENUM("RX3 MIX1 INP2 Mux", rx3_mix1_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx3_mix1_inp3_mux =
+	SOC_DAPM_ENUM("RX3 MIX1 INP3 Mux", rx3_mix1_inp3_chain_enum);
+
+static const struct snd_kcontrol_new rx1_mix2_inp1_mux =
+	SOC_DAPM_ENUM("RX1 MIX2 INP1 Mux", rx_mix2_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx2_mix2_inp1_mux =
+	SOC_DAPM_ENUM("RX2 MIX2 INP1 Mux", rx2_mix2_inp1_chain_enum);
+
+static const struct snd_kcontrol_new tx_adc2_mux =
+	SOC_DAPM_ENUM("ADC2 MUX Mux", adc2_enum);
+
+static const char * const msm8x16_wcd_loopback_mode_ctrl_text[] = {
+ "DISABLE", "ENABLE"};
+static const struct soc_enum msm8x16_wcd_loopback_mode_ctl_enum[] = {
+ SOC_ENUM_SINGLE_EXT(2, msm8x16_wcd_loopback_mode_ctrl_text),
+};
+
+/*
+ * DAPM event handler for the on-demand mic-bias supply widget.
+ *
+ * Enables the vdd-micbias regulator before the widget powers up and
+ * disables it after power-down; other events are ignored.  Returns the
+ * regulator call's error code (0 on success).
+ */
+static int msm8x16_wcd_codec_enable_on_demand_supply(
+ struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ int ret = 0;
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct wcd_chip *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ ret = regulator_enable(msm8x16_wcd->vdd_mic_bias);
+ if (ret)
+ dev_err(codec->dev, "%s: Failed to enable vdd micbias\n",
+ __func__);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ ret = regulator_disable(msm8x16_wcd->vdd_mic_bias);
+ if (ret)
+ dev_err(codec->dev, "%s: Failed to disable vdd-micbias\n",
+ __func__);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/* Userspace enum controls for EAR PA boost, EAR PA gain and speaker
+ * boost.  These are _EXT enums (no register backing); the get/set
+ * handlers live elsewhere in this file.
+ */
+static const char * const msm8x16_wcd_ear_pa_boost_ctrl_text[] = {
+ "DISABLE", "ENABLE"};
+static const struct soc_enum msm8x16_wcd_ear_pa_boost_ctl_enum[] = {
+ SOC_ENUM_SINGLE_EXT(2, msm8x16_wcd_ear_pa_boost_ctrl_text),
+};
+
+static const char * const msm8x16_wcd_ear_pa_gain_text[] = {
+ "POS_6_DB", "POS_1P5_DB"};
+static const struct soc_enum msm8x16_wcd_ear_pa_gain_enum[] = {
+ SOC_ENUM_SINGLE_EXT(2, msm8x16_wcd_ear_pa_gain_text),
+};
+
+static const char * const msm8x16_wcd_spk_boost_ctrl_text[] = {
+ "DISABLE", "ENABLE"};
+static const struct soc_enum msm8x16_wcd_spk_boost_ctl_enum[] = {
+ SOC_ENUM_SINGLE_EXT(2, msm8x16_wcd_spk_boost_ctrl_text),
+};
+
+/* Cut-off frequency choices for the high-pass filters (TX decimators
+ * and RX mixers); selected via the enums below.
+ */
+static const char * const cf_text[] = {
+ "MIN_3DB_4Hz", "MIN_3DB_75Hz", "MIN_3DB_150Hz"
+};
+
+static const struct soc_enum cf_dec1_enum =
+ SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_TX1_MUX_CTL, 4, 3, cf_text);
+
+static const struct soc_enum cf_dec2_enum =
+ SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_TX2_MUX_CTL, 4, 3, cf_text);
+
+static const struct soc_enum cf_rxmix1_enum =
+ SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_RX1_B4_CTL, 0, 3, cf_text);
+
+static const struct soc_enum cf_rxmix2_enum =
+ SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_RX2_B4_CTL, 0, 3, cf_text);
+
+static const struct soc_enum cf_rxmix3_enum =
+ SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_RX3_B4_CTL, 0, 3, cf_text);
+
+/*
+ * ALSA mixer controls exported by this codec: speaker boost switch,
+ * analog ADC gains, signed RX digital volumes, and RX high-pass filter
+ * enable/cut-off controls.  The TLV tables (analog_gain, digital_gain)
+ * are defined earlier in this file.
+ */
+static const struct snd_kcontrol_new msm8x16_wcd_snd_controls[] = {
+
+ SOC_ENUM_EXT("Speaker Boost", msm8x16_wcd_spk_boost_ctl_enum[0],
+ msm8x16_wcd_spk_boost_get, msm8x16_wcd_spk_boost_set),
+
+ SOC_SINGLE_TLV("ADC1 Volume", MSM8X16_WCD_A_ANALOG_TX_1_EN, 3,
+ 8, 0, analog_gain),
+ SOC_SINGLE_TLV("ADC2 Volume", MSM8X16_WCD_A_ANALOG_TX_2_EN, 3,
+ 8, 0, analog_gain),
+ SOC_SINGLE_TLV("ADC3 Volume", MSM8X16_WCD_A_ANALOG_TX_3_EN, 3,
+ 8, 0, analog_gain),
+
+ SOC_SINGLE_SX_TLV("RX1 Digital Volume",
+ MSM8X16_WCD_A_CDC_RX1_VOL_CTL_B2_CTL,
+ 0, -84, 40, digital_gain),
+ SOC_SINGLE_SX_TLV("RX2 Digital Volume",
+ MSM8X16_WCD_A_CDC_RX2_VOL_CTL_B2_CTL,
+ 0, -84, 40, digital_gain),
+ SOC_SINGLE_SX_TLV("RX3 Digital Volume",
+ MSM8X16_WCD_A_CDC_RX3_VOL_CTL_B2_CTL,
+ 0, -84, 40, digital_gain),
+
+ SOC_SINGLE("RX1 HPF Switch",
+ MSM8X16_WCD_A_CDC_RX1_B5_CTL, 2, 1, 0),
+ SOC_SINGLE("RX2 HPF Switch",
+ MSM8X16_WCD_A_CDC_RX2_B5_CTL, 2, 1, 0),
+ SOC_SINGLE("RX3 HPF Switch",
+ MSM8X16_WCD_A_CDC_RX3_B5_CTL, 2, 1, 0),
+
+ SOC_ENUM("RX1 HPF cut off", cf_rxmix1_enum),
+ SOC_ENUM("RX2 HPF cut off", cf_rxmix2_enum),
+ SOC_ENUM("RX3 HPF cut off", cf_rxmix3_enum),
+};
+
+/* DAPM switch routing the EAR PA output (bit 5 of RX_EAR_CTL) */
+static const struct snd_kcontrol_new ear_pa_switch[] = {
+ SOC_DAPM_SINGLE("Switch",
+ MSM8X16_WCD_A_ANALOG_RX_EAR_CTL, 5, 1, 0)
+};
+
+/*
+ * DAPM event handler for the earpiece PA widget.
+ *
+ * Sequences RX_EAR_CTL bits around power transitions: bit 0x80 selects
+ * the PA for EAR (cleared again after power-down to reduce turn-off
+ * pop), bit 0x40 enables the PA itself, and RX1_B6_CTL bit 0 mutes the
+ * RX1 path while the PA is changing state.  On PRE_PMD the mute is left
+ * asserted and EAR_PA_DISABLE is recorded in mute_mask so the
+ * interpolator handler can clear it later.
+ */
+static int msm8x16_wcd_codec_enable_ear_pa(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct wcd_chip *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ dev_dbg(codec->dev,
+ "%s: Sleeping 20ms after select EAR PA\n",
+ __func__);
+ snd_soc_update_bits(codec, MSM8X16_WCD_A_ANALOG_RX_EAR_CTL,
+ 0x80, 0x80);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ dev_dbg(codec->dev,
+ "%s: Sleeping 20ms after enabling EAR PA\n",
+ __func__);
+ snd_soc_update_bits(codec, MSM8X16_WCD_A_ANALOG_RX_EAR_CTL,
+ 0x40, 0x40);
+ usleep_range(7000, 7100);
+ /* unmute RX1 once the PA is up */
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_RX1_B6_CTL, 0x01, 0x00);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ /* mute RX1 before tearing the PA down */
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_RX1_B6_CTL, 0x01, 0x01);
+ msleep(20);
+ msm8x16_wcd->mute_mask |= EAR_PA_DISABLE;
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ dev_dbg(codec->dev,
+ "%s: Sleeping 7ms after disabling EAR PA\n",
+ __func__);
+ snd_soc_update_bits(codec, MSM8X16_WCD_A_ANALOG_RX_EAR_CTL,
+ 0x40, 0x00);
+ usleep_range(7000, 7100);
+ /*
+ * Reset pa select bit from ear to hph after ear pa
+ * is disabled to reduce ear turn off pop
+ */
+ snd_soc_update_bits(codec, MSM8X16_WCD_A_ANALOG_RX_EAR_CTL,
+ 0x80, 0x00);
+ break;
+ }
+ return 0;
+}
+
+/*
+ * DAPM event handler for the analog ADC widgets (TX1/TX2/TX3).
+ *
+ * Derives the TXFE init bit position from the widget's enable register
+ * (TX1 -> bit 5, TX2/TX3 -> bit 4 of TX_1_2_TEST_CTL_2), then sequences
+ * the ADC block enable, the TXFE init pulse and the digital CDC TX
+ * connection bits around power-up/down.  TX2 additionally toggles bit 1
+ * of MICB_1_CTL (purpose not visible here -- presumably ties micbias
+ * internal bias to ADC2; TODO confirm against the datasheet).
+ * Returns -EINVAL for an unrecognised widget register.
+ */
+static int msm8x16_wcd_codec_enable_adc(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ u16 adc_reg;
+ u8 init_bit_shift;
+
+ dev_dbg(codec->dev, "%s %d\n", __func__, event);
+
+ adc_reg = MSM8X16_WCD_A_ANALOG_TX_1_2_TEST_CTL_2;
+
+ if (w->reg == MSM8X16_WCD_A_ANALOG_TX_1_EN)
+ init_bit_shift = 5;
+ else if ((w->reg == MSM8X16_WCD_A_ANALOG_TX_2_EN) ||
+ (w->reg == MSM8X16_WCD_A_ANALOG_TX_3_EN))
+ init_bit_shift = 4;
+ else {
+ dev_err(codec->dev, "%s: Error, invalid adc register\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ msm8x16_wcd_codec_enable_adc_block(codec, 1);
+ if (w->reg == MSM8X16_WCD_A_ANALOG_TX_2_EN)
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_MICB_1_CTL, 0x02, 0x02);
+ /*
+ * Add delay of 10 ms to give sufficient time for the voltage
+ * to shoot up and settle so that the txfe init does not
+ * happen when the input voltage is changing too much.
+ */
+ usleep_range(10000, 10010);
+ snd_soc_update_bits(codec, adc_reg, 1 << init_bit_shift,
+ 1 << init_bit_shift);
+ if (w->reg == MSM8X16_WCD_A_ANALOG_TX_1_EN)
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_CONN_TX1_CTL,
+ 0x03, 0x00);
+ else if ((w->reg == MSM8X16_WCD_A_ANALOG_TX_2_EN) ||
+ (w->reg == MSM8X16_WCD_A_ANALOG_TX_3_EN))
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_CONN_TX2_CTL,
+ 0x03, 0x00);
+ usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ /*
+ * Add delay of 12 ms before deasserting the init
+ * to reduce the tx pop
+ */
+ usleep_range(12000, 12010);
+ snd_soc_update_bits(codec, adc_reg, 1 << init_bit_shift, 0x00);
+ usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ msm8x16_wcd_codec_enable_adc_block(codec, 0);
+ if (w->reg == MSM8X16_WCD_A_ANALOG_TX_2_EN)
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_MICB_1_CTL, 0x02, 0x00);
+ /* restore the CDC TX connection bits to 0x02 */
+ if (w->reg == MSM8X16_WCD_A_ANALOG_TX_1_EN)
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_CONN_TX1_CTL,
+ 0x03, 0x02);
+ else if ((w->reg == MSM8X16_WCD_A_ANALOG_TX_2_EN) ||
+ (w->reg == MSM8X16_WCD_A_ANALOG_TX_3_EN))
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_CONN_TX2_CTL,
+ 0x03, 0x02);
+
+ break;
+ }
+ return 0;
+}
+
+/*
+ * DAPM event handler for the speaker PA widget.
+ *
+ * Power-up: enables the analog clock and power stages, then either the
+ * boosted driver path (SPKR_DRV_CTL, when spk_boost_set) or the plain
+ * DAC path (SPKR_DAC_CTL), unmutes RX3 and sets bit 7 of the widget's
+ * own register.  Power-down reverses the sequence; on PRE_PMD the RX3
+ * mute is asserted and SPKR_PA_DISABLE recorded in mute_mask for the
+ * interpolator handler.  Post-1.0 PMIC revisions additionally toggle
+ * bit 0 of RX_EAR_CTL (revision workaround; exact purpose not visible
+ * here -- TODO confirm).
+ */
+static int msm8x16_wcd_codec_enable_spk_pa(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct wcd_chip *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_ANA_CLK_CTL, 0x10, 0x10);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_SPKR_PWRSTG_CTL, 0x01, 0x01);
+ if (!msm8x16_wcd->spk_boost_set)
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_SPKR_DAC_CTL, 0x10, 0x10);
+ usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_SPKR_PWRSTG_CTL, 0xE0, 0xE0);
+ if (!TOMBAK_IS_1_0(msm8x16_wcd->pmic_rev))
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_RX_EAR_CTL, 0x01, 0x01);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+ if (msm8x16_wcd->spk_boost_set)
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_SPKR_DRV_CTL, 0xEF, 0xEF);
+ else
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_SPKR_DAC_CTL, 0x10, 0x00);
+ /* unmute RX3 and flag the PA enabled in its own register */
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_RX3_B6_CTL, 0x01, 0x00);
+ snd_soc_update_bits(codec, w->reg, 0x80, 0x80);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ /* mute RX3 before disabling the PA */
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_RX3_B6_CTL, 0x01, 0x01);
+ msleep(20);
+ msm8x16_wcd->mute_mask |= SPKR_PA_DISABLE;
+ snd_soc_update_bits(codec, w->reg, 0x80, 0x00);
+ if (msm8x16_wcd->spk_boost_set)
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_SPKR_DRV_CTL, 0xEF, 0x00);
+ else
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_SPKR_DAC_CTL, 0x10, 0x00);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_SPKR_PWRSTG_CTL, 0xE0, 0x00);
+ if (!TOMBAK_IS_1_0(msm8x16_wcd->pmic_rev))
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_RX_EAR_CTL, 0x01, 0x00);
+ usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_SPKR_PWRSTG_CTL, 0x01, 0x00);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_ANA_CLK_CTL, 0x10, 0x00);
+ break;
+ }
+ return 0;
+}
+
+/*
+ * Ramp MICBIAS up for special (high-voltage) headsets.
+ *
+ * On enable: asserts the pull-up/capless bits in MICB_1_CTL, programs
+ * MICB_1_VAL to 0xC0, waits 50 ms for the bias to settle, then clears
+ * the control bits again.  The "off" path is intentionally a no-op.
+ */
+static void msm8x16_wcd_micbias_2_enable(struct snd_soc_codec *codec, bool on)
+{
+ if (on) {
+ snd_soc_update_bits(codec, MSM8X16_WCD_A_ANALOG_MICB_1_CTL,
+ 0x60, 0x60);
+ snd_soc_write(codec, MSM8X16_WCD_A_ANALOG_MICB_1_VAL,
+ 0xC0);
+ /*
+ * Special headset needs MICBIAS as 2.7V so wait for
+ * 50 msec for the MICBIAS to reach 2.7 volts.
+ */
+ msleep(50);
+ snd_soc_update_bits(codec, MSM8X16_WCD_A_ANALOG_MICB_1_CTL,
+ 0x60, 0x00);
+ }
+}
+
+/* Shared reference count for the DMIC1/DMIC2 clock (both share one
+ * clock enable in CLK_DMIC_B1_CTL).
+ */
+static s32 g_dmic_clk_cnt;
+
+/*
+ * DAPM event handler for the digital mic widgets.
+ *
+ * Parses the DMIC index (1 or 2) out of the widget name, then
+ * reference-counts the shared DMIC clock: the clock divider (0x0E
+ * field set to 0x02) and enable bit are programmed only when the first
+ * user arrives and cleared when the last leaves.  On power-up the
+ * per-channel TXn_DMIC_CTL mux is also pointed at the DMIC input.
+ */
+static int msm8x16_wcd_codec_enable_dmic(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ u8 dmic_clk_en;
+ u16 dmic_clk_reg;
+ s32 *dmic_clk_cnt;
+ unsigned int dmic;
+ int ret;
+ char *dec_num = strpbrk(w->name, "12");
+
+ if (dec_num == NULL) {
+ dev_err(codec->dev, "%s: Invalid DMIC\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = kstrtouint(dec_num, 10, &dmic);
+ if (ret < 0) {
+ dev_err(codec->dev,
+ "%s: Invalid DMIC line on the codec\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (dmic) {
+ case 1:
+ case 2:
+ dmic_clk_en = 0x01;
+ dmic_clk_cnt = &g_dmic_clk_cnt;
+ dmic_clk_reg = MSM8X16_WCD_A_CDC_CLK_DMIC_B1_CTL;
+ dev_dbg(codec->dev,
+ "%s() event %d DMIC%d dmic_1_2_clk_cnt %d\n",
+ __func__, event, dmic, *dmic_clk_cnt);
+ break;
+ default:
+ dev_err(codec->dev, "%s: Invalid DMIC Selection\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ (*dmic_clk_cnt)++;
+ if (*dmic_clk_cnt == 1) {
+ snd_soc_update_bits(codec, dmic_clk_reg,
+ 0x0E, 0x02);
+ snd_soc_update_bits(codec, dmic_clk_reg,
+ dmic_clk_en, dmic_clk_en);
+ }
+ if (dmic == 1)
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_TX1_DMIC_CTL, 0x07, 0x01);
+ if (dmic == 2)
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_TX2_DMIC_CTL, 0x07, 0x01);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ (*dmic_clk_cnt)--;
+ if (*dmic_clk_cnt == 0)
+ snd_soc_update_bits(codec, dmic_clk_reg,
+ dmic_clk_en, 0);
+ break;
+ }
+ return 0;
+}
+
+/*
+ * DAPM event handler for the MIC BIAS widgets.
+ *
+ * The widget name encodes the bias flavour ("Internal1/2/3",
+ * "External", "External2"); the matching bits of MICB_1_INT_RBIAS and
+ * the widget's own enable register are sequenced per event.
+ * "Internal2"/"External2" additionally drive the 2.7 V special-headset
+ * ramp via msm8x16_wcd_micbias_2_enable(), and MICB_1_EN transitions
+ * reconfigure the capless mode through msm8x16_wcd_configure_cap()
+ * based on whether MICBIAS2 is currently enabled (bit 7 of MICB_2_EN).
+ */
+static int msm8x16_wcd_codec_enable_micbias(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct wcd_chip *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
+ u16 micb_int_reg;
+ char *internal1_text = "Internal1";
+ char *internal2_text = "Internal2";
+ char *internal3_text = "Internal3";
+ char *external2_text = "External2";
+ char *external_text = "External";
+ bool micbias2;
+
+ switch (w->reg) {
+ case MSM8X16_WCD_A_ANALOG_MICB_1_EN:
+ case MSM8X16_WCD_A_ANALOG_MICB_2_EN:
+ micb_int_reg = MSM8X16_WCD_A_ANALOG_MICB_1_INT_RBIAS;
+ break;
+ default:
+ dev_err(codec->dev,
+ "%s: Error, invalid micbias register 0x%x\n",
+ __func__, w->reg);
+ return -EINVAL;
+ }
+
+ micbias2 = (snd_soc_read(codec, MSM8X16_WCD_A_ANALOG_MICB_2_EN) & 0x80);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (strnstr(w->name, internal1_text, 30)) {
+ snd_soc_update_bits(codec, micb_int_reg, 0x80, 0x80);
+ } else if (strnstr(w->name, internal2_text, 30)) {
+ snd_soc_update_bits(codec, micb_int_reg, 0x10, 0x10);
+ snd_soc_update_bits(codec, w->reg, 0x60, 0x00);
+ } else if (strnstr(w->name, internal3_text, 30)) {
+ snd_soc_update_bits(codec, micb_int_reg, 0x2, 0x2);
+ }
+ /* note: "External2" also matches "External" here */
+ if (!strnstr(w->name, external_text, 30))
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_MICB_1_EN, 0x05, 0x04);
+ if (w->reg == MSM8X16_WCD_A_ANALOG_MICB_1_EN)
+ msm8x16_wcd_configure_cap(codec, true, micbias2);
+
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ usleep_range(20000, 20100);
+ if (strnstr(w->name, internal1_text, 30)) {
+ snd_soc_update_bits(codec, micb_int_reg, 0x40, 0x40);
+ } else if (strnstr(w->name, internal2_text, 30)) {
+ snd_soc_update_bits(codec, micb_int_reg, 0x08, 0x08);
+ msm8x16_wcd_micbias_2_enable(codec, true);
+
+ msm8x16_wcd_configure_cap(codec, false, true);
+ /* raw PMIC write; 0xf144 meaning not visible here --
+ * TODO confirm against PMIC register map
+ */
+ regmap_write(msm8x16_wcd->analog_map, 0xf144, 0x95);
+ } else if (strnstr(w->name, internal3_text, 30)) {
+ snd_soc_update_bits(codec, micb_int_reg, 0x01, 0x01);
+ } else if (strnstr(w->name, external2_text, 30)) {
+ msm8x16_wcd_micbias_2_enable(codec, true);
+ }
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ if (strnstr(w->name, internal1_text, 30)) {
+ snd_soc_update_bits(codec, micb_int_reg, 0xC0, 0x40);
+ } else if (strnstr(w->name, internal2_text, 30)) {
+ msm8x16_wcd_micbias_2_enable(codec, false);
+ } else if (strnstr(w->name, internal3_text, 30)) {
+ snd_soc_update_bits(codec, micb_int_reg, 0x2, 0x0);
+ } else if (strnstr(w->name, external2_text, 30)) {
+ /*
+ * send micbias turn off event to mbhc driver and then
+ * break, as no need to set MICB_1_EN register.
+ */
+ msm8x16_wcd_micbias_2_enable(codec, false);
+ break;
+ }
+ if (w->reg == MSM8X16_WCD_A_ANALOG_MICB_1_EN)
+ msm8x16_wcd_configure_cap(codec, false, micbias2);
+ break;
+ }
+
+ return 0;
+}
+
+/* TXn_MUX_CTL[5:4] selects the decimator HPF cut-off frequency */
+#define TX_MUX_CTL_CUT_OFF_FREQ_MASK 0x30
+#define CF_MIN_3DB_4HZ 0x0
+#define CF_MIN_3DB_75HZ 0x1
+#define CF_MIN_3DB_150HZ 0x2
+
+/*
+ * DAPM event handler for the TX decimator widgets.
+ *
+ * Parses the decimator index out of the widget name ("DEC1"/"DEC2"),
+ * derives the per-decimator volume and mux registers (32-register
+ * stride from the DEC1 base), and sequences digital mute, HPF cut-off
+ * and decimator reset around power transitions.  The digital gain is
+ * re-applied after power-up by writing the gain register back to
+ * itself.  Returns -EINVAL on an unparsable name or wrong widget
+ * register, -ENOMEM if the name copy fails.
+ */
+static int msm8x16_wcd_codec_enable_dec(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ unsigned int decimator;
+ char *dec_name = NULL;
+ char *widget_name = NULL;
+ char *temp;
+ int ret = 0;
+ u16 dec_reset_reg, tx_vol_ctl_reg, tx_mux_ctl_reg;
+ u8 dec_hpf_cut_of_freq;
+ int offset;
+ char *dec_num;
+ dev_dbg(codec->dev, "%s %d\n", __func__, event);
+
+ widget_name = kstrndup(w->name, 15, GFP_KERNEL);
+ if (!widget_name)
+ return -ENOMEM;
+ temp = widget_name;
+
+ /* strsep() advances widget_name; keep the original for kfree() */
+ dec_name = strsep(&widget_name, " ");
+ widget_name = temp;
+ if (!dec_name) {
+ dev_err(codec->dev,
+ "%s: Invalid decimator = %s\n", __func__, w->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ dec_num = strpbrk(dec_name, "12");
+ if (dec_num == NULL) {
+ dev_err(codec->dev, "%s: Invalid Decimator\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = kstrtouint(dec_num, 10, &decimator);
+ if (ret < 0) {
+ dev_err(codec->dev,
+ "%s: Invalid decimator = %s\n", __func__, dec_name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ dev_err(codec->dev,
+ "%s(): widget = %s dec_name = %s decimator = %u\n", __func__,
+ w->name, dec_name, decimator);
+
+ if (w->reg == MSM8X16_WCD_A_CDC_CLK_TX_CLK_EN_B1_CTL) {
+ dec_reset_reg = MSM8X16_WCD_A_CDC_CLK_TX_RESET_B1_CTL;
+ offset = 0;
+ } else {
+ dev_err(codec->dev, "%s: Error, incorrect dec\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ tx_vol_ctl_reg = MSM8X16_WCD_A_CDC_TX1_VOL_CTL_CFG +
+ 32 * (decimator - 1);
+ tx_mux_ctl_reg = MSM8X16_WCD_A_CDC_TX1_MUX_CTL +
+ 32 * (decimator - 1);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ /* Enable TX digital mute */
+ snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x01);
+ dec_hpf_cut_of_freq = snd_soc_read(codec, tx_mux_ctl_reg);
+ dec_hpf_cut_of_freq = (dec_hpf_cut_of_freq & 0x30) >> 4;
+ if ((dec_hpf_cut_of_freq != CF_MIN_3DB_150HZ)) {
+
+ /* set cut of freq to CF_MIN_3DB_150HZ (0x2) */
+ snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x30,
+ CF_MIN_3DB_150HZ << 4);
+ }
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_TX_1_2_TXFE_CLKDIV,
+ 0xFF, 0x42);
+
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ /* enable HPF */
+ snd_soc_update_bits(codec, tx_mux_ctl_reg , 0x08, 0x00);
+ /* apply the digital gain after the decimator is enabled*/
+ if ((w->shift) < ARRAY_SIZE(tx_digital_gain_reg))
+ snd_soc_write(codec,
+ tx_digital_gain_reg[w->shift + offset],
+ snd_soc_read(codec,
+ tx_digital_gain_reg[w->shift + offset])
+ );
+ snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x00);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x01);
+ msleep(20);
+ snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x08, 0x08);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ /* pulse the decimator reset bit, then restore HPF/mute */
+ snd_soc_update_bits(codec, dec_reset_reg, 1 << w->shift,
+ 1 << w->shift);
+ snd_soc_update_bits(codec, dec_reset_reg, 1 << w->shift, 0x0);
+ snd_soc_update_bits(codec, tx_mux_ctl_reg, 0x08, 0x08);
+ snd_soc_update_bits(codec, tx_vol_ctl_reg, 0x01, 0x00);
+ break;
+ }
+
+out:
+ kfree(widget_name);
+ return ret;
+}
+
+/*
+ * DAPM event handler for the RX interpolator widgets.
+ *
+ * POST_PMU re-applies the RX digital gain (write-back of the gain
+ * register).  POST_PMD pulses the interpolator reset bit and then
+ * clears any PA mutes that the PA handlers parked in mute_mask
+ * (HPHL/HPHR/SPKR/EAR), unmuting the corresponding RXn_B6_CTL paths.
+ */
+static int msm8x16_wcd_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct wcd_chip *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ /* apply the digital gain after the interpolator is enabled*/
+ if ((w->shift) < ARRAY_SIZE(rx_digital_gain_reg))
+ snd_soc_write(codec,
+ rx_digital_gain_reg[w->shift],
+ snd_soc_read(codec,
+ rx_digital_gain_reg[w->shift])
+ );
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_CLK_RX_RESET_CTL,
+ 1 << w->shift, 1 << w->shift);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_CLK_RX_RESET_CTL,
+ 1 << w->shift, 0x0);
+ /*
+ * disable the mute enabled during the PMD of this device
+ */
+ if (msm8x16_wcd->mute_mask & HPHL_PA_DISABLE) {
+ pr_debug("disabling HPHL mute\n");
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_RX1_B6_CTL, 0x01, 0x00);
+ msm8x16_wcd->mute_mask &= ~(HPHL_PA_DISABLE);
+ }
+ if (msm8x16_wcd->mute_mask & HPHR_PA_DISABLE) {
+ pr_debug("disabling HPHR mute\n");
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_RX2_B6_CTL, 0x01, 0x00);
+ msm8x16_wcd->mute_mask &= ~(HPHR_PA_DISABLE);
+ }
+ if (msm8x16_wcd->mute_mask & SPKR_PA_DISABLE) {
+ pr_debug("disabling SPKR mute\n");
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_RX3_B6_CTL, 0x01, 0x00);
+ msm8x16_wcd->mute_mask &= ~(SPKR_PA_DISABLE);
+ }
+ if (msm8x16_wcd->mute_mask & EAR_PA_DISABLE) {
+ pr_debug("disabling EAR mute\n");
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_RX1_B6_CTL, 0x01, 0x00);
+ msm8x16_wcd->mute_mask &= ~(EAR_PA_DISABLE);
+ }
+ }
+ return 0;
+}
+
+/*
+ * DAPM event handler for the digital clock supply widget.
+ *
+ * On power-up, picks one of three boost configurations: speaker boost
+ * (unlocks SEC_ACCESS with the 0xA5 magic, resets the boost peripheral,
+ * programs the current limit and enables BOOST_EN_CTL), EAR PA boost
+ * (bypass-mode sequence), or the plain per-widget clock-enable bit.
+ * Power-down reverses whichever configuration was active.
+ */
+static int msm8x16_wcd_codec_enable_dig_clk(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct wcd_chip *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (w->shift == 2)
+ snd_soc_update_bits(codec, w->reg, 0x80, 0x80);
+ if (msm8x16_wcd->spk_boost_set) {
+ /* 0xA5 unlocks the secure-access register window */
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_SEC_ACCESS,
+ 0xA5, 0xA5);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_PERPH_RESET_CTL3,
+ 0x0F, 0x0F);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_CURRENT_LIMIT,
+ 0x82, 0x82);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL,
+ 0x20, 0x20);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_BOOST_EN_CTL,
+ 0xDF, 0xDF);
+ usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_CURRENT_LIMIT,
+ 0x83, 0x83);
+ } else if (msm8x16_wcd->ear_pa_boost_set) {
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_SEC_ACCESS,
+ 0xA5, 0xA5);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_PERPH_RESET_CTL3,
+ 0x07, 0x07);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_BYPASS_MODE,
+ 0x40, 0x40);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_BYPASS_MODE,
+ 0x80, 0x80);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_BYPASS_MODE,
+ 0x02, 0x02);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_BOOST_EN_CTL,
+ 0xDF, 0xDF);
+ } else {
+ snd_soc_update_bits(codec, w->reg, 1<<w->shift,
+ 1<<w->shift);
+ }
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ if (msm8x16_wcd->spk_boost_set) {
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_BOOST_EN_CTL,
+ 0xDF, 0x5F);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL,
+ 0x20, 0x00);
+ } else if (msm8x16_wcd->ear_pa_boost_set) {
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_BOOST_EN_CTL,
+ 0x80, 0x00);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_BYPASS_MODE,
+ 0x80, 0x00);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_BYPASS_MODE,
+ 0x02, 0x00);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_BYPASS_MODE,
+ 0x40, 0x00);
+ } else {
+ snd_soc_update_bits(codec, w->reg, 1<<w->shift, 0x00);
+ }
+ break;
+ }
+ return 0;
+}
+
+/*
+ * DAPM event handler for the RX chain widgets: gates bit 7 of
+ * CDC_DIG_CLK_CTL around the chain, and on power-down also clears the
+ * widget's own enable bit and waits 20 ms.
+ *
+ * NOTE(review): the dbg messages mention "disabling mute" but no mute
+ * register is touched here, and the POST_PMU path sleeps nowhere
+ * despite saying so -- messages look stale; confirm before relying on
+ * them.
+ */
+static int msm8x16_wcd_codec_enable_rx_chain(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL,
+ 0x80, 0x80);
+ dev_dbg(codec->dev,
+ "%s: PMU:Sleeping 20ms after disabling mute\n",
+ __func__);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL,
+ 0x80, 0x00);
+ dev_dbg(codec->dev,
+ "%s: PMD:Sleeping 20ms after disabling mute\n",
+ __func__);
+ snd_soc_update_bits(codec, w->reg,
+ 1 << w->shift, 0x00);
+ msleep(20);
+ break;
+ }
+ return 0;
+}
+
+/*
+ * DAPM event handler for the shared RX bias supply.
+ *
+ * Reference-counts users in rx_bias_count and toggles the RX common
+ * bias/DAC bits (0x81 of RX_COM_BIAS_DAC) only on the first enable and
+ * the last disable.
+ */
+static int msm8x16_wcd_codec_enable_rx_bias(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct wcd_chip *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ msm8x16_wcd->rx_bias_count++;
+ if (msm8x16_wcd->rx_bias_count == 1)
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_RX_COM_BIAS_DAC,
+ 0x81, 0x81);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ msm8x16_wcd->rx_bias_count--;
+ if (msm8x16_wcd->rx_bias_count == 0)
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_RX_COM_BIAS_DAC,
+ 0x81, 0x00);
+ break;
+ }
+ dev_dbg(codec->dev, "%s rx_bias_count = %d\n",
+ __func__, msm8x16_wcd->rx_bias_count);
+ return 0;
+}
+
+/*
+ * DAPM event handler for the charge-pump widgets.
+ *
+ * The "EAR CP" widget toggles only bit 7 of CDC_DIG_CLK_CTL; the
+ * headphone charge pump toggles bits 6-7, and on power-down bit 7 is
+ * only released once no RX bias users remain (rx_bias_count == 0).
+ * 1 ms settle delays bracket the enable/disable.
+ */
+static int msm8x16_wcd_codec_enable_charge_pump(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct wcd_chip *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (!(strcmp(w->name, "EAR CP")))
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL,
+ 0x80, 0x80);
+ else
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL,
+ 0xC0, 0xC0);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+ if (!(strcmp(w->name, "EAR CP")))
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL,
+ 0x80, 0x00);
+ else {
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL,
+ 0x40, 0x00);
+ if (msm8x16_wcd->rx_bias_count == 0)
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL,
+ 0x80, 0x00);
+ dev_dbg(codec->dev, "%s: rx_bias_count = %d\n",
+ __func__, msm8x16_wcd->rx_bias_count);
+ }
+ break;
+ }
+ return 0;
+}
+
+/*
+ * DAPM event handler for the HPHL DAC.
+ *
+ * PRE_PMU asserts the PA DAC control bit (0x02 of HPH_L_PA_DAC_CTL)
+ * and enables the left-channel digital/analog clocks; POST_PMU drops
+ * the DAC control bit; POST_PMD releases the clocks in reverse order.
+ */
+static int msm8x16_wcd_hphl_dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_RX_HPH_L_PA_DAC_CTL, 0x02, 0x02);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL, 0x01, 0x01);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_ANA_CLK_CTL, 0x02, 0x02);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_RX_HPH_L_PA_DAC_CTL, 0x02, 0x00);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_ANA_CLK_CTL, 0x02, 0x00);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL, 0x01, 0x00);
+ break;
+ }
+ return 0;
+}
+
+/*
+ * DAPM event handler shared by the HPHL and HPHR PA widgets.
+ *
+ * w->shift distinguishes the channel: 5 = HPHL (RX1, HPH_L_TEST),
+ * 4 = HPHR (RX2, HPH_R_TEST).  Power-up asserts the channel's TEST bit
+ * and the NCP feedback bit, then unmutes the RX path after a settle
+ * delay.  PRE_PMD mutes the path and records HPHL/HPHR_PA_DISABLE in
+ * mute_mask for the interpolator handler; POST_PMD clears the TEST bit
+ * and re-enables bit 6 of CDC_DIG_CLK_CTL with generous pop-avoidance
+ * delays.
+ */
+static int msm8x16_wcd_hph_pa_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct wcd_chip *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (w->shift == 5) {
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_RX_HPH_L_TEST, 0x04, 0x04);
+ } else if (w->shift == 4) {
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_RX_HPH_R_TEST, 0x04, 0x04);
+ }
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_NCP_FBCTRL, 0x20, 0x20);
+ break;
+
+ case SND_SOC_DAPM_POST_PMU:
+ usleep_range(4000, 4100);
+ if (w->shift == 5)
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_RX1_B6_CTL, 0x01, 0x00);
+ else if (w->shift == 4)
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_RX2_B6_CTL, 0x01, 0x00);
+ usleep_range(10000, 10100);
+ break;
+
+ case SND_SOC_DAPM_PRE_PMD:
+ if (w->shift == 5) {
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_RX1_B6_CTL, 0x01, 0x01);
+ msleep(20);
+ msm8x16_wcd->mute_mask |= HPHL_PA_DISABLE;
+ } else if (w->shift == 4) {
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_RX2_B6_CTL, 0x01, 0x01);
+ msleep(20);
+ msm8x16_wcd->mute_mask |= HPHR_PA_DISABLE;
+ }
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ if (w->shift == 5) {
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_RX_HPH_L_TEST, 0x04, 0x00);
+
+ } else if (w->shift == 4) {
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_RX_HPH_R_TEST, 0x04, 0x00);
+ }
+ usleep_range(4000, 4100);
+
+ usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL, 0x40, 0x40);
+ dev_dbg(codec->dev,
+ "%s: sleep 10 ms after %s PA disable.\n", __func__,
+ w->name);
+ usleep_range(10000, 10100);
+ break;
+ }
+ return 0;
+}
+
+/*
+ * DAPM routing table: {sink, control, source} triples wiring the I2S
+ * AIF widgets through the RX mixers/chains to the EAR/HPH/SPK outputs,
+ * and the AMIC/DMIC inputs through the ADC muxes and decimators to the
+ * I2S TX widgets.  A NULL control means an unconditional connection.
+ */
+static const struct snd_soc_dapm_route audio_map[] = {
+ {"RX_I2S_CLK", NULL, "CDC_CONN"},
+ {"I2S RX1", NULL, "RX_I2S_CLK"},
+ {"I2S RX2", NULL, "RX_I2S_CLK"},
+ {"I2S RX3", NULL, "RX_I2S_CLK"},
+
+ {"I2S TX1", NULL, "TX_I2S_CLK"},
+ {"I2S TX2", NULL, "TX_I2S_CLK"},
+
+ {"I2S TX1", NULL, "DEC1 MUX"},
+ {"I2S TX2", NULL, "DEC2 MUX"},
+
+ /* RDAC Connections */
+ {"HPHR DAC", NULL, "RDAC2 MUX"},
+ {"RDAC2 MUX", "RX1", "RX1 CHAIN"},
+ {"RDAC2 MUX", "RX2", "RX2 CHAIN"},
+
+ /* Earpiece (RX MIX1) */
+ {"EAR", NULL, "EAR_S"},
+ {"EAR_S", "Switch", "EAR PA"},
+ {"EAR PA", NULL, "RX_BIAS"},
+ {"EAR PA", NULL, "HPHL DAC"},
+ {"EAR PA", NULL, "HPHR DAC"},
+ {"EAR PA", NULL, "EAR CP"},
+
+ /* Headset (RX MIX1 and RX MIX2) */
+ {"HEADPHONE", NULL, "HPHL PA"},
+ {"HEADPHONE", NULL, "HPHR PA"},
+
+ {"HPHL PA", NULL, "HPHL"},
+ {"HPHR PA", NULL, "HPHR"},
+ {"HPHL", "Switch", "HPHL DAC"},
+ {"HPHR", "Switch", "HPHR DAC"},
+ {"HPHL PA", NULL, "CP"},
+ {"HPHL PA", NULL, "RX_BIAS"},
+ {"HPHR PA", NULL, "CP"},
+ {"HPHR PA", NULL, "RX_BIAS"},
+ {"HPHL DAC", NULL, "RX1 CHAIN"},
+
+ /* Speaker */
+ {"SPK_OUT", NULL, "SPK PA"},
+ {"SPK PA", NULL, "SPK_RX_BIAS"},
+ {"SPK PA", NULL, "SPK DAC"},
+ {"SPK DAC", "Switch", "RX3 CHAIN"},
+ {"SPK DAC", NULL, "VDD_SPKDRV"},
+
+ {"RX1 CHAIN", NULL, "RX1 CLK"},
+ {"RX2 CHAIN", NULL, "RX2 CLK"},
+ {"RX3 CHAIN", NULL, "RX3 CLK"},
+ {"RX1 CHAIN", NULL, "RX1 MIX2"},
+ {"RX2 CHAIN", NULL, "RX2 MIX2"},
+ {"RX3 CHAIN", NULL, "RX3 MIX1"},
+
+ {"RX1 MIX1", NULL, "RX1 MIX1 INP1"},
+ {"RX1 MIX1", NULL, "RX1 MIX1 INP2"},
+ {"RX1 MIX1", NULL, "RX1 MIX1 INP3"},
+ {"RX2 MIX1", NULL, "RX2 MIX1 INP1"},
+ {"RX2 MIX1", NULL, "RX2 MIX1 INP2"},
+ {"RX3 MIX1", NULL, "RX3 MIX1 INP1"},
+ {"RX3 MIX1", NULL, "RX3 MIX1 INP2"},
+ {"RX1 MIX2", NULL, "RX1 MIX1"},
+ {"RX1 MIX2", NULL, "RX1 MIX2 INP1"},
+ {"RX2 MIX2", NULL, "RX2 MIX1"},
+ {"RX2 MIX2", NULL, "RX2 MIX2 INP1"},
+
+ /* RX mixer input selections */
+ {"RX1 MIX1 INP1", "RX1", "I2S RX1"},
+ {"RX1 MIX1 INP1", "RX2", "I2S RX2"},
+ {"RX1 MIX1 INP1", "RX3", "I2S RX3"},
+ {"RX1 MIX1 INP1", "IIR1", "IIR1"},
+ {"RX1 MIX1 INP1", "IIR2", "IIR2"},
+ {"RX1 MIX1 INP2", "RX1", "I2S RX1"},
+ {"RX1 MIX1 INP2", "RX2", "I2S RX2"},
+ {"RX1 MIX1 INP2", "RX3", "I2S RX3"},
+ {"RX1 MIX1 INP2", "IIR1", "IIR1"},
+ {"RX1 MIX1 INP2", "IIR2", "IIR2"},
+ {"RX1 MIX1 INP3", "RX1", "I2S RX1"},
+ {"RX1 MIX1 INP3", "RX2", "I2S RX2"},
+ {"RX1 MIX1 INP3", "RX3", "I2S RX3"},
+
+ {"RX2 MIX1 INP1", "RX1", "I2S RX1"},
+ {"RX2 MIX1 INP1", "RX2", "I2S RX2"},
+ {"RX2 MIX1 INP1", "RX3", "I2S RX3"},
+ {"RX2 MIX1 INP1", "IIR1", "IIR1"},
+ {"RX2 MIX1 INP1", "IIR2", "IIR2"},
+ {"RX2 MIX1 INP2", "RX1", "I2S RX1"},
+ {"RX2 MIX1 INP2", "RX2", "I2S RX2"},
+ {"RX2 MIX1 INP2", "RX3", "I2S RX3"},
+ {"RX2 MIX1 INP2", "IIR1", "IIR1"},
+ {"RX2 MIX1 INP2", "IIR2", "IIR2"},
+
+ {"RX3 MIX1 INP1", "RX1", "I2S RX1"},
+ {"RX3 MIX1 INP1", "RX2", "I2S RX2"},
+ {"RX3 MIX1 INP1", "RX3", "I2S RX3"},
+ {"RX3 MIX1 INP1", "IIR1", "IIR1"},
+ {"RX3 MIX1 INP1", "IIR2", "IIR2"},
+ {"RX3 MIX1 INP2", "RX1", "I2S RX1"},
+ {"RX3 MIX1 INP2", "RX2", "I2S RX2"},
+ {"RX3 MIX1 INP2", "RX3", "I2S RX3"},
+ {"RX3 MIX1 INP2", "IIR1", "IIR1"},
+ {"RX3 MIX1 INP2", "IIR2", "IIR2"},
+
+ {"RX1 MIX2 INP1", "IIR1", "IIR1"},
+ {"RX2 MIX2 INP1", "IIR1", "IIR1"},
+ {"RX1 MIX2 INP1", "IIR2", "IIR2"},
+ {"RX2 MIX2 INP1", "IIR2", "IIR2"},
+
+ /* Decimator Inputs */
+ {"DEC1 MUX", "DMIC1", "DMIC1"},
+ {"DEC1 MUX", "DMIC2", "DMIC2"},
+ {"DEC1 MUX", "ADC1", "ADC1"},
+ {"DEC1 MUX", "ADC2", "ADC2"},
+ {"DEC1 MUX", "ADC3", "ADC3"},
+ {"DEC1 MUX", NULL, "CDC_CONN"},
+
+ {"DEC2 MUX", "DMIC1", "DMIC1"},
+ {"DEC2 MUX", "DMIC2", "DMIC2"},
+ {"DEC2 MUX", "ADC1", "ADC1"},
+ {"DEC2 MUX", "ADC2", "ADC2"},
+ {"DEC2 MUX", "ADC3", "ADC3"},
+ {"DEC2 MUX", NULL, "CDC_CONN"},
+
+ /* ADC Connections */
+ {"ADC2", NULL, "ADC2 MUX"},
+ {"ADC3", NULL, "ADC2 MUX"},
+ {"ADC2 MUX", "INP2", "ADC2_INP2"},
+ {"ADC2 MUX", "INP3", "ADC2_INP3"},
+
+ {"ADC1", NULL, "AMIC1"},
+ {"ADC2_INP2", NULL, "AMIC2"},
+ {"ADC2_INP3", NULL, "AMIC3"},
+
+ /* TODO: Fix this */
+ {"IIR1", NULL, "IIR1 INP1 MUX"},
+ {"IIR1 INP1 MUX", "DEC1", "DEC1 MUX"},
+ {"IIR1 INP1 MUX", "DEC2", "DEC2 MUX"},
+ {"IIR2", NULL, "IIR2 INP1 MUX"},
+ {"IIR2 INP1 MUX", "DEC1", "DEC1 MUX"},
+ {"IIR2 INP1 MUX", "DEC2", "DEC2 MUX"},
+
+ /* All mic-bias flavours depend on the LDO and regulator supplies */
+ {"MIC BIAS Internal1", NULL, "INT_LDO_H"},
+ {"MIC BIAS Internal2", NULL, "INT_LDO_H"},
+ {"MIC BIAS External", NULL, "INT_LDO_H"},
+ {"MIC BIAS External2", NULL, "INT_LDO_H"},
+ {"MIC BIAS Internal1", NULL, "MICBIAS_REGULATOR"},
+ {"MIC BIAS Internal2", NULL, "MICBIAS_REGULATOR"},
+ {"MIC BIAS External", NULL, "MICBIAS_REGULATOR"},
+ {"MIC BIAS External2", NULL, "MICBIAS_REGULATOR"},
+};
+
+/*
+ * DAPM event callback for the "HPHR DAC" mixer widget.
+ *
+ * PRE_PMU:  sets bit 1 of RX_HPH_R_PA_DAC_CTL (presumably holding the
+ *           right headphone DAC in a safe state before power-up — TODO
+ *           confirm against the PMIC register description) and enables
+ *           the codec digital (CDC_DIG_CLK_CTL bit 1) and analog
+ *           (CDC_ANA_CLK_CTL bit 0) clock controls.
+ * POST_PMU: clears bit 1 of RX_HPH_R_PA_DAC_CTL once the widget is up.
+ * POST_PMD: drops the analog and digital clock enables again, in the
+ *           reverse order of PRE_PMU.
+ *
+ * Always returns 0; unhandled events perform no register writes.
+ */
+static int msm8x16_wcd_hphr_dac_event(struct snd_soc_dapm_widget *w,
+	struct snd_kcontrol *kcontrol, int event)
+{
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+	switch (event) {
+	case SND_SOC_DAPM_PRE_PMU:
+		snd_soc_update_bits(codec,
+		    MSM8X16_WCD_A_ANALOG_RX_HPH_R_PA_DAC_CTL, 0x02, 0x02);
+		snd_soc_update_bits(codec,
+		    MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL, 0x02, 0x02);
+		snd_soc_update_bits(codec,
+		    MSM8X16_WCD_A_DIGITAL_CDC_ANA_CLK_CTL, 0x01, 0x01);
+		break;
+	case SND_SOC_DAPM_POST_PMU:
+		snd_soc_update_bits(codec,
+		    MSM8X16_WCD_A_ANALOG_RX_HPH_R_PA_DAC_CTL, 0x02, 0x00);
+		break;
+	case SND_SOC_DAPM_POST_PMD:
+		snd_soc_update_bits(codec,
+		    MSM8X16_WCD_A_DIGITAL_CDC_ANA_CLK_CTL, 0x01, 0x00);
+		snd_soc_update_bits(codec,
+		    MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL, 0x02, 0x00);
+		break;
+	}
+	return 0;
+}
+
+/*
+ * DAPM widgets for the msm8x16 analog/digital codec.  Connections
+ * between these widgets are described in audio_map above.
+ *
+ * Fix vs. previous revision: the "RX1 MIX1" widget ORed
+ * SND_SOC_DAPM_POST_PMU and SND_SOC_DAPM_POST_PMD in twice each;
+ * bitwise OR is idempotent so this was harmless, but the flag list is
+ * now deduplicated to match the sibling "RX2 MIX1" entry.
+ */
+static const struct snd_soc_dapm_widget msm8x16_wcd_dapm_widgets[] = {
+	/*RX stuff */
+	SND_SOC_DAPM_OUTPUT("EAR"),
+
+	SND_SOC_DAPM_PGA_E("EAR PA", SND_SOC_NOPM,
+			0, 0, NULL, 0, msm8x16_wcd_codec_enable_ear_pa,
+			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER("EAR_S", SND_SOC_NOPM, 0, 0,
+			ear_pa_switch, ARRAY_SIZE(ear_pa_switch)),
+
+	SND_SOC_DAPM_AIF_IN("I2S RX1", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
+
+	SND_SOC_DAPM_AIF_IN("I2S RX2", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
+
+	SND_SOC_DAPM_AIF_IN("I2S RX3", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
+
+	SND_SOC_DAPM_SUPPLY("INT_LDO_H", SND_SOC_NOPM, 1, 0, NULL, 0),
+
+	SND_SOC_DAPM_OUTPUT("HEADPHONE"),
+	SND_SOC_DAPM_PGA_E("HPHL PA", MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_EN,
+			5, 0, NULL, 0,
+			msm8x16_wcd_hph_pa_event, SND_SOC_DAPM_PRE_PMU |
+			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
+			SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX("HPHL", SND_SOC_NOPM, 0, 0, hphl_mux),
+
+	SND_SOC_DAPM_MIXER_E("HPHL DAC",
+		MSM8X16_WCD_A_ANALOG_RX_HPH_L_PA_DAC_CTL, 3, 0, NULL,
+		0, msm8x16_wcd_hphl_dac_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_PGA_E("HPHR PA", MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_EN,
+			4, 0, NULL, 0,
+			msm8x16_wcd_hph_pa_event, SND_SOC_DAPM_PRE_PMU |
+			SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
+			SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MUX("HPHR", SND_SOC_NOPM, 0, 0, hphr_mux),
+
+	SND_SOC_DAPM_MIXER_E("HPHR DAC",
+		MSM8X16_WCD_A_ANALOG_RX_HPH_R_PA_DAC_CTL, 3, 0, NULL,
+		0, msm8x16_wcd_hphr_dac_event,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MIXER("SPK DAC", SND_SOC_NOPM, 0, 0,
+			spkr_switch, ARRAY_SIZE(spkr_switch)),
+
+	/* Speaker */
+	SND_SOC_DAPM_OUTPUT("SPK_OUT"),
+
+	SND_SOC_DAPM_PGA_E("SPK PA", MSM8X16_WCD_A_ANALOG_SPKR_DRV_CTL,
+			6, 0, NULL, 0, msm8x16_wcd_codec_enable_spk_pa,
+			SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+			SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MIXER_E("RX1 MIX1",
+		MSM8X16_WCD_A_CDC_CLK_RX_B1_CTL, 0, 0, NULL, 0,
+		msm8x16_wcd_codec_enable_interpolator,
+		SND_SOC_DAPM_PRE_REG |
+		SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MIXER_E("RX2 MIX1",
+		MSM8X16_WCD_A_CDC_CLK_RX_B1_CTL, 1, 0, NULL, 0,
+		msm8x16_wcd_codec_enable_interpolator,
+		SND_SOC_DAPM_PRE_REG |
+		SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MIXER_E("RX1 MIX2",
+		MSM8X16_WCD_A_CDC_CLK_RX_B1_CTL, 0, 0, NULL,
+		0, msm8x16_wcd_codec_enable_interpolator,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER_E("RX2 MIX2",
+		MSM8X16_WCD_A_CDC_CLK_RX_B1_CTL, 1, 0, NULL,
+		0, msm8x16_wcd_codec_enable_interpolator,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER_E("RX3 MIX1",
+		MSM8X16_WCD_A_CDC_CLK_RX_B1_CTL, 2, 0, NULL,
+		0, msm8x16_wcd_codec_enable_interpolator,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_SUPPLY("RX1 CLK", MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL,
+		0, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("RX2 CLK", MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL,
+		1, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("RX3 CLK", MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL,
+		2, 0, msm8x16_wcd_codec_enable_dig_clk, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER_E("RX1 CHAIN", MSM8X16_WCD_A_CDC_RX1_B6_CTL, 0, 0,
+		NULL, 0,
+		msm8x16_wcd_codec_enable_rx_chain,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER_E("RX2 CHAIN", MSM8X16_WCD_A_CDC_RX2_B6_CTL, 0, 0,
+		NULL, 0,
+		msm8x16_wcd_codec_enable_rx_chain,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_MIXER_E("RX3 CHAIN", MSM8X16_WCD_A_CDC_RX3_B6_CTL, 0, 0,
+		NULL, 0,
+		msm8x16_wcd_codec_enable_rx_chain,
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX("RX1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+		&rx_mix1_inp1_mux),
+	SND_SOC_DAPM_MUX("RX1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+		&rx_mix1_inp2_mux),
+	SND_SOC_DAPM_MUX("RX1 MIX1 INP3", SND_SOC_NOPM, 0, 0,
+		&rx_mix1_inp3_mux),
+
+	SND_SOC_DAPM_MUX("RX2 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+		&rx2_mix1_inp1_mux),
+	SND_SOC_DAPM_MUX("RX2 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+		&rx2_mix1_inp2_mux),
+	SND_SOC_DAPM_MUX("RX2 MIX1 INP3", SND_SOC_NOPM, 0, 0,
+		&rx2_mix1_inp3_mux),
+
+	SND_SOC_DAPM_MUX("RX3 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+		&rx3_mix1_inp1_mux),
+	SND_SOC_DAPM_MUX("RX3 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+		&rx3_mix1_inp2_mux),
+	SND_SOC_DAPM_MUX("RX3 MIX1 INP3", SND_SOC_NOPM, 0, 0,
+		&rx3_mix1_inp3_mux),
+
+	SND_SOC_DAPM_MUX("RX1 MIX2 INP1", SND_SOC_NOPM, 0, 0,
+		&rx1_mix2_inp1_mux),
+	SND_SOC_DAPM_MUX("RX2 MIX2 INP1", SND_SOC_NOPM, 0, 0,
+		&rx2_mix2_inp1_mux),
+
+	SND_SOC_DAPM_SUPPLY("MICBIAS_REGULATOR", SND_SOC_NOPM,
+		ON_DEMAND_MICBIAS, 0,
+		msm8x16_wcd_codec_enable_on_demand_supply,
+		SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_SUPPLY("CP", MSM8X16_WCD_A_ANALOG_NCP_EN, 0, 0,
+		msm8x16_wcd_codec_enable_charge_pump, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_SUPPLY("EAR CP", MSM8X16_WCD_A_ANALOG_NCP_EN, 4, 0,
+		msm8x16_wcd_codec_enable_charge_pump, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_SUPPLY("RX_BIAS", SND_SOC_NOPM,
+		0, 0, msm8x16_wcd_codec_enable_rx_bias,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_SUPPLY("SPK_RX_BIAS", SND_SOC_NOPM, 0, 0,
+		msm8x16_wcd_codec_enable_rx_bias, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	/* TX */
+
+	SND_SOC_DAPM_SUPPLY_S("CDC_CONN", -2, MSM8X16_WCD_A_CDC_CLK_OTHR_CTL,
+		2, 0, NULL, 0),
+
+	SND_SOC_DAPM_INPUT("AMIC1"),
+	SND_SOC_DAPM_SUPPLY("MIC BIAS Internal1",
+		MSM8X16_WCD_A_ANALOG_MICB_1_EN, 7, 0,
+		msm8x16_wcd_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_SUPPLY("MIC BIAS Internal2",
+		MSM8X16_WCD_A_ANALOG_MICB_2_EN, 7, 0,
+		msm8x16_wcd_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	/*
+	 * NOTE(review): Internal3 reuses MICB_1_EN, the same register as
+	 * Internal1 — verify this is intentional (shared enable bit) and
+	 * not a copy/paste of the wrong MICB register.
+	 */
+	SND_SOC_DAPM_SUPPLY("MIC BIAS Internal3",
+		MSM8X16_WCD_A_ANALOG_MICB_1_EN, 7, 0,
+		msm8x16_wcd_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_ADC_E("ADC1", NULL, MSM8X16_WCD_A_ANALOG_TX_1_EN, 7, 0,
+		msm8x16_wcd_codec_enable_adc, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_ADC_E("ADC2_INP2",
+		NULL, MSM8X16_WCD_A_ANALOG_TX_2_EN, 7, 0,
+		msm8x16_wcd_codec_enable_adc, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+	SND_SOC_DAPM_ADC_E("ADC2_INP3",
+		NULL, MSM8X16_WCD_A_ANALOG_TX_3_EN, 7, 0,
+		msm8x16_wcd_codec_enable_adc, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MIXER("ADC2", SND_SOC_NOPM, 0, 0, NULL, 0),
+	SND_SOC_DAPM_MIXER("ADC3", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+	SND_SOC_DAPM_MUX("ADC2 MUX", SND_SOC_NOPM, 0, 0,
+		&tx_adc2_mux),
+
+	SND_SOC_DAPM_SUPPLY("MIC BIAS External",
+		MSM8X16_WCD_A_ANALOG_MICB_1_EN, 7, 0,
+		msm8x16_wcd_codec_enable_micbias, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_SUPPLY("MIC BIAS External2",
+		MSM8X16_WCD_A_ANALOG_MICB_2_EN, 7, 0,
+		msm8x16_wcd_codec_enable_micbias, SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+
+	SND_SOC_DAPM_INPUT("AMIC3"),
+
+	SND_SOC_DAPM_MUX_E("DEC1 MUX",
+		MSM8X16_WCD_A_CDC_CLK_TX_CLK_EN_B1_CTL, 0, 0,
+		&dec1_mux, msm8x16_wcd_codec_enable_dec,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX_E("DEC2 MUX",
+		MSM8X16_WCD_A_CDC_CLK_TX_CLK_EN_B1_CTL, 1, 0,
+		&dec2_mux, msm8x16_wcd_codec_enable_dec,
+		SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+		SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_MUX("RDAC2 MUX", SND_SOC_NOPM, 0, 0, &rdac2_mux),
+
+	SND_SOC_DAPM_INPUT("AMIC2"),
+
+	SND_SOC_DAPM_AIF_OUT("I2S TX1", "AIF1 Capture", 0, SND_SOC_NOPM,
+		0, 0),
+	SND_SOC_DAPM_AIF_OUT("I2S TX2", "AIF1 Capture", 0, SND_SOC_NOPM,
+		0, 0),
+	SND_SOC_DAPM_AIF_OUT("I2S TX3", "AIF1 Capture", 0, SND_SOC_NOPM,
+		0, 0),
+
+
+	/* Digital Mic Inputs */
+	SND_SOC_DAPM_ADC_E("DMIC1", NULL, SND_SOC_NOPM, 0, 0,
+		msm8x16_wcd_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	SND_SOC_DAPM_ADC_E("DMIC2", NULL, SND_SOC_NOPM, 0, 0,
+		msm8x16_wcd_codec_enable_dmic, SND_SOC_DAPM_PRE_PMU |
+		SND_SOC_DAPM_POST_PMD),
+
+	/* Sidetone */
+	SND_SOC_DAPM_MUX("IIR1 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir1_inp1_mux),
+	SND_SOC_DAPM_PGA("IIR1",
+		MSM8X16_WCD_A_CDC_CLK_SD_CTL, 0, 0, NULL, 0),
+
+	SND_SOC_DAPM_MUX("IIR2 INP1 MUX", SND_SOC_NOPM, 0, 0, &iir2_inp1_mux),
+	SND_SOC_DAPM_PGA("IIR2",
+		MSM8X16_WCD_A_CDC_CLK_SD_CTL, 1, 0, NULL, 0),
+
+	SND_SOC_DAPM_SUPPLY("RX_I2S_CLK",
+		MSM8X16_WCD_A_CDC_CLK_RX_I2S_CTL, 4, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("TX_I2S_CLK",
+		MSM8X16_WCD_A_CDC_CLK_TX_I2S_CTL, 4, 0,
+		NULL, 0),
+};
+
+/*
+ * Codec driver description passed to snd_soc_register_codec().
+ * Register I/O goes through the custom msm8x16_wcd_read/write
+ * callbacks, backed by a flat cache of MSM8X16_WCD_CACHE_SIZE
+ * single-byte registers seeded from msm8x16_wcd_reset_reg_defaults.
+ */
+static struct snd_soc_codec_driver msm8x16_wcd_codec = {
+	.probe = msm8x16_wcd_codec_probe,
+	.remove = msm8x16_wcd_codec_remove,
+	.read = msm8x16_wcd_read,
+	.write = msm8x16_wcd_write,
+	.reg_cache_size = MSM8X16_WCD_CACHE_SIZE,
+	.reg_cache_default = msm8x16_wcd_reset_reg_defaults,
+	.reg_word_size = 1,
+	.controls = msm8x16_wcd_snd_controls,
+	.num_controls = ARRAY_SIZE(msm8x16_wcd_snd_controls),
+	.dapm_widgets = msm8x16_wcd_dapm_widgets,
+	.num_dapm_widgets = ARRAY_SIZE(msm8x16_wcd_dapm_widgets),
+	.dapm_routes = audio_map,
+	.num_dapm_routes = ARRAY_SIZE(audio_map),
+};
+
+/*
+ * Parse the codec's device-tree properties: the SPMI "reg" base of the
+ * analog block, the "digital" syscon phandle, the vddio / vdd-pa /
+ * vdd-mic-bias supplies and the "mclk" clock.
+ *
+ * Returns 0 on success or a negative errno.  Fix vs. previous
+ * revision: the devm_clk_get() result is now checked with IS_ERR()
+ * (it was previously used unchecked by the probe path), and the
+ * copy-pasted error messages name the supply that actually failed.
+ */
+static int msm8x16_wcd_codec_parse_dt(struct platform_device *pdev,
+				      struct wcd_chip *chip)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	int ret;
+	u32 res[2];
+
+	ret = of_property_read_u32_array(np, "reg", res, 2);
+	if (ret < 0)
+		return ret;
+
+	chip->analog_base = res[0];
+
+	chip->digital_map = syscon_regmap_lookup_by_phandle(np, "digital");
+	if (IS_ERR(chip->digital_map))
+		return PTR_ERR(chip->digital_map);
+
+	chip->vddio = devm_regulator_get(dev, "vddio");
+	if (IS_ERR(chip->vddio)) {
+		dev_err(dev, "Failed to get vddio supply\n");
+		return PTR_ERR(chip->vddio);
+	}
+
+	chip->vdd_pa = devm_regulator_get(dev, "vdd-pa");
+	if (IS_ERR(chip->vdd_pa)) {
+		dev_err(dev, "Failed to get vdd-pa supply\n");
+		return PTR_ERR(chip->vdd_pa);
+	}
+
+	chip->vdd_mic_bias = devm_regulator_get(dev, "vdd-mic-bias");
+	if (IS_ERR(chip->vdd_mic_bias)) {
+		dev_err(dev, "Failed to get vdd micbias supply\n");
+		return PTR_ERR(chip->vdd_mic_bias);
+	}
+
+	chip->mclk = devm_clk_get(dev, "mclk");
+	if (IS_ERR(chip->mclk)) {
+		dev_err(dev, "Failed to get mclk\n");
+		return PTR_ERR(chip->mclk);
+	}
+
+	return 0;
+}
+
+/*
+ * Platform probe: allocate the chip state, grab the parent SPMI
+ * regmap, parse DT resources, start MCLK at 9.6 MHz and register the
+ * codec.
+ *
+ * Fixes vs. previous revision: the msm8x16_wcd_codec_parse_dt() and
+ * clk_prepare_enable() return values were silently ignored (a missing
+ * clock or supply would crash or misbehave later), and MCLK was left
+ * enabled if snd_soc_register_codec() failed.
+ */
+static int wcd_probe(struct platform_device *pdev)
+{
+	struct wcd_chip *chip;
+	struct device *dev = &pdev->dev;
+	int ret;
+
+	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	/* Analog registers live behind the parent PMIC's regmap. */
+	chip->analog_map = dev_get_regmap(dev->parent, NULL);
+	if (!chip->analog_map)
+		return -ENXIO;
+
+	ret = msm8x16_wcd_codec_parse_dt(pdev, chip);
+	if (ret < 0)
+		return ret;
+
+	clk_set_rate(chip->mclk, 9600000);
+	ret = clk_prepare_enable(chip->mclk);
+	if (ret) {
+		dev_err(dev, "Failed to enable mclk\n");
+		return ret;
+	}
+
+	dev_set_drvdata(dev, chip);
+
+	ret = snd_soc_register_codec(dev, &msm8x16_wcd_codec,
+				     msm8x16_wcd_codec_dai,
+				     ARRAY_SIZE(msm8x16_wcd_codec_dai));
+	if (ret)
+		clk_disable_unprepare(chip->mclk);
+
+	return ret;
+}
+
+/*
+ * Platform remove: unregister the codec and balance the
+ * clk_prepare_enable() done in wcd_probe().  The clock was previously
+ * left enabled on unbind, leaking an MCLK enable count.
+ */
+static int wcd_remove(struct platform_device *pdev)
+{
+	struct wcd_chip *chip = dev_get_drvdata(&pdev->dev);
+
+	snd_soc_unregister_codec(&pdev->dev);
+	clk_disable_unprepare(chip->mclk);
+
+	return 0;
+}
+
+/* Both the APQ8016 and MSM8x16 integrations expose the same codec block. */
+static const struct of_device_id wcd_match_table[] = {
+	{ .compatible = "qcom,apq8016-wcd-codec" },
+	{ .compatible = "qcom,msm8x16-wcd-codec" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, wcd_match_table);
+
+static struct platform_driver wcd_driver = {
+	.driver = {
+		.name = "msm8x16-wcd-codec",
+		.of_match_table = wcd_match_table,
+	},
+	.probe = wcd_probe,
+	.remove = wcd_remove,
+};
+module_platform_driver(wcd_driver);
+
+/*
+ * Keep the modalias in sync with wcd_driver.driver.name so module
+ * autoloading works for non-DT platform devices; the previous
+ * "platform:spmi-wcd-codec" alias matched no registered device name.
+ */
+MODULE_ALIAS("platform:msm8x16-wcd-codec");
+MODULE_DESCRIPTION("SPMI PMIC WCD codec driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/msm8x16-wcd.h b/sound/soc/codecs/msm8x16-wcd.h
new file mode 100644
index 0000000000000..ad4c9d0c5ae66
--- /dev/null
+++ b/sound/soc/codecs/msm8x16-wcd.h
@@ -0,0 +1,234 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef MSM8X16_WCD_H
+#define MSM8X16_WCD_H
+
+#include <linux/types.h>
+
+/* Flat one-byte register space; the whole range is cached. */
+#define MSM8X16_WCD_NUM_REGISTERS	0x6FF
+#define MSM8X16_WCD_MAX_REGISTER	(MSM8X16_WCD_NUM_REGISTERS - 1)
+#define MSM8X16_WCD_CACHE_SIZE		MSM8X16_WCD_NUM_REGISTERS
+#define MSM8X16_WCD_NUM_IRQ_REGS	2
+#define MAX_REGULATOR			7
+/* Build a msm8x16_wcd_reg_mask_val entry with an empty mask. */
+#define MSM8X16_WCD_REG_VAL(reg, val)	{(reg), 0, (val)}
+#define MSM8X16_TOMBAK_LPASS_AUDIO_CORE_DIG_CODEC_CLK_SEL	0xFE03B004
+#define MSM8X16_TOMBAK_LPASS_DIGCODEC_CMD_RCGR			0x0181C09C
+#define MSM8X16_TOMBAK_LPASS_DIGCODEC_CFG_RCGR			0x0181C0A0
+#define MSM8X16_TOMBAK_LPASS_DIGCODEC_M				0x0181C0A4
+#define MSM8X16_TOMBAK_LPASS_DIGCODEC_N				0x0181C0A8
+#define MSM8X16_TOMBAK_LPASS_DIGCODEC_D				0x0181C0AC
+#define MSM8X16_TOMBAK_LPASS_DIGCODEC_CBCR			0x0181C0B0
+#define MSM8X16_TOMBAK_LPASS_DIGCODEC_AHB_CBCR			0x0181C0B4
+
+#define MSM8X16_CODEC_NAME "msm8x16_wcd_codec"
+
+/*
+ * Register-space classification helpers.  Arguments are fully
+ * parenthesized (fix vs. previous revision) so expressions such as
+ * MSM8X16_WCD_IS_DIGITAL_REG(base + off) expand correctly.
+ */
+#define MSM8X16_WCD_IS_DIGITAL_REG(reg) \
+	((((reg) >= 0x200) && ((reg) <= 0x4FF)) ? 1 : 0)
+#define MSM8X16_WCD_IS_TOMBAK_REG(reg) \
+	((((reg) >= 0x000) && ((reg) <= 0x1FF)) ? 1 : 0)
+/*
+ * MCLK activity indicators during suspend and resume call
+ */
+#define MCLK_SUS_DIS	1
+#define MCLK_SUS_RSC	2
+#define MCLK_SUS_NO_ACT	3
+
+#define NUM_DECIMATORS	2
+
+/* Register access tables and reset defaults, defined in the .c file. */
+extern const u8 msm8x16_wcd_reg_readable[MSM8X16_WCD_CACHE_SIZE];
+extern const u8 msm8x16_wcd_reg_readonly[MSM8X16_WCD_CACHE_SIZE];
+extern u8 msm8x16_wcd_reset_reg_defaults[MSM8X16_WCD_CACHE_SIZE];
+
+/* Micbias pull-down current selection (values per the identifier names). */
+enum msm8x16_wcd_pid_current {
+	MSM8X16_WCD_PID_MIC_2P5_UA,
+	MSM8X16_WCD_PID_MIC_5_UA,
+	MSM8X16_WCD_PID_MIC_10_UA,
+	MSM8X16_WCD_PID_MIC_20_UA,
+};
+
+/* One register patch entry: value and the mask of bits it applies to. */
+struct msm8x16_wcd_reg_mask_val {
+	u16	reg;
+	u8	mask;
+	u8	val;
+};
+
+/* Analog section power state, used by MBHC power configuration. */
+enum msm8x16_wcd_mbhc_analog_pwr_cfg {
+	MSM8X16_WCD_ANALOG_PWR_COLLAPSED = 0,
+	MSM8X16_WCD_ANALOG_PWR_ON,
+	MSM8X16_WCD_NUM_ANALOG_PWR_CONFIGS,
+};
+
+/* Number of input and output I2S port */
+enum {
+	MSM8X16_WCD_RX1 = 0,
+	MSM8X16_WCD_RX2,
+	MSM8X16_WCD_RX3,
+	MSM8X16_WCD_RX_MAX,
+};
+
+enum {
+	MSM8X16_WCD_TX1 = 0,
+	MSM8X16_WCD_TX2,
+	MSM8X16_WCD_TX3,
+	MSM8X16_WCD_TX4,
+	MSM8X16_WCD_TX_MAX,
+};
+
+/*
+ * IRQ indices, split across MSM8X16_WCD_NUM_IRQ_REGS status registers:
+ * the first eight live in the digital peripheral, the rest in the
+ * analog peripheral.
+ */
+enum {
+	/* INTR_REG 0 - Digital Periph */
+	MSM8X16_WCD_IRQ_SPKR_CNP = 0,
+	MSM8X16_WCD_IRQ_SPKR_CLIP,
+	MSM8X16_WCD_IRQ_SPKR_OCP,
+	MSM8X16_WCD_IRQ_MBHC_INSREM_DET1,
+	MSM8X16_WCD_IRQ_MBHC_RELEASE,
+	MSM8X16_WCD_IRQ_MBHC_PRESS,
+	MSM8X16_WCD_IRQ_MBHC_INSREM_DET,
+	MSM8X16_WCD_IRQ_MBHC_HS_DET,
+	/* INTR_REG 1 - Analog Periph */
+	MSM8X16_WCD_IRQ_EAR_OCP,
+	MSM8X16_WCD_IRQ_HPHR_OCP,
+	MSM8X16_WCD_IRQ_HPHL_OCP,
+	MSM8X16_WCD_IRQ_EAR_CNP,
+	MSM8X16_WCD_IRQ_HPHR_CNP,
+	MSM8X16_WCD_IRQ_HPHL_CNP,
+	MSM8X16_WCD_NUM_IRQS,
+};
+
+/* Notifier events published around micbias and headphone PA changes. */
+enum wcd_notify_event {
+	WCD_EVENT_INVALID,
+	/* events for micbias ON and OFF */
+	WCD_EVENT_PRE_MICBIAS_2_OFF,
+	WCD_EVENT_POST_MICBIAS_2_OFF,
+	WCD_EVENT_PRE_MICBIAS_2_ON,
+	WCD_EVENT_POST_MICBIAS_2_ON,
+	/* events for PA ON and OFF */
+	WCD_EVENT_PRE_HPHL_PA_ON,
+	WCD_EVENT_POST_HPHL_PA_OFF,
+	WCD_EVENT_PRE_HPHR_PA_ON,
+	WCD_EVENT_POST_HPHR_PA_OFF,
+	WCD_EVENT_LAST,
+};
+
+/* Indices into the on-demand supply list (see ON_DEMAND_SUPPLIES_MAX). */
+enum {
+	ON_DEMAND_MICBIAS = 0,
+	ON_DEMAND_SUPPLIES_MAX,
+};
+
+/*
+ * The delay list is per codec HW specification.
+ * Please add delay in the list in the future instead
+ * of magic number
+ */
+enum {
+	CODEC_DELAY_1_MS = 1000,
+	CODEC_DELAY_1_1_MS = 1100,
+};
+#if 0
+/*
+ * NOTE(review): everything in this #if 0 block is compiled out.  It
+ * carries the downstream driver's pdata/private-state types and MBHC
+ * APIs and references types (afe_digital_clk_cfg,
+ * wcd9xxx_micbias_setting, wcd_mbhc, ...) that are not defined in this
+ * header.  Dead code should be deleted, or re-enabled only once those
+ * dependencies exist.
+ */
+struct msm8x16_wcd_regulator {
+	const char *name;
+	int min_uv;
+	int max_uv;
+	int optimum_ua;
+	bool ondemand;
+	struct regulator *regulator;
+};
+
+struct msm8916_asoc_mach_data {
+	int codec_type;
+	int ext_pa;
+	int us_euro_gpio;
+	int mclk_freq;
+	int lb_mode;
+	atomic_t mclk_rsc_ref;
+	atomic_t mclk_enabled;
+	struct mutex cdc_mclk_mutex;
+	struct delayed_work disable_mclk_work;
+	struct afe_digital_clk_cfg digital_cdc_clk;
+};
+
+struct msm8x16_wcd_pdata {
+	int irq;
+	int irq_base;
+	int num_irqs;
+	int reset_gpio;
+	void *msm8x16_wcd_ahb_base_vaddr;
+	struct wcd9xxx_micbias_setting micbias;
+	struct msm8x16_wcd_regulator regulator[MAX_REGULATOR];
+	u32 mclk_rate;
+};
+
+enum msm8x16_wcd_micbias_num {
+	MSM8X16_WCD_MICBIAS1 = 0,
+};
+
+struct msm8x16_wcd {
+	struct device *dev;
+	struct mutex io_lock;
+	u8 version;
+
+	int reset_gpio;
+	int (*read_dev)(struct snd_soc_codec *codec,
+			unsigned short reg);
+	int (*write_dev)(struct snd_soc_codec *codec,
+			 unsigned short reg, u8 val);
+
+	u32 num_of_supplies;
+	struct regulator_bulk_data *supplies;
+
+	u8 idbyte[4];
+
+	int num_irqs;
+	u32 mclk_rate;
+	char __iomem *dig_base;
+};
+
+struct on_demand_supply {
+	struct regulator *supply;
+	atomic_t ref;
+};
+
+struct msm8x16_wcd_priv {
+	struct snd_soc_codec *codec;
+	u16 pmic_rev;
+	u32 adc_count;
+	u32 rx_bias_count;
+	s32 dmic_1_2_clk_cnt;
+	u32 mute_mask;
+	bool mclk_enabled;
+	bool clock_active;
+	bool config_mode_active;
+	bool spk_boost_set;
+	bool ear_pa_boost_set;
+	bool dec_active[NUM_DECIMATORS];
+	struct on_demand_supply on_demand_list[ON_DEMAND_SUPPLIES_MAX];
+	/* mbhc module */
+	struct wcd_mbhc mbhc;
+	struct blocking_notifier_head notifier;
+
+};
+
+extern int msm8x16_wcd_mclk_enable(struct snd_soc_codec *codec, int mclk_enable,
+				   bool dapm);
+
+extern int msm8x16_wcd_hs_detect(struct snd_soc_codec *codec,
+				 struct wcd_mbhc_config *mbhc_cfg);
+
+extern void msm8x16_wcd_hs_detect_exit(struct snd_soc_codec *codec);
+
+extern int msm8x16_register_notifier(struct snd_soc_codec *codec,
+				     struct notifier_block *nblock);
+
+extern int msm8x16_unregister_notifier(struct snd_soc_codec *codec,
+				       struct notifier_block *nblock);
+#endif /* 0 — dead code */
+#endif /* MSM8X16_WCD_H */
+
diff --git a/sound/soc/codecs/msm8x16_wcd_registers.h b/sound/soc/codecs/msm8x16_wcd_registers.h
new file mode 100644
index 0000000000000..03d92c844e491
--- /dev/null
+++ b/sound/soc/codecs/msm8x16_wcd_registers.h
@@ -0,0 +1,518 @@
+ /* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef MSM8X16_WCD_REGISTERS_H
+#define MSM8X16_WCD_REGISTERS_H
+
+#define MSM8X16_WCD_A_DIGITAL_REVISION1 (0x000)
+#define MSM8X16_WCD_A_DIGITAL_REVISION1__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_REVISION2 (0x001)
+#define MSM8X16_WCD_A_DIGITAL_REVISION2__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_PERPH_TYPE (0x004)
+#define MSM8X16_WCD_A_DIGITAL_PERPH_TYPE__POR (0x23)
+#define MSM8X16_WCD_A_DIGITAL_PERPH_SUBTYPE (0x005)
+#define MSM8X16_WCD_A_DIGITAL_PERPH_SUBTYPE__POR (0x01)
+#define MSM8X16_WCD_A_DIGITAL_INT_RT_STS (0x010)
+#define MSM8X16_WCD_A_DIGITAL_INT_RT_STS__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_INT_SET_TYPE (0x011)
+#define MSM8X16_WCD_A_DIGITAL_INT_SET_TYPE__POR (0xFF)
+#define MSM8X16_WCD_A_DIGITAL_INT_POLARITY_HIGH (0x012)
+#define MSM8X16_WCD_A_DIGITAL_INT_POLARITY_HIGH__POR (0xFF)
+#define MSM8X16_WCD_A_DIGITAL_INT_POLARITY_LOW (0x013)
+#define MSM8X16_WCD_A_DIGITAL_INT_POLARITY_LOW__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_INT_LATCHED_CLR (0x014)
+#define MSM8X16_WCD_A_DIGITAL_INT_LATCHED_CLR__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_INT_EN_SET (0x015)
+#define MSM8X16_WCD_A_DIGITAL_INT_EN_SET__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_INT_EN_CLR (0x016)
+#define MSM8X16_WCD_A_DIGITAL_INT_EN_CLR__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_INT_LATCHED_STS (0x018)
+#define MSM8X16_WCD_A_DIGITAL_INT_LATCHED_STS__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_INT_PENDING_STS (0x019)
+#define MSM8X16_WCD_A_DIGITAL_INT_PENDING_STS__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_INT_MID_SEL (0x01A)
+#define MSM8X16_WCD_A_DIGITAL_INT_MID_SEL__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_INT_PRIORITY (0x01B)
+#define MSM8X16_WCD_A_DIGITAL_INT_PRIORITY__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_GPIO_MODE (0x040)
+#define MSM8X16_WCD_A_DIGITAL_GPIO_MODE__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_PIN_CTL_OE (0x041)
+#define MSM8X16_WCD_A_DIGITAL_PIN_CTL_OE__POR (0x01)
+#define MSM8X16_WCD_A_DIGITAL_PIN_CTL_DATA (0x042)
+#define MSM8X16_WCD_A_DIGITAL_PIN_CTL_DATA__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_PIN_STATUS (0x043)
+#define MSM8X16_WCD_A_DIGITAL_PIN_STATUS__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_HDRIVE_CTL (0x044)
+#define MSM8X16_WCD_A_DIGITAL_HDRIVE_CTL__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_CDC_RST_CTL (0x046)
+#define MSM8X16_WCD_A_DIGITAL_CDC_RST_CTL__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_CDC_TOP_CLK_CTL (0x048)
+#define MSM8X16_WCD_A_DIGITAL_CDC_TOP_CLK_CTL__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_CDC_ANA_CLK_CTL (0x049)
+#define MSM8X16_WCD_A_DIGITAL_CDC_ANA_CLK_CTL__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL (0x04A)
+#define MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_TX1_CTL (0x050)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_TX1_CTL__POR (0x02)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_TX2_CTL (0x051)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_TX2_CTL__POR (0x02)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_HPHR_DAC_CTL (0x052)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_HPHR_DAC_CTL__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX1_CTL (0x053)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX2_CTL (0x054)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX3_CTL (0x055)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX3_CTL__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX_LB_CTL (0x056)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX_LB_CTL__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL1 (0x058)
+#define MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL1__POR (0x7C)
+#define MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL2 (0x059)
+#define MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL2__POR (0x7C)
+#define MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL3 (0x05A)
+#define MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL3__POR (0x7C)
+#define MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA0 (0x05B)
+#define MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA0__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA1 (0x05C)
+#define MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA1__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA2 (0x05D)
+#define MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA2__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA3 (0x05E)
+#define MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA3__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_DIG_DEBUG_CTL (0x068)
+#define MSM8X16_WCD_A_DIGITAL_DIG_DEBUG_CTL__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_DIG_DEBUG_EN (0x069)
+#define MSM8X16_WCD_A_DIGITAL_DIG_DEBUG_EN__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_SPARE_0 (0x070)
+#define MSM8X16_WCD_A_DIGITAL_SPARE_0__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_SPARE_1 (0x071)
+#define MSM8X16_WCD_A_DIGITAL_SPARE_1__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_SPARE_2 (0x072)
+#define MSM8X16_WCD_A_DIGITAL_SPARE_2__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_SEC_ACCESS (0x0D0)
+#define MSM8X16_WCD_A_DIGITAL_SEC_ACCESS__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_PERPH_RESET_CTL1 (0x0D8)
+#define MSM8X16_WCD_A_DIGITAL_PERPH_RESET_CTL1__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_PERPH_RESET_CTL2 (0x0D9)
+#define MSM8X16_WCD_A_DIGITAL_PERPH_RESET_CTL2__POR (0x01)
+#define MSM8X16_WCD_A_DIGITAL_PERPH_RESET_CTL3 (0x0DA)
+#define MSM8X16_WCD_A_DIGITAL_PERPH_RESET_CTL3__POR (0x05)
+#define MSM8X16_WCD_A_DIGITAL_PERPH_RESET_CTL4 (0x0DB)
+#define MSM8X16_WCD_A_DIGITAL_PERPH_RESET_CTL4__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_INT_TEST1 (0x0E0)
+#define MSM8X16_WCD_A_DIGITAL_INT_TEST1__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_INT_TEST_VAL (0x0E1)
+#define MSM8X16_WCD_A_DIGITAL_INT_TEST_VAL__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_TRIM_NUM (0x0F0)
+#define MSM8X16_WCD_A_DIGITAL_TRIM_NUM__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_TRIM_CTRL (0x0F1)
+#define MSM8X16_WCD_A_DIGITAL_TRIM_CTRL__POR (0x00)
+
+#define MSM8X16_WCD_A_ANALOG_REVISION1 (0x100)
+#define MSM8X16_WCD_A_ANALOG_REVISION1__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_REVISION2 (0x101)
+#define MSM8X16_WCD_A_ANALOG_REVISION2__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_REVISION3 (0x102)
+#define MSM8X16_WCD_A_ANALOG_REVISION3__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_REVISION4 (0x103)
+#define MSM8X16_WCD_A_ANALOG_REVISION4__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_PERPH_TYPE (0x104)
+#define MSM8X16_WCD_A_ANALOG_PERPH_TYPE__POR (0x23)
+#define MSM8X16_WCD_A_ANALOG_PERPH_SUBTYPE (0x105)
+#define MSM8X16_WCD_A_ANALOG_PERPH_SUBTYPE__POR (0x09)
+#define MSM8X16_WCD_A_ANALOG_INT_RT_STS (0x110)
+#define MSM8X16_WCD_A_ANALOG_INT_RT_STS__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_INT_SET_TYPE (0x111)
+#define MSM8X16_WCD_A_ANALOG_INT_SET_TYPE__POR (0x3F)
+#define MSM8X16_WCD_A_ANALOG_INT_POLARITY_HIGH (0x112)
+#define MSM8X16_WCD_A_ANALOG_INT_POLARITY_HIGH__POR (0x3F)
+#define MSM8X16_WCD_A_ANALOG_INT_POLARITY_LOW (0x113)
+#define MSM8X16_WCD_A_ANALOG_INT_POLARITY_LOW__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_INT_LATCHED_CLR (0x114)
+#define MSM8X16_WCD_A_ANALOG_INT_LATCHED_CLR__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_INT_EN_SET (0x115)
+#define MSM8X16_WCD_A_ANALOG_INT_EN_SET__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_INT_EN_CLR (0x116)
+#define MSM8X16_WCD_A_ANALOG_INT_EN_CLR__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_INT_LATCHED_STS (0x118)
+#define MSM8X16_WCD_A_ANALOG_INT_LATCHED_STS__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_INT_PENDING_STS (0x119)
+#define MSM8X16_WCD_A_ANALOG_INT_PENDING_STS__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_INT_MID_SEL (0x11A)
+#define MSM8X16_WCD_A_ANALOG_INT_MID_SEL__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_INT_PRIORITY (0x11B)
+#define MSM8X16_WCD_A_ANALOG_INT_PRIORITY__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_MICB_1_EN (0x140)
+#define MSM8X16_WCD_A_ANALOG_MICB_1_EN__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_MICB_1_VAL (0x141)
+#define MSM8X16_WCD_A_ANALOG_MICB_1_VAL__POR (0x20)
+#define MSM8X16_WCD_A_ANALOG_MICB_1_CTL (0x142)
+#define MSM8X16_WCD_A_ANALOG_MICB_1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_MICB_1_INT_RBIAS (0x143)
+#define MSM8X16_WCD_A_ANALOG_MICB_1_INT_RBIAS__POR (0x49)
+#define MSM8X16_WCD_A_ANALOG_MICB_2_EN (0x144)
+#define MSM8X16_WCD_A_ANALOG_MICB_2_EN__POR (0x20)
+#define MSM8X16_WCD_A_ANALOG_TX_1_2_ATEST_CTL_2 (0x145)
+#define MSM8X16_WCD_A_ANALOG_TX_1_2_ATEST_CTL_2__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_MASTER_BIAS_CTL (0x146)
+#define MSM8X16_WCD_A_ANALOG_MASTER_BIAS_CTL__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_MBHC_DET_CTL_1 (0x147)
+#define MSM8X16_WCD_A_ANALOG_MBHC_DET_CTL_1__POR (0x35)
+#define MSM8X16_WCD_A_ANALOG_MBHC_DET_CTL_2 (0x150)
+#define MSM8X16_WCD_A_ANALOG_MBHC_DET_CTL_2__POR (0x08)
+#define MSM8X16_WCD_A_ANALOG_MBHC_FSM_CTL (0x151)
+#define MSM8X16_WCD_A_ANALOG_MBHC_FSM_CTL__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_MBHC_DBNC_TIMER (0x152)
+#define MSM8X16_WCD_A_ANALOG_MBHC_DBNC_TIMER__POR (0x98)
+#define MSM8X16_WCD_A_ANALOG_MBHC_BTN0_ZDETL_CTL (0x153)
+#define MSM8X16_WCD_A_ANALOG_MBHC_BTN0_ZDETL_CTL__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_MBHC_BTN1_ZDETM_CTL (0x154)
+#define MSM8X16_WCD_A_ANALOG_MBHC_BTN1_ZDETM_CTL__POR (0x20)
+#define MSM8X16_WCD_A_ANALOG_MBHC_BTN2_ZDETH_CTL (0x155)
+#define MSM8X16_WCD_A_ANALOG_MBHC_BTN2_ZDETH_CTL__POR (0x40)
+#define MSM8X16_WCD_A_ANALOG_MBHC_BTN3_CTL (0x156)
+#define MSM8X16_WCD_A_ANALOG_MBHC_BTN3_CTL__POR (0x61)
+#define MSM8X16_WCD_A_ANALOG_MBHC_BTN4_CTL (0x157)
+#define MSM8X16_WCD_A_ANALOG_MBHC_BTN4_CTL__POR (0x80)
+#define MSM8X16_WCD_A_ANALOG_MBHC_BTN_RESULT (0x158)
+#define MSM8X16_WCD_A_ANALOG_MBHC_BTN_RESULT__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_MBHC_ZDET_ELECT_RESULT (0x159)
+#define MSM8X16_WCD_A_ANALOG_MBHC_ZDET_ELECT_RESULT__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_TX_1_EN (0x160)
+#define MSM8X16_WCD_A_ANALOG_TX_1_EN__POR (0x03)
+#define MSM8X16_WCD_A_ANALOG_TX_2_EN (0x161)
+#define MSM8X16_WCD_A_ANALOG_TX_2_EN__POR (0x03)
+#define MSM8X16_WCD_A_ANALOG_TX_1_2_TEST_CTL_1 (0x162)
+#define MSM8X16_WCD_A_ANALOG_TX_1_2_TEST_CTL_1__POR (0xBF)
+#define MSM8X16_WCD_A_ANALOG_TX_1_2_TEST_CTL_2 (0x163)
+#define MSM8X16_WCD_A_ANALOG_TX_1_2_TEST_CTL_2__POR (0x8C)
+#define MSM8X16_WCD_A_ANALOG_TX_1_2_ATEST_CTL (0x164)
+#define MSM8X16_WCD_A_ANALOG_TX_1_2_ATEST_CTL__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_TX_1_2_OPAMP_BIAS (0x165)
+#define MSM8X16_WCD_A_ANALOG_TX_1_2_OPAMP_BIAS__POR (0x6B)
+#define MSM8X16_WCD_A_ANALOG_TX_1_2_TXFE_CLKDIV (0x166)
+#define MSM8X16_WCD_A_ANALOG_TX_1_2_TXFE_CLKDIV__POR (0x51)
+#define MSM8X16_WCD_A_ANALOG_TX_3_EN (0x167)
+#define MSM8X16_WCD_A_ANALOG_TX_3_EN__POR (0x02)
+#define MSM8X16_WCD_A_ANALOG_NCP_EN (0x180)
+#define MSM8X16_WCD_A_ANALOG_NCP_EN__POR (0x26)
+#define MSM8X16_WCD_A_ANALOG_NCP_CLK (0x181)
+#define MSM8X16_WCD_A_ANALOG_NCP_CLK__POR (0x23)
+#define MSM8X16_WCD_A_ANALOG_NCP_DEGLITCH (0x182)
+#define MSM8X16_WCD_A_ANALOG_NCP_DEGLITCH__POR (0x5B)
+#define MSM8X16_WCD_A_ANALOG_NCP_FBCTRL (0x183)
+#define MSM8X16_WCD_A_ANALOG_NCP_FBCTRL__POR (0x08)
+#define MSM8X16_WCD_A_ANALOG_NCP_BIAS (0x184)
+#define MSM8X16_WCD_A_ANALOG_NCP_BIAS__POR (0x29)
+#define MSM8X16_WCD_A_ANALOG_NCP_VCTRL (0x185)
+#define MSM8X16_WCD_A_ANALOG_NCP_VCTRL__POR (0x24)
+#define MSM8X16_WCD_A_ANALOG_NCP_TEST (0x186)
+#define MSM8X16_WCD_A_ANALOG_NCP_TEST__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_NCP_CLIM_ADDR (0x187)
+#define MSM8X16_WCD_A_ANALOG_NCP_CLIM_ADDR__POR (0xD5)
+#define MSM8X16_WCD_A_ANALOG_RX_CLOCK_DIVIDER (0x190)
+#define MSM8X16_WCD_A_ANALOG_RX_CLOCK_DIVIDER__POR (0xE8)
+#define MSM8X16_WCD_A_ANALOG_RX_COM_OCP_CTL (0x191)
+#define MSM8X16_WCD_A_ANALOG_RX_COM_OCP_CTL__POR (0xCF)
+#define MSM8X16_WCD_A_ANALOG_RX_COM_OCP_COUNT (0x192)
+#define MSM8X16_WCD_A_ANALOG_RX_COM_OCP_COUNT__POR (0x6E)
+#define MSM8X16_WCD_A_ANALOG_RX_COM_BIAS_DAC (0x193)
+#define MSM8X16_WCD_A_ANALOG_RX_COM_BIAS_DAC__POR (0x10)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_PA (0x194)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_PA__POR (0x5A)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_LDO_OCP (0x195)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_LDO_OCP__POR (0x69)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_CNP (0x196)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_CNP__POR (0x29)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_EN (0x197)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_EN__POR (0x80)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_WG_CTL (0x198)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_WG_CTL__POR (0xDA)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_WG_TIME (0x199)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_WG_TIME__POR (0x16)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_L_TEST (0x19A)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_L_TEST__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_L_PA_DAC_CTL (0x19B)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_L_PA_DAC_CTL__POR (0x20)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_R_TEST (0x19C)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_R_TEST__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_R_PA_DAC_CTL (0x19D)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_R_PA_DAC_CTL__POR (0x20)
+#define MSM8X16_WCD_A_ANALOG_RX_EAR_CTL (0x19E)
+#define MSM8X16_WCD_A_ANALOG_RX_EAR_CTL__POR (0x12)
+#define MSM8X16_WCD_A_ANALOG_RX_ATEST (0x19F)
+#define MSM8X16_WCD_A_ANALOG_RX_ATEST__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_STATUS (0x1A0)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_STATUS__POR (0x0C)
+#define MSM8X16_WCD_A_ANALOG_RX_EAR_STATUS (0x1A1)
+#define MSM8X16_WCD_A_ANALOG_RX_EAR_STATUS__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_SPKR_DAC_CTL (0x1B0)
+#define MSM8X16_WCD_A_ANALOG_SPKR_DAC_CTL__POR (0x83)
+#define MSM8X16_WCD_A_ANALOG_SPKR_DRV_CLIP_DET (0x1B1)
+#define MSM8X16_WCD_A_ANALOG_SPKR_DRV_CLIP_DET__POR (0x91)
+#define MSM8X16_WCD_A_ANALOG_SPKR_DRV_CTL (0x1B2)
+#define MSM8X16_WCD_A_ANALOG_SPKR_DRV_CTL__POR (0x29)
+#define MSM8X16_WCD_A_ANALOG_SPKR_ANA_BIAS_SET (0x1B3)
+#define MSM8X16_WCD_A_ANALOG_SPKR_ANA_BIAS_SET__POR (0x4D)
+#define MSM8X16_WCD_A_ANALOG_SPKR_OCP_CTL (0x1B4)
+#define MSM8X16_WCD_A_ANALOG_SPKR_OCP_CTL__POR (0xE1)
+#define MSM8X16_WCD_A_ANALOG_SPKR_PWRSTG_CTL (0x1B5)
+#define MSM8X16_WCD_A_ANALOG_SPKR_PWRSTG_CTL__POR (0x1E)
+#define MSM8X16_WCD_A_ANALOG_SPKR_DRV_MISC (0x1B6)
+#define MSM8X16_WCD_A_ANALOG_SPKR_DRV_MISC__POR (0xCB)
+#define MSM8X16_WCD_A_ANALOG_SPKR_DRV_DBG (0x1B7)
+#define MSM8X16_WCD_A_ANALOG_SPKR_DRV_DBG__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_CURRENT_LIMIT (0x1C0)
+#define MSM8X16_WCD_A_ANALOG_CURRENT_LIMIT__POR (0x02)
+#define MSM8X16_WCD_A_ANALOG_OUTPUT_VOLTAGE (0x1C1)
+#define MSM8X16_WCD_A_ANALOG_OUTPUT_VOLTAGE__POR (0x14)
+#define MSM8X16_WCD_A_ANALOG_BYPASS_MODE (0x1C2)
+#define MSM8X16_WCD_A_ANALOG_BYPASS_MODE__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_BOOST_EN_CTL (0x1C3)
+#define MSM8X16_WCD_A_ANALOG_BOOST_EN_CTL__POR (0x1F)
+#define MSM8X16_WCD_A_ANALOG_SLOPE_COMP_IP_ZERO (0x1C4)
+#define MSM8X16_WCD_A_ANALOG_SLOPE_COMP_IP_ZERO__POR (0x8C)
+#define MSM8X16_WCD_A_ANALOG_RDSON_MAX_DUTY_CYCLE (0x1C5)
+#define MSM8X16_WCD_A_ANALOG_RDSON_MAX_DUTY_CYCLE__POR (0xC0)
+#define MSM8X16_WCD_A_ANALOG_BOOST_TEST1_1 (0x1C6)
+#define MSM8X16_WCD_A_ANALOG_BOOST_TEST1_1__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_BOOST_TEST_2 (0x1C7)
+#define MSM8X16_WCD_A_ANALOG_BOOST_TEST_2__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_SPKR_SAR_STATUS (0x1C8)
+#define MSM8X16_WCD_A_ANALOG_SPKR_SAR_STATUS__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_SPKR_DRV_STATUS (0x1C9)
+#define MSM8X16_WCD_A_ANALOG_SPKR_DRV_STATUS__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_PBUS_ADD_CSR (0x1CE)
+#define MSM8X16_WCD_A_ANALOG_PBUS_ADD_CSR__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_PBUS_ADD_SEL (0x1CF)
+#define MSM8X16_WCD_A_ANALOG_PBUS_ADD_SEL__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_SEC_ACCESS (0x1D0)
+#define MSM8X16_WCD_A_ANALOG_SEC_ACCESS__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_PERPH_RESET_CTL1 (0x1D8)
+#define MSM8X16_WCD_A_ANALOG_PERPH_RESET_CTL1__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_PERPH_RESET_CTL2 (0x1D9)
+#define MSM8X16_WCD_A_ANALOG_PERPH_RESET_CTL2__POR (0x01)
+#define MSM8X16_WCD_A_ANALOG_PERPH_RESET_CTL3 (0x1DA)
+#define MSM8X16_WCD_A_ANALOG_PERPH_RESET_CTL3__POR (0x05)
+#define MSM8X16_WCD_A_ANALOG_PERPH_RESET_CTL4 (0x1DB)
+#define MSM8X16_WCD_A_ANALOG_PERPH_RESET_CTL4__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_INT_TEST1 (0x1E0)
+#define MSM8X16_WCD_A_ANALOG_INT_TEST1__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_INT_TEST_VAL (0x1E1)
+#define MSM8X16_WCD_A_ANALOG_INT_TEST_VAL__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_TRIM_NUM (0x1F0)
+#define MSM8X16_WCD_A_ANALOG_TRIM_NUM__POR (0x04)
+#define MSM8X16_WCD_A_ANALOG_TRIM_CTRL1 (0x1F1)
+#define MSM8X16_WCD_A_ANALOG_TRIM_CTRL1__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_TRIM_CTRL2 (0x1F2)
+#define MSM8X16_WCD_A_ANALOG_TRIM_CTRL2__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_TRIM_CTRL3 (0x1F3)
+#define MSM8X16_WCD_A_ANALOG_TRIM_CTRL3__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_TRIM_CTRL4 (0x1F4)
+#define MSM8X16_WCD_A_ANALOG_TRIM_CTRL4__POR (0x00)
+
+/* Digital part */
+#define MSM8X16_WCD_A_CDC_CLK_RX_RESET_CTL (0x200)
+#define MSM8X16_WCD_A_CDC_CLK_RX_RESET_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CLK_TX_RESET_B1_CTL (0x204)
+#define MSM8X16_WCD_A_CDC_CLK_TX_RESET_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CLK_DMIC_B1_CTL (0x208)
+#define MSM8X16_WCD_A_CDC_CLK_DMIC_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CLK_RX_I2S_CTL (0x20C)
+#define MSM8X16_WCD_A_CDC_CLK_RX_I2S_CTL__POR (0x13)
+#define MSM8X16_WCD_A_CDC_CLK_TX_I2S_CTL (0x210)
+#define MSM8X16_WCD_A_CDC_CLK_TX_I2S_CTL__POR (0x13)
+#define MSM8X16_WCD_A_CDC_CLK_OTHR_RESET_B1_CTL (0x214)
+#define MSM8X16_WCD_A_CDC_CLK_OTHR_RESET_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CLK_TX_CLK_EN_B1_CTL (0x218)
+#define MSM8X16_WCD_A_CDC_CLK_TX_CLK_EN_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CLK_OTHR_CTL (0x21C)
+#define MSM8X16_WCD_A_CDC_CLK_OTHR_CTL__POR (0x04)
+#define MSM8X16_WCD_A_CDC_CLK_RX_B1_CTL (0x220)
+#define MSM8X16_WCD_A_CDC_CLK_RX_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CLK_MCLK_CTL (0x224)
+#define MSM8X16_WCD_A_CDC_CLK_MCLK_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CLK_PDM_CTL (0x228)
+#define MSM8X16_WCD_A_CDC_CLK_PDM_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CLK_SD_CTL (0x22C)
+#define MSM8X16_WCD_A_CDC_CLK_SD_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX1_B1_CTL (0x240)
+#define MSM8X16_WCD_A_CDC_RX1_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX2_B1_CTL (0x260)
+#define MSM8X16_WCD_A_CDC_RX2_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX3_B1_CTL (0x280)
+#define MSM8X16_WCD_A_CDC_RX3_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX1_B2_CTL (0x244)
+#define MSM8X16_WCD_A_CDC_RX1_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX2_B2_CTL (0x264)
+#define MSM8X16_WCD_A_CDC_RX2_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX3_B2_CTL (0x284)
+#define MSM8X16_WCD_A_CDC_RX3_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX1_B3_CTL (0x248)
+#define MSM8X16_WCD_A_CDC_RX1_B3_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX2_B3_CTL (0x268)
+#define MSM8X16_WCD_A_CDC_RX2_B3_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX3_B3_CTL (0x288)
+#define MSM8X16_WCD_A_CDC_RX3_B3_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX1_B4_CTL (0x24C)
+#define MSM8X16_WCD_A_CDC_RX1_B4_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX2_B4_CTL (0x26C)
+#define MSM8X16_WCD_A_CDC_RX2_B4_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX3_B4_CTL (0x28C)
+#define MSM8X16_WCD_A_CDC_RX3_B4_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX1_B5_CTL (0x250)
+#define MSM8X16_WCD_A_CDC_RX1_B5_CTL__POR (0x68)
+#define MSM8X16_WCD_A_CDC_RX2_B5_CTL (0x270)
+#define MSM8X16_WCD_A_CDC_RX2_B5_CTL__POR (0x68)
+#define MSM8X16_WCD_A_CDC_RX3_B5_CTL (0x290)
+#define MSM8X16_WCD_A_CDC_RX3_B5_CTL__POR (0x68)
+#define MSM8X16_WCD_A_CDC_RX1_B6_CTL (0x254)
+#define MSM8X16_WCD_A_CDC_RX1_B6_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX2_B6_CTL (0x274)
+#define MSM8X16_WCD_A_CDC_RX2_B6_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX3_B6_CTL (0x294)
+#define MSM8X16_WCD_A_CDC_RX3_B6_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX1_VOL_CTL_B1_CTL (0x258)
+#define MSM8X16_WCD_A_CDC_RX1_VOL_CTL_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX2_VOL_CTL_B1_CTL (0x278)
+#define MSM8X16_WCD_A_CDC_RX2_VOL_CTL_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX3_VOL_CTL_B1_CTL (0x298)
+#define MSM8X16_WCD_A_CDC_RX3_VOL_CTL_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX1_VOL_CTL_B2_CTL (0x25C)
+#define MSM8X16_WCD_A_CDC_RX1_VOL_CTL_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX2_VOL_CTL_B2_CTL (0x27C)
+#define MSM8X16_WCD_A_CDC_RX2_VOL_CTL_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX3_VOL_CTL_B2_CTL (0x29C)
+#define MSM8X16_WCD_A_CDC_RX3_VOL_CTL_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_TOP_GAIN_UPDATE (0x2A0)
+#define MSM8X16_WCD_A_CDC_TOP_GAIN_UPDATE__POR (0x00)
+#define MSM8X16_WCD_A_CDC_TOP_CTL (0x2A4)
+#define MSM8X16_WCD_A_CDC_TOP_CTL__POR (0x01)
+#define MSM8X16_WCD_A_CDC_DEBUG_DESER1_CTL (0x2E0)
+#define MSM8X16_WCD_A_CDC_DEBUG_DESER1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_DEBUG_DESER2_CTL (0x2E4)
+#define MSM8X16_WCD_A_CDC_DEBUG_DESER2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_DEBUG_B1_CTL_CFG (0x2E8)
+#define MSM8X16_WCD_A_CDC_DEBUG_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_DEBUG_B2_CTL_CFG (0x2EC)
+#define MSM8X16_WCD_A_CDC_DEBUG_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_DEBUG_B3_CTL_CFG (0x2F0)
+#define MSM8X16_WCD_A_CDC_DEBUG_B3_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B1_CTL (0x300)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B1_CTL (0x340)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B2_CTL (0x304)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B2_CTL (0x344)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B3_CTL (0x308)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B3_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B3_CTL (0x348)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B3_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B4_CTL (0x30C)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B4_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B4_CTL (0x34C)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B4_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B5_CTL (0x310)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B5_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B5_CTL (0x350)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B5_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B6_CTL (0x314)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B6_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B6_CTL (0x354)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B6_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B7_CTL (0x318)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B7_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B7_CTL (0x358)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B7_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B8_CTL (0x31C)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B8_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B8_CTL (0x35C)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B8_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR1_CTL (0x320)
+#define MSM8X16_WCD_A_CDC_IIR1_CTL__POR (0x40)
+#define MSM8X16_WCD_A_CDC_IIR2_CTL (0x360)
+#define MSM8X16_WCD_A_CDC_IIR2_CTL__POR (0x40)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_TIMER_CTL (0x324)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_TIMER_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_TIMER_CTL (0x364)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_TIMER_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR1_COEF_B1_CTL (0x328)
+#define MSM8X16_WCD_A_CDC_IIR1_COEF_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR2_COEF_B1_CTL (0x368)
+#define MSM8X16_WCD_A_CDC_IIR2_COEF_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR1_COEF_B2_CTL (0x32C)
+#define MSM8X16_WCD_A_CDC_IIR1_COEF_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR2_COEF_B2_CTL (0x36C)
+#define MSM8X16_WCD_A_CDC_IIR2_COEF_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_RX1_B1_CTL (0x380)
+#define MSM8X16_WCD_A_CDC_CONN_RX1_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_RX1_B2_CTL (0x384)
+#define MSM8X16_WCD_A_CDC_CONN_RX1_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_RX1_B3_CTL (0x388)
+#define MSM8X16_WCD_A_CDC_CONN_RX1_B3_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_RX2_B1_CTL (0x38C)
+#define MSM8X16_WCD_A_CDC_CONN_RX2_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_RX2_B2_CTL (0x390)
+#define MSM8X16_WCD_A_CDC_CONN_RX2_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_RX2_B3_CTL (0x394)
+#define MSM8X16_WCD_A_CDC_CONN_RX2_B3_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_RX3_B1_CTL (0x398)
+#define MSM8X16_WCD_A_CDC_CONN_RX3_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_RX3_B2_CTL (0x39C)
+#define MSM8X16_WCD_A_CDC_CONN_RX3_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_TX_B1_CTL (0x3A0)
+#define MSM8X16_WCD_A_CDC_CONN_TX_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_EQ1_B1_CTL (0x3A8)
+#define MSM8X16_WCD_A_CDC_CONN_EQ1_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_EQ1_B2_CTL (0x3AC)
+#define MSM8X16_WCD_A_CDC_CONN_EQ1_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_EQ1_B3_CTL (0x3B0)
+#define MSM8X16_WCD_A_CDC_CONN_EQ1_B3_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_EQ1_B4_CTL (0x3B4)
+#define MSM8X16_WCD_A_CDC_CONN_EQ1_B4_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_EQ2_B1_CTL (0x3B8)
+#define MSM8X16_WCD_A_CDC_CONN_EQ2_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_EQ2_B2_CTL (0x3BC)
+#define MSM8X16_WCD_A_CDC_CONN_EQ2_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_EQ2_B3_CTL (0x3C0)
+#define MSM8X16_WCD_A_CDC_CONN_EQ2_B3_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_EQ2_B4_CTL (0x3C4)
+#define MSM8X16_WCD_A_CDC_CONN_EQ2_B4_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_TX_I2S_SD1_CTL (0x3C8)
+#define MSM8X16_WCD_A_CDC_CONN_TX_I2S_SD1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_TX1_VOL_CTL_TIMER (0x480)
+#define MSM8X16_WCD_A_CDC_TX1_VOL_CTL_TIMER__POR (0x00)
+#define MSM8X16_WCD_A_CDC_TX2_VOL_CTL_TIMER (0x4A0)
+#define MSM8X16_WCD_A_CDC_TX2_VOL_CTL_TIMER__POR (0x00)
+#define MSM8X16_WCD_A_CDC_TX1_VOL_CTL_GAIN (0x484)
+#define MSM8X16_WCD_A_CDC_TX1_VOL_CTL_GAIN__POR (0x00)
+#define MSM8X16_WCD_A_CDC_TX2_VOL_CTL_GAIN (0x4A4)
+#define MSM8X16_WCD_A_CDC_TX2_VOL_CTL_GAIN__POR (0x00)
+#define MSM8X16_WCD_A_CDC_TX1_VOL_CTL_CFG (0x488)
+#define MSM8X16_WCD_A_CDC_TX1_VOL_CTL_CFG__POR (0x00)
+#define MSM8X16_WCD_A_CDC_TX2_VOL_CTL_CFG (0x4A8)
+#define MSM8X16_WCD_A_CDC_TX2_VOL_CTL_CFG__POR (0x00)
+#define MSM8X16_WCD_A_CDC_TX1_MUX_CTL (0x48C)
+#define MSM8X16_WCD_A_CDC_TX1_MUX_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_TX2_MUX_CTL (0x4AC)
+#define MSM8X16_WCD_A_CDC_TX2_MUX_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_TX1_CLK_FS_CTL (0x490)
+#define MSM8X16_WCD_A_CDC_TX1_CLK_FS_CTL__POR (0x03)
+#define MSM8X16_WCD_A_CDC_TX2_CLK_FS_CTL (0x4B0)
+#define MSM8X16_WCD_A_CDC_TX2_CLK_FS_CTL__POR (0x03)
+#define MSM8X16_WCD_A_CDC_TX1_DMIC_CTL (0x494)
+#define MSM8X16_WCD_A_CDC_TX1_DMIC_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_TX2_DMIC_CTL (0x4B4)
+#define MSM8X16_WCD_A_CDC_TX2_DMIC_CTL__POR (0x00)
+#endif
diff --git a/sound/soc/qcom/Kconfig b/sound/soc/qcom/Kconfig
index 8ec9a074b38bd..40a884e34910e 100644
--- a/sound/soc/qcom/Kconfig
+++ b/sound/soc/qcom/Kconfig
@@ -43,3 +43,65 @@ config SND_SOC_APQ8016_SBC
Support for Qualcomm Technologies LPASS audio block in
APQ8016 SOC-based systems.
Say Y if you want to use audio devices on MI2S.
+
+menu "QCOM QDSP Audio support"
+
+config SND_MSM_DAI_SOC
+ tristate
+
+config SND_MSM_SOC
+ tristate "SoC Audio for the MSM series chips"
+ select SND_MSM_DAI_SOC
+ select SND_MSM_SOC_MSM7K
+ default n
+ help
+ To add support for ALSA PCM driver for MSM board.
+
+config SND_SOC_MSM_QDSP6_HDMI_AUDIO
+	tristate "SoC QDSP6 HDMI Audio DAI driver"
+ default n
+ help
+ To support HDMI Audio on MSM8960 over QDSP6.
+
+config SND_SOC_MSM_QDSP6_INTF
+ bool "SoC Q6 audio driver for MSM8960"
+ default n
+ help
+ To add support for SoC audio on MSM8960.
+
+config SND_SOC_MSM_QDSP6V2_INTF
+ bool "SoC Q6 audio driver for MSM8974"
+ help
+ To add support for SoC audio on MSM8974.
+ This will enable all the platform specific
+ interactions towards DSP. It includes asm,
+ adm and afe interfaces on the DSP.
+
+
+config SND_SOC_QDSP6
+ tristate "SoC ALSA audio driver for QDSP6"
+ select SND_SOC_MSM_QDSP6_INTF
+ default n
+ help
+ To add support for MSM QDSP6 Soc Audio.
+
+config SND_SOC_QDSP6V2
+ tristate "SoC ALSA audio driver for QDSP6V2"
+ select SND_SOC_MSM_QDSP6V2_INTF
+ help
+ To add support for MSM QDSP6V2 Soc Audio.
+ This will enable sound soc platform specific
+ audio drivers. This includes q6asm, q6adm,
+ q6afe interfaces to DSP using apr.
+
+config SND_SOC_MSM8960
+ tristate "SoC Machine driver for MSM8960 and APQ8064 boards"
+ select SND_SOC_QDSP6
+ select SND_SOC_MSM_STUB
+ select SND_SOC_MSM_HOSTLESS_PCM
+ select SND_SOC_MSM_QDSP6_HDMI_AUDIO if FB_MSM_HDMI_MSM_PANEL
+ default n
+ help
+ To add support for SoC audio on MSM8960 and APQ8064 boards
+
+endmenu
diff --git a/sound/soc/qcom/Makefile b/sound/soc/qcom/Makefile
index 79e5c50a8f71b..a313fdf8ce1c6 100644
--- a/sound/soc/qcom/Makefile
+++ b/sound/soc/qcom/Makefile
@@ -15,3 +15,12 @@ snd-soc-apq8016-sbc-objs := apq8016_sbc.o
obj-$(CONFIG_SND_SOC_STORM) += snd-soc-storm.o
obj-$(CONFIG_SND_SOC_APQ8016_SBC) += snd-soc-apq8016-sbc.o
+
+# for MSM 8960 sound card driver
+obj-$(CONFIG_SND_SOC_MSM_QDSP6_INTF) += qdsp6/
+snd-soc-qdsp6-objs := msm-pcm-q6.o msm-pcm-routing.o msm-dai-fe.o
+obj-$(CONFIG_SND_SOC_MSM_QDSP6_HDMI_AUDIO) += msm-dai-q6-hdmi.o
+obj-$(CONFIG_SND_SOC_QDSP6) += snd-soc-qdsp6.o
+
+snd-soc-msm8960-objs := apq8064.o
+obj-$(CONFIG_SND_SOC_MSM8960) += snd-soc-msm8960.o
diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c
index 1289543c8fb25..a295f254ea112 100644
--- a/sound/soc/qcom/apq8016_sbc.c
+++ b/sound/soc/qcom/apq8016_sbc.c
@@ -85,6 +85,15 @@ static struct apq8016_sbc_data *apq8016_sbc_parse_of(struct snd_soc_card *card)
return ERR_PTR(ret);
}
+ /* DAPM routes */
+ if (of_property_read_bool(node, "qcom,audio-routing")) {
+ ret = snd_soc_of_parse_audio_routing(card,
+ "qcom,audio-routing");
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+
/* Populate links */
num_links = of_get_child_count(node);
@@ -147,6 +156,17 @@ static struct apq8016_sbc_data *apq8016_sbc_parse_of(struct snd_soc_card *card)
return data;
}
+static const struct snd_soc_dapm_widget apq8016_sbc_dapm_widgets[] = {
+
+	/* FIXME: add an MCLK supply widget (SND_SOC_DAPM_SUPPLY_S with an
+	 * msm8x16_mclk_event handler) once codec MCLK control is wired up. */
+	SND_SOC_DAPM_MIC("Handset Mic", NULL),
+	SND_SOC_DAPM_MIC("Headset Mic", NULL),
+	SND_SOC_DAPM_MIC("Secondary Mic", NULL),
+	SND_SOC_DAPM_MIC("Digital Mic1", NULL),
+	SND_SOC_DAPM_MIC("Digital Mic2", NULL),
+};
+
static int apq8016_sbc_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -159,6 +179,8 @@ static int apq8016_sbc_platform_probe(struct platform_device *pdev)
return -ENOMEM;
card->dev = dev;
+ card->dapm_widgets = apq8016_sbc_dapm_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(apq8016_sbc_dapm_widgets);
data = apq8016_sbc_parse_of(card);
if (IS_ERR(data)) {
dev_err(&pdev->dev, "Error resolving dai links: %ld\n",
diff --git a/sound/soc/qcom/apq8064.c b/sound/soc/qcom/apq8064.c
new file mode 100644
index 0000000000000..473810999e636
--- /dev/null
+++ b/sound/soc/qcom/apq8064.c
@@ -0,0 +1,170 @@
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+
+#include <linux/pm_runtime.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include "msm-pcm-routing.h"
+
+/* RX channel count selected via the "HDMI_RX Channels" control (2..8). */
+static int msm_hdmi_rx_ch = 2;
+/* 0 = "Default" (force 48 kHz in the BE fixup), 1 = "Variable". */
+static int hdmi_rate_variable;
+
+/* Control enum texts; both tables are fully const for consistency. */
+static const char * const hdmi_rx_ch_text[] = {"Two", "Three", "Four", "Five",
+					"Six", "Seven", "Eight"};
+static const char * const hdmi_rate[] = {"Default", "Variable"};
+
+static const struct soc_enum msm_enum[] = {
+	SOC_ENUM_SINGLE_EXT(7, hdmi_rx_ch_text),
+	SOC_ENUM_SINGLE_EXT(2, hdmi_rate),
+};
+
+static int msm_hdmi_rx_ch_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	/* Enum index 0 maps to two channels, so report (channels - 2). */
+	ucontrol->value.integer.value[0] = msm_hdmi_rx_ch - 2;
+	pr_debug("%s: msm_hdmi_rx_ch = %d\n", __func__,
+		 msm_hdmi_rx_ch);
+	return 0;
+}
+
+static int msm_hdmi_rx_ch_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	int ch = ucontrol->value.integer.value[0] + 2;
+
+	/* Enum index 0 means two channels; store the real channel count. */
+	msm_hdmi_rx_ch = ch;
+	pr_debug("%s: msm_hdmi_rx_ch = %d\n", __func__,
+		 msm_hdmi_rx_ch);
+	return 1;
+}
+
+static int msm_hdmi_rate_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	int value = ucontrol->value.integer.value[0];
+
+	/* ALSA put() callbacks must return 1 when the value changed so a
+	 * value-change notification is generated (the original always
+	 * returned 0, suppressing events).
+	 */
+	if (hdmi_rate_variable == value)
+		return 0;
+
+	hdmi_rate_variable = value;
+	pr_debug("%s: hdmi_rate_variable = %d\n", __func__, hdmi_rate_variable);
+	return 1;
+}
+
+static int msm_hdmi_rate_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	/* 0 = "Default" (fixed 48 kHz), 1 = "Variable" (see hdmi_rate[]). */
+	ucontrol->value.integer.value[0] = hdmi_rate_variable;
+	return 0;
+}
+
+/* Card-level mixer controls exposed to userspace. */
+static const struct snd_kcontrol_new tabla_msm_controls[] = {
+	SOC_ENUM_EXT("HDMI_RX Channels", msm_enum[0],
+		     msm_hdmi_rx_ch_get, msm_hdmi_rx_ch_put),
+	SOC_ENUM_EXT("HDMI RX Rate", msm_enum[1],
+					msm_hdmi_rate_get,
+					msm_hdmi_rate_put),
+};
+
+/* Backend fixup: pin the HDMI rate/channels to the user-selected values. */
+int msm_hdmi_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+	struct snd_pcm_hw_params *params)
+{
+	struct snd_interval *rate_iv;
+	struct snd_interval *chan_iv;
+	unsigned int ch;
+
+	rate_iv = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+	chan_iv = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
+
+	pr_debug("%s channels->min %u channels->max %u ()\n", __func__,
+			chan_iv->min, chan_iv->max);
+
+	/* "Default" rate mode forces 48 kHz. */
+	if (!hdmi_rate_variable)
+		rate_iv->min = rate_iv->max = 48000;
+
+	/* Clamp to at least stereo. */
+	ch = msm_hdmi_rx_ch < 2 ? 2 : msm_hdmi_rx_ch;
+	chan_iv->min = chan_iv->max = ch;
+
+	return 0;
+}
+
+/* Digital audio interface glue - connects codec <---> CPU */
+static struct snd_soc_dai_link msm_dai[] = {
+	/* FrontEnd DAI Links */
+	{
+		.name = "MultiMedia1 PCM",
+		.stream_name = "MultiMedia1 Playback",
+		.cpu_dai_name = "MultiMedia1",
+		.platform_name = "soc:msm_pcm",
+		.dynamic = 1,
+		.dpcm_playback = 1,
+		.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+		.codec_dai_name = "snd-soc-dummy-dai",
+		.codec_name = "snd-soc-dummy",
+		.ignore_suspend = 1,
+		.ignore_pmdown_time = 1, /* this dailink has playback support */
+		.be_id = MSM_FRONTEND_DAI_MULTIMEDIA1,
+		/* NOTE(review): a be_hw_params_fixup on a frontend link is
+		 * unusual -- confirm it is intentional here. */
+		.be_hw_params_fixup = msm_hdmi_be_hw_params_fixup,
+
+	},
+	/* HDMI BACK END DAI Link */
+	{
+		.name = LPASS_BE_HDMI,
+		.stream_name = "HDMI Playback",
+		.cpu_dai_name = "HDMI",
+		.platform_name = "soc:msm_pcm_routing",
+		.codec_name = "hdmi-audio-codec.0.auto",
+		.codec_dai_name = "i2s-hifi",
+		.no_pcm = 1, /* backend link: no PCM device of its own */
+		.dpcm_playback = 1,
+		.be_id = MSM_BACKEND_DAI_HDMI_RX,
+		/* Forces 48 kHz / selected channel count on the BE. */
+		.be_hw_params_fixup = msm_hdmi_be_hw_params_fixup,
+
+	},
+};
+
+/* Machine-level sound card; .dev is filled in at platform probe time. */
+static struct snd_soc_card snd_soc_card_msm = {
+	.name = "apq8064-tabla-snd-card",
+	.dai_link = msm_dai,
+	.num_links = ARRAY_SIZE(msm_dai),
+	.owner = THIS_MODULE,
+	.controls = tabla_msm_controls,
+	.num_controls = ARRAY_SIZE(tabla_msm_controls),
+};
+
+static int msm_snd_apq8064_probe(struct platform_device *pdev)
+{
+	struct snd_soc_card *card = &snd_soc_card_msm;
+	int ret;
+
+	/* Bind the statically-defined card to this platform device. */
+	card->dev = &pdev->dev;
+
+	ret = snd_soc_register_card(card);
+	if (ret)
+		dev_err(&pdev->dev, "Error: snd_soc_register_card failed (%d)!\n", ret);
+
+	return ret;
+}
+
+static int msm_snd_apq8064_remove(struct platform_device *pdev)
+{
+	/* Undo probe: the card registered there was never released,
+	 * leaking it on driver unbind.
+	 */
+	snd_soc_unregister_card(&snd_soc_card_msm);
+	return 0;
+}
+
+static const struct of_device_id msm_snd_apq8064_dt_match[] = {
+	{.compatible = "qcom,snd-apq8064"},
+	{}
+};
+/* Needed so the module autoloads when a matching DT node is present. */
+MODULE_DEVICE_TABLE(of, msm_snd_apq8064_dt_match);
+
+/* Platform driver glue; matched via DT compatible or by driver name. */
+static struct platform_driver msm_snd_apq8064_driver = {
+	.probe = msm_snd_apq8064_probe,
+	.remove = msm_snd_apq8064_remove,
+	.driver = {
+		.name = "msm-snd-apq8064",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_snd_apq8064_dt_match,
+	},
+};
+module_platform_driver(msm_snd_apq8064_driver);
+
+/* Module information */
+MODULE_DESCRIPTION("ALSA SoC msm");
+MODULE_LICENSE("GPL v2");
+
diff --git a/sound/soc/qcom/msm-dai-fe.c b/sound/soc/qcom/msm-dai-fe.c
new file mode 100644
index 0000000000000..e0f3bbe6731b0
--- /dev/null
+++ b/sound/soc/qcom/msm-dai-fe.c
@@ -0,0 +1,101 @@
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+
+/* Conventional and unconventional sample rate supported */
+static unsigned int supported_sample_rates[] = {
+	8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
+};
+
+/* Constraint list installed on the runtime in multimedia_startup(). */
+static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
+	.count = ARRAY_SIZE(supported_sample_rates),
+	.list = supported_sample_rates,
+	.mask = 0,
+};
+
+static int multimedia_startup(struct snd_pcm_substream *substream,
+	struct snd_soc_dai *dai)
+{
+	/* Restrict the FE runtime to the supported rate list; propagate
+	 * any constraint-setup error instead of silently ignoring it.
+	 */
+	return snd_pcm_hw_constraint_list(substream->runtime, 0,
+				SNDRV_PCM_HW_PARAM_RATE,
+				&constraints_sample_rates);
+}
+
+/* Frontend DAI callbacks: only ->startup is needed (rate constraints). */
+static struct snd_soc_dai_ops msm_fe_Multimedia_dai_ops = {
+	.startup = multimedia_startup,
+};
+
+static struct snd_soc_dai_driver msm_fe_dais[] = {
+	{
+		.name = "MultiMedia1",
+		.id = 1,
+		.playback = {
+			.stream_name = "MultiMedia1 Playback",
+			/* RATE_KNOT advertises the non-standard entries of
+			 * supported_sample_rates (11025/12000/22050/24000). */
+			.rates = (SNDRV_PCM_RATE_8000_48000|
+				  SNDRV_PCM_RATE_KNOT),
+			.formats = SNDRV_PCM_FMTBIT_S16_LE,
+			.channels_min = 1,
+			.channels_max = 6,
+			.rate_min = 8000,
+			.rate_max = 48000,
+		},
+		.ops = &msm_fe_Multimedia_dai_ops,
+	},
+};
+
+/* Bare ASoC component used only to host the FE DAIs. */
+static const struct snd_soc_component_driver msm_dai_fe_component = {
+	.name = "msm-dai-fe",
+};
+
+static int msm_fe_dai_dev_probe(struct platform_device *pdev)
+{
+	/* Expose the frontend DAIs through a bare ASoC component. */
+	return snd_soc_register_component(&pdev->dev, &msm_dai_fe_component,
+					  msm_fe_dais, ARRAY_SIZE(msm_fe_dais));
+}
+
+static int msm_fe_dai_dev_remove(struct platform_device *pdev)
+{
+	/* Mirror of probe: drop the component and its DAIs. */
+	snd_soc_unregister_component(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id msm_dai_fe_dt_match[] = {
+	{.compatible = "qcom,msm-dai-fe"},
+	{}
+};
+/* Needed so the module autoloads when a matching DT node is present. */
+MODULE_DEVICE_TABLE(of, msm_dai_fe_dt_match);
+
+/* Platform driver glue; matched via DT compatible or by driver name. */
+static struct platform_driver msm_fe_dai_driver = {
+	.probe  = msm_fe_dai_dev_probe,
+	.remove = msm_fe_dai_dev_remove,
+	.driver = {
+		.name = "msm-dai-fe",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_dai_fe_dt_match,
+	},
+};
+module_platform_driver(msm_fe_dai_driver);
+/* Module information */
+MODULE_DESCRIPTION("MSM Frontend DAI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/qcom/msm-dai-q6-hdmi.c b/sound/soc/qcom/msm-dai-q6-hdmi.c
new file mode 100644
index 0000000000000..064052fbc7f15
--- /dev/null
+++ b/sound/soc/qcom/msm-dai-q6-hdmi.c
@@ -0,0 +1,298 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/apr_audio.h>
+#include <sound/q6afe.h>
+#include <sound/q6adm.h>
+#include <sound/msm-dai-q6.h>
+#include <sound/msm_hdmi_audio.h>
+
+enum {
+	STATUS_PORT_STARTED, /* track if AFE port has started */
+	STATUS_MAX
+};
+
+/* Per-DAI state, stored as drvdata on the DAI's device. */
+struct msm_dai_q6_hdmi_dai_data {
+	DECLARE_BITMAP(status_mask, STATUS_MAX); /* STATUS_* flags */
+	u32 rate;       /* current sample rate in Hz */
+	u32 channels;   /* current channel count */
+	union afe_port_config port_config; /* AFE HDMI port configuration */
+};
+
+static int msm_dai_q6_hdmi_format_put(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_hdmi_dai_data *dai_data = kcontrol->private_data;
+	int value = ucontrol->value.integer.value[0];
+
+	/* ALSA put() callbacks must return 1 on change so userspace gets a
+	 * value-change notification (the original always returned 0).
+	 */
+	if (dai_data->port_config.hdmi_multi_ch.data_type == value)
+		return 0;
+
+	dai_data->port_config.hdmi_multi_ch.data_type = value;
+	pr_debug("%s: value = %d\n", __func__, value);
+	return 1;
+}
+
+static int msm_dai_q6_hdmi_format_get(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_value *ucontrol)
+{
+	struct msm_dai_q6_hdmi_dai_data *dai_data = kcontrol->private_data;
+
+	/* 0 = LPCM, 1 = compressed (see hdmi_format[]). */
+	ucontrol->value.integer.value[0] =
+		dai_data->port_config.hdmi_multi_ch.data_type;
+	return 0;
+}
+
+/* HDMI format field for AFE_PORT_MULTI_CHAN_HDMI_AUDIO_IF_CONFIG command
+ * 0: linear PCM
+ * 1: non-linear PCM
+ */
+static const char *hdmi_format[] = {
+	"LPCM",
+	"Compr"
+};
+
+static const struct soc_enum hdmi_config_enum[] = {
+	SOC_ENUM_SINGLE_EXT(2, hdmi_format),
+};
+
+/* "HDMI RX Format" control; private_data is set to the DAI's dai_data
+ * when the control is instantiated in msm_dai_q6_hdmi_dai_probe(). */
+static const struct snd_kcontrol_new hdmi_config_controls[] = {
+	SOC_ENUM_EXT("HDMI RX Format", hdmi_config_enum[0],
+				msm_dai_q6_hdmi_format_get,
+				msm_dai_q6_hdmi_format_put),
+};
+/* Current implementation assumes hw_param is called once
+ * This may not be the case but what to do when ADM and AFE
+ * port are already opened and parameter changes
+ */
+static int msm_dai_q6_hdmi_hw_params(struct snd_pcm_substream *substream,
+	struct snd_pcm_hw_params *params,
+	struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_hdmi_dai_data *dai_data = dev_get_drvdata(dai->dev);
+	u32 channel_allocation;
+
+	dai_data->port_config.hdmi_multi_ch.reserved = 0;
+
+	/* FIXME: the requested configuration is overridden with fixed
+	 * 2ch/48kHz; honour params_channels()/params_rate() once the AFE
+	 * path supports other setups. (Dead locals for level shift /
+	 * downmix / HDMI_SAMPLE_RATE_* mapping were removed; they were
+	 * computed but never used.)
+	 */
+	dai_data->channels = 2;
+	dai_data->rate = 48000;
+
+	/* CEA-861 channel allocation for the HDMI audio infoframe. */
+	switch (dai_data->channels) {
+	case 2:
+		channel_allocation = 0;
+		break;
+	case 6:
+		channel_allocation = 0x0B;
+		break;
+	case 8:
+		channel_allocation = 0x1F;
+		break;
+	default:
+		dev_err(dai->dev, "invalid Channels = %u\n",
+				dai_data->channels);
+		return -EINVAL;
+	}
+	dai_data->port_config.hdmi_multi_ch.channel_allocation =
+			channel_allocation;
+
+	dev_info(dai->dev, "%s() num_ch = %u rate =%u"
+		" channel_allocation = %u data type = %d\n", __func__,
+		dai_data->channels,
+		dai_data->rate,
+		dai_data->port_config.hdmi_multi_ch.channel_allocation,
+		dai_data->port_config.hdmi_multi_ch.data_type);
+
+	return 0;
+}
+
+static void msm_dai_q6_hdmi_shutdown(struct snd_pcm_substream *substream,
+	struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_hdmi_dai_data *dai_data = dev_get_drvdata(dai->dev);
+	int rc;
+
+	if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		pr_info("%s: afe port not started. dai_data->status_mask"
+			" = %ld\n", __func__, *dai_data->status_mask);
+		return;
+	}
+
+	rc = afe_close(HDMI_RX); /* can block */
+	/* afe_close() returns a plain negative errno; IS_ERR_VALUE() is
+	 * meant for pointer-sized values, so test the sign directly.
+	 */
+	if (rc < 0)
+		dev_err(dai->dev, "fail to close AFE port\n");
+
+	pr_debug("%s: dai_data->status_mask = %ld\n", __func__,
+			*dai_data->status_mask);
+
+	clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
+}
+
+
+static int msm_dai_q6_hdmi_prepare(struct snd_pcm_substream *substream,
+	struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_hdmi_dai_data *dai_data = dev_get_drvdata(dai->dev);
+	int rc = 0;
+
+	/* Default to stereo LPCM at 48 kHz before starting the AFE port.
+	 * NOTE(review): this clobbers the "HDMI RX Format" mixer setting
+	 * (data_type) on every prepare -- confirm that is intended.
+	 */
+	dai_data->rate = 48000;
+	dai_data->port_config.hdmi_multi_ch.channel_allocation = 0;
+	dai_data->port_config.hdmi_multi_ch.reserved = 0;
+	dai_data->port_config.hdmi_multi_ch.data_type = 0;
+
+	if (!test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		rc = afe_port_start(HDMI_RX, &dai_data->port_config,
+				dai_data->rate);
+		/* negative errno on failure; IS_ERR_VALUE() is wrong on int */
+		if (rc < 0)
+			dev_err(dai->dev, "fail to open AFE port %x\n",
+					HDMI_RX);
+		else
+			set_bit(STATUS_PORT_STARTED,
+					dai_data->status_mask);
+	}
+
+	return rc;
+}
+
+static int msm_dai_q6_hdmi_dai_probe(struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_hdmi_dai_data *dai_data;
+	const struct snd_kcontrol_new *kcontrol;
+
+	dai_data = kzalloc(sizeof(struct msm_dai_q6_hdmi_dai_data),
+		GFP_KERNEL);
+	if (!dai_data) {
+		/* Bail out here: the original fell through and passed a
+		 * NULL dai_data to snd_ctl_new1(), overwriting -ENOMEM.
+		 */
+		dev_err(dai->dev, "DAI-%d: fail to allocate dai data\n",
+			HDMI_RX);
+		return -ENOMEM;
+	}
+	dev_set_drvdata(dai->dev, dai_data);
+
+	/* Register the "HDMI RX Format" (LPCM/compressed) mixer control. */
+	kcontrol = &hdmi_config_controls[0];
+
+	return snd_ctl_add(dai->component->card->snd_card,
+			   snd_ctl_new1(kcontrol, dai_data));
+}
+
+static int msm_dai_q6_hdmi_dai_remove(struct snd_soc_dai *dai)
+{
+	struct msm_dai_q6_hdmi_dai_data *dai_data;
+	int rc;
+
+	dai_data = dev_get_drvdata(dai->dev);
+
+	/* If AFE port is still up, close it */
+	if (test_bit(STATUS_PORT_STARTED, dai_data->status_mask)) {
+		rc = afe_close(HDMI_RX); /* can block */
+		/* afe_close() returns a negative errno; don't use
+		 * IS_ERR_VALUE() on a plain int. */
+		if (rc < 0)
+			dev_err(dai->dev, "fail to close AFE port\n");
+
+		clear_bit(STATUS_PORT_STARTED, dai_data->status_mask);
+	}
+	kfree(dai_data);
+
+	/* The component is unregistered by the platform driver's ->remove()
+	 * (msm_dai_q6_hdmi_dev_remove); calling it again from DAI removal,
+	 * which itself runs during component teardown, double-unregisters.
+	 */
+	return 0;
+}
+
+/* HDMI RX DAI stream callbacks. */
+static struct snd_soc_dai_ops msm_dai_q6_hdmi_ops = {
+	.prepare	= msm_dai_q6_hdmi_prepare,
+	.hw_params	= msm_dai_q6_hdmi_hw_params,
+	.shutdown	= msm_dai_q6_hdmi_shutdown,
+};
+
+/* Single playback-only HDMI RX DAI: 2..6 channels, fixed 48 kHz, S16_LE. */
+static struct snd_soc_dai_driver msm_dai_q6_hdmi_hdmi_rx_dai = {
+	.playback = {
+		.stream_name = "HDMI Playback",
+		.rates = SNDRV_PCM_RATE_48000,
+		.formats = SNDRV_PCM_FMTBIT_S16_LE,
+		.channels_min = 2,
+		.channels_max = 6,
+		.rate_max = 48000,
+		.rate_min = 48000,
+	},
+	.ops = &msm_dai_q6_hdmi_ops,
+	.probe = msm_dai_q6_hdmi_dai_probe,
+	.remove = msm_dai_q6_hdmi_dai_remove,
+	.id = HDMI_RX,
+	.name = "HDMI",
+};
+
+/* Bare ASoC component hosting the HDMI RX DAI. */
+static const struct snd_soc_component_driver msm_hdmi_q6_component = {
+	.name	= "msm-dai-q6-hdmi",
+};
+/* TODO: change to register DAIs as batch */
+static int msm_dai_q6_hdmi_dev_probe(struct platform_device *pdev)
+{
+	dev_info(&pdev->dev, "dev name %s dev-id %d\n",
+			dev_name(&pdev->dev), pdev->id);
+
+	/* Register the single HDMI RX DAI with the ASoC core. */
+	return snd_soc_register_component(&pdev->dev, &msm_hdmi_q6_component,
+			&msm_dai_q6_hdmi_hdmi_rx_dai, 1);
+}
+
+static int msm_dai_q6_hdmi_dev_remove(struct platform_device *pdev)
+{
+	/* Mirror of probe: drop the component and its DAI. */
+	snd_soc_unregister_component(&pdev->dev);
+	return 0;
+}
+
+static const struct of_device_id msm_dai_hdmi_dt_match[] = {
+	{.compatible = "qcom,msm-dai-q6-hdmi"},
+	{}
+};
+/* Needed so the module autoloads when a matching DT node is present. */
+MODULE_DEVICE_TABLE(of, msm_dai_hdmi_dt_match);
+/* Platform driver glue; matched via DT compatible or by driver name. */
+static struct platform_driver msm_dai_q6_hdmi_driver = {
+	.probe  = msm_dai_q6_hdmi_dev_probe,
+	.remove = msm_dai_q6_hdmi_dev_remove,
+	.driver = {
+		.name = "msm-dai-q6-hdmi",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_dai_hdmi_dt_match,
+	},
+};
+
+module_platform_driver(msm_dai_q6_hdmi_driver);
+/* Module information */
+MODULE_DESCRIPTION("MSM DSP HDMI DAI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/qcom/msm-pcm-q6.c b/sound/soc/qcom/msm-pcm-q6.c
new file mode 100644
index 0000000000000..68eaf60de7d16
--- /dev/null
+++ b/sound/soc/qcom/msm-pcm-q6.c
@@ -0,0 +1,851 @@
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/moduleparam.h>
+#include <linux/time.h>
+#include <linux/wait.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include <asm/dma.h>
+#include <linux/dma-mapping.h>
+
+#include <sound/q6afe.h>
+#include <sound/q6adm.h>
+#include <sound/msm-dai-q6.h>
+#include <sound/msm_hdmi_audio.h>
+#include "msm-pcm-q6.h"
+#include "msm-pcm-routing.h"
+
+/* Wait queues shared by every substream served by this platform driver */
+static struct audio_locks the_locks;
+
+/* NOTE(review): extern prototype in a .c file — should live in a header */
+extern int q6_hdmi_prepare(struct snd_pcm_substream *substream);
+struct snd_msm {
+	struct snd_card *card;
+	struct snd_pcm *pcm;
+};
+
+/* Playback uses a fixed period size; capture allows a range of sizes. */
+#define PLAYBACK_NUM_PERIODS		8
+#define PLAYBACK_PERIOD_SIZE		2048
+#define CAPTURE_MIN_NUM_PERIODS		2
+#define CAPTURE_MAX_NUM_PERIODS		16
+#define CAPTURE_MAX_PERIOD_SIZE		4096
+#define CAPTURE_MIN_PERIOD_SIZE		320
+
+/* Hardware capabilities advertised to ALSA for the capture direction */
+static struct snd_pcm_hardware msm_pcm_hardware_capture = {
+	.info =                 (SNDRV_PCM_INFO_MMAP |
+				SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				SNDRV_PCM_INFO_MMAP_VALID |
+				SNDRV_PCM_INFO_INTERLEAVED |
+				SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+	.formats =              SNDRV_PCM_FMTBIT_S16_LE,
+	.rates =                SNDRV_PCM_RATE_8000_48000,
+	.rate_min =             8000,
+	.rate_max =             48000,
+	.channels_min =         1,
+	.channels_max =         4,
+	.buffer_bytes_max =     CAPTURE_MAX_NUM_PERIODS *
+				CAPTURE_MAX_PERIOD_SIZE,
+	.period_bytes_min =	CAPTURE_MIN_PERIOD_SIZE,
+	.period_bytes_max =     CAPTURE_MAX_PERIOD_SIZE,
+	.periods_min =          CAPTURE_MIN_NUM_PERIODS,
+	.periods_max =          CAPTURE_MAX_NUM_PERIODS,
+	.fifo_size =            0,
+};
+
+/* Hardware capabilities advertised to ALSA for the playback direction */
+static struct snd_pcm_hardware msm_pcm_hardware_playback = {
+	.info =                 (SNDRV_PCM_INFO_MMAP |
+				SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				SNDRV_PCM_INFO_MMAP_VALID |
+				SNDRV_PCM_INFO_INTERLEAVED |
+				SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+	.formats =              SNDRV_PCM_FMTBIT_S16_LE,
+	.rates =                SNDRV_PCM_RATE_8000_48000 | SNDRV_PCM_RATE_KNOT,
+	.rate_min =             8000,
+	.rate_max =             48000,
+	.channels_min =         1,
+	.channels_max =         2,
+	.buffer_bytes_max =     PLAYBACK_NUM_PERIODS * PLAYBACK_PERIOD_SIZE,
+	.period_bytes_min =	PLAYBACK_PERIOD_SIZE,
+	.period_bytes_max =     PLAYBACK_PERIOD_SIZE,
+	.periods_min =          PLAYBACK_NUM_PERIODS,
+	.periods_max =          PLAYBACK_NUM_PERIODS,
+	.fifo_size =            0,
+};
+
+/* Conventional and unconventional sample rate supported */
+static unsigned int supported_sample_rates[] = {
+	8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
+};
+
+/*
+ * Per-DSP-buffer bookkeeping filled in from READ_DONE events:
+ * [token][0] = valid data size, [token][1] = data offset.
+ * NOTE(review): file-scope, so it is shared by all capture substreams —
+ * confirm only one capture stream can be active at a time.
+ */
+static uint32_t in_frame_info[CAPTURE_MAX_NUM_PERIODS][2];
+
+/* Rate constraint list built from supported_sample_rates[] above */
+static struct snd_pcm_hw_constraint_list constraints_sample_rates = {
+	.count = ARRAY_SIZE(supported_sample_rates),
+	.list = supported_sample_rates,
+	.mask = 0,
+};
+
+/*
+ * Routing-event callback registered via msm_pcm_routing_reg_phy_stream_v2().
+ * On a buffer-reconfiguration event the ASM session is paused, flushed and
+ * restarted so the DSP picks up the new buffer layout.
+ */
+static void msm_pcm_route_event_handler(enum msm_pcm_routing_event event,
+					void *priv_data)
+{
+	struct msm_audio *prtd = priv_data;
+
+	BUG_ON(!prtd);
+
+	pr_debug("%s: event %x\n", __func__, event);
+
+	switch (event) {
+	case MSM_PCM_RT_EVT_BUF_RECFG:
+		q6asm_cmd(prtd->audio_client, CMD_PAUSE);
+		q6asm_cmd(prtd->audio_client, CMD_FLUSH);
+		q6asm_run(prtd->audio_client, 0, 0, 0);
+		/* explicit break: the original fell through to default,
+		 * which was harmless but fragile if cases are added */
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * APR event callback for the ASM session (runs in interrupt/driver
+ * context of the audio client).  Advances the PCM pointer, wakes the
+ * copy() waiters and, in mmap mode, queues the next DSP read/write
+ * without taking the client lock (*_nolock variants).
+ */
+static void event_handler(uint32_t opcode,
+		uint32_t token, uint32_t *payload, void *priv)
+{
+	struct msm_audio *prtd = priv;
+	struct snd_pcm_substream *substream = prtd->substream;
+	uint32_t *ptrmem = (uint32_t *)payload;
+	int i = 0;
+	uint32_t idx = 0;
+	uint32_t size = 0;
+
+	pr_debug("%s\n", __func__);
+	switch (opcode) {
+	case ASM_DATA_EVENT_WRITE_DONE: {
+		/* DSP consumed one playback period */
+		pr_debug("ASM_DATA_EVENT_WRITE_DONE\n");
+		pr_debug("Buffer Consumed = 0x%08x\n", *ptrmem);
+		prtd->pcm_irq_pos += prtd->pcm_count;
+		if (atomic_read(&prtd->start))
+			snd_pcm_period_elapsed(substream);
+		atomic_inc(&prtd->out_count);
+		wake_up(&the_locks.write_wait);
+		if (!atomic_read(&prtd->start))
+			break;
+		if (!prtd->mmap_flag)
+			break;
+		/* mmap mode: immediately hand the next buffer to the DSP */
+		if (q6asm_is_cpu_buf_avail_nolock(IN,
+				prtd->audio_client,
+				&size, &idx)) {
+			pr_debug("%s:writing %d bytes of buffer to dsp 2\n",
+					__func__, prtd->pcm_count);
+			q6asm_write_nolock(prtd->audio_client,
+				prtd->pcm_count, 0, 0, NO_TIMESTAMP);
+		}
+		break;
+	}
+	case ASM_DATA_CMDRSP_EOS:
+		/* End-of-stream ack: unblock msm_pcm_playback_close() */
+		pr_debug("ASM_DATA_CMDRSP_EOS\n");
+		prtd->cmd_ack = 1;
+		wake_up(&the_locks.eos_wait);
+		break;
+	case ASM_DATA_EVENT_READ_DONE: {
+		/* DSP filled one capture buffer; token indexes the buffer */
+		pr_debug("ASM_DATA_EVENT_READ_DONE\n");
+		pr_debug("token = 0x%08x\n", token);
+		for (i = 0; i < 8; i++, ++ptrmem)
+			pr_debug("cmd[%d]=0x%08x\n", i, *ptrmem);
+		/* payload[2]/[3]: valid data size and offset per q6asm ABI */
+		in_frame_info[token][0] = payload[2];
+		in_frame_info[token][1] = payload[3];
+
+		/* assume data size = 0 during flushing */
+		if (in_frame_info[token][0]) {
+			prtd->pcm_irq_pos += in_frame_info[token][0];
+			pr_debug("pcm_irq_pos=%d\n", prtd->pcm_irq_pos);
+			if (atomic_read(&prtd->start))
+				snd_pcm_period_elapsed(substream);
+			if (atomic_read(&prtd->in_count) <= prtd->periods)
+				atomic_inc(&prtd->in_count);
+			wake_up(&the_locks.read_wait);
+			if (prtd->mmap_flag &&
+				q6asm_is_cpu_buf_avail_nolock(OUT,
+					prtd->audio_client,
+					&size, &idx))
+				q6asm_read_nolock(prtd->audio_client);
+		} else {
+			/* zero-size frame: buffer reclaimed by a flush */
+			pr_debug("%s: reclaim flushed buf in_count %x\n",
+				__func__, atomic_read(&prtd->in_count));
+			atomic_inc(&prtd->in_count);
+			if (atomic_read(&prtd->in_count) == prtd->periods) {
+				pr_info("%s: reclaimed all bufs\n", __func__);
+				if (atomic_read(&prtd->start))
+					snd_pcm_period_elapsed(substream);
+				wake_up(&the_locks.read_wait);
+			}
+		}
+
+		break;
+	}
+	case APR_BASIC_RSP_RESULT: {
+		switch (payload[0]) {
+		case ASM_SESSION_CMD_RUN:
+			/* Session started: prime the DSP with any pending
+			 * playback data, then mark the stream running. */
+			if (substream->stream
+				!= SNDRV_PCM_STREAM_PLAYBACK) {
+				atomic_set(&prtd->start, 1);
+				break;
+			}
+			if (prtd->mmap_flag) {
+				pr_debug("%s:writing %d bytes"
+					" of buffer to dsp\n",
+					__func__,
+					prtd->pcm_count);
+				q6asm_write_nolock(prtd->audio_client,
+					prtd->pcm_count,
+					0, 0, NO_TIMESTAMP);
+			} else {
+				while (atomic_read(&prtd->out_needed)) {
+					pr_debug("%s:writing %d bytes"
+						" of buffer to dsp\n",
+						__func__,
+						prtd->pcm_count);
+					q6asm_write_nolock(prtd->audio_client,
+						prtd->pcm_count,
+						0, 0, NO_TIMESTAMP);
+					atomic_dec(&prtd->out_needed);
+					wake_up(&the_locks.write_wait);
+				};
+			}
+			atomic_set(&prtd->start, 1);
+			break;
+		default:
+			break;
+		}
+	}
+	break;
+	default:
+		pr_debug("Not Supported Event opcode[0x%x]\n", opcode);
+		break;
+	}
+}
+
+/*
+ * ALSA prepare for playback: cache buffer geometry and, on the first
+ * prepare only (prtd->enabled guards re-prepare), send the PCM format
+ * block to the DSP and reset the out-buffer counter.  Always returns 0;
+ * a format-block failure is only logged.
+ */
+static int msm_pcm_playback_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+	int ret;
+
+	pr_debug("%s\n", __func__);
+	prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
+	prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
+	prtd->pcm_irq_pos = 0;
+	/* rate and channels are sent to audio driver */
+	prtd->samp_rate = runtime->rate;
+	prtd->channel_mode = runtime->channels;
+	if (prtd->enabled)
+		return 0;
+
+	ret = q6asm_media_format_block_pcm(prtd->audio_client, runtime->rate,
+				runtime->channels);
+	if (ret < 0)
+		pr_info("%s: CMD Format block failed\n", __func__);
+
+	/* all periods start out available for userspace to fill */
+	atomic_set(&prtd->out_count, runtime->periods);
+
+	prtd->enabled = 1;
+	prtd->cmd_ack = 0;
+
+	return 0;
+}
+
+/*
+ * ALSA prepare for capture: cache buffer geometry and, on first prepare
+ * only, configure the encoder block (multi-channel variant for >2
+ * channels) and pre-queue one DSP read per period so data starts
+ * flowing as soon as the session runs.
+ */
+static int msm_pcm_capture_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+	int ret = 0;
+	int i = 0;
+	pr_debug("%s\n", __func__);
+	prtd->pcm_size = snd_pcm_lib_buffer_bytes(substream);
+	prtd->pcm_count = snd_pcm_lib_period_bytes(substream);
+	prtd->pcm_irq_pos = 0;
+
+	/* rate and channels are sent to audio driver */
+	prtd->samp_rate = runtime->rate;
+	prtd->channel_mode = runtime->channels;
+
+	if (prtd->enabled)
+		return 0;
+
+	pr_debug("Samp_rate = %d\n", prtd->samp_rate);
+	pr_debug("Channel = %d\n", prtd->channel_mode);
+	if (prtd->channel_mode > 2) {
+		ret = q6asm_enc_cfg_blk_multi_ch_pcm(prtd->audio_client,
+			prtd->samp_rate, prtd->channel_mode);
+	} else {
+		ret = q6asm_enc_cfg_blk_pcm(prtd->audio_client,
+			prtd->samp_rate, prtd->channel_mode);
+	}
+
+	if (ret < 0)
+		pr_debug("%s: cmd cfg pcm was block failed", __func__);
+
+	/* hand every period's buffer to the DSP up front */
+	for (i = 0; i < runtime->periods; i++)
+		q6asm_read(prtd->audio_client);
+	prtd->periods = runtime->periods;
+
+	prtd->enabled = 1;
+
+	return ret;
+}
+
+/*
+ * ALSA trigger (atomic context): forwards start/stop/pause to the ASM
+ * session using the *_nowait command variants.  STOP on playback also
+ * issues CMD_EOS so close() can wait for the DSP to drain.
+ */
+static int msm_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+	int ret = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+		/* prtd->start is set later from the RUN ack in event_handler */
+		pr_debug("%s: Trigger start\n", __func__);
+		q6asm_run_nowait(prtd->audio_client, 0, 0, 0);
+		break;
+	case SNDRV_PCM_TRIGGER_STOP:
+		pr_debug("SNDRV_PCM_TRIGGER_STOP\n");
+		atomic_set(&prtd->start, 0);
+		if (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)
+			break;
+		prtd->cmd_ack = 0;
+		q6asm_cmd_nowait(prtd->audio_client, CMD_EOS);
+		break;
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+		pr_debug("SNDRV_PCM_TRIGGER_PAUSE\n");
+		q6asm_cmd_nowait(prtd->audio_client, CMD_PAUSE);
+		atomic_set(&prtd->start, 0);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * ALSA open: allocate the per-stream msm_audio state and an ASM audio
+ * client, open the DSP write session for playback (capture is opened
+ * later in hw_params) and install the hardware/rate constraints.
+ */
+static int msm_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+	struct msm_audio *prtd;
+	int ret = 0;
+
+	pr_debug("%s\n", __func__);
+	prtd = kzalloc(sizeof(struct msm_audio), GFP_KERNEL);
+	if (prtd == NULL) {
+		pr_err("Failed to allocate memory for msm_audio\n");
+		return -ENOMEM;
+	}
+	prtd->substream = substream;
+	/* event_handler() will be invoked for all APR events of this client */
+	prtd->audio_client = q6asm_audio_client_alloc(
+				(app_cb)event_handler, prtd);
+	if (!prtd->audio_client) {
+		pr_info("%s: Could not allocate memory\n", __func__);
+		kfree(prtd);
+		return -ENOMEM;
+	}
+	prtd->audio_client->perf_mode = false;
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		runtime->hw = msm_pcm_hardware_playback;
+		snd_soc_set_runtime_hwparams(substream, &msm_pcm_hardware_playback);
+		/* NOTE(review): these two constraints are applied again for
+		 * both directions further below — likely redundant here */
+		if(snd_pcm_hw_constraint_integer(runtime,
+			SNDRV_PCM_HW_PARAM_PERIODS) < 0)
+			pr_err("%s Failed to set hw periods\n", __func__);
+
+		if(snd_pcm_hw_constraint_integer(runtime,
+			SNDRV_PCM_HW_PARAM_PERIOD_SIZE) < 0)
+			pr_err("%s Failed to set hw period size\n", __func__);
+
+		ret = q6asm_open_write(prtd->audio_client, FORMAT_LINEAR_PCM);
+		if (ret < 0) {
+			pr_err("%s: pcm out open failed\n", __func__);
+			q6asm_audio_client_free(prtd->audio_client);
+			kfree(prtd);
+			return -ENOMEM;
+		}
+
+		pr_debug("%s: session ID %d\n", __func__,
+			prtd->audio_client->session);
+		prtd->session_id = prtd->audio_client->session;
+		/* connect this FE session to the routing matrix */
+		msm_pcm_routing_reg_phy_stream(soc_prtd->dai_link->be_id,
+			prtd->audio_client->perf_mode,
+			prtd->session_id, substream->stream);
+		prtd->cmd_ack = 1;
+
+	}
+	/* Capture path */
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		runtime->hw = msm_pcm_hardware_capture;
+	}
+
+	ret = snd_pcm_hw_constraint_list(runtime, 0,
+				SNDRV_PCM_HW_PARAM_RATE,
+				&constraints_sample_rates);
+	if (ret < 0)
+		pr_err("snd_pcm_hw_constraint_list failed\n");
+	/* Ensure that buffer size is a multiple of period size */
+	ret = snd_pcm_hw_constraint_integer(runtime,
+			    SNDRV_PCM_HW_PARAM_PERIODS);
+	if (ret < 0)
+		pr_err("snd_pcm_hw_constraint_integer failed\n");
+
+	/* period size must be a multiple of 2048 bytes (DSP buffer unit) */
+	if (snd_pcm_hw_constraint_step(substream->runtime, 0,
+		SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
+		2048) <0)
+		pr_err("snd_pcm_hw_constraint_integer period bytes failed\n");
+
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		ret = snd_pcm_hw_constraint_minmax(runtime,
+			SNDRV_PCM_HW_PARAM_BUFFER_BYTES,
+			CAPTURE_MIN_NUM_PERIODS * CAPTURE_MIN_PERIOD_SIZE,
+			CAPTURE_MAX_NUM_PERIODS * CAPTURE_MAX_PERIOD_SIZE);
+		if (ret < 0) {
+			pr_err("constraint for buffer bytes min max ret = %d\n",
+				ret);
+		}
+	}
+
+	prtd->dsp_cnt = 0;
+	runtime->private_data = prtd;
+
+	return 0;
+}
+
+/*
+ * Copy one chunk of playback data from user space into the next free
+ * DSP buffer and queue an async write (or count it as "needed" if the
+ * session has not started yet).  Returns 0 on success, negative errno
+ * on failure.
+ *
+ * Fix: wait_event_timeout() never returns a negative value — it returns
+ * 0 on timeout and the remaining jiffies otherwise — so the original
+ * "ret < 0" check was dead code and a timeout was silently ignored.
+ * Also reset ret to 0 after a successful wait so the function does not
+ * return the leftover-jiffies count on paths that never reassign it.
+ */
+static int msm_pcm_playback_copy(struct snd_pcm_substream *substream, int a,
+	snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames)
+{
+	int ret = 0;
+	int fbytes = 0;
+	int xfer = 0;
+	char *bufptr = NULL;
+	void *data = NULL;
+	uint32_t idx = 0;
+	uint32_t size = 0;
+
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+
+	fbytes = frames_to_bytes(runtime, frames);
+	pr_debug("%s: prtd->out_count = %d\n",
+			__func__, atomic_read(&prtd->out_count));
+	ret = wait_event_timeout(the_locks.write_wait,
+			(atomic_read(&prtd->out_count)), 5 * HZ);
+	if (!ret) {
+		pr_err("%s: wait_event_timeout failed\n", __func__);
+		ret = -ETIMEDOUT;
+		goto fail;
+	}
+	ret = 0;
+
+	if (!atomic_read(&prtd->out_count)) {
+		pr_err("%s: pcm stopped out_count 0\n", __func__);
+		return 0;
+	}
+
+	data = q6asm_is_cpu_buf_avail(IN, prtd->audio_client, &size, &idx);
+	bufptr = data;
+	if (bufptr) {
+		xfer = fbytes;
+		pr_debug("%s:fbytes =%d: xfer=%d size=%d bufptr=%p buf=%p\n",
+			__func__, fbytes, xfer, size, bufptr, buf);
+		if (copy_from_user(bufptr, buf, xfer)) {
+			ret = -EFAULT;
+			goto fail;
+		}
+		buf += xfer;
+		fbytes -= xfer;
+		pr_debug("%s:fbytes = %d: xfer=%d\n", __func__, fbytes, xfer);
+		if (atomic_read(&prtd->start)) {
+			pr_debug("%s:writing %d bytes of buffer to dsp\n",
+					__func__, xfer);
+			ret = q6asm_write(prtd->audio_client, xfer,
+						0, 0, NO_TIMESTAMP);
+			if (ret < 0) {
+				ret = -EFAULT;
+				goto fail;
+			}
+		} else
+			/* session not running yet: event_handler flushes
+			 * these on the RUN ack */
+			atomic_inc(&prtd->out_needed);
+		atomic_dec(&prtd->out_count);
+	}
+fail:
+	return ret;
+}
+
+/*
+ * ALSA close for playback: wait for the EOS ack issued at TRIGGER_STOP,
+ * close the ASM session, free its buffers, detach from the routing
+ * matrix and release the per-stream state.
+ *
+ * Fix: wait_event_timeout() returns 0 on timeout and never a negative
+ * value, so the original "ret < 0" check could never fire; test !ret
+ * instead so an un-acked CMD_EOS is actually reported.
+ */
+static int msm_pcm_playback_close(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+	struct msm_audio *prtd = runtime->private_data;
+	int dir = 0;
+	int ret = 0;
+
+	pr_debug("%s\n", __func__);
+
+	dir = IN;
+	ret = wait_event_timeout(the_locks.eos_wait,
+				prtd->cmd_ack, 5 * HZ);
+	if (!ret)
+		pr_err("%s: CMD_EOS failed\n", __func__);
+	q6asm_cmd(prtd->audio_client, CMD_CLOSE);
+	q6asm_audio_client_buf_free_contiguous(dir,
+				prtd->audio_client);
+
+	msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id,
+	SNDRV_PCM_STREAM_PLAYBACK);
+	q6asm_audio_client_free(prtd->audio_client);
+	kfree(prtd);
+	return 0;
+}
+
+/*
+ * Copy one captured buffer from the DSP to user space, then re-queue the
+ * buffer with q6asm_read().  Returns 0 on success, negative errno on
+ * failure.
+ *
+ * Fixes: (1) wait_event_timeout() never returns <0 — it returns 0 on
+ * timeout — so the dead "ret < 0" check is replaced with !ret and the
+ * timeout now reported as -ETIMEDOUT; (2) idx/size were needlessly
+ * "static": both are plain out-parameters of q6asm_is_cpu_buf_avail()
+ * written before every use, and file-lifetime storage would be shared
+ * between concurrent streams.
+ */
+static int msm_pcm_capture_copy(struct snd_pcm_substream *substream,
+		 int channel, snd_pcm_uframes_t hwoff, void __user *buf,
+						 snd_pcm_uframes_t frames)
+{
+	int ret = 0;
+	int fbytes = 0;
+	int xfer;
+	char *bufptr;
+	void *data = NULL;
+	uint32_t idx = 0;
+	uint32_t size = 0;
+	uint32_t offset = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = substream->runtime->private_data;
+
+
+	pr_debug("%s\n", __func__);
+	fbytes = frames_to_bytes(runtime, frames);
+
+	pr_debug("appl_ptr %d\n", (int)runtime->control->appl_ptr);
+	pr_debug("hw_ptr %d\n", (int)runtime->status->hw_ptr);
+	pr_debug("avail_min %d\n", (int)runtime->control->avail_min);
+
+	ret = wait_event_timeout(the_locks.read_wait,
+			(atomic_read(&prtd->in_count)), 5 * HZ);
+	if (!ret) {
+		pr_debug("%s: wait_event_timeout failed\n", __func__);
+		ret = -ETIMEDOUT;
+		goto fail;
+	}
+	ret = 0;
+	if (!atomic_read(&prtd->in_count)) {
+		pr_debug("%s: pcm stopped in_count 0\n", __func__);
+		return 0;
+	}
+	pr_debug("Checking if valid buffer is available...%08x\n",
+						(unsigned int) data);
+	data = q6asm_is_cpu_buf_avail(OUT, prtd->audio_client, &size, &idx);
+	bufptr = data;
+	pr_debug("Size = %d\n", size);
+	pr_debug("fbytes = %d\n", fbytes);
+	pr_debug("idx = %d\n", idx);
+	if (bufptr) {
+		xfer = fbytes;
+		if (xfer > size)
+			xfer = size;
+		/* skip the DSP-reported data offset within the buffer */
+		offset = in_frame_info[idx][1];
+		pr_debug("Offset value = %d\n", offset);
+		if (copy_to_user(buf, bufptr+offset, xfer)) {
+			pr_err("Failed to copy buf to user\n");
+			ret = -EFAULT;
+			goto fail;
+		}
+		fbytes -= xfer;
+		size -= xfer;
+		in_frame_info[idx][1] += xfer;
+		pr_debug("%s:fbytes = %d: size=%d: xfer=%d\n",
+					__func__, fbytes, size, xfer);
+		pr_debug(" Sending next buffer to dsp\n");
+		memset(&in_frame_info[idx], 0,
+			sizeof(uint32_t) * 2);
+		atomic_dec(&prtd->in_count);
+		ret = q6asm_read(prtd->audio_client);
+		if (ret < 0) {
+			pr_err("q6asm read failed\n");
+			ret = -EFAULT;
+			goto fail;
+		}
+	} else
+		pr_err("No valid buffer\n");
+
+	pr_debug("Returning from capture_copy... %d\n", ret);
+fail:
+	return ret;
+}
+
+/*
+ * ALSA close for capture: shut down the ASM session (if one was opened
+ * in hw_params), free its contiguous buffers, detach from the routing
+ * matrix and release the per-stream state.
+ */
+static int msm_pcm_capture_close(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+	struct msm_audio *prtd = runtime->private_data;
+	struct audio_client *ac = prtd->audio_client;
+
+	pr_debug("%s\n", __func__);
+
+	/* hw_params may have failed before the client was opened */
+	if (ac) {
+		q6asm_cmd(ac, CMD_CLOSE);
+		q6asm_audio_client_buf_free_contiguous(OUT, ac);
+		q6asm_audio_client_free(ac);
+	}
+
+	msm_pcm_routing_dereg_phy_stream(soc_prtd->dai_link->be_id,
+					SNDRV_PCM_STREAM_CAPTURE);
+	kfree(prtd);
+
+	return 0;
+}
+
+/* Dispatch the ALSA copy callback to the direction-specific handler. */
+static int msm_pcm_copy(struct snd_pcm_substream *substream, int a,
+	 snd_pcm_uframes_t hwoff, void __user *buf, snd_pcm_uframes_t frames)
+{
+	switch (substream->stream) {
+	case SNDRV_PCM_STREAM_PLAYBACK:
+		return msm_pcm_playback_copy(substream, a, hwoff, buf, frames);
+	case SNDRV_PCM_STREAM_CAPTURE:
+		return msm_pcm_capture_copy(substream, a, hwoff, buf, frames);
+	default:
+		return 0;
+	}
+}
+
+/* Dispatch the ALSA close callback to the direction-specific handler. */
+static int msm_pcm_close(struct snd_pcm_substream *substream)
+{
+	switch (substream->stream) {
+	case SNDRV_PCM_STREAM_PLAYBACK:
+		return msm_pcm_playback_close(substream);
+	case SNDRV_PCM_STREAM_CAPTURE:
+		return msm_pcm_capture_close(substream);
+	default:
+		return 0;
+	}
+}
+
+/*
+ * Dispatch the ALSA prepare callback to the direction-specific handler.
+ * (Dead commented-out q6_hdmi_prepare() call removed.)
+ */
+static int msm_pcm_prepare(struct snd_pcm_substream *substream)
+{
+	int ret = 0;
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		ret = msm_pcm_playback_prepare(substream);
+	else if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+		ret = msm_pcm_capture_prepare(substream);
+	return ret;
+}
+
+/*
+ * ALSA pointer callback: report the DMA position advanced by
+ * event_handler() on each WRITE_DONE/READ_DONE, wrapped at buffer size.
+ */
+static snd_pcm_uframes_t msm_pcm_pointer(struct snd_pcm_substream *substream)
+{
+
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+
+	if (prtd->pcm_irq_pos >= prtd->pcm_size)
+		prtd->pcm_irq_pos = 0;
+
+	pr_debug("pcm_irq_pos = %d\n", prtd->pcm_irq_pos);
+	return bytes_to_frames(runtime, (prtd->pcm_irq_pos));
+}
+
+/*
+ * ALSA mmap callback: map the contiguous DSP buffer into user space as
+ * non-cached memory and switch the stream into mmap mode (enables the
+ * *_nolock auto-requeue paths in event_handler()).
+ * NOTE(review): remap_pfn_range() is passed runtime->dma_bytes rather
+ * than the vma length (vm_end - vm_start) — confirm userspace always
+ * maps exactly dma_bytes.
+ */
+static int msm_pcm_mmap(struct snd_pcm_substream *substream,
+				struct vm_area_struct *vma)
+{
+	int result = 0;
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+
+	pr_debug("%s\n", __func__);
+	prtd->mmap_flag = 1;
+
+	if (runtime->dma_addr && runtime->dma_bytes) {
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+		result = remap_pfn_range(vma, vma->vm_start,
+				runtime->dma_addr >> PAGE_SHIFT,
+				runtime->dma_bytes,
+				vma->vm_page_prot);
+	} else {
+		pr_err("Physical address or size of buf is NULL");
+		return -EINVAL;
+	}
+
+	return result;
+}
+
+/*
+ * ALSA hw_params: open the capture ASM session (playback was opened in
+ * msm_pcm_open()), allocate the contiguous DSP buffers for the chosen
+ * direction and expose them to ALSA as the substream's DMA buffer.
+ */
+static int msm_pcm_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct msm_audio *prtd = runtime->private_data;
+	struct snd_dma_buffer *dma_buf = &substream->dma_buffer;
+	struct snd_soc_pcm_runtime *soc_prtd = substream->private_data;
+	struct audio_buffer *buf;
+	int dir, ret;
+	int format = FORMAT_LINEAR_PCM;
+	struct msm_pcm_routing_evt event;
+
+	/* IN = host->DSP (playback), OUT = DSP->host (capture) */
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+		dir = IN;
+	else
+		dir = OUT;
+
+	/*capture path*/
+	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+		if (params_channels(params) > 2)
+			format = FORMAT_MULTI_CHANNEL_LINEAR_PCM;
+		pr_debug("%s format = :0x%x\n", __func__, format);
+
+		ret = q6asm_open_read(prtd->audio_client, format);
+		if (ret < 0) {
+			pr_err("%s: q6asm_open_read failed\n", __func__);
+			q6asm_audio_client_free(prtd->audio_client);
+			prtd->audio_client = NULL;
+			return -ENOMEM;
+		}
+
+		pr_debug("%s: session ID %d\n", __func__,
+			prtd->audio_client->session);
+		prtd->session_id = prtd->audio_client->session;
+		/* register with routing, with a callback for buffer reconfig */
+		event.event_func = msm_pcm_route_event_handler;
+		event.priv_data = (void *) prtd;
+		msm_pcm_routing_reg_phy_stream_v2(soc_prtd->dai_link->be_id,
+				prtd->audio_client->perf_mode,
+				prtd->session_id,
+				substream->stream, event);
+	}
+
+	/* capture sizes come from hw_params; playback uses the fixed
+	 * period geometry from msm_pcm_hardware_playback */
+	if (dir == OUT) {
+		ret = q6asm_audio_client_buf_alloc_contiguous(dir,
+			prtd->audio_client,
+			(params_buffer_bytes(params) / params_periods(params)),
+			 params_periods(params));
+		pr_debug("buff bytes = %d, period size = %d,\
+			period count = %d\n", params_buffer_bytes(params),
+			params_periods(params),
+			params_buffer_bytes(params) / params_periods(params));
+	} else {
+		ret = q6asm_audio_client_buf_alloc_contiguous(dir,
+			prtd->audio_client,
+			runtime->hw.period_bytes_min,
+			runtime->hw.periods_max);
+		pr_debug("buff bytes = %d, period size = %d,\
+			period count = %d\n", params_buffer_bytes(params),
+			params_periods(params),
+			params_buffer_bytes(params) / params_periods(params));
+	}
+
+	if (ret < 0) {
+		pr_err("Audio Start: Buffer Allocation failed \
+					rc = %d\n", ret);
+		return -ENOMEM;
+	}
+	buf = prtd->audio_client->port[dir].buf;
+	if (buf == NULL || buf[0].data == NULL)
+		return -ENOMEM;
+
+	/* describe the DSP buffer to ALSA as this substream's DMA area */
+	pr_debug("%s:buf = %p\n", __func__, buf);
+	dma_buf->dev.type = SNDRV_DMA_TYPE_DEV;
+	dma_buf->dev.dev = substream->pcm->card->dev;
+	dma_buf->private_data = NULL;
+	dma_buf->area = buf[0].data;
+	dma_buf->addr =  buf[0].phys;
+	dma_buf->bytes = runtime->hw.buffer_bytes_max;
+	if (dir == IN)
+		dma_buf->bytes = runtime->hw.buffer_bytes_max;
+	else
+		dma_buf->bytes = params_buffer_bytes(params);
+
+	if (!dma_buf->area)
+		return -ENOMEM;
+
+	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+	return 0;
+}
+
+/* PCM operations exposed to the ALSA core */
+static struct snd_pcm_ops msm_pcm_ops = {
+	.open           = msm_pcm_open,
+	.copy		= msm_pcm_copy,
+	.hw_params	= msm_pcm_hw_params,
+	.close          = msm_pcm_close,
+	.ioctl          = snd_pcm_lib_ioctl,
+	.prepare        = msm_pcm_prepare,
+	.trigger        = msm_pcm_trigger,
+	.pointer        = msm_pcm_pointer,
+	.mmap		= msm_pcm_mmap,
+};
+
+/* Per-PCM constructor: ensure the card device has a 32-bit DMA mask. */
+static int msm_asoc_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	struct snd_card *card = rtd->card->snd_card;
+
+	if (!card->dev->coherent_dma_mask)
+		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);
+	return 0;
+}
+
+/* ASoC platform driver tying the ops and pcm constructor together */
+static struct snd_soc_platform_driver msm_soc_platform = {
+	.ops		= &msm_pcm_ops,
+	.pcm_new	= msm_asoc_pcm_new,
+};
+
+/*
+ * Platform probe: initialise the stream wait queues before any
+ * substream can open, then register the ASoC platform driver.
+ */
+static int msm_pcm_probe(struct platform_device *pdev)
+{
+	pr_info("%s: dev name %s\n", __func__, dev_name(&pdev->dev));
+
+	init_waitqueue_head(&the_locks.enable_wait);
+	init_waitqueue_head(&the_locks.eos_wait);
+	init_waitqueue_head(&the_locks.write_wait);
+	init_waitqueue_head(&the_locks.read_wait);
+
+	return snd_soc_register_platform(&pdev->dev, &msm_soc_platform);
+}
+
+/* Platform remove: undo the registration done in probe. */
+static int msm_pcm_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+/*
+ * DT match table.
+ * NOTE(review): no MODULE_DEVICE_TABLE(of, ...) — module autoloading on
+ * this compatible will not work; verify whether that is intended.
+ */
+static const struct of_device_id msm_pcm_dt_match[] = {
+	{.compatible = "qcom,msm-pcm-dsp"},
+	{}
+};
+
+static struct platform_driver msm_pcm_driver = {
+	.driver = {
+		.name = "msm-pcm-dsp",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_pcm_dt_match,
+	},
+	.probe = msm_pcm_probe,
+	.remove = msm_pcm_remove,
+};
+
+/* Module init/exit: hand-rolled pair.
+ * NOTE(review): equivalent to module_platform_driver(msm_pcm_driver) —
+ * consider the macro for brevity, as the sibling HDMI DAI driver does.
+ */
+static int __init msm_soc_platform_init(void)
+{
+
+	return platform_driver_register(&msm_pcm_driver);
+}
+module_init(msm_soc_platform_init);
+
+static void __exit msm_soc_platform_exit(void)
+{
+	platform_driver_unregister(&msm_pcm_driver);
+}
+module_exit(msm_soc_platform_exit);
+
+MODULE_DESCRIPTION("PCM module platform driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("platform:msm-pcm-dsp");
diff --git a/sound/soc/qcom/msm-pcm-q6.h b/sound/soc/qcom/msm-pcm-q6.h
new file mode 100644
index 0000000000000..0522f549e8185
--- /dev/null
+++ b/sound/soc/qcom/msm-pcm-q6.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ * Copyright (C) 2008 HTC Corporation
+ * Copyright (c) 2008-2009,2011 The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can find it at http://www.fsf.org.
+ */
+
+#ifndef _MSM_PCM_H
+#define _MSM_PCM_H
+#include <sound/apr_audio.h>
+#include <sound/q6asm.h>
+
+
+/* Support unconventional sample rates 12000, 24000 as well */
+#define USE_RATE \
+ (SNDRV_PCM_RATE_8000_48000 | SNDRV_PCM_RATE_KNOT)
+
+extern int copy_count;
+
+/* Generic DSP buffer descriptor (playback direction) */
+struct buffer {
+	void *data;
+	unsigned size;
+	unsigned used;
+	unsigned addr;
+};
+
+/* Generic DSP buffer descriptor (capture direction) */
+struct buffer_rec {
+	void *data;
+	unsigned int size;
+	unsigned int read;
+	unsigned int addr;
+};
+
+/* Wait queues / lock shared by all substreams of the platform driver */
+struct audio_locks {
+	spinlock_t event_lock;
+	wait_queue_head_t read_wait;
+	wait_queue_head_t write_wait;
+	wait_queue_head_t eos_wait;
+	wait_queue_head_t enable_wait;
+	wait_queue_head_t flush_wait;
+};
+
+/*
+ * Per-substream runtime state, stored in runtime->private_data.
+ * Counters updated from the APR event callback are atomics; plain ints
+ * are only touched from process context.
+ */
+struct msm_audio {
+	struct snd_pcm_substream *substream;
+	unsigned int pcm_size;          /* total ALSA buffer bytes */
+	unsigned int pcm_count;         /* bytes per period */
+	unsigned int pcm_irq_pos;       /* IRQ position */
+	uint16_t source; /* Encoding source bit mask */
+
+	struct audio_client *audio_client;  /* Q6 ASM session handle */
+
+	uint16_t session_id;
+
+	uint32_t samp_rate;
+	uint32_t channel_mode;
+	uint32_t dsp_cnt;
+
+	int abort; /* set when error, like sample rate mismatch */
+
+	int enabled;                    /* guards one-time prepare work */
+	int close_ack;
+	int cmd_ack;                    /* EOS acknowledged by the DSP */
+	atomic_t start;                 /* session running (RUN acked) */
+	atomic_t stop;
+	atomic_t out_count;             /* playback buffers free for user */
+	atomic_t in_count;              /* capture buffers ready for user */
+	atomic_t out_needed;            /* writes deferred until RUN ack */
+	atomic_t eos;
+	int out_head;
+	int periods;
+	int mmap_flag;                  /* stream is memory-mapped */
+	atomic_t pending_buffer;
+	int cmd_interrupt;
+	bool meta_data_mode;
+};
+
+/* Per-frame metadata header used in meta-data mode streams */
+struct output_meta_data_st {
+	uint32_t meta_data_length;
+	uint32_t frame_size;
+	uint32_t timestamp_lsw;
+	uint32_t timestamp_msw;
+	uint32_t reserved[12];
+};
+
+#endif /*_MSM_PCM_H*/
diff --git a/sound/soc/qcom/msm-pcm-routing.c b/sound/soc/qcom/msm-pcm-routing.c
new file mode 100644
index 0000000000000..27785805e7f86
--- /dev/null
+++ b/sound/soc/qcom/msm-pcm-routing.c
@@ -0,0 +1,894 @@
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+#include <sound/core.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/initval.h>
+#include <sound/control.h>
+#include <sound/q6adm.h>
+#include <sound/q6asm.h>
+#include <sound/q6afe.h>
+#include <sound/tlv.h>
+#include "msm-pcm-routing.h"
+
+/* Per back-end DAI routing state; one entry per MSM_BACKEND_DAI_* id. */
+struct msm_pcm_routing_bdai_data {
+	u16 port_id; /* AFE port ID */
+	u8 active; /* track if this backend is enabled */
+	unsigned long fe_sessions; /* Front-end sessions (bit per FE id) */
+	unsigned long port_sessions; /* track Tx BE ports -> Rx BE */
+	unsigned int sample_rate;
+	unsigned int channel;
+	bool perf_mode; /* low-latency COPP requested */
+};
+
+/* Per front-end DAI state, kept separately for RX and TX sessions. */
+struct msm_pcm_routing_fdai_data {
+	u16 be_srate; /* track prior backend sample rate for flushing purpose */
+	int strm_id; /* ASM stream ID; INVALID_SESSION when unmapped */
+	struct msm_pcm_routing_evt event_info; /* owner's recfg/devswitch callback */
+};
+
+#define INVALID_SESSION -1
+#define SESSION_TYPE_RX 0
+#define SESSION_TYPE_TX 1
+
+static DEFINE_MUTEX(routing_lock);
+
+static int srs_alsa_ctrl_ever_called;
+
+#define INT_RX_VOL_MAX_STEPS 0x2000
+#define INT_RX_VOL_GAIN 0x2000
+#define INT_RX_LR_VOL_MAX_STEPS 0x20002000
+
+static const DECLARE_TLV_DB_LINEAR(fm_rx_vol_gain, 0,
+ INT_RX_VOL_MAX_STEPS);
+
+static const DECLARE_TLV_DB_LINEAR(lpa_rx_vol_gain, 0,
+ INT_RX_LR_VOL_MAX_STEPS);
+
+static const DECLARE_TLV_DB_LINEAR(multimedia2_rx_vol_gain, 0,
+ INT_RX_VOL_MAX_STEPS);
+
+static const DECLARE_TLV_DB_LINEAR(multimedia5_rx_vol_gain, 0,
+ INT_RX_VOL_MAX_STEPS);
+
+static const DECLARE_TLV_DB_LINEAR(compressed_rx_vol_gain, 0,
+ INT_RX_LR_VOL_MAX_STEPS);
+
+/* Equal to Frontend after last of the MULTIMEDIA SESSIONS */
+#define MAX_EQ_SESSIONS MSM_FRONTEND_DAI_CS_VOICE
+
+enum {
+ EQ_BAND1 = 0,
+ EQ_BAND2,
+ EQ_BAND3,
+ EQ_BAND4,
+ EQ_BAND5,
+ EQ_BAND6,
+ EQ_BAND7,
+ EQ_BAND8,
+ EQ_BAND9,
+ EQ_BAND10,
+ EQ_BAND11,
+ EQ_BAND12,
+ EQ_BAND_MAX,
+};
+
+/* One equalizer band as sent to the ASM service (packed wire format). */
+struct msm_audio_eq_band {
+	uint16_t band_idx; /* The band index, 0 .. 11 */
+	uint32_t filter_type; /* Filter band type */
+	uint32_t center_freq_hz; /* Filter band center frequency */
+	uint32_t filter_gain; /* Filter band initial gain (dB) */
+			/* Range is +12 dB to -12 dB with 1dB increments. */
+	uint32_t q_factor;
+} __packed;
+
+/* Full EQ configuration for one stream (packed wire format). */
+struct msm_audio_eq_stream_config {
+	uint32_t enable; /* Number of consequtive bands specified */
+	uint32_t num_bands;
+	struct msm_audio_eq_band eq_bands[EQ_BAND_MAX];
+} __packed;
+
+/* Cached EQ settings per multimedia front-end, applied on stream (re)open.
+ * NOTE(review): only used in this file — looks like it should be static;
+ * confirm no other TU references it. */
+struct msm_audio_eq_stream_config eq_data[MAX_EQ_SESSIONS];
+
+static void msm_send_eq_values(int eq_idx);
+/* This array is indexed by back-end DAI ID defined in msm-pcm-routing.h
+ * If new back-end is defined, add new back-end DAI ID at the end of enum
+ */
+
+union srs_trumedia_params_u {
+ struct srs_trumedia_params srs_params;
+ unsigned short int raw_params[1];
+};
+static union srs_trumedia_params_u msm_srs_trumedia_params[2];
+static int srs_port_id = -1;
+
+/* Send the cached SRS TruMedia parameter blocks selected by 'techs'
+ * (bit mask over SRS_ID_*; the special value 1 means "send all") to the
+ * DSP for the given AFE port.  Intentionally a no-op until the SRS ALSA
+ * control has been used at least once, so the DSP is never programmed
+ * with the driver's all-zero defaults.
+ */
+static void srs_send_params(int port_id, unsigned int techs,
+		int param_block_idx) {
+	/* only send commands to dsp if srs alsa ctrl was used
+	   at least one time */
+	if (!srs_alsa_ctrl_ever_called)
+		return;
+
+	pr_debug("SRS %s: called, port_id = %d, techs flags = %u,"
+			" paramblockidx %d", __func__, port_id, techs,
+			param_block_idx);
+	/* force all if techs is set to 1 */
+	if (techs == 1)
+		techs = 0xFFFFFFFF;
+
+	/* one srs_trumedia_open() call per enabled technology block */
+	if (techs & (1 << SRS_ID_WOWHD))
+		srs_trumedia_open(port_id, SRS_ID_WOWHD,
+	(void *)&msm_srs_trumedia_params[param_block_idx].srs_params.wowhd);
+	if (techs & (1 << SRS_ID_CSHP))
+		srs_trumedia_open(port_id, SRS_ID_CSHP,
+	(void *)&msm_srs_trumedia_params[param_block_idx].srs_params.cshp);
+	if (techs & (1 << SRS_ID_HPF))
+		srs_trumedia_open(port_id, SRS_ID_HPF,
+	(void *)&msm_srs_trumedia_params[param_block_idx].srs_params.hpf);
+	if (techs & (1 << SRS_ID_PEQ))
+		srs_trumedia_open(port_id, SRS_ID_PEQ,
+	(void *)&msm_srs_trumedia_params[param_block_idx].srs_params.peq);
+	if (techs & (1 << SRS_ID_HL))
+		srs_trumedia_open(port_id, SRS_ID_HL,
+	(void *)&msm_srs_trumedia_params[param_block_idx].srs_params.hl);
+	if (techs & (1 << SRS_ID_GLOBAL))
+		srs_trumedia_open(port_id, SRS_ID_GLOBAL,
+	(void *)&msm_srs_trumedia_params[param_block_idx].srs_params.global);
+}
+
+/* Back-end DAI table, indexed by MSM_BACKEND_DAI_* (order must match the
+ * enum in msm-pcm-routing.h).  Only the AFE port id is non-zero at init;
+ * the runtime fields start cleared.  The three EXTPROC stub entries all
+ * reuse SLIMBUS_EXTPROC_RX and are skipped via is_be_dai_extproc().
+ */
+static struct msm_pcm_routing_bdai_data msm_bedais[MSM_BACKEND_DAI_MAX] = {
+	{ PRIMARY_I2S_RX, 0, 0, 0, 0, 0},
+	{ PRIMARY_I2S_TX, 0, 0, 0, 0, 0},
+	{ SLIMBUS_0_RX, 0, 0, 0, 0, 0},
+	{ SLIMBUS_0_TX, 0, 0, 0, 0, 0},
+	{ HDMI_RX, 0, 0, 0, 0, 0},
+	{ INT_BT_SCO_RX, 0, 0, 0, 0, 0},
+	{ INT_BT_SCO_TX, 0, 0, 0, 0, 0},
+	{ INT_FM_RX, 0, 0, 0, 0, 0},
+	{ INT_FM_TX, 0, 0, 0, 0, 0},
+	{ RT_PROXY_PORT_001_RX, 0, 0, 0, 0, 0},
+	{ RT_PROXY_PORT_001_TX, 0, 0, 0, 0, 0},
+	{ PCM_RX, 0, 0, 0, 0, 0},
+	{ PCM_TX, 0, 0, 0, 0, 0},
+	{ VOICE_PLAYBACK_TX, 0, 0, 0, 0, 0},
+	{ VOICE_RECORD_RX, 0, 0, 0, 0, 0},
+	{ VOICE_RECORD_TX, 0, 0, 0, 0, 0},
+	{ MI2S_RX, 0, 0, 0, 0, 0},
+	{ MI2S_TX, 0, 0, 0, 0, 0},	/* was one initializer short; made consistent */
+	{ SECONDARY_I2S_RX, 0, 0, 0, 0, 0},
+	{ SLIMBUS_1_RX, 0, 0, 0, 0, 0},
+	{ SLIMBUS_1_TX, 0, 0, 0, 0, 0},
+	{ SLIMBUS_4_RX, 0, 0, 0, 0, 0},
+	{ SLIMBUS_4_TX, 0, 0, 0, 0, 0},
+	{ SLIMBUS_3_RX, 0, 0, 0, 0, 0},
+	{ SLIMBUS_3_TX, 0, 0, 0, 0, 0},
+	{ SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0},
+	{ SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0},
+	{ SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0},
+	{ SECONDARY_PCM_RX, 0, 0, 0, 0, 0},
+	{ SECONDARY_PCM_TX, 0, 0, 0, 0, 0},
+};
+
+/* Track ASM playback & capture sessions of DAI */
+static struct msm_pcm_routing_fdai_data
+ fe_dai_map[MSM_FRONTEND_DAI_MM_SIZE][2] = {
+ /* MULTIMEDIA1 */
+ {{0, INVALID_SESSION, {NULL, NULL} },
+ {0, INVALID_SESSION, {NULL, NULL} } },
+ /* MULTIMEDIA2 */
+ {{0, INVALID_SESSION, {NULL, NULL} },
+ {0, INVALID_SESSION, {NULL, NULL} } },
+ /* MULTIMEDIA3 */
+ {{0, INVALID_SESSION, {NULL, NULL} },
+ {0, INVALID_SESSION, {NULL, NULL} } },
+ /* MULTIMEDIA4 */
+ {{0, INVALID_SESSION, {NULL, NULL} },
+ {0, INVALID_SESSION, {NULL, NULL} } },
+ /* MULTIMEDIA5 */
+ {{0, INVALID_SESSION, {NULL, NULL} },
+ {0, INVALID_SESSION, {NULL, NULL} } },
+ /* MULTIMEDIA6 */
+ {{0, INVALID_SESSION, {NULL, NULL} },
+ {0, INVALID_SESSION, {NULL, NULL} } },
+ /* MULTIMEDIA7*/
+ {{0, INVALID_SESSION, {NULL, NULL} },
+ {0, INVALID_SESSION, {NULL, NULL} } },
+ /* MULTIMEDIA8 */
+ {{0, INVALID_SESSION, {NULL, NULL} },
+ {0, INVALID_SESSION, {NULL, NULL} } },
+};
+
+/* True for the three external-processor stub back-ends, which must be
+ * skipped when scanning msm_bedais[] for real AFE ports. */
+static uint8_t is_be_dai_extproc(int be_dai)
+{
+	switch (be_dai) {
+	case MSM_BACKEND_DAI_EXTPROC_RX:
+	case MSM_BACKEND_DAI_EXTPROC_TX:
+	case MSM_BACKEND_DAI_EXTPROC_EC_TX:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/* Collect every active back-end port of the matching direction that this
+ * front-end is routed to, and program the ADM matrix so the given DSP
+ * stream fans out to all of them.  Callers in this file invoke it with
+ * routing_lock held.
+ */
+static void msm_pcm_routing_build_matrix(int fedai_id, int dspst_id,
+	int path_type)
+{
+	int i, port_type;
+	struct route_payload payload;
+
+	payload.num_copps = 0;
+	/* playback maps to RX ports, capture to TX ports */
+	port_type = (path_type == ADM_PATH_PLAYBACK ?
+		MSM_AFE_PORT_TYPE_RX : MSM_AFE_PORT_TYPE_TX);
+
+	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
+		if (!is_be_dai_extproc(i) &&
+		   (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
+		   (msm_bedais[i].active) &&
+		   (test_bit(fedai_id, &msm_bedais[i].fe_sessions)))
+			payload.copp_ids[payload.num_copps++] =
+					msm_bedais[i].port_id;
+	}
+
+	/* nothing routed -> nothing to map */
+	if (payload.num_copps)
+		adm_matrix_map(dspst_id, path_type,
+			payload.num_copps, payload.copp_ids, 0);
+}
+
+/* Connect (enable != 0) or disconnect a pass-through DSP stream to the
+ * first active, matching-direction back-end the front-end is routed to.
+ * Note: only one back-end is handled — the loop breaks at the first match.
+ */
+void msm_pcm_routing_reg_psthr_stream(int fedai_id, int dspst_id,
+	int stream_type, int enable)
+{
+	int i, session_type, path_type, port_type;
+	u32 mode = 0;
+
+	if (fedai_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
+		/* bad ID assigned in machine driver */
+		pr_err("%s: bad MM ID\n", __func__);
+		return;
+	}
+
+	if (stream_type == SNDRV_PCM_STREAM_PLAYBACK) {
+		session_type = SESSION_TYPE_RX;
+		path_type = ADM_PATH_PLAYBACK;
+		port_type = MSM_AFE_PORT_TYPE_RX;
+	} else {
+		session_type = SESSION_TYPE_TX;
+		path_type = ADM_PATH_LIVE_REC;
+		port_type = MSM_AFE_PORT_TYPE_TX;
+	}
+
+	mutex_lock(&routing_lock);
+
+	fe_dai_map[fedai_id][session_type].strm_id = dspst_id;
+	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
+		if (!is_be_dai_extproc(i) &&
+		   (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
+		   (msm_bedais[i].active) &&
+		   (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) {
+			/* mode equals port_type here (matched above) */
+			mode = afe_get_port_type(msm_bedais[i].port_id);
+			if (enable)
+				adm_connect_afe_port(mode, dspst_id,
+						msm_bedais[i].port_id);
+			else
+				adm_disconnect_afe_port(mode, dspst_id,
+						msm_bedais[i].port_id);
+
+			break;
+		}
+	}
+	mutex_unlock(&routing_lock);
+}
+
+/* Register a physical (ASM) stream for a multimedia front-end: open a
+ * COPP on every active, matching back-end it is routed to, then map the
+ * DSP stream to those copps via the ADM matrix.  Low-latency (perf_mode)
+ * and >2-channel paths use the multi-channel COPP open.
+ */
+void msm_pcm_routing_reg_phy_stream(int fedai_id, bool perf_mode, int dspst_id,
+					int stream_type)
+{
+	int i, session_type, path_type, port_type;
+	struct route_payload payload;
+	u32 channels;
+
+	if (fedai_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
+		/* bad ID assigned in machine driver */
+		pr_err("%s: bad MM ID %d\n", __func__, fedai_id);
+		return;
+	}
+
+	if (stream_type == SNDRV_PCM_STREAM_PLAYBACK) {
+		session_type = SESSION_TYPE_RX;
+		path_type = ADM_PATH_PLAYBACK;
+		port_type = MSM_AFE_PORT_TYPE_RX;
+	} else {
+		session_type = SESSION_TYPE_TX;
+		path_type = ADM_PATH_LIVE_REC;
+		port_type = MSM_AFE_PORT_TYPE_TX;
+	}
+
+	mutex_lock(&routing_lock);
+
+	payload.num_copps = 0; /* only RX needs to use payload */
+	fe_dai_map[fedai_id][session_type].strm_id = dspst_id;
+	/* re-enable EQ if active */
+	if (eq_data[fedai_id].enable)
+		msm_send_eq_values(fedai_id);
+	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
+		/* record perf_mode on every routed BE, even inactive ones */
+		if (test_bit(fedai_id, &msm_bedais[i].fe_sessions))
+			msm_bedais[i].perf_mode = perf_mode;
+		if (!is_be_dai_extproc(i) &&
+		   (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
+		   (msm_bedais[i].active) &&
+		   (test_bit(fedai_id, &msm_bedais[i].fe_sessions))) {
+
+			channels = msm_bedais[i].channel;
+
+			if ((stream_type == SNDRV_PCM_STREAM_PLAYBACK) &&
+				((channels == 1) || (channels == 2)) &&
+				msm_bedais[i].perf_mode) {
+				pr_debug("%s configure COPP to lowlatency mode",
+								 __func__);
+				adm_multi_ch_copp_open(msm_bedais[i].port_id,
+				path_type,
+				msm_bedais[i].sample_rate,
+				msm_bedais[i].channel,
+				DEFAULT_COPP_TOPOLOGY, msm_bedais[i].perf_mode);
+			} else if ((stream_type == SNDRV_PCM_STREAM_PLAYBACK) &&
+				(channels > 2))
+				adm_multi_ch_copp_open(msm_bedais[i].port_id,
+				path_type,
+				msm_bedais[i].sample_rate,
+				msm_bedais[i].channel,
+				DEFAULT_COPP_TOPOLOGY, msm_bedais[i].perf_mode);
+			else
+				adm_open(msm_bedais[i].port_id,
+				path_type,
+				msm_bedais[i].sample_rate,
+				msm_bedais[i].channel,
+				DEFAULT_COPP_TOPOLOGY);
+
+			payload.copp_ids[payload.num_copps++] =
+					msm_bedais[i].port_id;
+			/* remember last opened port for later SRS updates */
+			srs_port_id = msm_bedais[i].port_id;
+			srs_send_params(srs_port_id, 1, 0);
+		}
+	}
+	if (payload.num_copps)
+		adm_matrix_map(dspst_id, path_type,
+			payload.num_copps, payload.copp_ids, 0);
+
+	mutex_unlock(&routing_lock);
+}
+/* Same as msm_pcm_routing_reg_phy_stream(), but also records an event
+ * callback (buffer-reconfig / device-switch notifications) for the
+ * front-end's session.
+ */
+void msm_pcm_routing_reg_phy_stream_v2(int fedai_id, bool perf_mode,
+				       int dspst_id, int stream_type,
+				       struct msm_pcm_routing_evt event_info)
+{
+	if (fedai_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
+		/* The callee validates too, but it cannot protect the
+		 * fe_dai_map[] store below from an out-of-bounds index. */
+		pr_err("%s: bad MM ID %d\n", __func__, fedai_id);
+		return;
+	}
+
+	msm_pcm_routing_reg_phy_stream(fedai_id, perf_mode, dspst_id,
+				       stream_type);
+
+	if (stream_type == SNDRV_PCM_STREAM_PLAYBACK)
+		fe_dai_map[fedai_id][SESSION_TYPE_RX].event_info = event_info;
+	else
+		fe_dai_map[fedai_id][SESSION_TYPE_TX].event_info = event_info;
+
+}
+
+/* Tear down a physical stream: close the COPP on every active, routed
+ * back-end of the matching direction, then invalidate the FE's session
+ * mapping and clear its remembered back-end sample rate.
+ */
+void msm_pcm_routing_dereg_phy_stream(int fedai_id, int stream_type)
+{
+	int i, port_type, session_type;
+
+	if (fedai_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
+		/* bad ID assigned in machine driver */
+		pr_err("%s: bad MM ID\n", __func__);
+		return;
+	}
+
+	if (stream_type == SNDRV_PCM_STREAM_PLAYBACK) {
+		port_type = MSM_AFE_PORT_TYPE_RX;
+		session_type = SESSION_TYPE_RX;
+	} else {
+		port_type = MSM_AFE_PORT_TYPE_TX;
+		session_type = SESSION_TYPE_TX;
+	}
+
+	mutex_lock(&routing_lock);
+
+	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
+		if (!is_be_dai_extproc(i) &&
+		   (afe_get_port_type(msm_bedais[i].port_id) == port_type) &&
+		   (msm_bedais[i].active) &&
+		   (test_bit(fedai_id, &msm_bedais[i].fe_sessions)))
+			adm_close(msm_bedais[i].port_id);
+	}
+
+	fe_dai_map[fedai_id][session_type].strm_id = INVALID_SESSION;
+	fe_dai_map[fedai_id][session_type].be_srate = 0;
+	mutex_unlock(&routing_lock);
+}
+
+/* Check if FE/BE route is set */
+static bool msm_pcm_routing_route_is_set(u16 be_id, u16 fe_id)
+{
+ bool rc = false;
+
+ if (fe_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
+ /* recheck FE ID in the mixer control defined in this file */
+ pr_err("%s: bad MM ID\n", __func__);
+ return rc;
+ }
+
+ if (test_bit(fe_id, &msm_bedais[be_id].fe_sessions))
+ rc = true;
+
+ return rc;
+}
+
+/* Apply a mixer-control route change: 'reg' is the back-end id, 'val' the
+ * front-end id, 'set' whether the route was enabled or disabled.  If the
+ * back-end is active and the front-end has a live ASM stream, the COPP is
+ * opened/closed immediately and the ADM matrix rebuilt, so routes can be
+ * switched mid-stream.
+ */
+static void msm_pcm_routing_process_audio(u16 reg, u16 val, int set)
+{
+	int session_type, path_type;
+	u32 channels;
+	struct msm_pcm_routing_fdai_data *fdai;
+
+
+	if (val > MSM_FRONTEND_DAI_MM_MAX_ID) {
+		/* recheck FE ID in the mixer control defined in this file */
+		pr_err("%s: bad MM ID\n", __func__);
+		return;
+	}
+
+	/* direction follows the back-end port's type */
+	if (afe_get_port_type(msm_bedais[reg].port_id) ==
+		MSM_AFE_PORT_TYPE_RX) {
+		session_type = SESSION_TYPE_RX;
+		path_type = ADM_PATH_PLAYBACK;
+	} else {
+		session_type = SESSION_TYPE_TX;
+		path_type = ADM_PATH_LIVE_REC;
+	}
+
+	mutex_lock(&routing_lock);
+
+	if (set) {
+
+		set_bit(val, &msm_bedais[reg].fe_sessions);
+		fdai = &fe_dai_map[val][session_type];
+		if (msm_bedais[reg].active && fdai->strm_id !=
+			INVALID_SESSION) {
+			channels = msm_bedais[reg].channel;
+
+			/* capture stream must be flushed if the new BE runs
+			 * at a different rate than the previous one */
+			if (session_type == SESSION_TYPE_TX && fdai->be_srate &&
+				(fdai->be_srate != msm_bedais[reg].sample_rate)) {
+				pr_debug("%s: flush strm %d due diff BE rates\n",
+					__func__, fdai->strm_id);
+
+				if (fdai->event_info.event_func)
+					fdai->event_info.event_func(
+						MSM_PCM_RT_EVT_BUF_RECFG,
+						fdai->event_info.priv_data);
+				fdai->be_srate = 0; /* might not need it */
+			}
+
+			if ((session_type == SESSION_TYPE_RX) &&
+				((channels == 1) || (channels == 2))
+				&& msm_bedais[reg].perf_mode) {
+				adm_multi_ch_copp_open(msm_bedais[reg].port_id,
+				path_type,
+				msm_bedais[reg].sample_rate,
+				channels,
+				DEFAULT_COPP_TOPOLOGY,
+				msm_bedais[reg].perf_mode);
+				pr_debug("%s:configure COPP to lowlatency mode",
+					 __func__);
+			} else if ((session_type == SESSION_TYPE_RX)
+					&& (channels > 2))
+				adm_multi_ch_copp_open(msm_bedais[reg].port_id,
+				path_type,
+				msm_bedais[reg].sample_rate,
+				channels,
+				DEFAULT_COPP_TOPOLOGY,
+				msm_bedais[reg].perf_mode);
+			else
+				adm_open(msm_bedais[reg].port_id,
+				path_type,
+				msm_bedais[reg].sample_rate, channels,
+				DEFAULT_COPP_TOPOLOGY);
+
+			/* tell the FE owner its device changed mid-stream */
+			if (session_type == SESSION_TYPE_RX &&
+				fdai->event_info.event_func)
+				fdai->event_info.event_func(
+					MSM_PCM_RT_EVT_DEVSWITCH,
+					fdai->event_info.priv_data);
+
+			msm_pcm_routing_build_matrix(val,
+				fdai->strm_id, path_type);
+			srs_port_id = msm_bedais[reg].port_id;
+			srs_send_params(srs_port_id, 1, 0);
+		}
+	} else {
+		clear_bit(val, &msm_bedais[reg].fe_sessions);
+		fdai = &fe_dai_map[val][session_type];
+		if (msm_bedais[reg].active && fdai->strm_id !=
+			INVALID_SESSION) {
+			/* remember BE rate so a later re-route can detect a
+			 * rate change and trigger a buffer reconfig */
+			fdai->be_srate = msm_bedais[reg].sample_rate;
+			adm_close(msm_bedais[reg].port_id);
+			msm_pcm_routing_build_matrix(val,
+				fdai->strm_id, path_type);
+		}
+	}
+
+	mutex_unlock(&routing_lock);
+}
+
+/* Mixer-control getter: report whether FE (mc->shift) is currently routed
+ * to BE (mc->reg). */
+static int msm_routing_get_audio_mixer(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+
+	ucontrol->value.integer.value[0] =
+		test_bit(mc->shift, &msm_bedais[mc->reg].fe_sessions) ? 1 : 0;
+
+	pr_debug("%s: reg %x shift %x val %ld\n", __func__, mc->reg, mc->shift,
+	ucontrol->value.integer.value[0]);
+
+	return 0;
+}
+
+/* Mixer-control setter: apply the route change only on a real transition
+ * (off->on or on->off), then let DAPM update widget power accordingly.
+ */
+static int msm_routing_put_audio_mixer(struct snd_kcontrol *kcontrol,
+			struct snd_ctl_elem_value *ucontrol)
+{
+
+	struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
+
+	struct soc_mixer_control *mc =
+		(struct soc_mixer_control *)kcontrol->private_value;
+	unsigned int mask = (1 << fls(mc->max)) - 1;
+	unsigned short val = (ucontrol->value.integer.value[0] & mask);
+	struct snd_soc_dapm_update update;
+	struct snd_soc_card *card;
+
+	card = dapm->card;
+	update.kcontrol = kcontrol;
+	update.reg = mc->reg;
+	update.mask = mask;
+	update.val = val;
+
+
+	/* only act when the requested state differs from the current one */
+	if (ucontrol->value.integer.value[0] &&
+	   msm_pcm_routing_route_is_set(mc->reg, mc->shift) == false) {
+		msm_pcm_routing_process_audio(mc->reg, mc->shift, 1);
+		snd_soc_dapm_mixer_update_power(dapm, kcontrol, 1, &update);
+	} else if (!ucontrol->value.integer.value[0] &&
+		  msm_pcm_routing_route_is_set(mc->reg, mc->shift) == true) {
+		msm_pcm_routing_process_audio(mc->reg, mc->shift, 0);
+		snd_soc_dapm_mixer_update_power(dapm, kcontrol, 0, &update);
+	}
+
+	return 0;
+}
+
+/* Push the cached equalizer settings for front-end 'eq_idx' to the ASM
+ * session currently mapped to its playback (RX) side. */
+static void msm_send_eq_values(int eq_idx)
+{
+	struct audio_client *ac;
+	int rc;
+
+	ac = q6asm_get_audio_client(
+			fe_dai_map[eq_idx][SESSION_TYPE_RX].strm_id);
+	if (ac == NULL) {
+		pr_err("%s: Could not get audio client for session: %d\n",
+		      __func__, fe_dai_map[eq_idx][SESSION_TYPE_RX].strm_id);
+		return;
+	}
+
+	rc = q6asm_equalizer(ac, &eq_data[eq_idx]);
+	if (rc < 0)
+		pr_err("%s: Call to ASM equalizer failed, returned = %d\n",
+		      __func__, rc);
+}
+
+/* "HDMI Mixer" switches: one on/off control per multimedia front-end,
+ * encoding BE id in .reg and FE id in .shift for the get/put handlers. */
+static const struct snd_kcontrol_new hdmi_mixer_controls[] = {
+	SOC_SINGLE_EXT("MultiMedia1", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA1, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia2", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA2, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia3", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA3, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia4", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA4, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia5", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA5, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia6", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA6, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia7", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA7, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+	SOC_SINGLE_EXT("MultiMedia8", MSM_BACKEND_DAI_HDMI_RX,
+	MSM_FRONTEND_DAI_MULTIMEDIA8, 1, 0, msm_routing_get_audio_mixer,
+	msm_routing_put_audio_mixer),
+};
+
+/* DAPM widgets: one AIF-in per multimedia FE, the HDMI AIF-out, the HDMI
+ * routing mixer, and a terminal output. */
+static const struct snd_soc_dapm_widget msm_qdsp6_widgets[] = {
+	/* Frontend AIF */
+	/* Widget name equals to Front-End DAI name<Need confirmation>,
+	 * Stream name must contains substring of front-end dai name
+	 */
+	SND_SOC_DAPM_AIF_IN("MM_DL1", "MultiMedia1 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL2", "MultiMedia2 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL3", "MultiMedia3 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL4", "MultiMedia4 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL5", "MultiMedia5 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL6", "MultiMedia6 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL7", "MultiMedia7 Playback", 0, 0, 0, 0),
+	SND_SOC_DAPM_AIF_IN("MM_DL8", "MultiMedia8 Playback", 0, 0, 0, 0),
+	/* Backend AIF */
+	/* Stream name equals to backend dai link stream name
+	 */
+	SND_SOC_DAPM_AIF_OUT("HDMI", "HDMI Playback", 0, 0, 0 , 0),
+	/* incall */
+	/* Mixer definitions */
+	SND_SOC_DAPM_MIXER("HDMI Mixer", SND_SOC_NOPM, 0, 0,
+	hdmi_mixer_controls, ARRAY_SIZE(hdmi_mixer_controls)),
+	SND_SOC_DAPM_OUTPUT("BE_OUT"),
+};
+
+/* DAPM routes: each MM front-end feeds the HDMI mixer, which drives the
+ * HDMI AIF and the BE_OUT terminal.
+ * NOTE(review): the last two entries reference stream names
+ * ("HDMI Playback", "MultiMedia1 Playback") rather than widget names —
+ * confirm DAPM resolves these as intended.
+ */
+static const struct snd_soc_dapm_route intercon[] = {
+	{"HDMI Mixer", "MultiMedia1", "MM_DL1"},
+	{"HDMI Mixer", "MultiMedia2", "MM_DL2"},
+	{"HDMI Mixer", "MultiMedia3", "MM_DL3"},
+	{"HDMI Mixer", "MultiMedia4", "MM_DL4"},
+	{"HDMI Mixer", "MultiMedia5", "MM_DL5"},
+	{"HDMI Mixer", "MultiMedia6", "MM_DL6"},
+	{"HDMI Mixer", "MultiMedia7", "MM_DL7"},
+	{"HDMI Mixer", "MultiMedia8", "MM_DL8"},
+	{"HDMI", NULL, "HDMI Mixer"},
+	{"BE_OUT", NULL, "HDMI Playback"},
+	{"HDMI Playback", NULL, "MultiMedia1 Playback"},
+};
+
+/* Back-end hw_params: record the negotiated rate and channel count so
+ * later COPP opens for this BE use the right stream parameters. */
+int msm_pcm_routing_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	unsigned int be_id = rtd->dai_link->be_id;
+
+	if (be_id >= MSM_BACKEND_DAI_MAX) {
+		pr_err("%s: unexpected be_id %d\n", __func__, be_id);
+		return -EINVAL;
+	}
+
+	mutex_lock(&routing_lock);
+	msm_bedais[be_id].sample_rate = params_rate(params);
+	msm_bedais[be_id].channel = params_channels(params);
+	mutex_unlock(&routing_lock);
+	return 0;
+}
+
+/* Back-end close: shut the COPP for every routed FE with a live session,
+ * remember the BE rate for later flush detection, and reset BE state. */
+static int msm_pcm_routing_close(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	unsigned int be_id = rtd->dai_link->be_id;
+	int i, session_type;
+	struct msm_pcm_routing_bdai_data *bedai;
+
+	if (be_id >= MSM_BACKEND_DAI_MAX) {
+		pr_err("%s: unexpected be_id %d\n", __func__, be_id);
+		return -EINVAL;
+	}
+
+	bedai = &msm_bedais[be_id];
+	session_type = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+		0 : 1);
+
+	mutex_lock(&routing_lock);
+
+	for_each_set_bit(i, &bedai->fe_sessions, MSM_FRONTEND_DAI_MM_SIZE) {
+		if (fe_dai_map[i][session_type].strm_id != INVALID_SESSION) {
+			fe_dai_map[i][session_type].be_srate =
+				bedai->sample_rate;
+			adm_close(bedai->port_id);
+			srs_port_id = -1;
+		}
+	}
+
+	bedai->active = 0;
+	bedai->sample_rate = 0;
+	bedai->channel = 0;
+	bedai->perf_mode = false;
+	mutex_unlock(&routing_lock);
+
+	return 0;
+}
+
+/* Back-end prepare: mark the BE active and, for every routed FE that
+ * already has a live ASM session, open its COPP and rebuild the ADM
+ * matrix.  Idempotent — a second prepare on an active BE is a no-op.
+ */
+int msm_pcm_routing_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
+	unsigned int be_id = rtd->dai_link->be_id;
+	int i, path_type, session_type;
+	struct msm_pcm_routing_bdai_data *bedai;
+	u32 channels;
+	bool playback, capture;
+	struct msm_pcm_routing_fdai_data *fdai;
+
+	if (be_id >= MSM_BACKEND_DAI_MAX) {
+		pr_err("%s: unexpected be_id %d\n", __func__, be_id);
+		return -EINVAL;
+	}
+
+	bedai = &msm_bedais[be_id];
+
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		path_type = ADM_PATH_PLAYBACK;
+		session_type = SESSION_TYPE_RX;
+	} else {
+		path_type = ADM_PATH_LIVE_REC;
+		session_type = SESSION_TYPE_TX;
+	}
+
+	mutex_lock(&routing_lock);
+
+	if (bedai->active == 1)
+		goto done; /* Ignore prepare if back-end already active */
+
+	/* AFE port is not active at this point. However, still
+	 * go ahead setting active flag under the notion that
+	 * QDSP6 is able to handle ADM starting before AFE port
+	 * is started.
+	 */
+	bedai->active = 1;
+	playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+	capture = substream->stream == SNDRV_PCM_STREAM_CAPTURE;
+
+	for_each_set_bit(i, &bedai->fe_sessions, MSM_FRONTEND_DAI_MM_SIZE) {
+		fdai = &fe_dai_map[i][session_type];
+		if (fdai->strm_id != INVALID_SESSION) {
+			/* capture must be reconfigured if this BE's rate
+			 * differs from the one the stream last ran at */
+			if (session_type == SESSION_TYPE_TX && fdai->be_srate &&
+				(fdai->be_srate != bedai->sample_rate)) {
+				pr_debug("%s: flush strm %d due diff BE rates\n",
+					__func__,
+					fdai->strm_id);
+
+				if (fdai->event_info.event_func)
+					fdai->event_info.event_func(
+						MSM_PCM_RT_EVT_BUF_RECFG,
+						fdai->event_info.priv_data);
+				fdai->be_srate = 0; /* might not need it */
+			}
+
+			channels = bedai->channel;
+			if ((playback || capture)
+				&& ((channels == 2) || (channels == 1)) &&
+				bedai->perf_mode) {
+				adm_multi_ch_copp_open(bedai->port_id,
+				path_type,
+				bedai->sample_rate,
+				channels,
+				DEFAULT_COPP_TOPOLOGY, bedai->perf_mode);
+				pr_debug("%s:configure COPP to lowlatency mode",
+					__func__);
+			} else if ((playback || capture)
+				&& (channels > 2))
+				adm_multi_ch_copp_open(bedai->port_id,
+				path_type,
+				bedai->sample_rate,
+				channels,
+				DEFAULT_COPP_TOPOLOGY, bedai->perf_mode);
+			else
+				adm_open(bedai->port_id,
+				path_type,
+				bedai->sample_rate,
+				channels,
+				DEFAULT_COPP_TOPOLOGY);
+
+			msm_pcm_routing_build_matrix(i,
+				fdai->strm_id, path_type);
+			srs_port_id = bedai->port_id;
+			srs_send_params(srs_port_id, 1, 0);
+		}
+	}
+
+done:
+	mutex_unlock(&routing_lock);
+
+	return 0;
+}
+
+/* PCM ops for the routing platform; only BE lifecycle hooks are needed. */
+static struct snd_pcm_ops msm_routing_pcm_ops = {
+	.hw_params	= msm_pcm_routing_hw_params,
+	.close          = msm_pcm_routing_close,
+	.prepare        = msm_pcm_routing_prepare,
+};
+
+/* Not used but frame seems to require it */
+static int msm_routing_probe(struct snd_soc_platform *platform)
+{
+	return 0;
+}
+
+/* No per-PCM setup needed; the routing platform owns no DMA buffers. */
+static int msm_asoc_routing_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+	return 0;
+}
+
+/* ASoC platform driver exposing the DAPM widgets/routes defined above. */
+static struct snd_soc_platform_driver msm_soc_routing_platform = {
+	.ops		= &msm_routing_pcm_ops,
+	.probe		= msm_routing_probe,
+	.pcm_new	= msm_asoc_routing_pcm_new,
+	.component_driver = {
+		.name		= "msm-qdsp6-routing-dai",
+		.dapm_widgets = msm_qdsp6_widgets,
+		.num_dapm_widgets = ARRAY_SIZE(msm_qdsp6_widgets),
+		.dapm_routes = intercon,
+		.num_dapm_routes = ARRAY_SIZE(intercon),
+	},
+};
+
+/* Platform-device probe: register the routing ASoC platform. */
+static int msm_routing_pcm_probe(struct platform_device *pdev)
+{
+	dev_dbg(&pdev->dev, "dev name %s\n", dev_name(&pdev->dev));
+	return snd_soc_register_platform(&pdev->dev,
+					 &msm_soc_routing_platform);
+}
+
+/* Platform-device remove: unregister the routing ASoC platform. */
+static int msm_routing_pcm_remove(struct platform_device *pdev)
+{
+	snd_soc_unregister_platform(&pdev->dev);
+	return 0;
+}
+
+/* Device-tree match table for the routing platform device. */
+static const struct of_device_id msm_routing_dt_match[] = {
+	{.compatible = "qcom,msm-pcm-routing"},
+	{}
+};
+
+/* Platform driver glue; registered via module_platform_driver() below. */
+static struct platform_driver msm_routing_pcm_driver = {
+	.driver = {
+		.name = "msm-pcm-routing",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_routing_dt_match,
+	},
+	.probe = msm_routing_pcm_probe,
+	.remove = msm_routing_pcm_remove,
+};
+
+/* Return the 'active' state of the first back-end this front-end is
+ * routed to, or 0 when none is found or the FE id is invalid. */
+int msm_routing_check_backend_enabled(int fedai_id)
+{
+	int i;
+
+	/* Use '>' (not '>='): MM_MAX_ID itself (MULTIMEDIA8) is a valid
+	 * front-end id, as every other bounds check in this file agrees. */
+	if (fedai_id > MSM_FRONTEND_DAI_MM_MAX_ID) {
+		/* bad ID assigned in machine driver */
+		pr_err("%s: bad MM ID\n", __func__);
+		return 0;
+	}
+	for (i = 0; i < MSM_BACKEND_DAI_MAX; i++) {
+		if (test_bit(fedai_id, &msm_bedais[i].fe_sessions))
+			return msm_bedais[i].active;
+	}
+	return 0;
+}
+module_platform_driver(msm_routing_pcm_driver);
+MODULE_DESCRIPTION("MSM routing platform driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/qcom/msm-pcm-routing.h b/sound/soc/qcom/msm-pcm-routing.h
new file mode 100644
index 0000000000000..ad63c120aad8b
--- /dev/null
+++ b/sound/soc/qcom/msm-pcm-routing.h
@@ -0,0 +1,145 @@
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef _MSM_PCM_ROUTING_H
+#define _MSM_PCM_ROUTING_H
+#include <sound/apr_audio.h>
+
+#define LPASS_BE_PRI_I2S_RX "PRIMARY_I2S_RX"
+#define LPASS_BE_PRI_I2S_TX "PRIMARY_I2S_TX"
+#define LPASS_BE_SLIMBUS_0_RX "SLIMBUS_0_RX"
+#define LPASS_BE_SLIMBUS_0_TX "SLIMBUS_0_TX"
+#define LPASS_BE_HDMI "HDMI"
+#define LPASS_BE_INT_BT_SCO_RX "INT_BT_SCO_RX"
+#define LPASS_BE_INT_BT_SCO_TX "INT_BT_SCO_TX"
+#define LPASS_BE_INT_FM_RX "INT_FM_RX"
+#define LPASS_BE_INT_FM_TX "INT_FM_TX"
+#define LPASS_BE_AFE_PCM_RX "RT_PROXY_DAI_001_RX"
+#define LPASS_BE_AFE_PCM_TX "RT_PROXY_DAI_002_TX"
+#define LPASS_BE_AUXPCM_RX "AUX_PCM_RX"
+#define LPASS_BE_AUXPCM_TX "AUX_PCM_TX"
+#define LPASS_BE_SEC_AUXPCM_RX "SEC_AUX_PCM_RX"
+#define LPASS_BE_SEC_AUXPCM_TX "SEC_AUX_PCM_TX"
+#define LPASS_BE_VOICE_PLAYBACK_TX "VOICE_PLAYBACK_TX"
+#define LPASS_BE_INCALL_RECORD_RX "INCALL_RECORD_TX"
+#define LPASS_BE_INCALL_RECORD_TX "INCALL_RECORD_RX"
+#define LPASS_BE_SEC_I2S_RX "SECONDARY_I2S_RX"
+
+#define LPASS_BE_MI2S_RX "(Backend) MI2S_RX"
+#define LPASS_BE_MI2S_TX "(Backend) MI2S_TX"
+#define LPASS_BE_STUB_RX "(Backend) STUB_RX"
+#define LPASS_BE_STUB_TX "(Backend) STUB_TX"
+#define LPASS_BE_SLIMBUS_1_RX "(Backend) SLIMBUS_1_RX"
+#define LPASS_BE_SLIMBUS_1_TX "(Backend) SLIMBUS_1_TX"
+#define LPASS_BE_STUB_1_TX "(Backend) STUB_1_TX"
+#define LPASS_BE_SLIMBUS_3_RX "(Backend) SLIMBUS_3_RX"
+#define LPASS_BE_SLIMBUS_3_TX "(Backend) SLIMBUS_3_TX"
+#define LPASS_BE_SLIMBUS_4_RX "(Backend) SLIMBUS_4_RX"
+#define LPASS_BE_SLIMBUS_4_TX "(Backend) SLIMBUS_4_TX"
+
+/* For multimedia front-ends, asm session is allocated dynamically.
+ * Hence, asm session/multimedia front-end mapping has to be maintained.
+ * Due to this reason, additional multimedia front-end must be placed before
+ * non-multimedia front-ends.
+ */
+
+enum {
+ MSM_FRONTEND_DAI_MULTIMEDIA1 = 0,
+ MSM_FRONTEND_DAI_MULTIMEDIA2,
+ MSM_FRONTEND_DAI_MULTIMEDIA3,
+ MSM_FRONTEND_DAI_MULTIMEDIA4,
+ MSM_FRONTEND_DAI_MULTIMEDIA5,
+ MSM_FRONTEND_DAI_MULTIMEDIA6,
+ MSM_FRONTEND_DAI_MULTIMEDIA7,
+ MSM_FRONTEND_DAI_MULTIMEDIA8,
+ MSM_FRONTEND_DAI_CS_VOICE,
+ MSM_FRONTEND_DAI_VOIP,
+ MSM_FRONTEND_DAI_AFE_RX,
+ MSM_FRONTEND_DAI_AFE_TX,
+ MSM_FRONTEND_DAI_VOICE_STUB,
+ MSM_FRONTEND_DAI_VOLTE,
+ MSM_FRONTEND_DAI_VOICE2,
+ MSM_FRONTEND_DAI_VOLTE_STUB,
+ MSM_FRONTEND_DAI_VOICE2_STUB,
+ MSM_FRONTEND_DAI_MAX,
+};
+
+#define MSM_FRONTEND_DAI_MM_SIZE (MSM_FRONTEND_DAI_MULTIMEDIA8 + 1)
+#define MSM_FRONTEND_DAI_MM_MAX_ID MSM_FRONTEND_DAI_MULTIMEDIA8
+
+enum {
+ MSM_BACKEND_DAI_PRI_I2S_RX = 0,
+ MSM_BACKEND_DAI_PRI_I2S_TX,
+ MSM_BACKEND_DAI_SLIMBUS_0_RX,
+ MSM_BACKEND_DAI_SLIMBUS_0_TX,
+ MSM_BACKEND_DAI_HDMI_RX,
+ MSM_BACKEND_DAI_INT_BT_SCO_RX,
+ MSM_BACKEND_DAI_INT_BT_SCO_TX,
+ MSM_BACKEND_DAI_INT_FM_RX,
+ MSM_BACKEND_DAI_INT_FM_TX,
+ MSM_BACKEND_DAI_AFE_PCM_RX,
+ MSM_BACKEND_DAI_AFE_PCM_TX,
+ MSM_BACKEND_DAI_AUXPCM_RX,
+ MSM_BACKEND_DAI_AUXPCM_TX,
+ MSM_BACKEND_DAI_VOICE_PLAYBACK_TX,
+ MSM_BACKEND_DAI_INCALL_RECORD_RX,
+ MSM_BACKEND_DAI_INCALL_RECORD_TX,
+ MSM_BACKEND_DAI_MI2S_RX,
+ MSM_BACKEND_DAI_MI2S_TX,
+ MSM_BACKEND_DAI_SEC_I2S_RX,
+ MSM_BACKEND_DAI_SLIMBUS_1_RX,
+ MSM_BACKEND_DAI_SLIMBUS_1_TX,
+ MSM_BACKEND_DAI_SLIMBUS_4_RX,
+ MSM_BACKEND_DAI_SLIMBUS_4_TX,
+ MSM_BACKEND_DAI_SLIMBUS_3_RX,
+ MSM_BACKEND_DAI_SLIMBUS_3_TX,
+ MSM_BACKEND_DAI_EXTPROC_RX,
+ MSM_BACKEND_DAI_EXTPROC_TX,
+ MSM_BACKEND_DAI_EXTPROC_EC_TX,
+ MSM_BACKEND_DAI_SEC_AUXPCM_RX,
+ MSM_BACKEND_DAI_SEC_AUXPCM_TX,
+ MSM_BACKEND_DAI_MAX,
+};
+
+enum msm_pcm_routing_event {
+ MSM_PCM_RT_EVT_BUF_RECFG,
+ MSM_PCM_RT_EVT_DEVSWITCH,
+ MSM_PCM_RT_EVT_MAX,
+};
+/* dai_id: front-end ID,
+ * dspst_id: DSP audio stream ID
+ * stream_type: playback or capture
+ */
+void msm_pcm_routing_reg_phy_stream(int fedai_id, bool perf_mode,
+ int dspst_id, int stream_type);
+void msm_pcm_routing_reg_psthr_stream(int fedai_id, int dspst_id,
+ int stream_type, int enable);
+
+struct msm_pcm_routing_evt {
+ void (*event_func)(enum msm_pcm_routing_event, void *);
+ void *priv_data;
+};
+
+void msm_pcm_routing_reg_phy_stream_v2(int fedai_id, bool perf_mode,
+ int dspst_id, int stream_type,
+ struct msm_pcm_routing_evt event_info);
+
+void msm_pcm_routing_dereg_phy_stream(int fedai_id, int stream_type);
+
+/* Volume setters implemented by the corresponding PCM platform drivers. */
+int lpa_set_volume(unsigned volume);
+
+int msm_routing_check_backend_enabled(int fedai_id);
+
+int multi_ch_pcm_set_volume(unsigned volume);
+
+int compressed_set_volume(unsigned volume);
+
+#endif /*_MSM_PCM_ROUTING_H*/
diff --git a/sound/soc/qcom/qdsp6/Makefile b/sound/soc/qcom/qdsp6/Makefile
new file mode 100644
index 0000000000000..b636583a60284
--- /dev/null
+++ b/sound/soc/qcom/qdsp6/Makefile
@@ -0,0 +1,2 @@
+obj-y := q6asm.o q6adm.o q6afe.o
+obj-$(CONFIG_SND_SOC_QDSP6) += core/
diff --git a/sound/soc/qcom/qdsp6/core/Makefile b/sound/soc/qcom/qdsp6/core/Makefile
new file mode 100644
index 0000000000000..06089d9bced97
--- /dev/null
+++ b/sound/soc/qcom/qdsp6/core/Makefile
@@ -0,0 +1,3 @@
+obj-y += apr.o apr_v1.o apr_tal.o q6core.o dsp_debug.o
+obj-y += audio_acdb.o
+obj-y += rtac.o
diff --git a/sound/soc/qcom/qdsp6/core/apr.c b/sound/soc/qcom/qdsp6/core/apr.c
new file mode 100644
index 0000000000000..ae4801e4f8369
--- /dev/null
+++ b/sound/soc/qcom/qdsp6/core/apr.c
@@ -0,0 +1,649 @@
+/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/platform_device.h>
+#include <linux/sysfs.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/remoteproc.h>
+#include <sound/qdsp6v2/apr.h>
+#include <sound/qdsp6v2/apr_tal.h>
+#include <sound/qdsp6v2/dsp_debug.h>
+
+static struct apr_q6 q6;
+static struct apr_client client[APR_DEST_MAX][APR_CLIENT_MAX];
+
+static wait_queue_head_t dsp_wait;
+static wait_queue_head_t modem_wait;
+/* Subsystem restart: QDSP6 data, functions */
+static struct workqueue_struct *apr_reset_workqueue;
+static void apr_reset_deregister(struct work_struct *work);
/* Deferred-deregistration work item queued by apr_reset(). */
struct apr_reset_work {
	void *handle;		/* apr_svc handle to deregister */
	struct work_struct work;
};
+
/* One row of the name -> (index, service id, client id) lookup tables. */
struct apr_svc_table {
	char name[64];
	int idx;	/* slot within the client's svc[] array */
	int id;		/* APR service id */
	int client_id;	/* APR_CLIENT_AUDIO or APR_CLIENT_VOICE */
};
+
/* Services reachable on the ADSP (QDSP6) destination; read-only lookup
 * data consumed by apr_get_svc(). */
static const struct apr_svc_table svc_tbl_audio[] = {
	{
		.name = "AFE",
		.idx = 0,
		.id = APR_SVC_AFE,
		.client_id = APR_CLIENT_AUDIO,
	},
	{
		.name = "ASM",
		.idx = 1,
		.id = APR_SVC_ASM,
		.client_id = APR_CLIENT_AUDIO,
	},
	{
		.name = "ADM",
		.idx = 2,
		.id = APR_SVC_ADM,
		.client_id = APR_CLIENT_AUDIO,
	},
	{
		.name = "CORE",
		.idx = 3,
		.id = APR_SVC_ADSP_CORE,
		.client_id = APR_CLIENT_AUDIO,
	},
	{
		.name = "TEST",
		.idx = 4,
		.id = APR_SVC_TEST_CLIENT,
		.client_id = APR_CLIENT_AUDIO,
	},
	{
		.name = "MVM",
		.idx = 5,
		.id = APR_SVC_ADSP_MVM,
		.client_id = APR_CLIENT_AUDIO,
	},
	{
		.name = "CVS",
		.idx = 6,
		.id = APR_SVC_ADSP_CVS,
		.client_id = APR_CLIENT_AUDIO,
	},
	{
		.name = "CVP",
		.idx = 7,
		.id = APR_SVC_ADSP_CVP,
		.client_id = APR_CLIENT_AUDIO,
	},
	{
		.name = "USM",
		.idx = 8,
		.id = APR_SVC_USM,
		.client_id = APR_CLIENT_AUDIO,
	},
};
+
+static struct apr_svc_table svc_tbl_voice[] = {
+ {
+ .name = "VSM",
+ .idx = 0,
+ .id = APR_SVC_VSM,
+ .client_id = APR_CLIENT_VOICE,
+ },
+ {
+ .name = "VPM",
+ .idx = 1,
+ .id = APR_SVC_VPM,
+ .client_id = APR_CLIENT_VOICE,
+ },
+ {
+ .name = "MVS",
+ .idx = 2,
+ .id = APR_SVC_MVS,
+ .client_id = APR_CLIENT_VOICE,
+ },
+ {
+ .name = "MVM",
+ .idx = 3,
+ .id = APR_SVC_MVM,
+ .client_id = APR_CLIENT_VOICE,
+ },
+ {
+ .name = "CVS",
+ .idx = 4,
+ .id = APR_SVC_CVS,
+ .client_id = APR_CLIENT_VOICE,
+ },
+ {
+ .name = "CVP",
+ .idx = 5,
+ .id = APR_SVC_CVP,
+ .client_id = APR_CLIENT_VOICE,
+ },
+ {
+ .name = "SRD",
+ .idx = 6,
+ .id = APR_SVC_SRD,
+ .client_id = APR_CLIENT_VOICE,
+ },
+ {
+ .name = "TEST",
+ .idx = 7,
+ .id = APR_SVC_TEST_CLIENT,
+ .client_id = APR_CLIENT_VOICE,
+ },
+};
+
/* Read the current modem subsystem state. */
enum apr_subsys_state apr_get_modem_state(void)
{
	return atomic_read(&q6.modem_state);
}
+
/* Set the modem subsystem state (no range validation, unlike
 * apr_set_q6_state()). */
void apr_set_modem_state(enum apr_subsys_state state)
{
	atomic_set(&q6.modem_state, state);
}
+
/* Atomically swap the modem state from @prev to @new; returns the
 * state observed before the exchange. */
enum apr_subsys_state apr_cmpxchg_modem_state(enum apr_subsys_state prev,
					      enum apr_subsys_state new)
{
	return atomic_cmpxchg(&q6.modem_state, prev, new);
}
+
/* Read the current ADSP (Q6) subsystem state. */
enum apr_subsys_state apr_get_q6_state(void)
{
	return atomic_read(&q6.q6_state);
}
EXPORT_SYMBOL_GPL(apr_get_q6_state);
+
/*
 * Set the ADSP subsystem state.  Values outside the
 * [APR_SUBSYS_DOWN, APR_SUBSYS_LOADED] range are rejected.
 *
 * Returns 0 on success, -EINVAL for an out-of-range state.
 */
int apr_set_q6_state(enum apr_subsys_state state)
{
	pr_debug("%s: setting adsp state %d\n", __func__, state);
	if (state < APR_SUBSYS_DOWN || state > APR_SUBSYS_LOADED)
		return -EINVAL;
	atomic_set(&q6.q6_state, state);
	return 0;
}
EXPORT_SYMBOL_GPL(apr_set_q6_state);
+
/* Atomically swap the ADSP state from @prev to @new; returns the
 * state observed before the exchange. */
enum apr_subsys_state apr_cmpxchg_q6_state(enum apr_subsys_state prev,
					   enum apr_subsys_state new)
{
	return atomic_cmpxchg(&q6.q6_state, prev, new);
}
+
+int apr_wait_for_device_up(int dest_id)
+{
+ int rc = -1;
+ if (dest_id == APR_DEST_MODEM)
+ rc = wait_event_interruptible_timeout(modem_wait,
+ (apr_get_modem_state() == APR_SUBSYS_UP),
+ (1 * HZ));
+ else if (dest_id == APR_DEST_QDSP6)
+ rc = wait_event_interruptible_timeout(dsp_wait,
+ (apr_get_q6_state() == APR_SUBSYS_UP),
+ (1 * HZ));
+ else
+ pr_err("%s: unknown dest_id %d\n", __func__, dest_id);
+ /* returns left time */
+ return rc;
+}
+
+int apr_load_adsp_image(void)
+{
+ int rc = 0;
+ struct device_node *np;
+ phandle phandle;
+ const __be32 *list;
+ int size;
+
+ mutex_lock(&q6.lock);
+ if (apr_get_q6_state() == APR_SUBSYS_UP) {
+ np = of_find_compatible_node(NULL, NULL, "qcom,apr");
+ list = of_get_property(np, "rproc", &size);
+ phandle = be32_to_cpup(list++);
+
+ q6.rproc = rproc_get_by_phandle(phandle);
+
+ if (!q6.rproc) {
+ rc = -ENODEV;
+ pr_err("APR: Unable to load q6 image, error:%d\n", rc);
+ } else {
+ rproc_boot(q6.rproc);
+ apr_set_q6_state(APR_SUBSYS_LOADED);
+ pr_debug("APR: Image is loaded, stated\n");
+ }
+ } else
+ pr_debug("APR: cannot load state %d\n", apr_get_q6_state());
+ mutex_unlock(&q6.lock);
+ return rc;
+}
+
/* Return the bookkeeping slot for (dest_id, client_id).  No bounds
 * checking: callers must pass valid APR_DEST_* / APR_CLIENT_* values. */
struct apr_client *apr_get_client(int dest_id, int client_id)
{
	return &client[dest_id][client_id];
}
+
/*
 * Send a caller-formatted APR packet.  The source/destination domain
 * and service fields of the header are patched in here; the caller
 * supplies everything else (opcode, ports, token, pkt_size).
 *
 * Returns the number of bytes written on success, or a negative
 * errno: -EINVAL for bad arguments / unopened channel, -ENETRESET
 * when the target subsystem is down or the service awaits reset.
 * NOTE(review): w_len is uint16_t, so a negative apr_tal_write()
 * error would be truncated before being returned - confirm callers
 * only compare against pkt_size.
 */
int apr_send_pkt(void *handle, uint32_t *buf)
{
	struct apr_svc *svc = handle;
	struct apr_client *clnt;
	struct apr_hdr *hdr;
	uint16_t dest_id;
	uint16_t client_id;
	uint16_t w_len;
	unsigned long flags;

	if (!handle || !buf) {
		pr_err("APR: Wrong parameters\n");
		return -EINVAL;
	}
	if (svc->need_reset) {
		pr_err("apr: send_pkt service need reset\n");
		return -ENETRESET;
	}

	/* The DSP must be fully LOADED; the modem merely not DOWN. */
	if ((svc->dest_id == APR_DEST_QDSP6) &&
	    (apr_get_q6_state() != APR_SUBSYS_LOADED)) {
		pr_err("%s: Still dsp is not Up\n", __func__);
		return -ENETRESET;
	} else if ((svc->dest_id == APR_DEST_MODEM) &&
		   (apr_get_modem_state() == APR_SUBSYS_DOWN)) {
		pr_err("apr: Still Modem is not Up\n");
		return -ENETRESET;
	}

	/* w_lock serialises the header patch-up and the channel write
	 * for this service. */
	spin_lock_irqsave(&svc->w_lock, flags);
	dest_id = svc->dest_id;
	client_id = svc->client_id;
	clnt = &client[dest_id][client_id];

	if (!client[dest_id][client_id].handle) {
		pr_err("APR: Still service is not yet opened\n");
		spin_unlock_irqrestore(&svc->w_lock, flags);
		return -EINVAL;
	}
	hdr = (struct apr_hdr *)buf;

	hdr->src_domain = APR_DOMAIN_APPS;
	hdr->src_svc = svc->id;
	if (dest_id == APR_DEST_MODEM)
		hdr->dest_domain = APR_DOMAIN_MODEM;
	else if (dest_id == APR_DEST_QDSP6)
		hdr->dest_domain = APR_DOMAIN_ADSP;

	hdr->dest_svc = svc->id;

	w_len = apr_tal_write(clnt->handle, buf, hdr->pkt_size);
	if (w_len != hdr->pkt_size)
		pr_err("Unable to write APR pkt successfully: %d\n", w_len);
	spin_unlock_irqrestore(&svc->w_lock, flags);

	return w_len;
}
+
+void apr_cb_func(void *buf, int len, void *priv)
+{
+ struct apr_client_data data;
+ struct apr_client *apr_client;
+ struct apr_svc *c_svc;
+ struct apr_hdr *hdr;
+ uint16_t hdr_size;
+ uint16_t msg_type;
+ uint16_t ver;
+ uint16_t src;
+ uint16_t svc;
+ uint16_t clnt;
+ int i;
+ int temp_port = 0;
+ uint32_t *ptr;
+
+ pr_debug("APR2: len = %d\n", len);
+ ptr = buf;
+ pr_debug("\n*****************\n");
+ for (i = 0; i < len/4; i++)
+ pr_debug("%x ", ptr[i]);
+ pr_debug("\n");
+ pr_debug("\n*****************\n");
+
+ if (!buf || len <= APR_HDR_SIZE) {
+ pr_err("APR: Improper apr pkt received:%p %d\n", buf, len);
+ return;
+ }
+ hdr = buf;
+
+ ver = hdr->hdr_field;
+ ver = (ver & 0x000F);
+ if (ver > APR_PKT_VER + 1) {
+ pr_err("APR: Wrong version: %d\n", ver);
+ return;
+ }
+
+ hdr_size = hdr->hdr_field;
+ hdr_size = ((hdr_size & 0x00F0) >> 0x4) * 4;
+ if (hdr_size < APR_HDR_SIZE) {
+ pr_err("APR: Wrong hdr size:%d\n", hdr_size);
+ return;
+ }
+
+ if (hdr->pkt_size < APR_HDR_SIZE) {
+ pr_err("APR: Wrong paket size\n");
+ return;
+ }
+ msg_type = hdr->hdr_field;
+ msg_type = (msg_type >> 0x08) & 0x0003;
+ if (msg_type >= APR_MSG_TYPE_MAX && msg_type != APR_BASIC_RSP_RESULT) {
+ pr_err("APR: Wrong message type: %d\n", msg_type);
+ return;
+ }
+
+ if (hdr->src_domain >= APR_DOMAIN_MAX ||
+ hdr->dest_domain >= APR_DOMAIN_MAX ||
+ hdr->src_svc >= APR_SVC_MAX ||
+ hdr->dest_svc >= APR_SVC_MAX) {
+ pr_err("APR: Wrong APR header\n");
+ return;
+ }
+
+ svc = hdr->dest_svc;
+ if (hdr->src_domain == APR_DOMAIN_MODEM) {
+ src = APR_DEST_MODEM;
+ if (svc == APR_SVC_MVS || svc == APR_SVC_MVM ||
+ svc == APR_SVC_CVS || svc == APR_SVC_CVP ||
+ svc == APR_SVC_TEST_CLIENT)
+ clnt = APR_CLIENT_VOICE;
+ else {
+ pr_err("APR: Wrong svc :%d\n", svc);
+ return;
+ }
+ } else if (hdr->src_domain == APR_DOMAIN_ADSP) {
+ src = APR_DEST_QDSP6;
+ if (svc == APR_SVC_AFE || svc == APR_SVC_ASM ||
+ svc == APR_SVC_VSM || svc == APR_SVC_VPM ||
+ svc == APR_SVC_ADM || svc == APR_SVC_ADSP_CORE ||
+ svc == APR_SVC_USM ||
+ svc == APR_SVC_TEST_CLIENT || svc == APR_SVC_ADSP_MVM ||
+ svc == APR_SVC_ADSP_CVS || svc == APR_SVC_ADSP_CVP)
+ clnt = APR_CLIENT_AUDIO;
+ else {
+ pr_err("APR: Wrong svc :%d\n", svc);
+ return;
+ }
+ } else {
+ pr_err("APR: Pkt from wrong source: %d\n", hdr->src_domain);
+ return;
+ }
+
+ pr_debug("src =%d clnt = %d\n", src, clnt);
+ apr_client = &client[src][clnt];
+ for (i = 0; i < APR_SVC_MAX; i++)
+ if (apr_client->svc[i].id == svc) {
+ pr_debug("%d\n", apr_client->svc[i].id);
+ c_svc = &apr_client->svc[i];
+ break;
+ }
+
+ if (i == APR_SVC_MAX) {
+ pr_err("APR: service is not registered\n");
+ return;
+ }
+ pr_debug("svc_idx = %d\n", i);
+ pr_debug("%x %x %x %p %p\n", c_svc->id, c_svc->dest_id,
+ c_svc->client_id, c_svc->fn, c_svc->priv);
+ data.payload_size = hdr->pkt_size - hdr_size;
+ data.opcode = hdr->opcode;
+ data.src = src;
+ data.src_port = hdr->src_port;
+ data.dest_port = hdr->dest_port;
+ data.token = hdr->token;
+ data.msg_type = msg_type;
+ if (data.payload_size > 0)
+ data.payload = (char *)hdr + hdr_size;
+
+ temp_port = ((data.src_port >> 8) * 8) + (data.src_port & 0xFF);
+ pr_debug("port = %d t_port = %d\n", data.src_port, temp_port);
+ if (c_svc->port_cnt && c_svc->port_fn[temp_port])
+ c_svc->port_fn[temp_port](&data, c_svc->port_priv[temp_port]);
+ else if (c_svc->fn)
+ c_svc->fn(&data, c_svc->priv);
+ else
+ pr_err("APR: Rxed a packet for NULL callback\n");
+}
+
+int apr_get_svc(const char *svc_name, int dest_id, int *client_id,
+ int *svc_idx, int *svc_id)
+{
+ int i;
+ int size;
+ struct apr_svc_table *tbl;
+ int ret = 0;
+
+ if (dest_id == APR_DEST_QDSP6) {
+ tbl = (struct apr_svc_table *)&svc_tbl_audio;
+ size = ARRAY_SIZE(svc_tbl_audio);
+ } else {
+ tbl = (struct apr_svc_table *)&svc_tbl_voice;
+ size = ARRAY_SIZE(svc_tbl_voice);
+ }
+
+ for (i = 0; i < size; i++) {
+ if (!strncmp(svc_name, tbl[i].name, strlen(tbl[i].name))) {
+ *client_id = tbl[i].client_id;
+ *svc_idx = tbl[i].idx;
+ *svc_id = tbl[i].id;
+ break;
+ }
+ }
+
+ pr_debug("%s: svc_name = %s c_id = %d dest_id = %d\n",
+ __func__, svc_name, *client_id, dest_id);
+ if (i == size) {
+ pr_err("%s: APR: Wrong svc name %s\n", __func__, svc_name);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
/* Workqueue body for apr_reset(): deregister the captured handle on
 * process context and free the work item. */
static void apr_reset_deregister(struct work_struct *work)
{
	struct apr_svc *handle = NULL;
	struct apr_reset_work *apr_reset =
		container_of(work, struct apr_reset_work, work);

	handle = apr_reset->handle;
	pr_debug("%s:handle[%p]\n", __func__, handle);
	apr_deregister(handle);
	kfree(apr_reset);
}
+
/*
 * Drop one reference on a service handle obtained from apr_register().
 * Port references are released before whole-service references; when
 * the last one goes the slot is scrubbed for reuse, and once the
 * owning client has no services left its transport channel is closed.
 *
 * Returns 0 on success, -EINVAL for a NULL handle.
 */
int apr_deregister(void *handle)
{
	struct apr_svc *svc = handle;
	struct apr_client *clnt;
	uint16_t dest_id;
	uint16_t client_id;

	if (!handle)
		return -EINVAL;

	mutex_lock(&svc->m_lock);
	dest_id = svc->dest_id;
	client_id = svc->client_id;
	clnt = &client[dest_id][client_id];

	if (svc->port_cnt > 0 || svc->svc_cnt > 0) {
		/* Prefer dropping a port ref over a service ref. */
		if (svc->port_cnt)
			svc->port_cnt--;
		else if (svc->svc_cnt)
			svc->svc_cnt--;
		if (!svc->port_cnt && !svc->svc_cnt) {
			client[dest_id][client_id].svc_cnt--;
			svc->need_reset = 0x0;
		}
	} else if (client[dest_id][client_id].svc_cnt > 0) {
		/* Handle had no refs of its own; drop a client ref. */
		client[dest_id][client_id].svc_cnt--;
		if (!client[dest_id][client_id].svc_cnt) {
			svc->need_reset = 0x0;
			pr_debug("%s: service is reset %p\n", __func__, svc);
		}
	}

	if (!svc->port_cnt && !svc->svc_cnt) {
		/* Last reference gone: scrub the slot. */
		svc->priv = NULL;
		svc->id = 0;
		svc->fn = NULL;
		svc->dest_id = 0;
		svc->client_id = 0;
		svc->need_reset = 0x0;
	}
	if (client[dest_id][client_id].handle &&
	    !client[dest_id][client_id].svc_cnt) {
		apr_tal_close(client[dest_id][client_id].handle);
		client[dest_id][client_id].handle = NULL;
	}
	mutex_unlock(&svc->m_lock);

	return 0;
}
+
+void apr_reset(void *handle)
+{
+ struct apr_reset_work *apr_reset_worker = NULL;
+
+ if (!handle)
+ return;
+ pr_debug("%s: handle[%p]\n", __func__, handle);
+
+ if (apr_reset_workqueue == NULL) {
+ pr_err("%s: apr_reset_workqueue is NULL\n", __func__);
+ return;
+ }
+
+ apr_reset_worker = kzalloc(sizeof(struct apr_reset_work),
+ GFP_ATOMIC);
+
+ if (apr_reset_worker == NULL) {
+ pr_err("%s: mem failure\n", __func__);
+ return;
+ }
+
+ apr_reset_worker->handle = handle;
+ INIT_WORK(&apr_reset_worker->work, apr_reset_deregister);
+ queue_work(apr_reset_workqueue, &apr_reset_worker->work);
+}
+
/* dsp_debug callback: just log the reported DSP state. */
static int adsp_state(int state)
{
	pr_info("dsp state = %d\n", state);
	return 0;
}
+
+/* Dispatch the Reset events to Modem and audio clients */
+void dispatch_event(unsigned long code, unsigned short proc)
+{
+ struct apr_client *apr_client;
+ struct apr_client_data data;
+ struct apr_svc *svc;
+ uint16_t clnt;
+ int i, j;
+
+ data.opcode = RESET_EVENTS;
+ data.reset_event = code;
+ data.reset_proc = proc;
+
+ clnt = APR_CLIENT_AUDIO;
+ apr_client = &client[proc][clnt];
+ for (i = 0; i < APR_SVC_MAX; i++) {
+ mutex_lock(&apr_client->svc[i].m_lock);
+ if (apr_client->svc[i].fn) {
+ apr_client->svc[i].need_reset = 0x1;
+ apr_client->svc[i].fn(&data, apr_client->svc[i].priv);
+ }
+ if (apr_client->svc[i].port_cnt) {
+ svc = &(apr_client->svc[i]);
+ svc->need_reset = 0x1;
+ for (j = 0; j < APR_MAX_PORTS; j++)
+ if (svc->port_fn[j])
+ svc->port_fn[j](&data,
+ svc->port_priv[j]);
+ }
+ mutex_unlock(&apr_client->svc[i].m_lock);
+ }
+
+ clnt = APR_CLIENT_VOICE;
+ apr_client = &client[proc][clnt];
+ for (i = 0; i < APR_SVC_MAX; i++) {
+ mutex_lock(&apr_client->svc[i].m_lock);
+ if (apr_client->svc[i].fn) {
+ apr_client->svc[i].need_reset = 0x1;
+ apr_client->svc[i].fn(&data, apr_client->svc[i].priv);
+ }
+ if (apr_client->svc[i].port_cnt) {
+ svc = &(apr_client->svc[i]);
+ svc->need_reset = 0x1;
+ for (j = 0; j < APR_MAX_PORTS; j++)
+ if (svc->port_fn[j])
+ svc->port_fn[j](&data,
+ svc->port_priv[j]);
+ }
+ mutex_unlock(&apr_client->svc[i].m_lock);
+ }
+}
+
/*
 * Early init: create the per-client and per-service locks, mark both
 * subsystems up (APRv1 policy - see apr_set_subsys_state()), hook the
 * DSP debug callback and create the reset workqueue.
 */
static int __init apr_init(void)
{
	int i, j, k;

	for (i = 0; i < APR_DEST_MAX; i++)
		for (j = 0; j < APR_CLIENT_MAX; j++) {
			mutex_init(&client[i][j].m_lock);
			for (k = 0; k < APR_SVC_MAX; k++) {
				mutex_init(&client[i][j].svc[k].m_lock);
				spin_lock_init(&client[i][j].svc[k].w_lock);
			}
		}
	apr_set_subsys_state();
	mutex_init(&q6.lock);
	dsp_debug_register(adsp_state);
	/* NOTE(review): on allocation failure the earlier registrations
	 * are not unwound - confirm this is acceptable for an initcall. */
	apr_reset_workqueue = create_singlethread_workqueue("apr_driver");
	if (!apr_reset_workqueue)
		return -ENOMEM;
	return 0;
}
device_initcall(apr_init);
+
+static int __init apr_late_init(void)
+{
+ int ret = 0;
+ init_waitqueue_head(&dsp_wait);
+ init_waitqueue_head(&modem_wait);
+ return ret;
+}
+late_initcall(apr_late_init);
diff --git a/sound/soc/qcom/qdsp6/core/apr_tal.c b/sound/soc/qcom/qdsp6/core/apr_tal.c
new file mode 100644
index 0000000000000..aaa646382d8d7
--- /dev/null
+++ b/sound/soc/qcom/qdsp6/core/apr_tal.c
@@ -0,0 +1,180 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <sound/qdsp6v2/apr_tal.h>
+#include <linux/soc/qcom/smd.h>
+#include <linux/io.h>
+
+struct apr_svc_ch_dev apr_svc_ch[APR_DL_MAX][APR_DEST_MAX][APR_CLIENT_MAX];
+
+int apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data, int len)
+{
+ int ret;
+ ret = qcom_smd_send(apr_ch->ch, data, len);
+ if (ret) {
+ pr_err("apr_tal: Error in write %d\n", ret);
+ return ret;;
+ }
+ return len;
+}
+
/*
 * Logically open the APR channel for (client, destination, link).
 * The underlying SMD channel is bound in qcom_smd_q6_probe(); this
 * waits (up to APR_OPEN_TIMEOUT_MS) for that to have happened, then
 * records the receive callback and context.
 *
 * Returns the channel slot, or NULL on invalid parameters, an
 * already-open channel, or a timeout.
 */
struct apr_svc_ch_dev *apr_tal_open(uint32_t svc, uint32_t dest,
				    uint32_t dl, apr_svc_cb_fn func, void *priv)
{
	int rc;

	pr_err("apr_tal:open\n");
	if ((svc >= APR_CLIENT_MAX) || (dest >= APR_DEST_MAX) ||
	    (dl >= APR_DL_MAX)) {
		pr_err("apr_tal: Invalid params\n");
		return NULL;
	}

	if (apr_svc_ch[dl][dest][svc].ch) {
		pr_err("apr_tal: This channel alreday openend\n");
		return NULL;
	}

	if (!apr_svc_ch[dl][dest][svc].dest_state) {
		rc = wait_event_timeout(apr_svc_ch[dl][dest][svc].dest,
					apr_svc_ch[dl][dest][svc].dest_state,
					msecs_to_jiffies(APR_OPEN_TIMEOUT_MS));
		if (rc == 0) {
			pr_err("apr_tal:open timeout\n");
			return NULL;
		}
		pr_info("apr_tal:Wakeup done\n");
		apr_svc_ch[dl][dest][svc].dest_state = 0;
	}

/* Dead code kept from the raw-SMD implementation; the channel-state
 * wait is handled by the probe/wait_event_timeout path above. */
#if 0
	rc = wait_event_timeout(apr_svc_ch[dl][dest][svc].wait,
		(apr_svc_ch[dl][dest][svc].ch->state == SMD_CHANNEL_OPENED), 5 * HZ);
	if (rc == 0) {
		pr_err("apr_tal:TIMEOUT for OPEN event\n");
		apr_tal_close(&apr_svc_ch[dl][dest][svc]);
		return NULL;
	}
#endif
	/* NOTE(review): dest_state was just cleared above after a
	 * successful wait, so this branch re-sets it and sleeps 200ms
	 * for the remote service to settle - confirm the double toggle
	 * is intentional. */
	if (!apr_svc_ch[dl][dest][svc].dest_state) {
		apr_svc_ch[dl][dest][svc].dest_state = 1;
		pr_info("apr_tal:Waiting for apr svc init\n");
		msleep(200);
		pr_info("apr_tal:apr svc init done\n");
	}
	apr_svc_ch[dl][dest][svc].func = func;
	apr_svc_ch[dl][dest][svc].priv = priv;


	return &apr_svc_ch[dl][dest][svc];
}
+
+int apr_tal_close(struct apr_svc_ch_dev *apr_ch)
+{
+ if (!apr_ch->ch)
+ return -EINVAL;
+
+ apr_ch->ch = NULL;
+ apr_ch->func = NULL;
+ apr_ch->priv = NULL;
+ return 0;
+}
+
+
/*
 * SMD receive callback: copy the inbound payload into the channel's
 * staging buffer and hand it to the APR callback registered via
 * apr_tal_open().
 */
static int qcom_smd_q6_callback(struct qcom_smd_channel *channel,
				const void *data,
				size_t count)
{
	struct apr_svc_ch_dev *apr_ch = qcom_smd_get_drvdata(channel);

	/* NOTE(review): count is not bounded against the size of
	 * apr_ch->data - confirm the transport caps packet length. */
	memcpy_fromio(apr_ch->data, data, count);

	if (apr_ch->func)
		apr_ch->func(apr_ch->data, count, apr_ch->priv);

	return 0;
}
+
+static int qcom_smd_q6_probe(struct qcom_smd_device *sdev)
+{
+ int dest = APR_DEST_QDSP6;
+ int clnt = APR_CLIENT_AUDIO;
+
+ apr_svc_ch[APR_DL_SMD][APR_DEST_QDSP6][APR_CLIENT_AUDIO].ch = sdev->channel;
+
+ pr_info("apr_tal:Q6 Is Up\n");
+ apr_svc_ch[APR_DL_SMD][dest][clnt].dest_state = 1;
+ wake_up(&apr_svc_ch[APR_DL_SMD][dest][clnt].dest);
+
+ dev_set_drvdata(&sdev->dev, &apr_svc_ch[APR_DL_SMD][APR_DEST_QDSP6][APR_CLIENT_AUDIO]);
+ qcom_smd_set_drvdata(&sdev->channel, &apr_svc_ch[APR_DL_SMD][APR_DEST_QDSP6][APR_CLIENT_AUDIO]);
+
+ return 0;
+}
+
/* Intentionally empty.  NOTE(review): the apr_svc_ch[...].ch pointer
 * recorded in probe is left dangling after channel removal - confirm
 * nothing sends on it post-remove. */
static void qcom_smd_q6_remove(struct qcom_smd_device *sdev)
{
}
+
+
/* DT match table: binds to the "qcom,apr" transport node. */
static const struct of_device_id qcom_smd_q6_of_match[] = {
	{ .compatible = "qcom,apr" },
	{}
};
+
/* SMD driver binding for the Q6 audio APR channel. */
static struct qcom_smd_driver qcom_smd_q6_driver = {
	.probe = qcom_smd_q6_probe,
	.remove = qcom_smd_q6_remove,
	.callback = qcom_smd_q6_callback,
	.driver = {
		.name = "qcom_smd_q6",
		.owner = THIS_MODULE,
		.of_match_table = qcom_smd_q6_of_match,
	},
};
+
/* Module unload: unregister the SMD driver. */
static void __exit qcom_smd_q6_exit(void)
{
	qcom_smd_driver_unregister(&qcom_smd_q6_driver);
}
module_exit(qcom_smd_q6_exit);
+
+static int __init apr_tal_init(void)
+{
+
+ int i, j, k;
+
+ for (i = 0; i < APR_DL_MAX; i++)
+ for (j = 0; j < APR_DEST_MAX; j++)
+ for (k = 0; k < APR_CLIENT_MAX; k++) {
+ init_waitqueue_head(&apr_svc_ch[i][j][k].wait);
+ init_waitqueue_head(&apr_svc_ch[i][j][k].dest);
+ }
+ qcom_smd_driver_register(&qcom_smd_q6_driver);
+ return 0;
+}
+device_initcall(apr_tal_init);
+
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org");
+MODULE_DESCRIPTION("Qualcomm SMD backed apr driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/qcom/qdsp6/core/apr_v1.c b/sound/soc/qcom/qdsp6/core/apr_v1.c
new file mode 100644
index 0000000000000..1547c0c9a5e97
--- /dev/null
+++ b/sound/soc/qcom/qdsp6/core/apr_v1.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/err.h>
+#include <sound/qdsp6v2/apr.h>
+#include <sound/qdsp6v2/apr_tal.h>
+
+struct apr_svc *apr_register(char *dest, char *svc_name, apr_fn svc_fn,
+ uint32_t src_port, void *priv)
+{
+ struct apr_client *client;
+ int client_id = 0;
+ int svc_idx = 0;
+ int svc_id = 0;
+ int dest_id = 0;
+ int temp_port = 0;
+ struct apr_svc *svc = NULL;
+ int rc = 0;
+
+ if (!dest || !svc_name || !svc_fn)
+ return NULL;
+
+ if (!strncmp(dest, "ADSP", 4))
+ dest_id = APR_DEST_QDSP6;
+ else if (!strncmp(dest, "MODEM", 5)) {
+ dest_id = APR_DEST_MODEM;
+ } else {
+ pr_err("APR: wrong destination\n");
+ goto done;
+ }
+
+ if (dest_id == APR_DEST_QDSP6 &&
+ apr_get_q6_state() == APR_SUBSYS_DOWN) {
+ pr_info("%s: Wait for Lpass to bootup\n", __func__);
+ rc = apr_wait_for_device_up(dest_id);
+ if (rc == 0) {
+ pr_err("%s: DSP is not Up\n", __func__);
+ return NULL;
+ }
+ pr_info("%s: Lpass Up\n", __func__);
+ } else if (dest_id == APR_DEST_MODEM &&
+ (apr_get_modem_state() == APR_SUBSYS_DOWN)) {
+ pr_info("%s: Wait for modem to bootup\n", __func__);
+ rc = apr_wait_for_device_up(dest_id);
+ if (rc == 0) {
+ pr_err("%s: Modem is not Up\n", __func__);
+ return NULL;
+ }
+ pr_info("%s: modem Up\n", __func__);
+ }
+
+ if (apr_get_svc(svc_name, dest_id, &client_id, &svc_idx, &svc_id)) {
+ pr_err("%s: apr_get_svc failed\n", __func__);
+ goto done;
+ }
+
+ /* APRv1 loads ADSP image automatically */
+ apr_load_adsp_image();
+
+ client = apr_get_client(dest_id, client_id);
+ mutex_lock(&client->m_lock);
+ if (!client->handle) {
+ client->handle = apr_tal_open(client_id, dest_id, APR_DL_SMD,
+ apr_cb_func, NULL);
+ if (!client->handle) {
+ svc = NULL;
+ pr_err("APR: Unable to open handle\n");
+ mutex_unlock(&client->m_lock);
+ goto done;
+ }
+ }
+ mutex_unlock(&client->m_lock);
+ svc = &client->svc[svc_idx];
+ mutex_lock(&svc->m_lock);
+ client->id = client_id;
+ if (svc->need_reset) {
+ mutex_unlock(&svc->m_lock);
+ pr_err("APR: Service needs reset\n");
+ goto done;
+ }
+ svc->priv = priv;
+ svc->id = svc_id;
+ svc->dest_id = dest_id;
+ svc->client_id = client_id;
+ if (src_port != 0xFFFFFFFF) {
+ temp_port = ((src_port >> 8) * 8) + (src_port & 0xFF);
+ pr_debug("port = %d t_port = %d\n", src_port, temp_port);
+ if (temp_port >= APR_MAX_PORTS || temp_port < 0) {
+ pr_err("APR: temp_port out of bounds\n");
+ mutex_unlock(&svc->m_lock);
+ return NULL;
+ }
+ if (!svc->port_cnt && !svc->svc_cnt)
+ client->svc_cnt++;
+ svc->port_cnt++;
+ svc->port_fn[temp_port] = svc_fn;
+ svc->port_priv[temp_port] = priv;
+ } else {
+ if (!svc->fn) {
+ if (!svc->port_cnt && !svc->svc_cnt)
+ client->svc_cnt++;
+ svc->fn = svc_fn;
+ if (svc->port_cnt)
+ svc->svc_cnt++;
+ }
+ }
+
+ mutex_unlock(&svc->m_lock);
+done:
+ return svc;
+}
+
/* APRv1 policy: both subsystems are assumed up at init time. */
void apr_set_subsys_state(void)
{
	apr_set_q6_state(APR_SUBSYS_UP);
	apr_set_modem_state(APR_SUBSYS_UP);
}
diff --git a/sound/soc/qcom/qdsp6/core/audio_acdb.c b/sound/soc/qcom/qdsp6/core/audio_acdb.c
new file mode 100644
index 0000000000000..88ca64977a1ab
--- /dev/null
+++ b/sound/soc/qcom/qdsp6/core/audio_acdb.c
@@ -0,0 +1,865 @@
+/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/mm.h>
+#include <sound/qdsp6v2/audio_acdb.h>
+
+
+#define MAX_NETWORKS 15
+
/* Sidetone calibration held as atomics so readers need no lock. */
struct sidetone_atomic_cal {
	atomic_t enable;
	atomic_t gain;
};
+
+
/*
 * Global ACDB calibration cache.  Individual fields are atomics so the
 * get_*/store_* accessors can run lock-free; acdb_mutex guards
 * multi-field operations.
 */
struct acdb_data {
	struct mutex acdb_mutex;

	/* ANC Cal */
	struct acdb_atomic_cal_block anc_cal;

	/* AudProc Cal */
	atomic_t asm_topology;
	atomic_t adm_topology[MAX_AUDPROC_TYPES];
	struct acdb_atomic_cal_block audproc_cal[MAX_AUDPROC_TYPES];
	struct acdb_atomic_cal_block audstrm_cal[MAX_AUDPROC_TYPES];
	struct acdb_atomic_cal_block audvol_cal[MAX_AUDPROC_TYPES];

	/* VocProc Cal */
	atomic_t voice_rx_topology;
	atomic_t voice_tx_topology;
	struct acdb_atomic_cal_block vocproc_cal[MAX_NETWORKS];
	struct acdb_atomic_cal_block vocstrm_cal[MAX_NETWORKS];
	struct acdb_atomic_cal_block vocvol_cal[MAX_NETWORKS];
	/* size of cal block tables above*/
	atomic_t vocproc_cal_size;
	atomic_t vocstrm_cal_size;
	atomic_t vocvol_cal_size;
	/* Total size of cal data for all networks */
	atomic_t vocproc_total_cal_size;
	atomic_t vocstrm_total_cal_size;
	atomic_t vocvol_total_cal_size;

	/* AFE cal */
	struct acdb_atomic_cal_block afe_cal[MAX_AUDPROC_TYPES];

	/* Sidetone Cal */
	struct sidetone_atomic_cal sidetone_cal;


	/* Allocation information (ION-based allocation currently
	 * disabled) */
//	struct ion_client *ion_client;
//	struct ion_handle *ion_handle;
	atomic_t map_handle;
	atomic64_t paddr;	/* physical base of the mapped cal region */
	atomic64_t kvaddr;	/* kernel virtual base of the same region */
	atomic64_t mem_len;	/* length of the mapped region */
};
+
+static struct acdb_data acdb_data;
+static atomic_t usage_count;
+
/* Read the cached voice RX topology id. */
uint32_t get_voice_rx_topology(void)
{
	return atomic_read(&acdb_data.voice_rx_topology);
}
+
/* Cache the voice RX topology id. */
void store_voice_rx_topology(uint32_t topology)
{
	atomic_set(&acdb_data.voice_rx_topology, topology);
}
+
/* Read the cached voice TX topology id. */
uint32_t get_voice_tx_topology(void)
{
	return atomic_read(&acdb_data.voice_tx_topology);
}
+
/* Cache the voice TX topology id. */
void store_voice_tx_topology(uint32_t topology)
{
	atomic_set(&acdb_data.voice_tx_topology, topology);
}
+
/* Read the cached ADM RX-path topology id. */
uint32_t get_adm_rx_topology(void)
{
	return atomic_read(&acdb_data.adm_topology[RX_CAL]);
}
+
/* Cache the ADM RX-path topology id. */
void store_adm_rx_topology(uint32_t topology)
{
	atomic_set(&acdb_data.adm_topology[RX_CAL], topology);
}
+
/* Read the cached ADM TX-path topology id. */
uint32_t get_adm_tx_topology(void)
{
	return atomic_read(&acdb_data.adm_topology[TX_CAL]);
}
+
/* Cache the ADM TX-path topology id. */
void store_adm_tx_topology(uint32_t topology)
{
	atomic_set(&acdb_data.adm_topology[TX_CAL], topology);
}
+
/* Read the cached ASM topology id. */
uint32_t get_asm_topology(void)
{
	return atomic_read(&acdb_data.asm_topology);
}
+
/* Cache the ASM topology id. */
void store_asm_topology(uint32_t topology)
{
	atomic_set(&acdb_data.asm_topology, topology);
}
+
/*
 * Report the combined voice calibration region: base of the first
 * vocproc block, size = vocproc + vocstrm + vocvol totals.
 * NOTE(review): assumes the three regions are laid out contiguously
 * after vocproc_cal[0] - confirm against the userspace uploader.
 */
void get_all_voice_cal(struct acdb_cal_block *cal_block)
{
	cal_block->cal_kvaddr =
		atomic_read(&acdb_data.vocproc_cal[0].cal_kvaddr);
	cal_block->cal_paddr =
		atomic_read(&acdb_data.vocproc_cal[0].cal_paddr);
	cal_block->cal_size =
		atomic_read(&acdb_data.vocproc_total_cal_size) +
		atomic_read(&acdb_data.vocstrm_total_cal_size) +
		atomic_read(&acdb_data.vocvol_total_cal_size);
}
+
/* Combined CVP calibration: vocproc base, vocproc + vocvol totals
 * (vocstrm is deliberately excluded, unlike get_all_voice_cal). */
void get_all_cvp_cal(struct acdb_cal_block *cal_block)
{
	cal_block->cal_kvaddr =
		atomic_read(&acdb_data.vocproc_cal[0].cal_kvaddr);
	cal_block->cal_paddr =
		atomic_read(&acdb_data.vocproc_cal[0].cal_paddr);
	cal_block->cal_size =
		atomic_read(&acdb_data.vocproc_total_cal_size) +
		atomic_read(&acdb_data.vocvol_total_cal_size);
}
+
/* Vocproc calibration region (all networks). */
void get_all_vocproc_cal(struct acdb_cal_block *cal_block)
{
	cal_block->cal_kvaddr =
		atomic_read(&acdb_data.vocproc_cal[0].cal_kvaddr);
	cal_block->cal_paddr =
		atomic_read(&acdb_data.vocproc_cal[0].cal_paddr);
	cal_block->cal_size =
		atomic_read(&acdb_data.vocproc_total_cal_size);
}
+
/* Vocstrm calibration region (all networks). */
void get_all_vocstrm_cal(struct acdb_cal_block *cal_block)
{
	cal_block->cal_kvaddr =
		atomic_read(&acdb_data.vocstrm_cal[0].cal_kvaddr);
	cal_block->cal_paddr =
		atomic_read(&acdb_data.vocstrm_cal[0].cal_paddr);
	cal_block->cal_size =
		atomic_read(&acdb_data.vocstrm_total_cal_size);
}
+
/* Vocvol calibration region (all networks). */
void get_all_vocvol_cal(struct acdb_cal_block *cal_block)
{
	cal_block->cal_kvaddr =
		atomic_read(&acdb_data.vocvol_cal[0].cal_kvaddr);
	cal_block->cal_paddr =
		atomic_read(&acdb_data.vocvol_cal[0].cal_paddr);
	cal_block->cal_size =
		atomic_read(&acdb_data.vocvol_total_cal_size);
}
+
/* Copy the cached ANC calibration block into @cal_block (no-op with
 * an error log if @cal_block is NULL). */
void get_anc_cal(struct acdb_cal_block *cal_block)
{
	pr_debug("%s\n", __func__);

	if (cal_block == NULL) {
		pr_err("ACDB=> NULL pointer sent to %s\n", __func__);
		goto done;
	}

	cal_block->cal_kvaddr =
		atomic_read(&acdb_data.anc_cal.cal_kvaddr);
	cal_block->cal_paddr =
		atomic_read(&acdb_data.anc_cal.cal_paddr);
	cal_block->cal_size =
		atomic_read(&acdb_data.anc_cal.cal_size);
done:
	return;
}
+
/*
 * Cache the ANC calibration block: translate its offset into kernel
 * virtual and physical addresses against the mapped region.
 * NOTE(review): cal_block is dereferenced without a NULL check
 * (get_anc_cal does check) and only the offset's upper bound is
 * validated, not offset + size - confirm callers guarantee both.
 */
void store_anc_cal(struct cal_block *cal_block)
{
	pr_debug("%s,\n", __func__);

	if (cal_block->cal_offset > atomic64_read(&acdb_data.mem_len)) {
		pr_err("%s: offset %d is > mem_len %ld\n",
			__func__, cal_block->cal_offset,
			(long)atomic64_read(&acdb_data.mem_len));
		goto done;
	}

	atomic_set(&acdb_data.anc_cal.cal_kvaddr,
		cal_block->cal_offset + atomic64_read(&acdb_data.kvaddr));
	atomic_set(&acdb_data.anc_cal.cal_paddr,
		cal_block->cal_offset + atomic64_read(&acdb_data.paddr));
	atomic_set(&acdb_data.anc_cal.cal_size,
		cal_block->cal_size);
done:
	return;
}
+
/* Cache the AFE calibration block for @path (bounds-checked at both
 * ends), translating its offset against the mapped region. */
void store_afe_cal(int32_t path, struct cal_block *cal_block)
{
	pr_debug("%s, path = %d\n", __func__, path);

	if (cal_block->cal_offset > atomic64_read(&acdb_data.mem_len)) {
		pr_err("%s: offset %d is > mem_len %ld\n",
			__func__, cal_block->cal_offset,
			(long)atomic64_read(&acdb_data.mem_len));
		goto done;
	}
	if ((path >= MAX_AUDPROC_TYPES) || (path < 0)) {
		pr_err("ACDB=> Bad path sent to %s, path: %d\n",
			__func__, path);
		goto done;
	}

	atomic_set(&acdb_data.afe_cal[path].cal_kvaddr,
		cal_block->cal_offset + atomic64_read(&acdb_data.kvaddr));
	atomic_set(&acdb_data.afe_cal[path].cal_paddr,
		cal_block->cal_offset + atomic64_read(&acdb_data.paddr));
	atomic_set(&acdb_data.afe_cal[path].cal_size,
		cal_block->cal_size);
done:
	return;
}
+
/* Copy the cached AFE calibration block for @path into @cal_block
 * (NULL pointer and path range both checked). */
void get_afe_cal(int32_t path, struct acdb_cal_block *cal_block)
{
	pr_debug("%s, path = %d\n", __func__, path);

	if (cal_block == NULL) {
		pr_err("ACDB=> NULL pointer sent to %s\n", __func__);
		goto done;
	}
	if ((path >= MAX_AUDPROC_TYPES) || (path < 0)) {
		pr_err("ACDB=> Bad path sent to %s, path: %d\n",
			__func__, path);
		goto done;
	}

	cal_block->cal_kvaddr =
		atomic_read(&acdb_data.afe_cal[path].cal_kvaddr);
	cal_block->cal_paddr =
		atomic_read(&acdb_data.afe_cal[path].cal_paddr);
	cal_block->cal_size =
		atomic_read(&acdb_data.afe_cal[path].cal_size);
done:
	return;
}
+
+void store_audproc_cal(int32_t path, struct cal_block *cal_block)
+{
+ pr_debug("%s, path = %d\n", __func__, path);
+
+ if (cal_block->cal_offset > atomic64_read(&acdb_data.mem_len)) {
+ pr_err("%s: offset %d is > mem_len %ld\n",
+ __func__, cal_block->cal_offset,
+ (long)atomic64_read(&acdb_data.mem_len));
+ goto done;
+ }
+ if (path >= MAX_AUDPROC_TYPES) {
+ pr_err("ACDB=> Bad path sent to %s, path: %d\n",
+ __func__, path);
+ goto done;
+ }
+
+ atomic_set(&acdb_data.audproc_cal[path].cal_kvaddr,
+ cal_block->cal_offset + atomic64_read(&acdb_data.kvaddr));
+ atomic_set(&acdb_data.audproc_cal[path].cal_paddr,
+ cal_block->cal_offset + atomic64_read(&acdb_data.paddr));
+ atomic_set(&acdb_data.audproc_cal[path].cal_size,
+ cal_block->cal_size);
+done:
+ return;
+}
+
+void get_audproc_cal(int32_t path, struct acdb_cal_block *cal_block)
+{
+ pr_debug("%s, path = %d\n", __func__, path);
+
+ if (cal_block == NULL) {
+ pr_err("ACDB=> NULL pointer sent to %s\n", __func__);
+ goto done;
+ }
+ if (path >= MAX_AUDPROC_TYPES) {
+ pr_err("ACDB=> Bad path sent to %s, path: %d\n",
+ __func__, path);
+ goto done;
+ }
+
+ cal_block->cal_kvaddr =
+ atomic_read(&acdb_data.audproc_cal[path].cal_kvaddr);
+ cal_block->cal_paddr =
+ atomic_read(&acdb_data.audproc_cal[path].cal_paddr);
+ cal_block->cal_size =
+ atomic_read(&acdb_data.audproc_cal[path].cal_size);
+done:
+ return;
+}
+
+void store_audstrm_cal(int32_t path, struct cal_block *cal_block)
+{
+ pr_debug("%s, path = %d\n", __func__, path);
+
+ if (cal_block->cal_offset > atomic64_read(&acdb_data.mem_len)) {
+ pr_err("%s: offset %d is > mem_len %ld\n",
+ __func__, cal_block->cal_offset,
+ (long)atomic64_read(&acdb_data.mem_len));
+ goto done;
+ }
+ if (path >= MAX_AUDPROC_TYPES) {
+ pr_err("ACDB=> Bad path sent to %s, path: %d\n",
+ __func__, path);
+ goto done;
+ }
+
+ atomic_set(&acdb_data.audstrm_cal[path].cal_kvaddr,
+ cal_block->cal_offset + atomic64_read(&acdb_data.kvaddr));
+ atomic_set(&acdb_data.audstrm_cal[path].cal_paddr,
+ cal_block->cal_offset + atomic64_read(&acdb_data.paddr));
+ atomic_set(&acdb_data.audstrm_cal[path].cal_size,
+ cal_block->cal_size);
+done:
+ return;
+}
+
+void get_audstrm_cal(int32_t path, struct acdb_cal_block *cal_block)
+{
+ pr_debug("%s, path = %d\n", __func__, path);
+
+ if (cal_block == NULL) {
+ pr_err("ACDB=> NULL pointer sent to %s\n", __func__);
+ goto done;
+ }
+ if (path >= MAX_AUDPROC_TYPES) {
+ pr_err("ACDB=> Bad path sent to %s, path: %d\n",
+ __func__, path);
+ goto done;
+ }
+
+ cal_block->cal_kvaddr =
+ atomic_read(&acdb_data.audstrm_cal[path].cal_kvaddr);
+ cal_block->cal_paddr =
+ atomic_read(&acdb_data.audstrm_cal[path].cal_paddr);
+ cal_block->cal_size =
+ atomic_read(&acdb_data.audstrm_cal[path].cal_size);
+done:
+ return;
+}
+
+void store_audvol_cal(int32_t path, struct cal_block *cal_block)
+{
+ pr_debug("%s, path = %d\n", __func__, path);
+
+ if (cal_block->cal_offset > atomic64_read(&acdb_data.mem_len)) {
+ pr_err("%s: offset %d is > mem_len %ld\n",
+ __func__, cal_block->cal_offset,
+ (long)atomic64_read(&acdb_data.mem_len));
+ goto done;
+ }
+ if (path >= MAX_AUDPROC_TYPES) {
+ pr_err("ACDB=> Bad path sent to %s, path: %d\n",
+ __func__, path);
+ goto done;
+ }
+
+ atomic_set(&acdb_data.audvol_cal[path].cal_kvaddr,
+ cal_block->cal_offset + atomic64_read(&acdb_data.kvaddr));
+ atomic_set(&acdb_data.audvol_cal[path].cal_paddr,
+ cal_block->cal_offset + atomic64_read(&acdb_data.paddr));
+ atomic_set(&acdb_data.audvol_cal[path].cal_size,
+ cal_block->cal_size);
+done:
+ return;
+}
+
+void get_audvol_cal(int32_t path, struct acdb_cal_block *cal_block)
+{
+ pr_debug("%s, path = %d\n", __func__, path);
+
+ if (cal_block == NULL) {
+ pr_err("ACDB=> NULL pointer sent to %s\n", __func__);
+ goto done;
+ }
+ if (path >= MAX_AUDPROC_TYPES || path < 0) {
+ pr_err("ACDB=> Bad path sent to %s, path: %d\n",
+ __func__, path);
+ goto done;
+ }
+
+ cal_block->cal_kvaddr =
+ atomic_read(&acdb_data.audvol_cal[path].cal_kvaddr);
+ cal_block->cal_paddr =
+ atomic_read(&acdb_data.audvol_cal[path].cal_paddr);
+ cal_block->cal_size =
+ atomic_read(&acdb_data.audvol_cal[path].cal_size);
+done:
+ return;
+}
+
+
+/*
+ * Per-network voice calibration tables.  store_voc*_cal() receive an
+ * array of up to MAX_NETWORKS cal_block descriptors; each valid entry
+ * is translated to kernel/physical addresses and its size accumulated
+ * into the corresponding *_total_cal_size counter.
+ *
+ * NOTE(review): 'len' is only checked against MAX_NETWORKS, not
+ * against < 0; a negative len would skip the loop yet still be stored
+ * as the cal table size.  Callers (acdb_ioctl) currently derive len
+ * from a positive size, but a defensive check would be safer.
+ */
+void store_vocproc_cal(int32_t len, struct cal_block *cal_blocks)
+{
+ int i;
+ pr_debug("%s\n", __func__);
+
+ if (len > MAX_NETWORKS) {
+ pr_err("%s: Calibration sent for %d networks, only %d are "
+ "supported!\n", __func__, len, MAX_NETWORKS);
+ goto done;
+ }
+
+ atomic_set(&acdb_data.vocproc_total_cal_size, 0);
+ for (i = 0; i < len; i++) {
+ /* Out-of-range entries are zero-sized, not dropped. */
+ if (cal_blocks[i].cal_offset >
+ atomic64_read(&acdb_data.mem_len)) {
+ pr_err("%s: offset %d is > mem_len %ld\n",
+ __func__, cal_blocks[i].cal_offset,
+ (long)atomic64_read(&acdb_data.mem_len));
+ atomic_set(&acdb_data.vocproc_cal[i].cal_size, 0);
+ } else {
+ atomic_add(cal_blocks[i].cal_size,
+ &acdb_data.vocproc_total_cal_size);
+ atomic_set(&acdb_data.vocproc_cal[i].cal_size,
+ cal_blocks[i].cal_size);
+ atomic_set(&acdb_data.vocproc_cal[i].cal_paddr,
+ cal_blocks[i].cal_offset +
+ atomic64_read(&acdb_data.paddr));
+ atomic_set(&acdb_data.vocproc_cal[i].cal_kvaddr,
+ cal_blocks[i].cal_offset +
+ atomic64_read(&acdb_data.kvaddr));
+ }
+ }
+ atomic_set(&acdb_data.vocproc_cal_size, len);
+done:
+ return;
+}
+
+/* Hand back a pointer to the cached vocproc table (no copy). */
+void get_vocproc_cal(struct acdb_cal_data *cal_data)
+{
+ pr_debug("%s\n", __func__);
+
+ if (cal_data == NULL) {
+ pr_err("ACDB=> NULL pointer sent to %s\n", __func__);
+ goto done;
+ }
+
+ cal_data->num_cal_blocks = atomic_read(&acdb_data.vocproc_cal_size);
+ cal_data->cal_blocks = &acdb_data.vocproc_cal[0];
+done:
+ return;
+}
+
+/* Same as store_vocproc_cal() but for the voice-stream table. */
+void store_vocstrm_cal(int32_t len, struct cal_block *cal_blocks)
+{
+ int i;
+ pr_debug("%s\n", __func__);
+
+ if (len > MAX_NETWORKS) {
+ pr_err("%s: Calibration sent for %d networks, only %d are "
+ "supported!\n", __func__, len, MAX_NETWORKS);
+ goto done;
+ }
+
+ atomic_set(&acdb_data.vocstrm_total_cal_size, 0);
+ for (i = 0; i < len; i++) {
+ if (cal_blocks[i].cal_offset >
+ atomic64_read(&acdb_data.mem_len)) {
+ pr_err("%s: offset %d is > mem_len %ld\n",
+ __func__, cal_blocks[i].cal_offset,
+ (long)atomic64_read(&acdb_data.mem_len));
+ atomic_set(&acdb_data.vocstrm_cal[i].cal_size, 0);
+ } else {
+ atomic_add(cal_blocks[i].cal_size,
+ &acdb_data.vocstrm_total_cal_size);
+ atomic_set(&acdb_data.vocstrm_cal[i].cal_size,
+ cal_blocks[i].cal_size);
+ atomic_set(&acdb_data.vocstrm_cal[i].cal_paddr,
+ cal_blocks[i].cal_offset +
+ atomic64_read(&acdb_data.paddr));
+ atomic_set(&acdb_data.vocstrm_cal[i].cal_kvaddr,
+ cal_blocks[i].cal_offset +
+ atomic64_read(&acdb_data.kvaddr));
+ }
+ }
+ atomic_set(&acdb_data.vocstrm_cal_size, len);
+done:
+ return;
+}
+
+/* Hand back a pointer to the cached voice-stream table (no copy). */
+void get_vocstrm_cal(struct acdb_cal_data *cal_data)
+{
+ pr_debug("%s\n", __func__);
+
+ if (cal_data == NULL) {
+ pr_err("ACDB=> NULL pointer sent to %s\n", __func__);
+ goto done;
+ }
+
+ cal_data->num_cal_blocks = atomic_read(&acdb_data.vocstrm_cal_size);
+ cal_data->cal_blocks = &acdb_data.vocstrm_cal[0];
+done:
+ return;
+}
+
+/* Same as store_vocproc_cal() but for the voice-volume table. */
+void store_vocvol_cal(int32_t len, struct cal_block *cal_blocks)
+{
+ int i;
+ pr_debug("%s\n", __func__);
+
+ if (len > MAX_NETWORKS) {
+ pr_err("%s: Calibration sent for %d networks, only %d are "
+ "supported!\n", __func__, len, MAX_NETWORKS);
+ goto done;
+ }
+
+ atomic_set(&acdb_data.vocvol_total_cal_size, 0);
+ for (i = 0; i < len; i++) {
+ if (cal_blocks[i].cal_offset >
+ atomic64_read(&acdb_data.mem_len)) {
+ pr_err("%s: offset %d is > mem_len %ld\n",
+ __func__, cal_blocks[i].cal_offset,
+ (long)atomic64_read(&acdb_data.mem_len));
+ atomic_set(&acdb_data.vocvol_cal[i].cal_size, 0);
+ } else {
+ atomic_add(cal_blocks[i].cal_size,
+ &acdb_data.vocvol_total_cal_size);
+ atomic_set(&acdb_data.vocvol_cal[i].cal_size,
+ cal_blocks[i].cal_size);
+ atomic_set(&acdb_data.vocvol_cal[i].cal_paddr,
+ cal_blocks[i].cal_offset +
+ atomic64_read(&acdb_data.paddr));
+ atomic_set(&acdb_data.vocvol_cal[i].cal_kvaddr,
+ cal_blocks[i].cal_offset +
+ atomic64_read(&acdb_data.kvaddr));
+ }
+ }
+ atomic_set(&acdb_data.vocvol_cal_size, len);
+done:
+ return;
+}
+
+/* Hand back a pointer to the cached voice-volume table (no copy). */
+void get_vocvol_cal(struct acdb_cal_data *cal_data)
+{
+ pr_debug("%s\n", __func__);
+
+ if (cal_data == NULL) {
+ pr_err("ACDB=> NULL pointer sent to %s\n", __func__);
+ goto done;
+ }
+
+ cal_data->num_cal_blocks = atomic_read(&acdb_data.vocvol_cal_size);
+ cal_data->cal_blocks = &acdb_data.vocvol_cal[0];
+done:
+ return;
+}
+
+/*
+ * Cache sidetone enable/gain.
+ * NOTE(review): unlike every sibling store_*() above, cal_data is
+ * dereferenced without a NULL check — confirm callers guarantee it.
+ */
+void store_sidetone_cal(struct sidetone_cal *cal_data)
+{
+ pr_debug("%s\n", __func__);
+
+ atomic_set(&acdb_data.sidetone_cal.enable, cal_data->enable);
+ atomic_set(&acdb_data.sidetone_cal.gain, cal_data->gain);
+}
+
+
+/* Read back the cached sidetone enable/gain. */
+void get_sidetone_cal(struct sidetone_cal *cal_data)
+{
+ pr_debug("%s\n", __func__);
+
+ if (cal_data == NULL) {
+ pr_err("ACDB=> NULL pointer sent to %s\n", __func__);
+ goto done;
+ }
+
+ cal_data->enable = atomic_read(&acdb_data.sidetone_cal.enable);
+ cal_data->gain = atomic_read(&acdb_data.sidetone_cal.gain);
+done:
+ return;
+}
+
+/*
+ * Character-device open: never fails.  If calibration memory is
+ * already mapped it is reused; the only bookkeeping is a usage count
+ * consumed by acdb_release().
+ */
+static int acdb_open(struct inode *inode, struct file *f)
+{
+ s32 result = 0;
+ pr_debug("%s\n", __func__);
+
+ if (atomic64_read(&acdb_data.mem_len)) {
+ pr_debug("%s: ACDB opened but memory allocated, "
+ "using existing allocation!\n",
+ __func__);
+ }
+
+ atomic_inc(&usage_count);
+ return result;
+}
+
+/*
+ * Main ioctl dispatcher.  The first switch handles commands whose
+ * argument is a single scalar (memory handle or topology id); all
+ * remaining commands share a common "size + payload" layout: a
+ * 32-bit size followed by one or more cal_block records which are
+ * copied onto the stack and routed to the matching store_*() helper.
+ *
+ * NOTE(review): several issues in this function:
+ *  - 'size' is only checked to be > 0, never bounded by
+ *    sizeof(data); copy_from_user(data, ..., size) with a large
+ *    user-supplied size overflows the on-stack cal_block array.
+ *  - the 'data == NULL' test is dead code (address of a stack
+ *    array) and runs after the copy anyway.
+ *  - the topology cases set result = -EFAULT on copy failure but
+ *    still call store_*_topology() with the uninitialized value.
+ *  - AUDIO_REGISTER_PMEM reads mem_len with atomic_read() while the
+ *    rest of the driver uses atomic64_read() — confirm intent.
+ */
+static long acdb_ioctl(struct file *f,
+ unsigned int cmd, unsigned long arg)
+{
+ int32_t result = 0;
+ int32_t size;
+ int32_t map_fd;
+ uint32_t topology;
+ struct cal_block data[MAX_NETWORKS];
+ pr_debug("%s\n", __func__);
+
+ switch (cmd) {
+
+ case AUDIO_REGISTER_PMEM:
+ pr_debug("AUDIO_REGISTER_PMEM\n");
+ if (atomic_read(&acdb_data.mem_len)) {
+ //deregister_memory();
+ pr_debug("Remove the existing memory\n");
+ }
+
+ if (copy_from_user(&map_fd, (void *)arg, sizeof(map_fd))) {
+ pr_err("%s: fail to copy memory handle!\n", __func__);
+ result = -EFAULT;
+ } else {
+ atomic_set(&acdb_data.map_handle, map_fd);
+ //result = register_memory();
+ }
+ goto done;
+
+ case AUDIO_DEREGISTER_PMEM:
+ pr_debug("AUDIO_DEREGISTER_PMEM\n");
+ //deregister_memory();
+ goto done;
+ case AUDIO_SET_VOICE_RX_TOPOLOGY:
+ if (copy_from_user(&topology, (void *)arg,
+ sizeof(topology))) {
+ pr_err("%s: fail to copy topology!\n", __func__);
+ result = -EFAULT;
+ }
+ /* NOTE(review): stored even if the copy above failed. */
+ store_voice_rx_topology(topology);
+ goto done;
+ case AUDIO_SET_VOICE_TX_TOPOLOGY:
+ if (copy_from_user(&topology, (void *)arg,
+ sizeof(topology))) {
+ pr_err("%s: fail to copy topology!\n", __func__);
+ result = -EFAULT;
+ }
+ store_voice_tx_topology(topology);
+ goto done;
+ case AUDIO_SET_ADM_RX_TOPOLOGY:
+ if (copy_from_user(&topology, (void *)arg,
+ sizeof(topology))) {
+ pr_err("%s: fail to copy topology!\n", __func__);
+ result = -EFAULT;
+ }
+ store_adm_rx_topology(topology);
+ goto done;
+ case AUDIO_SET_ADM_TX_TOPOLOGY:
+ if (copy_from_user(&topology, (void *)arg,
+ sizeof(topology))) {
+ pr_err("%s: fail to copy topology!\n", __func__);
+ result = -EFAULT;
+ }
+ store_adm_tx_topology(topology);
+ goto done;
+ case AUDIO_SET_ASM_TOPOLOGY:
+ if (copy_from_user(&topology, (void *)arg,
+ sizeof(topology))) {
+ pr_err("%s: fail to copy topology!\n", __func__);
+ result = -EFAULT;
+ }
+ store_asm_topology(topology);
+ goto done;
+ }
+
+ /* Remaining commands: leading 32-bit payload size... */
+ if (copy_from_user(&size, (void *) arg, sizeof(size))) {
+
+ result = -EFAULT;
+ goto done;
+ }
+
+ if (size <= 0) {
+ pr_err("%s: Invalid size sent to driver: %d\n",
+ __func__, size);
+ result = -EFAULT;
+ goto done;
+ }
+
+ /* ...then the payload itself (see overflow NOTE above). */
+ if (copy_from_user(data, (void *)(arg + sizeof(size)), size)) {
+
+ pr_err("%s: fail to copy table size %d\n", __func__, size);
+ result = -EFAULT;
+ goto done;
+ }
+
+ /* NOTE(review): dead check — 'data' is a stack array. */
+ if (data == NULL) {
+ pr_err("%s: NULL pointer sent to driver!\n", __func__);
+ result = -EFAULT;
+ goto done;
+ }
+
+ switch (cmd) {
+ case AUDIO_SET_AUDPROC_TX_CAL:
+ if (size > sizeof(struct cal_block))
+ pr_err("%s: More Audproc Cal then expected, "
+ "size received: %d\n", __func__, size);
+ store_audproc_cal(TX_CAL, data);
+ break;
+ case AUDIO_SET_AUDPROC_RX_CAL:
+ if (size > sizeof(struct cal_block))
+ pr_err("%s: More Audproc Cal then expected, "
+ "size received: %d\n", __func__, size);
+ store_audproc_cal(RX_CAL, data);
+ break;
+ case AUDIO_SET_AUDPROC_TX_STREAM_CAL:
+ if (size > sizeof(struct cal_block))
+ pr_err("%s: More Audproc Cal then expected, "
+ "size received: %d\n", __func__, size);
+ store_audstrm_cal(TX_CAL, data);
+ break;
+ case AUDIO_SET_AUDPROC_RX_STREAM_CAL:
+ if (size > sizeof(struct cal_block))
+ pr_err("%s: More Audproc Cal then expected, "
+ "size received: %d\n", __func__, size);
+ store_audstrm_cal(RX_CAL, data);
+ break;
+ case AUDIO_SET_AUDPROC_TX_VOL_CAL:
+ if (size > sizeof(struct cal_block))
+ pr_err("%s: More Audproc Cal then expected, "
+ "size received: %d\n", __func__, size);
+ store_audvol_cal(TX_CAL, data);
+ break;
+ case AUDIO_SET_AUDPROC_RX_VOL_CAL:
+ if (size > sizeof(struct cal_block))
+ pr_err("%s: More Audproc Cal then expected, "
+ "size received: %d\n", __func__, size);
+ store_audvol_cal(RX_CAL, data);
+ break;
+ case AUDIO_SET_AFE_TX_CAL:
+ if (size > sizeof(struct cal_block))
+ pr_err("%s: More AFE Cal then expected, "
+ "size received: %d\n", __func__, size);
+ store_afe_cal(TX_CAL, data);
+ break;
+ case AUDIO_SET_AFE_RX_CAL:
+ if (size > sizeof(struct cal_block))
+ pr_err("%s: More AFE Cal then expected, "
+ "size received: %d\n", __func__, size);
+ store_afe_cal(RX_CAL, data);
+ break;
+ case AUDIO_SET_VOCPROC_CAL:
+ /* Per-network tables: length in cal_block units. */
+ store_vocproc_cal(size / sizeof(struct cal_block), data);
+ break;
+ case AUDIO_SET_VOCPROC_STREAM_CAL:
+ store_vocstrm_cal(size / sizeof(struct cal_block), data);
+ break;
+ case AUDIO_SET_VOCPROC_VOL_CAL:
+ store_vocvol_cal(size / sizeof(struct cal_block), data);
+ break;
+ case AUDIO_SET_SIDETONE_CAL:
+ if (size > sizeof(struct sidetone_cal))
+ pr_err("%s: More sidetone cal then expected, "
+ "size received: %d\n", __func__, size);
+ store_sidetone_cal((struct sidetone_cal *)data);
+ break;
+ case AUDIO_SET_ANC_CAL:
+ store_anc_cal(data);
+ break;
+ default:
+ pr_err("ACDB=> ACDB ioctl not found!\n");
+ }
+
+done:
+ return result;
+}
+
+/*
+ * Map the driver's calibration region into the caller's address
+ * space, uncached.  Fails with -ENODEV if no region has been set up
+ * yet, -ENOMEM if the requested VMA is larger than the region.
+ */
+static int acdb_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ int result = 0;
+ int size = vma->vm_end - vma->vm_start;
+
+ pr_debug("%s\n", __func__);
+
+ if (atomic64_read(&acdb_data.mem_len)) {
+ if (size <= atomic64_read(&acdb_data.mem_len)) {
+ /* Uncached mapping of the physical cal region. */
+ vma->vm_page_prot = pgprot_noncached(
+ vma->vm_page_prot);
+ result = remap_pfn_range(vma,
+ vma->vm_start,
+ atomic64_read(&acdb_data.paddr) >> PAGE_SHIFT,
+ size,
+ vma->vm_page_prot);
+ } else {
+ pr_err("%s: Not enough memory!\n", __func__);
+ result = -ENOMEM;
+ }
+ } else {
+ pr_err("%s: memory is not allocated, yet!\n", __func__);
+ result = -ENODEV;
+ }
+
+ return result;
+}
+
+/*
+ * Drop one reference; returns -EBUSY while other openers remain.
+ * NOTE(review): the bare atomic_read(&usage_count) on its own line
+ * discards its result — it looks like leftover debug code.
+ */
+static int acdb_release(struct inode *inode, struct file *f)
+{
+ s32 result = 0;
+
+ atomic_dec(&usage_count);
+ atomic_read(&usage_count);
+
+ pr_debug("%s: ref count %d!\n", __func__,
+ atomic_read(&usage_count));
+
+ if (atomic_read(&usage_count) >= 1)
+ result = -EBUSY;
+
+ return result;
+}
+
+/* File operations for the /dev/msm_acdb misc device. */
+static const struct file_operations acdb_fops = {
+ .owner = THIS_MODULE,
+ .open = acdb_open,
+ .release = acdb_release,
+ .unlocked_ioctl = acdb_ioctl,
+ .mmap = acdb_mmap,
+};
+
+struct miscdevice acdb_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "msm_acdb",
+ .fops = &acdb_fops,
+};
+
+/* Zero all cached calibration state and register the misc device. */
+static int __init acdb_init(void)
+{
+ memset(&acdb_data, 0, sizeof(acdb_data));
+ mutex_init(&acdb_data.acdb_mutex);
+ atomic_set(&usage_count, 0);
+ return misc_register(&acdb_misc);
+}
+
+/* NOTE(review): empty exit — the misc device is never deregistered. */
+static void __exit acdb_exit(void)
+{
+}
+
+module_init(acdb_init);
+module_exit(acdb_exit);
+
+MODULE_DESCRIPTION("MSM 8x60 Audio ACDB driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/qcom/qdsp6/core/dsp_debug.c b/sound/soc/qcom/qdsp6/core/dsp_debug.c
new file mode 100644
index 0000000000000..4e242e395b0c5
--- /dev/null
+++ b/sound/soc/qcom/qdsp6/core/dsp_debug.c
@@ -0,0 +1,259 @@
+/* arch/arm/mach-msm/qdsp6/dsp_dump.c
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <asm/atomic.h>
+
+#include <sound/qdsp6v2/dsp_debug.h>
+
+/* Crash-handshake state shared with the dsp_debug userspace client. */
+static wait_queue_head_t dsp_wait;
+static int dsp_has_crashed;	/* 0 = ok, 1 = crashed, 2 = dump done */
+static int dsp_wait_count;	/* readers parked in "wait-for-crash" */
+
+static atomic_t dsp_crash_count = ATOMIC_INIT(0);
+dsp_state_cb cb_ptr;		/* optional client crash-state callback */
+
+/*
+ * Called when the DSP stops responding.  Wakes any userspace waiter
+ * so it can collect a RAM dump, then blocks until the waiter signals
+ * completion via "continue-crash".  A second concurrent crasher is
+ * parked in a ~10-minute sleep loop so only one dump runs.
+ */
+void q6audio_dsp_not_responding(void)
+{
+ int i;
+
+ if (cb_ptr)
+ cb_ptr(DSP_STATE_CRASHED);
+ if (atomic_add_return(1, &dsp_crash_count) != 1) {
+ pr_err("q6audio_dsp_not_responding() \
+ - parking additional crasher...\n");
+ for (i = 0; i < 600; i++)
+ msleep(1000);
+ }
+ if (dsp_wait_count) {
+ dsp_has_crashed = 1;
+ wake_up(&dsp_wait);
+
+ /* Block until userspace finishes the dump. */
+ while (dsp_has_crashed != 2)
+ wait_event(dsp_wait, dsp_has_crashed == 2);
+ } else {
+ pr_err("q6audio_dsp_not_responding() - no waiter?\n");
+ }
+ if (cb_ptr)
+ cb_ptr(DSP_STATE_CRASH_DUMP_DONE);
+}
+
+/* Open is stateless; all state lives in file-scope statics. */
+static int dsp_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+/* Physical address of the DSP NMI trigger register. */
+#define DSP_NMI_ADDR 0x28800010
+
+/*
+ * Text-command interface: "wait-for-crash" blocks until the DSP
+ * crashes then asserts its NMI; "boom" simulates a crash;
+ * "continue-crash" releases the parked crash path.
+ *
+ * NOTE(review): ioremap() returns NULL on failure, never an ERR_PTR,
+ * so the IS_ERR() branch cannot fire — the !ptr check below is the
+ * effective guard.  Also, a waiter that returns here still leaves
+ * dsp_wait_count incremented on success paths.
+ */
+static ssize_t dsp_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ char cmd[32];
+ void __iomem *ptr;
+ void *mem_buffer;
+
+ if (count >= sizeof(cmd))
+ return -EINVAL;
+ if (copy_from_user(cmd, buf, count))
+ return -EFAULT;
+ cmd[count] = 0;
+
+ /* Strip a trailing newline from echo-style writes. */
+ if ((count > 1) && (cmd[count-1] == '\n'))
+ cmd[count-1] = 0;
+
+ if (!strcmp(cmd, "wait-for-crash")) {
+ while (!dsp_has_crashed) {
+ int res;
+ dsp_wait_count++;
+ res = wait_event_interruptible(dsp_wait,
+ dsp_has_crashed);
+ if (res < 0) {
+ dsp_wait_count--;
+ return res;
+ }
+ }
+ /* assert DSP NMI */
+ mem_buffer = ioremap(DSP_NMI_ADDR, 0x16);
+ if (IS_ERR((void *)mem_buffer)) {
+ pr_err("%s:map_buffer failed, error = %ld\n", __func__,
+ PTR_ERR((void *)mem_buffer));
+ return -ENOMEM;
+ }
+ ptr = mem_buffer;
+ if (!ptr) {
+ pr_err("Unable to map DSP NMI\n");
+ return -EFAULT;
+ }
+ writel(0x1, (void *)ptr);
+ iounmap(mem_buffer);
+ } else if (!strcmp(cmd, "boom")) {
+ q6audio_dsp_not_responding();
+ } else if (!strcmp(cmd, "continue-crash")) {
+ dsp_has_crashed = 2;
+ wake_up(&dsp_wait);
+ } else {
+ pr_err("[%s:%s] unknown dsp_debug command: %s\n", __FILE__,
+ __func__, cmd);
+ }
+
+ return count;
+}
+
+static unsigned copy_ok_count;		/* bytes successfully dumped */
+static uint32_t dsp_ram_size;		/* set by dspcrashd_probe() */
+static uint32_t dsp_ram_base;		/* physical base of DSP RAM */
+
+/*
+ * Stream the DSP RAM region to userspace one page at a time for
+ * post-crash dumps.  *pos must be page-aligned; each page is
+ * ioremap()ed, copied out, and unmapped again.
+ *
+ * NOTE(review): same IS_ERR()-on-ioremap issue as dsp_write(); and
+ * when 'addr' is unaligned, mapsize is doubled but reads still copy
+ * only PAGE_SIZE from the mapping's start — confirm intent.
+ */
+static ssize_t dsp_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ size_t actual = 0;
+ size_t mapsize = PAGE_SIZE;
+ unsigned addr;
+ void __iomem *ptr;
+ void *mem_buffer;
+
+ if ((dsp_ram_base == 0) || (dsp_ram_size == 0)) {
+ pr_err("[%s:%s] Memory Invalid or not initialized, Base = 0x%x,"
+ " size = 0x%x\n", __FILE__,
+ __func__, dsp_ram_base, dsp_ram_size);
+ return -EINVAL;
+ }
+
+ if (*pos >= dsp_ram_size)
+ return 0;
+
+ if (*pos & (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ addr = (*pos + dsp_ram_base);
+
+ /* don't blow up if we're unaligned */
+ if (addr & (PAGE_SIZE - 1))
+ mapsize *= 2;
+
+ while (count >= PAGE_SIZE) {
+ mem_buffer = ioremap(addr, mapsize);
+ if (IS_ERR((void *)mem_buffer)) {
+ pr_err("%s:map_buffer failed, error = %ld\n",
+ __func__, PTR_ERR((void *)mem_buffer));
+ return -ENOMEM;
+ }
+ ptr = mem_buffer;
+ if (!ptr) {
+ pr_err("[%s:%s] map error @ %x\n", __FILE__,
+ __func__, addr);
+ return -EFAULT;
+ }
+ if (copy_to_user(buf, ptr, PAGE_SIZE)) {
+ iounmap(mem_buffer);
+ pr_err("[%s:%s] copy error @ %p\n", __FILE__,
+ __func__, buf);
+ return -EFAULT;
+ }
+ copy_ok_count += PAGE_SIZE;
+ iounmap(mem_buffer);
+ addr += PAGE_SIZE;
+ buf += PAGE_SIZE;
+ actual += PAGE_SIZE;
+ count -= PAGE_SIZE;
+ }
+
+ *pos += actual;
+ return actual;
+}
+
+/* Nothing to tear down; see dsp_open(). */
+static int dsp_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+/*
+ * Register a client callback to be notified of DSP crash state
+ * transitions (see q6audio_dsp_not_responding()).  Only a single
+ * callback slot exists; a second caller silently replaces the first.
+ */
+int dsp_debug_register(dsp_state_cb ptr)
+{
+ if (ptr == NULL)
+ return -EINVAL;
+ cb_ptr = ptr;
+
+ return 0;
+}
+
+/*
+ * Platform probe: pick up the DSP RAM window from the
+ * "msm_dspcrashd" DMA resource for later dumping via dsp_read().
+ * NOTE(review): *pdata is dereferenced for the pr_info() without a
+ * NULL check — a board without platform_data would oops here.
+ */
+static int dspcrashd_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ struct resource *res;
+ int *pdata;
+
+ pdata = pdev->dev.platform_data;
+ res = platform_get_resource_byname(pdev, IORESOURCE_DMA,
+ "msm_dspcrashd");
+ if (!res) {
+ pr_err("%s: failed to get resources for dspcrashd\n", __func__);
+ return -ENODEV;
+ }
+
+ dsp_ram_base = res->start;
+ dsp_ram_size = res->end - res->start;
+ pr_info("%s: Platform driver values: Base = 0x%x, Size = 0x%x,"
+ "pdata = 0x%x\n", __func__,
+ dsp_ram_base, dsp_ram_size, *pdata);
+ return rc;
+}
+
+/* /dev/dsp_debug: read = RAM dump, write = text commands. */
+static const struct file_operations dsp_fops = {
+ .owner = THIS_MODULE,
+ .open = dsp_open,
+ .read = dsp_read,
+ .write = dsp_write,
+ .release = dsp_release,
+};
+
+static struct miscdevice dsp_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "dsp_debug",
+ .fops = &dsp_fops,
+};
+
+static struct platform_driver dspcrashd_driver = {
+ .probe = dspcrashd_probe,
+ .driver = { .name = "msm_dspcrashd"}
+};
+
+/*
+ * NOTE(review): IS_ERR_VALUE() is meant for pointer-sized error
+ * returns; platform_driver_register() returns a plain negative
+ * errno, so 'rc < 0' would be the idiomatic check.  Registration
+ * failure is also only logged, never propagated.
+ */
+static int __init dsp_init(void)
+{
+ int rc = 0;
+ init_waitqueue_head(&dsp_wait);
+ rc = platform_driver_register(&dspcrashd_driver);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("%s: platform_driver_register for dspcrashd failed\n",
+ __func__);
+ }
+ return misc_register(&dsp_misc);
+}
+
+/*
+ * NOTE(review): dead code — the file uses device_initcall() with no
+ * module_exit(), so this is never called and the platform driver is
+ * never unregistered.
+ */
+static int __exit dsp_exit(void)
+{
+ platform_driver_unregister(&dspcrashd_driver);
+ return 0;
+}
+
+device_initcall(dsp_init);
diff --git a/sound/soc/qcom/qdsp6/core/q6audio_common.h b/sound/soc/qcom/qdsp6/core/q6audio_common.h
new file mode 100644
index 0000000000000..68efafd650368
--- /dev/null
+++ b/sound/soc/qcom/qdsp6/core/q6audio_common.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+/* For Decoders */
+#ifndef __Q6_AUDIO_COMMON_H__
+#define __Q6_AUDIO_COMMON_H__
+
+#include <sound/apr_audio.h>
+#include <sound/q6asm.h>
+
+void q6_audio_cb(uint32_t opcode, uint32_t token,
+ uint32_t *payload, void *priv);
+
+void audio_aio_cb(uint32_t opcode, uint32_t token,
+ uint32_t *payload, void *audio);
+
+
+/* For Encoders */
+void q6asm_in_cb(uint32_t opcode, uint32_t token,
+ uint32_t *payload, void *priv);
+
+void audio_in_get_dsp_frames(void *audio,
+ uint32_t token, uint32_t *payload);
+
+#endif /*__Q6_AUDIO_COMMON_H__*/
diff --git a/sound/soc/qcom/qdsp6/core/q6core.c b/sound/soc/qcom/qdsp6/core/q6core.c
new file mode 100644
index 0000000000000..e5f1d4ed7a4ce
--- /dev/null
+++ b/sound/soc/qcom/qdsp6/core/q6core.c
@@ -0,0 +1,406 @@
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <sound/qdsp6v2/apr.h>
+#include "q6core.h"
+
+#define TIMEOUT_MS 1000		/* APR response wait, milliseconds */
+
+static struct apr_svc *apr_handle_q;	/* debug handle to ADSP TEST svc */
+static struct apr_svc *apr_handle_m;	/* debug handle to MODEM TEST svc */
+static struct apr_svc *core_handle_q;	/* handle to ADSP CORE service */
+
+static int32_t query_adsp_ver;		/* 1 while a version query is pending */
+static wait_queue_head_t adsp_version_wait;
+static uint32_t adsp_version;		/* build id from last version reply */
+
+static wait_queue_head_t bus_bw_req_wait;
+static u32 bus_bw_resp_received;
+
+static struct dentry *dentry;		/* debugfs "apr" file */
+static char l_buf[4096];		/* shared APR packet scratch buffer */
+
+/*
+ * APR callback for the ADSP CORE service.  Handles command acks
+ * (power-collapse, bus-bandwidth), version-query replies, and the
+ * RESET_EVENTS notification that invalidates the service handle.
+ *
+ * NOTE(review): payload1[1] is read after only a payload_size == 0
+ * check; a 4-byte payload would read past the buffer — confirm the
+ * APR layer guarantees at least two words for BASIC_RSP_RESULT.
+ */
+static int32_t aprv2_core_fn_q(struct apr_client_data *data, void *priv)
+{
+ struct adsp_get_version *payload;
+ uint32_t *payload1;
+ struct adsp_service_info *svc_info;
+ int i;
+
+ pr_info("core msg: payload len = %u, apr resp opcode = 0x%X\n",
+ data->payload_size, data->opcode);
+
+ switch (data->opcode) {
+
+ case APR_BASIC_RSP_RESULT:{
+
+ if (data->payload_size == 0) {
+ pr_err("%s: APR_BASIC_RSP_RESULT No Payload ",
+ __func__);
+ return 0;
+ }
+
+ payload1 = data->payload;
+
+ /* payload1[0] = acked opcode, payload1[1] = status. */
+ switch (payload1[0]) {
+
+ case ADSP_CMD_SET_POWER_COLLAPSE_STATE:
+ pr_info("Cmd = ADSP_CMD_SET_POWER_COLLAPSE_STATE"
+ " status[0x%x]\n", payload1[1]);
+ break;
+ case ADSP_CMD_REMOTE_BUS_BW_REQUEST:
+ pr_info("%s: cmd = ADSP_CMD_REMOTE_BUS_BW_REQUEST"
+ " status = 0x%x\n", __func__, payload1[1]);
+
+ /* Release core_req_bus_bandwith()'s waiter. */
+ bus_bw_resp_received = 1;
+ wake_up(&bus_bw_req_wait);
+ break;
+ default:
+ pr_err("Invalid cmd rsp[0x%x][0x%x]\n",
+ payload1[0], payload1[1]);
+ break;
+ }
+ break;
+ }
+ case ADSP_GET_VERSION_RSP:{
+ if (data->payload_size) {
+ payload = data->payload;
+ if (query_adsp_ver == 1) {
+ query_adsp_ver = 0;
+ adsp_version = payload->build_id;
+ wake_up(&adsp_version_wait);
+ }
+ /* Service table follows the fixed header. */
+ svc_info = (struct adsp_service_info *)
+ ((char *)payload + sizeof(struct adsp_get_version));
+ pr_info("----------------------------------------\n");
+ pr_info("Build id = %x\n", payload->build_id);
+ pr_info("Number of services= %x\n", payload->svc_cnt);
+ pr_info("----------------------------------------\n");
+ for (i = 0; i < payload->svc_cnt; i++) {
+ pr_info("svc-id[%d]\tver[%x.%x]\n",
+ svc_info[i].svc_id,
+ (svc_info[i].svc_ver & 0xFFFF0000)
+ >> 16,
+ (svc_info[i].svc_ver & 0xFFFF));
+ }
+ pr_info("-----------------------------------------\n");
+ } else
+ pr_info("zero payload for ADSP_GET_VERSION_RSP\n");
+ break;
+ }
+ case RESET_EVENTS:{
+ pr_debug("Reset event received in Core service");
+ /* DSP restarted: drop and forget the stale handle. */
+ apr_reset(core_handle_q);
+ core_handle_q = NULL;
+ break;
+ }
+
+ default:
+ pr_err("Message id from adsp core svc: %d\n", data->opcode);
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * Loopback check for the ADSP TEST service: compare the echoed
+ * payload against the pattern last written into l_buf + 20.
+ */
+static int32_t aprv2_debug_fn_q(struct apr_client_data *data, void *priv)
+{
+ pr_debug("Q6_Payload Length = %d\n", data->payload_size);
+ if (memcmp(data->payload, l_buf + 20, data->payload_size))
+ pr_info("FAIL: %d\n", data->payload_size);
+ else
+ pr_info("SUCCESS: %d\n", data->payload_size);
+ return 0;
+}
+
+/* MODEM TEST service callback: log-only. */
+static int32_t aprv2_debug_fn_m(struct apr_client_data *data, void *priv)
+{
+ pr_info("M_Payload Length = %d\n", data->payload_size);
+ return 0;
+}
+
+/* debugfs open: stash i_private per the usual debugfs idiom. */
+static ssize_t apr_debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ pr_debug("apr debugfs opened\n");
+ return 0;
+}
+
+/*
+ * Lazily register with the ADSP CORE service; callers must re-check
+ * core_handle_q for NULL afterwards (registration can fail).
+ */
+void core_open(void)
+{
+ if (core_handle_q == NULL) {
+ core_handle_q = apr_register("ADSP", "CORE",
+ aprv2_core_fn_q, 0xFFFFFFFF, NULL);
+ }
+ pr_info("Open_q %p\n", core_handle_q);
+ if (core_handle_q == NULL) {
+ pr_err("%s: Unable to register CORE\n", __func__);
+ }
+}
+
+/*
+ * Ask the ADSP to vote remote bus bandwidth (average/instantaneous
+ * bytes per second) and wait up to TIMEOUT_MS for the ack delivered
+ * via aprv2_core_fn_q().  Returns 0 on success, -ENODEV if the CORE
+ * service is unavailable, -ETIME on ack timeout, or the apr_send_pkt
+ * error.  (Public name "bandwith" is a pre-existing typo kept for
+ * ABI compatibility.)
+ */
+int core_req_bus_bandwith(u16 bus_id, u32 ab_bps, u32 ib_bps)
+{
+ struct adsp_cmd_remote_bus_bw_request bus_bw_req;
+ int ret;
+
+ pr_debug("%s: bus_id %u ab_bps %u ib_bps %u\n",
+ __func__, bus_id, ab_bps, ib_bps);
+
+ core_open();
+ if (core_handle_q == NULL) {
+ pr_info("%s: apr registration for CORE failed\n", __func__);
+ return -ENODEV;
+ }
+
+ bus_bw_req.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ bus_bw_req.hdr.pkt_size = sizeof(struct adsp_cmd_remote_bus_bw_request);
+
+ bus_bw_req.hdr.src_port = 0;
+ bus_bw_req.hdr.dest_port = 0;
+ bus_bw_req.hdr.token = 0;
+ bus_bw_req.hdr.opcode = ADSP_CMD_REMOTE_BUS_BW_REQUEST;
+
+ bus_bw_req.bus_identifier = bus_id;
+ bus_bw_req.reserved = 0;
+ bus_bw_req.ab_bps = ab_bps;
+ bus_bw_req.ib_bps = ib_bps;
+
+ /* Arm the flag before sending so the ack cannot be missed. */
+ bus_bw_resp_received = 0;
+ ret = apr_send_pkt(core_handle_q, (uint32_t *) &bus_bw_req);
+ if (ret < 0) {
+ pr_err("%s: CORE bus bw request failed\n", __func__);
+ goto fail_cmd;
+ }
+
+ ret = wait_event_timeout(bus_bw_req_wait, (bus_bw_resp_received == 1),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ ret = -ETIME;
+ goto fail_cmd;
+ }
+
+ return 0;
+
+fail_cmd:
+ return ret;
+}
+
+/*
+ * Query the ADSP build id, blocking up to TIMEOUT_MS for the reply.
+ * Returns the build id, or -ENODEV on timeout / missing service.
+ * NOTE(review): query_adsp_ver is set to 1 only AFTER apr_send_pkt();
+ * a reply arriving before that assignment is ignored by the callback
+ * and the wait then times out — arm the flag before sending.
+ */
+uint32_t core_get_adsp_version(void)
+{
+ struct apr_hdr *hdr;
+ int32_t rc = 0, ret = 0;
+ core_open();
+ if (core_handle_q) {
+ hdr = (struct apr_hdr *)l_buf;
+ hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ hdr->pkt_size = APR_PKT_SIZE(APR_HDR_SIZE, 0);
+ hdr->src_port = 0;
+ hdr->dest_port = 0;
+ hdr->token = 0;
+ hdr->opcode = ADSP_GET_VERSION;
+
+ apr_send_pkt(core_handle_q, (uint32_t *)l_buf);
+ query_adsp_ver = 1;
+ pr_info("Write_q\n");
+ ret = wait_event_timeout(adsp_version_wait,
+ (query_adsp_ver == 0),
+ msecs_to_jiffies(TIMEOUT_MS));
+ rc = adsp_version;
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ rc = -ENODEV;
+ }
+ } else
+ pr_info("apr registration failed\n");
+ return rc;
+}
+EXPORT_SYMBOL(core_get_adsp_version);
+
+/*
+ * debugfs command interpreter: the written text (stored at
+ * l_buf + 20, after the 20-byte APR header scratch area) selects a
+ * test action — open/close APR handles, send test packets of various
+ * sizes to ADSP/MODEM, query the DSP version, toggle power collapse,
+ * or simulate a crash.
+ *
+ * NOTE(review): several issues:
+ *  - 'if (!t_len % 99)' parses as '(!t_len) % 99', so the throttle
+ *    msleep only fires when t_len wraps to 0, not every 99 packets;
+ *    the intent was probably '!(t_len % 99)'.
+ *  - stray double ';;' after the en_pwr_col pkt_size assignment.
+ *  - static t_len and the shared l_buf make concurrent writers race.
+ */
+static ssize_t apr_debug_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int len;
+ static int t_len;
+
+ /* Commands are at most 63 bytes; longer input is truncated. */
+ len = count > 63 ? 63 : count;
+ if (copy_from_user(l_buf + 20 , buf, len)) {
+ pr_info("Unable to copy data from user space\n");
+ return -EFAULT;
+ }
+ l_buf[len + 20] = 0;
+ if (l_buf[len + 20 - 1] == '\n') {
+ l_buf[len + 20 - 1] = 0;
+ len--;
+ }
+ if (!strncmp(l_buf + 20, "open_q", 64)) {
+ apr_handle_q = apr_register("ADSP", "TEST", aprv2_debug_fn_q,
+ 0xFFFFFFFF, NULL);
+ pr_info("Open_q %p\n", apr_handle_q);
+ } else if (!strncmp(l_buf + 20, "open_m", 64)) {
+ apr_handle_m = apr_register("MODEM", "TEST", aprv2_debug_fn_m,
+ 0xFFFFFFFF, NULL);
+ pr_info("Open_m %p\n", apr_handle_m);
+ } else if (!strncmp(l_buf + 20, "write_q", 64)) {
+ struct apr_hdr *hdr;
+
+ /* Growing payload, cycling 0..449 (see NOTE above). */
+ t_len++;
+ t_len = t_len % 450;
+ if (!t_len % 99)
+ msleep(2000);
+ hdr = (struct apr_hdr *)l_buf;
+ hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT,
+ APR_HDR_LEN(20), APR_PKT_VER);
+ hdr->pkt_size = APR_PKT_SIZE(20, t_len);
+ hdr->src_port = 0;
+ hdr->dest_port = 0;
+ hdr->token = 0;
+ hdr->opcode = 0x12345678;
+ memset(l_buf + 20, 9, 4060);
+
+ apr_send_pkt(apr_handle_q, (uint32_t *)l_buf);
+ pr_debug("Write_q\n");
+ } else if (!strncmp(l_buf + 20, "write_m", 64)) {
+ struct apr_hdr *hdr;
+
+ hdr = (struct apr_hdr *)l_buf;
+ hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT,
+ APR_HDR_LEN(20), APR_PKT_VER);
+ hdr->pkt_size = APR_PKT_SIZE(20, 8);
+ hdr->src_port = 0;
+ hdr->dest_port = 0;
+ hdr->token = 0;
+ hdr->opcode = 0x12345678;
+ memset(l_buf + 30, 9, 4060);
+
+ apr_send_pkt(apr_handle_m, (uint32_t *)l_buf);
+ pr_info("Write_m\n");
+ } else if (!strncmp(l_buf + 20, "write_q4", 64)) {
+ struct apr_hdr *hdr;
+
+ /* Maximum-size (4076-byte payload) packet to ADSP. */
+ hdr = (struct apr_hdr *)l_buf;
+ hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT,
+ APR_HDR_LEN(20), APR_PKT_VER);
+ hdr->pkt_size = APR_PKT_SIZE(20, 4076);
+ hdr->src_port = 0;
+ hdr->dest_port = 0;
+ hdr->token = 0;
+ hdr->opcode = 0x12345678;
+ memset(l_buf + 30, 9, 4060);
+
+ apr_send_pkt(apr_handle_q, (uint32_t *)l_buf);
+ pr_info("Write_q\n");
+ } else if (!strncmp(l_buf + 20, "write_m4", 64)) {
+ struct apr_hdr *hdr;
+
+ hdr = (struct apr_hdr *)l_buf;
+ hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT,
+ APR_HDR_LEN(20), APR_PKT_VER);
+ hdr->pkt_size = APR_PKT_SIZE(20, 4076);
+ hdr->src_port = 0;
+ hdr->dest_port = 0;
+ hdr->token = 0;
+ hdr->opcode = 0x12345678;
+ memset(l_buf + 30, 9, 4060);
+
+ apr_send_pkt(apr_handle_m, (uint32_t *)l_buf);
+ pr_info("Write_m\n");
+ } else if (!strncmp(l_buf + 20, "close", 64)) {
+ if (apr_handle_q)
+ apr_deregister(apr_handle_q);
+ } else if (!strncmp(l_buf + 20, "loaded", 64)) {
+ apr_set_q6_state(APR_SUBSYS_LOADED);
+ } else if (!strncmp(l_buf + 20, "boom", 64)) {
+ q6audio_dsp_not_responding();
+ } else if (!strncmp(l_buf + 20, "dsp_ver", 64)) {
+ core_get_adsp_version();
+ } else if (!strncmp(l_buf + 20, "en_pwr_col", 64)) {
+ struct adsp_power_collapse pc;
+
+ core_open();
+ if (core_handle_q) {
+ pc.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ pc.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(uint32_t));;
+ pc.hdr.src_port = 0;
+ pc.hdr.dest_port = 0;
+ pc.hdr.token = 0;
+ pc.hdr.opcode = ADSP_CMD_SET_POWER_COLLAPSE_STATE;
+ pc.power_collapse = 0x00000000;
+ apr_send_pkt(core_handle_q, (uint32_t *)&pc);
+ pr_info("Write_q :enable power collapse\n");
+ }
+ } else if (!strncmp(l_buf + 20, "dis_pwr_col", 64)) {
+ struct adsp_power_collapse pc;
+
+ core_open();
+ if (core_handle_q) {
+ pc.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_EVENT,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ pc.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(uint32_t));
+ pc.hdr.src_port = 0;
+ pc.hdr.dest_port = 0;
+ pc.hdr.token = 0;
+ pc.hdr.opcode = ADSP_CMD_SET_POWER_COLLAPSE_STATE;
+ pc.power_collapse = 0x00000001;
+ apr_send_pkt(core_handle_q, (uint32_t *)&pc);
+ pr_info("Write_q:disable power collapse\n");
+ }
+ } else
+ pr_info("Unknown Command\n");
+
+ return count;
+}
+
+/* debugfs file: write-command interface only (no read). */
+static const struct file_operations apr_debug_fops = {
+ .write = apr_debug_write,
+ .open = apr_debug_open,
+};
+
+/*
+ * One-time init: set up the two wait queues and, when debugfs is
+ * enabled, expose the "apr" debug command file.  The CORE handle is
+ * registered lazily by core_open().
+ */
+static int __init core_init(void)
+{
+ init_waitqueue_head(&bus_bw_req_wait);
+ bus_bw_resp_received = 0;
+
+ query_adsp_ver = 0;
+ init_waitqueue_head(&adsp_version_wait);
+ adsp_version = 0;
+
+ core_handle_q = NULL;
+
+#ifdef CONFIG_DEBUG_FS
+ dentry = debugfs_create_file("apr", S_IFREG | S_IRUGO | S_IWUSR
+ | S_IWGRP, NULL, (void *) NULL, &apr_debug_fops);
+#endif /* CONFIG_DEBUG_FS */
+
+ return 0;
+}
+
+device_initcall(core_init);
diff --git a/sound/soc/qcom/qdsp6/core/q6core.h b/sound/soc/qcom/qdsp6/core/q6core.h
new file mode 100644
index 0000000000000..97a81cbe75f1c
--- /dev/null
+++ b/sound/soc/qcom/qdsp6/core/q6core.h
@@ -0,0 +1,52 @@
+/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __Q6CORE_H__
+#define __Q6CORE_H__
+#include <sound/qdsp6v2/apr.h>
+
+
+/* APR opcode: ask the ADSP to vote for bus bandwidth */
+#define ADSP_CMD_REMOTE_BUS_BW_REQUEST 0x0001115D
+#define AUDIO_IF_BUS_ID 1
+
+/* In-band payload for the bandwidth vote; __packed because it is sent
+ * over APR verbatim. */
+struct adsp_cmd_remote_bus_bw_request {
+	struct apr_hdr hdr;
+	u16 bus_identifier;
+	u16 reserved;
+	u32 ab_bps;
+	u32 ib_bps;
+} __packed;
+
+#define ADSP_GET_VERSION 0x00011152
+#define ADSP_GET_VERSION_RSP 0x00011153
+
+/* Header of the GET_VERSION response; followed by svc_cnt
+ * struct adsp_service_info entries. */
+struct adsp_get_version {
+	uint32_t build_id;
+	uint32_t svc_cnt;
+};
+
+struct adsp_service_info {
+	uint32_t svc_id;
+	uint32_t svc_ver;
+};
+
+/* power_collapse: 0 enables ADSP power collapse, 1 disables it
+ * (see the en_pwr_col/dis_pwr_col debugfs commands in q6core.c) */
+#define ADSP_CMD_SET_POWER_COLLAPSE_STATE 0x0001115C
+struct adsp_power_collapse {
+	struct apr_hdr hdr;
+	uint32_t power_collapse;
+};
+
+int core_req_bus_bandwith(u16 bus_id, u32 ab_bps, u32 ib_bps);
+
+uint32_t core_get_adsp_version(void);
+
+#endif /* __Q6CORE_H__ */
diff --git a/sound/soc/qcom/qdsp6/core/rtac.c b/sound/soc/qcom/qdsp6/core/rtac.c
new file mode 100644
index 0000000000000..175568fe53a2a
--- /dev/null
+++ b/sound/soc/qcom/qdsp6/core/rtac.c
@@ -0,0 +1,1046 @@
+/* Copyright (c) 2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+#include <sound/qdsp6v2/audio_acdb.h>
+#include <sound/qdsp6v2/rtac.h>
+#include "q6audio_common.h"
+#include <sound/q6afe.h>
+
+/* NOTE(review): CONFIG_RTAC is force-defined right here, so the stub
+ * branch below is never compiled; it is kept for builds that strip RTAC
+ * support by removing this define. */
+#define CONFIG_RTAC
+#ifndef CONFIG_RTAC
+
+/* No-op stubs so callers still link when RTAC is compiled out. */
+void rtac_add_adm_device(u32 port_id, u32 copp_id, u32 path_id, u32 popp_id) {}
+void rtac_remove_adm_device(u32 port_id) {}
+void rtac_remove_popp_from_adm_devices(u32 popp_id) {}
+void rtac_set_adm_handle(void *handle) {}
+bool rtac_make_adm_callback(uint32_t *payload, u32 payload_size)
+	{return false; }
+void rtac_set_asm_handle(u32 session_id, void *handle) {}
+bool rtac_make_asm_callback(u32 session_id, uint32_t *payload,
+	u32 payload_size) {return false; }
+void rtac_add_voice(u32 cvs_handle, u32 cvp_handle, u32 rx_afe_port,
+	u32 tx_afe_port, u32 session_id) {}
+void rtac_remove_voice(u32 cvs_handle) {}
+void rtac_set_voice_handle(u32 mode, void *handle) {}
+bool rtac_make_voice_callback(u32 mode, uint32_t *payload,
+	u32 payload_size) {return false; }
+
+#else
+
+#define VOICE_CMD_SET_PARAM 0x00011006
+#define VOICE_CMD_GET_PARAM 0x00011007
+#define VOICE_EVT_GET_PARAM_ACK 0x00011008
+
+/* Max size of payload (buf size - apr header) */
+#define MAX_PAYLOAD_SIZE 4076
+#define RTAC_MAX_ACTIVE_DEVICES 4
+#define RTAC_MAX_ACTIVE_VOICE_COMBOS 2
+#define RTAC_MAX_ACTIVE_POPP 8
+#define RTAC_BUF_SIZE 4096
+
+/* how long send_*_apr() waits for the DSP callback, in milliseconds */
+#define TIMEOUT_MS 1000
+
+/* APR data: per-service handle plus the one-command-in-flight state used
+ * to pair apr_send_pkt() with its callback (cmd_state 1 = reply pending) */
+struct rtac_apr_data {
+	void *apr_handle;
+	atomic_t cmd_state;
+	wait_queue_head_t cmd_wait;
+};
+
+static struct rtac_apr_data rtac_adm_apr_data;
+static struct rtac_apr_data rtac_asm_apr_data[SESSION_MAX+1];
+static struct rtac_apr_data rtac_voice_apr_data[RTAC_VOICE_MODES];
+
+
+/* ADM info & APR */
+struct rtac_adm_data {
+	uint32_t topology_id;
+	uint32_t afe_port;
+	uint32_t copp;
+	uint32_t num_of_popp;
+	uint32_t popp[RTAC_MAX_ACTIVE_POPP];
+};
+
+struct rtac_adm {
+	uint32_t num_of_dev;
+	struct rtac_adm_data device[RTAC_MAX_ACTIVE_DEVICES];
+};
+static struct rtac_adm rtac_adm_data;
+static u32 rtac_adm_payload_size;
+static u32 rtac_adm_user_buf_size;
+/* in-band command/response staging buffer, RTAC_BUF_SIZE bytes */
+static u8 *rtac_adm_buffer;
+
+
+/* ASM APR */
+static u32 rtac_asm_payload_size;
+static u32 rtac_asm_user_buf_size;
+static u8 *rtac_asm_buffer;
+
+
+/* Voice info & APR */
+struct rtac_voice_data {
+	uint32_t tx_topology_id;
+	uint32_t rx_topology_id;
+	uint32_t tx_afe_port;
+	uint32_t rx_afe_port;
+	uint16_t cvs_handle;
+	uint16_t cvp_handle;
+};
+
+struct rtac_voice {
+	uint32_t num_of_voice_combos;
+	struct rtac_voice_data voice[RTAC_MAX_ACTIVE_VOICE_COMBOS];
+};
+
+static struct rtac_voice rtac_voice_data;
+static u32 rtac_voice_payload_size;
+static u32 rtac_voice_user_buf_size;
+static u8 *rtac_voice_buffer;
+/* session id per voice combo, indexed in lockstep with rtac_voice_data */
+static u32 voice_session_id[RTAC_MAX_ACTIVE_VOICE_COMBOS];
+
+
+/* one lock per table; the *_apr_mutex locks serialize command + callback */
+struct mutex rtac_adm_mutex;
+struct mutex rtac_adm_apr_mutex;
+struct mutex rtac_asm_apr_mutex;
+struct mutex rtac_voice_mutex;
+struct mutex rtac_voice_apr_mutex;
+
+/* Character-device open: no per-fd state, nothing to set up. */
+static int rtac_open(struct inode *inode, struct file *f)
+{
+	pr_debug("%s\n", __func__);
+	return 0;
+}
+
+/* Character-device release: nothing was allocated at open, so a no-op. */
+static int rtac_release(struct inode *inode, struct file *f)
+{
+	pr_debug("%s\n", __func__);
+	return 0;
+}
+
+/* ADM Info */
+/* Record popp_id under device dev_idx unless it is already present or the
+ * per-device popp table is full. */
+void add_popp(u32 dev_idx, u32 port_id, u32 popp_id)
+{
+	struct rtac_adm_data *dev = &rtac_adm_data.device[dev_idx];
+	u32 idx;
+
+	for (idx = 0; idx < dev->num_of_popp; idx++)
+		if (dev->popp[idx] == popp_id)
+			return;
+
+	if (dev->num_of_popp == RTAC_MAX_ACTIVE_POPP) {
+		pr_err("%s, Max POPP!\n", __func__);
+		return;
+	}
+	dev->popp[dev->num_of_popp++] = popp_id;
+}
+
+/*
+ * Register a (port, copp, popp) routing with the RTAC tables so the
+ * calibration tool can look it up.  If the AFE port is already tracked
+ * only the popp is appended; otherwise a new device slot is filled at
+ * index i, which the search loop intentionally leaves pointing at the
+ * first free slot.
+ */
+void rtac_add_adm_device(u32 port_id, u32 copp_id, u32 path_id, u32 popp_id)
+{
+	u32 i = 0;
+	pr_debug("%s: port_id = %d, popp_id = %d\n", __func__, port_id,
+		popp_id);
+
+	mutex_lock(&rtac_adm_mutex);
+	if (rtac_adm_data.num_of_dev == RTAC_MAX_ACTIVE_DEVICES) {
+		pr_err("%s, Can't add anymore RTAC devices!\n", __func__);
+		goto done;
+	}
+
+	/* Check if device already added */
+	if (rtac_adm_data.num_of_dev != 0) {
+		for (; i < rtac_adm_data.num_of_dev; i++) {
+			if (rtac_adm_data.device[i].afe_port == port_id) {
+				add_popp(i, port_id, popp_id);
+				goto done;
+			}
+			if (rtac_adm_data.device[i].num_of_popp ==
+						RTAC_MAX_ACTIVE_POPP) {
+				pr_err("%s, Max POPP!\n", __func__);
+				goto done;
+			}
+		}
+	}
+
+	/* Add device */
+	rtac_adm_data.num_of_dev++;
+
+	/* topology comes from the ACDB according to path direction */
+	if (path_id == ADM_PATH_PLAYBACK)
+		rtac_adm_data.device[i].topology_id =
+			get_adm_rx_topology();
+	else
+		rtac_adm_data.device[i].topology_id =
+			get_adm_tx_topology();
+	rtac_adm_data.device[i].afe_port = port_id;
+	rtac_adm_data.device[i].copp = copp_id;
+	rtac_adm_data.device[i].popp[
+		rtac_adm_data.device[i].num_of_popp++] = popp_id;
+done:
+	mutex_unlock(&rtac_adm_mutex);
+	return;
+}
+
+/* Slide device entries left by one starting at dev_idx, zeroing each
+ * vacated slot.  Caller has already decremented num_of_dev. */
+static void shift_adm_devices(u32 dev_idx)
+{
+	u32 i;
+
+	for (i = dev_idx; i < rtac_adm_data.num_of_dev; i++) {
+		rtac_adm_data.device[i] = rtac_adm_data.device[i + 1];
+		memset(&rtac_adm_data.device[i + 1], 0,
+			sizeof(rtac_adm_data.device[i + 1]));
+	}
+}
+
+/* Slide popp ids left by one starting at popp_idx within one device,
+ * zeroing each vacated slot.  Caller has already decremented num_of_popp. */
+static void shift_popp(u32 copp_idx, u32 popp_idx)
+{
+	uint32_t *popp = rtac_adm_data.device[copp_idx].popp;
+	u32 i;
+
+	for (i = popp_idx; i < rtac_adm_data.device[copp_idx].num_of_popp;
+								i++) {
+		popp[i] = popp[i + 1];
+		popp[i + 1] = 0;
+	}
+}
+
+/*
+ * Drop the RTAC entry for port_id: zero the slot, decrement the count,
+ * then compact the table with shift_adm_devices() (which relies on
+ * num_of_dev already being decremented).  Unknown ports are ignored.
+ */
+void rtac_remove_adm_device(u32 port_id)
+{
+	s32 i;
+	pr_debug("%s: port_id = %d\n", __func__, port_id);
+
+	mutex_lock(&rtac_adm_mutex);
+	/* look for device */
+	for (i = 0; i < rtac_adm_data.num_of_dev; i++) {
+		if (rtac_adm_data.device[i].afe_port == port_id) {
+			memset(&rtac_adm_data.device[i], 0,
+				   sizeof(rtac_adm_data.device[i]));
+			rtac_adm_data.num_of_dev--;
+
+			/* only compact when entries remain after the hole */
+			if (rtac_adm_data.num_of_dev >= 1) {
+				shift_adm_devices(i);
+				break;
+			}
+		}
+	}
+
+	mutex_unlock(&rtac_adm_mutex);
+	return;
+}
+
+/*
+ * Remove popp_id from every tracked ADM device, compacting each device's
+ * popp table via shift_popp().
+ * NOTE(review): after a match, shift_popp() moves the next popp into
+ * index j, but the loop advances past j without re-checking it — verify
+ * that duplicate popp ids per device cannot occur (add_popp prevents
+ * them on insert).
+ */
+void rtac_remove_popp_from_adm_devices(u32 popp_id)
+{
+	s32 i, j;
+	pr_debug("%s: popp_id = %d\n", __func__, popp_id);
+
+	mutex_lock(&rtac_adm_mutex);
+
+	for (i = 0; i < rtac_adm_data.num_of_dev; i++) {
+		for (j = 0; j < rtac_adm_data.device[i].num_of_popp; j++) {
+			if (rtac_adm_data.device[i].popp[j] == popp_id) {
+				rtac_adm_data.device[i].popp[j] = 0;
+				rtac_adm_data.device[i].num_of_popp--;
+				shift_popp(i, j);
+			}
+		}
+	}
+
+	mutex_unlock(&rtac_adm_mutex);
+}
+
+/* Voice Info */
+/* Fill (or refresh) voice combo slot idx and remember its session id. */
+static void set_rtac_voice_data(int idx, u32 cvs_handle, u32 cvp_handle,
+					u32 rx_afe_port, u32 tx_afe_port,
+					u32 session_id)
+{
+	struct rtac_voice_data *combo = &rtac_voice_data.voice[idx];
+
+	combo->tx_topology_id = get_voice_tx_topology();
+	combo->rx_topology_id = get_voice_rx_topology();
+	combo->tx_afe_port = tx_afe_port;
+	combo->rx_afe_port = rx_afe_port;
+	combo->cvs_handle = cvs_handle;
+	combo->cvp_handle = cvp_handle;
+
+	/* Store session ID for voice RTAC */
+	voice_session_id[idx] = session_id;
+}
+
+/*
+ * Track a voice path (CVS/CVP handles + AFE ports).  An existing combo
+ * with the same CVS handle is refreshed in place; otherwise a new slot is
+ * taken at index i, which the search loop leaves at the first free slot.
+ */
+void rtac_add_voice(u32 cvs_handle, u32 cvp_handle, u32 rx_afe_port,
+			u32 tx_afe_port, u32 session_id)
+{
+	u32 i = 0;
+	pr_debug("%s\n", __func__);
+	mutex_lock(&rtac_voice_mutex);
+
+	if (rtac_voice_data.num_of_voice_combos ==
+			RTAC_MAX_ACTIVE_VOICE_COMBOS) {
+		pr_err("%s, Can't add anymore RTAC devices!\n", __func__);
+		goto done;
+	}
+
+	/* Check if device already added */
+	if (rtac_voice_data.num_of_voice_combos != 0) {
+		for (; i < rtac_voice_data.num_of_voice_combos; i++) {
+			if (rtac_voice_data.voice[i].cvs_handle ==
+							cvs_handle) {
+				set_rtac_voice_data(i, cvs_handle, cvp_handle,
+					rx_afe_port, tx_afe_port,
+					session_id);
+				goto done;
+			}
+		}
+	}
+
+	/* Add device */
+	rtac_voice_data.num_of_voice_combos++;
+	set_rtac_voice_data(i, cvs_handle, cvp_handle,
+				rx_afe_port, tx_afe_port,
+				session_id);
+done:
+	mutex_unlock(&rtac_voice_mutex);
+	return;
+}
+
+/* Slide voice combos (and their session ids) left by one starting at idx;
+ * the caller zeroes the freed last slot. */
+static void shift_voice_devices(u32 idx)
+{
+	u32 i;
+
+	for (i = idx; i < rtac_voice_data.num_of_voice_combos - 1; i++) {
+		rtac_voice_data.voice[i] = rtac_voice_data.voice[i + 1];
+		voice_session_id[i] = voice_session_id[i + 1];
+	}
+}
+
+/*
+ * Remove the voice combo tracked for cvs_handle: compact the table with
+ * shift_voice_devices(), then zero the now-unused last slot and its
+ * session id.  Unknown handles are ignored.
+ */
+void rtac_remove_voice(u32 cvs_handle)
+{
+	u32 i = 0;
+	pr_debug("%s\n", __func__);
+
+	mutex_lock(&rtac_voice_mutex);
+	/* look for device */
+	for (i = 0; i < rtac_voice_data.num_of_voice_combos; i++) {
+		if (rtac_voice_data.voice[i].cvs_handle == cvs_handle) {
+			shift_voice_devices(i);
+			rtac_voice_data.num_of_voice_combos--;
+			memset(&rtac_voice_data.voice[
+				rtac_voice_data.num_of_voice_combos], 0,
+				sizeof(rtac_voice_data.voice
+				[rtac_voice_data.num_of_voice_combos]));
+			voice_session_id[rtac_voice_data.num_of_voice_combos]
+				= 0;
+			break;
+		}
+	}
+	mutex_unlock(&rtac_voice_mutex);
+	return;
+}
+
+/* Map a CVS handle to its combo index; logs and falls back to index 0
+ * when the handle is not tracked. */
+static int get_voice_index_cvs(u32 cvs_handle)
+{
+	u32 idx;
+
+	for (idx = 0; idx < rtac_voice_data.num_of_voice_combos; idx++)
+		if (rtac_voice_data.voice[idx].cvs_handle == cvs_handle)
+			return idx;
+
+	pr_err("%s: No voice index for CVS handle %d found returning 0\n",
+		__func__, cvs_handle);
+	return 0;
+}
+
+/* Map a CVP handle to its combo index; logs and falls back to index 0
+ * when the handle is not tracked. */
+static int get_voice_index_cvp(u32 cvp_handle)
+{
+	u32 idx;
+
+	for (idx = 0; idx < rtac_voice_data.num_of_voice_combos; idx++)
+		if (rtac_voice_data.voice[idx].cvp_handle == cvp_handle)
+			return idx;
+
+	pr_err("%s: No voice index for CVP handle %d found returning 0\n",
+		__func__, cvp_handle);
+	return 0;
+}
+
+/* Dispatch handle lookup by mode (CVP or CVS); index 0 on bad mode. */
+static int get_voice_index(u32 mode, u32 handle)
+{
+	switch (mode) {
+	case RTAC_CVP:
+		return get_voice_index_cvp(handle);
+	case RTAC_CVS:
+		return get_voice_index_cvs(handle);
+	default:
+		pr_err("%s: Invalid mode %d, returning 0\n",
+			__func__, mode);
+		return 0;
+	}
+}
+
+
+/* ADM APR */
+/* Record the ADM APR handle used by send_adm_apr().
+ * Fixed: print the pointer with %p instead of casting it to unsigned int,
+ * which truncates on 64-bit kernels and triggers a cast warning. */
+void rtac_set_adm_handle(void *handle)
+{
+	pr_debug("%s: handle = %p\n", __func__, handle);
+
+	mutex_lock(&rtac_adm_apr_mutex);
+	rtac_adm_apr_data.apr_handle = handle;
+	mutex_unlock(&rtac_adm_apr_mutex);
+}
+
+/*
+ * ADM APR callback hook: consume the payload only when an RTAC command is
+ * pending (cmd_state == 1), waking the sender in send_adm_apr().  Returns
+ * false so the normal handler processes messages RTAC did not issue.
+ */
+bool rtac_make_adm_callback(uint32_t *payload, u32 payload_size)
+{
+	pr_debug("%s:cmd_state = %d\n", __func__,
+			atomic_read(&rtac_adm_apr_data.cmd_state));
+	if (atomic_read(&rtac_adm_apr_data.cmd_state) != 1)
+		return false;
+
+	/* Offset data for in-band payload */
+	rtac_copy_adm_payload_to_user(payload, payload_size);
+	atomic_set(&rtac_adm_apr_data.cmd_state, 0);
+	wake_up(&rtac_adm_apr_data.cmd_wait);
+	return true;
+}
+
+/*
+ * Stage the DSP reply into rtac_adm_buffer as [size][payload] for the
+ * later copy_to_user() in send_adm_apr().
+ * NOTE(review): the size word is written before the bounds check, so an
+ * oversized reply still updates the size header while its payload is
+ * dropped — confirm the reader tolerates this.
+ */
+void rtac_copy_adm_payload_to_user(void *payload, u32 payload_size)
+{
+	pr_debug("%s\n", __func__);
+	rtac_adm_payload_size = payload_size;
+
+	memcpy(rtac_adm_buffer, &payload_size, sizeof(u32));
+	if (payload_size != 0) {
+		if (payload_size > rtac_adm_user_buf_size) {
+			pr_err("%s: Buffer set not big enough for returned data, buf size = %d,ret data = %d\n",
+				__func__, rtac_adm_user_buf_size, payload_size);
+			goto done;
+		}
+		memcpy(rtac_adm_buffer + sizeof(u32), payload, payload_size);
+	}
+done:
+	return;
+}
+
+/*
+ * Transfer one in-band ADM RTAC command between user space and the DSP.
+ * User buffer layout: [0] total buffer size, [1] payload size,
+ * [2] copp id, [3..] payload.  Blocks (up to TIMEOUT_MS) for the DSP
+ * reply.  Returns bytes written (SET) or read (GET); 0 on any failure.
+ */
+u32 send_adm_apr(void *buf, u32 opcode)
+{
+	s32 result;
+	u32 count = 0;
+	u32 bytes_returned = 0;
+	u32 port_index = 0;
+	u32 copp_id;
+	u32 payload_size;
+	struct apr_hdr adm_params;
+	pr_debug("%s\n", __func__);
+
+	if (copy_from_user(&count, (void *)buf, sizeof(count))) {
+		pr_err("%s: Copy to user failed! buf = 0x%x\n",
+			__func__, (unsigned int)buf);
+		result = -EFAULT;
+		goto done;
+	}
+
+	/* NOTE(review): count is u32, so "<= 0" only ever catches 0 */
+	if (count <= 0) {
+		pr_err("%s: Invalid buffer size = %d\n", __func__, count);
+		goto done;
+	}
+
+	if (copy_from_user(&payload_size, buf + sizeof(u32), sizeof(u32))) {
+		pr_err("%s: Could not copy payload size from user buffer\n",
+			__func__);
+		goto done;
+	}
+
+
+	if (payload_size > MAX_PAYLOAD_SIZE) {
+		pr_err("%s: Invalid payload size = %d\n",
+			__func__, payload_size);
+		goto done;
+	}
+
+	if (copy_from_user(&copp_id, buf + 2 * sizeof(u32), sizeof(u32))) {
+		pr_err("%s: Could not copy port id from user buffer\n",
+			__func__);
+		goto done;
+	}
+
+	/* resolve the copp back to its AFE port index */
+	for (port_index = 0; port_index < AFE_MAX_PORTS; port_index++) {
+		if (adm_get_copp_id(port_index) == copp_id)
+			break;
+	}
+	if (port_index >= AFE_MAX_PORTS) {
+		pr_err("%s: Could not find port index for copp = %d\n",
+			__func__, copp_id);
+		goto done;
+	}
+
+	mutex_lock(&rtac_adm_apr_mutex);
+	if (rtac_adm_apr_data.apr_handle == NULL) {
+		pr_err("%s: APR not initialized\n", __func__);
+		goto err;
+	}
+
+	/* Set globals for copy of returned payload */
+	rtac_adm_user_buf_size = count;
+	/* Copy buffer to in-band payload */
+	if (copy_from_user(rtac_adm_buffer + sizeof(adm_params),
+			buf + 3 * sizeof(u32), payload_size)) {
+		pr_err("%s: Could not copy payload from user buffer\n",
+			__func__);
+		goto err;
+	}
+
+	/* Pack header */
+	adm_params.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+		APR_HDR_LEN(20), APR_PKT_VER);
+	adm_params.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+		payload_size);
+	adm_params.src_svc = APR_SVC_ADM;
+	adm_params.src_domain = APR_DOMAIN_APPS;
+	adm_params.src_port = copp_id;
+	adm_params.dest_svc = APR_SVC_ADM;
+	adm_params.dest_domain = APR_DOMAIN_ADSP;
+	adm_params.dest_port = copp_id;
+	adm_params.token = copp_id;
+	adm_params.opcode = opcode;
+
+	memcpy(rtac_adm_buffer, &adm_params, sizeof(adm_params));
+	/* arm the callback before sending so the reply is not missed */
+	atomic_set(&rtac_adm_apr_data.cmd_state, 1);
+
+	pr_debug("%s: Sending RTAC command size = %d\n",
+		__func__, adm_params.pkt_size);
+
+	result = apr_send_pkt(rtac_adm_apr_data.apr_handle,
+				(uint32_t *)rtac_adm_buffer);
+	if (result < 0) {
+		pr_err("%s: Set params failed port = %d, copp = %d\n",
+			__func__, port_index, copp_id);
+		goto err;
+	}
+	/* Wait for the callback */
+	result = wait_event_timeout(rtac_adm_apr_data.cmd_wait,
+		(atomic_read(&rtac_adm_apr_data.cmd_state) == 0),
+		msecs_to_jiffies(TIMEOUT_MS));
+	mutex_unlock(&rtac_adm_apr_mutex);
+	if (!result) {
+		pr_err("%s: Set params timed out port = %d, copp = %d\n",
+			__func__, port_index, copp_id);
+		goto done;
+	}
+
+	if (rtac_adm_payload_size != 0) {
+		if (copy_to_user(buf, rtac_adm_buffer,
+				rtac_adm_payload_size + sizeof(u32))) {
+			pr_err("%s: Could not copy buffer to user, size = %d\n",
+				 __func__, payload_size);
+			goto done;
+		}
+	}
+
+	/* Return data written for SET & data read for GET */
+	if (opcode == ADM_CMD_GET_PARAMS)
+		bytes_returned = rtac_adm_payload_size;
+	else
+		bytes_returned = payload_size;
+done:
+	return bytes_returned;
+err:
+	mutex_unlock(&rtac_adm_apr_mutex);
+	return bytes_returned;
+}
+
+
+/* ASM APR */
+/*
+ * Record the ASM APR handle for a session.
+ * NOTE(review): session_id is used unchecked as an index into
+ * rtac_asm_apr_data[SESSION_MAX + 1] — callers must pass a valid session.
+ */
+void rtac_set_asm_handle(u32 session_id, void *handle)
+{
+	pr_debug("%s\n", __func__);
+
+	mutex_lock(&rtac_asm_apr_mutex);
+	rtac_asm_apr_data[session_id].apr_handle = handle;
+	mutex_unlock(&rtac_asm_apr_mutex);
+}
+
+/*
+ * ASM APR callback hook: consume the payload only when an RTAC command is
+ * pending on this session (cmd_state == 1); return false otherwise so the
+ * normal handler processes the message.
+ */
+bool rtac_make_asm_callback(u32 session_id, uint32_t *payload,
+	u32 payload_size)
+{
+	if (atomic_read(&rtac_asm_apr_data[session_id].cmd_state) != 1)
+		return false;
+
+	pr_debug("%s\n", __func__);
+	/* Offset data for in-band payload */
+	rtac_copy_asm_payload_to_user(payload, payload_size);
+	atomic_set(&rtac_asm_apr_data[session_id].cmd_state, 0);
+	wake_up(&rtac_asm_apr_data[session_id].cmd_wait);
+	return true;
+}
+
+/*
+ * Stage the DSP reply into rtac_asm_buffer as [size][payload] for the
+ * later copy_to_user() in send_rtac_asm_apr().  An oversized reply still
+ * updates the size word but its payload is dropped.
+ */
+void rtac_copy_asm_payload_to_user(void *payload, u32 payload_size)
+{
+	pr_debug("%s\n", __func__);
+	rtac_asm_payload_size = payload_size;
+
+	memcpy(rtac_asm_buffer, &payload_size, sizeof(u32));
+	if (payload_size) {
+		if (payload_size > rtac_asm_user_buf_size) {
+			pr_err("%s: Buffer set not big enough for returned data, buf size = %d, ret data = %d\n",
+			  __func__, rtac_asm_user_buf_size, payload_size);
+			goto done;
+		}
+		memcpy(rtac_asm_buffer + sizeof(u32), payload, payload_size);
+	}
+done:
+	return;
+}
+
+/*
+ * Transfer one in-band ASM RTAC command between user space and the DSP.
+ * User buffer layout: [0] total buffer size, [1] payload size,
+ * [2] session id, [3..] payload.  Blocks (up to 5s) for the DSP reply.
+ * Returns bytes written (SET) or read (GET); 0 on any failure.
+ *
+ * Fixed: the session bound check was "> (SESSION_MAX + 1)", which let
+ * session_id == SESSION_MAX + 1 index one element past the end of
+ * rtac_asm_apr_data[SESSION_MAX + 1] (valid indices 0..SESSION_MAX).
+ * With the check corrected, the redundant inner range guards are gone.
+ */
+u32 send_rtac_asm_apr(void *buf, u32 opcode)
+{
+	s32 result;
+	u32 count = 0;
+	u32 bytes_returned = 0;
+	u32 session_id = 0;
+	u32 payload_size;
+	struct apr_hdr asm_params;
+	pr_debug("%s\n", __func__);
+
+	if (copy_from_user(&count, (void *)buf, sizeof(count))) {
+		pr_err("%s: Copy to user failed! buf = 0x%x\n",
+			__func__, (unsigned int)buf);
+		result = -EFAULT;
+		goto done;
+	}
+
+	/* count is unsigned; an empty buffer is the only invalid size */
+	if (count == 0) {
+		pr_err("%s: Invalid buffer size = %d\n", __func__, count);
+		goto done;
+	}
+
+	if (copy_from_user(&payload_size, buf + sizeof(u32), sizeof(u32))) {
+		pr_err("%s: Could not copy payload size from user buffer\n",
+			__func__);
+		goto done;
+	}
+
+	if (payload_size > MAX_PAYLOAD_SIZE) {
+		pr_err("%s: Invalid payload size = %d\n",
+			__func__, payload_size);
+		goto done;
+	}
+
+	if (copy_from_user(&session_id, buf + 2 * sizeof(u32), sizeof(u32))) {
+		pr_err("%s: Could not copy session id from user buffer\n",
+			__func__);
+		goto done;
+	}
+	if (session_id > SESSION_MAX) {
+		pr_err("%s: Invalid Session = %d\n", __func__, session_id);
+		goto done;
+	}
+
+	mutex_lock(&rtac_asm_apr_mutex);
+	if (rtac_asm_apr_data[session_id].apr_handle == NULL) {
+		pr_err("%s: APR not initialized\n", __func__);
+		goto err;
+	}
+
+	/* Set globals for copy of returned payload */
+	rtac_asm_user_buf_size = count;
+
+	/* Copy buffer to in-band payload */
+	if (copy_from_user(rtac_asm_buffer + sizeof(asm_params),
+			buf + 3 * sizeof(u32), payload_size)) {
+		pr_err("%s: Could not copy payload from user buffer\n",
+			__func__);
+		goto err;
+	}
+
+	/* Pack header */
+	asm_params.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+		APR_HDR_LEN(20), APR_PKT_VER);
+	asm_params.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+		payload_size);
+	asm_params.src_svc = q6asm_get_apr_service_id(session_id);
+	asm_params.src_domain = APR_DOMAIN_APPS;
+	asm_params.src_port = (session_id << 8) | 0x0001;
+	asm_params.dest_svc = APR_SVC_ASM;
+	asm_params.dest_domain = APR_DOMAIN_ADSP;
+	asm_params.dest_port = (session_id << 8) | 0x0001;
+	asm_params.token = session_id;
+	asm_params.opcode = opcode;
+
+	memcpy(rtac_asm_buffer, &asm_params, sizeof(asm_params));
+	/* session_id is validated above, so no extra guard is needed here */
+	atomic_set(&rtac_asm_apr_data[session_id].cmd_state, 1);
+
+	pr_debug("%s: Sending RTAC command size = %d, session_id=%d\n",
+		__func__, asm_params.pkt_size, session_id);
+
+	result = apr_send_pkt(rtac_asm_apr_data[session_id].apr_handle,
+				(uint32_t *)rtac_asm_buffer);
+	if (result < 0) {
+		pr_err("%s: Set params failed session = %d\n",
+			__func__, session_id);
+		goto err;
+	}
+
+	/* Wait for the callback */
+	result = wait_event_timeout(rtac_asm_apr_data[session_id].cmd_wait,
+		(atomic_read(&rtac_asm_apr_data[session_id].cmd_state) == 0),
+		5 * HZ);
+	mutex_unlock(&rtac_asm_apr_mutex);
+	if (!result) {
+		pr_err("%s: Set params timed out session = %d\n",
+			__func__, session_id);
+		goto done;
+	}
+
+	if (rtac_asm_payload_size != 0) {
+		if (copy_to_user(buf, rtac_asm_buffer,
+				rtac_asm_payload_size + sizeof(u32))) {
+			pr_err("%s: Could not copy buffer to user,size = %d\n",
+				__func__, payload_size);
+			goto done;
+		}
+	}
+
+	/* Return data written for SET & data read for GET */
+	if (opcode == ASM_STREAM_CMD_GET_PP_PARAMS)
+		bytes_returned = rtac_asm_payload_size;
+	else
+		bytes_returned = payload_size;
+done:
+	return bytes_returned;
+err:
+	mutex_unlock(&rtac_asm_apr_mutex);
+	return bytes_returned;
+}
+
+
+/* Voice APR */
+/* Record the voice APR handle for a mode (RTAC_CVS/RTAC_CVP).
+ * Fixed: guard the table index — rtac_voice_apr_data has only
+ * RTAC_VOICE_MODES entries, so an out-of-range mode would corrupt
+ * adjacent memory. */
+void rtac_set_voice_handle(u32 mode, void *handle)
+{
+	pr_debug("%s\n", __func__);
+
+	if (mode >= RTAC_VOICE_MODES) {
+		pr_err("%s: Invalid mode = %d\n", __func__, mode);
+		return;
+	}
+
+	mutex_lock(&rtac_voice_apr_mutex);
+	rtac_voice_apr_data[mode].apr_handle = handle;
+	mutex_unlock(&rtac_voice_apr_mutex);
+}
+
+/*
+ * Voice APR callback hook: consume the payload only when an RTAC command
+ * is pending for this mode.  Fixed: validate mode BEFORE it is used as an
+ * array index — the original condition read rtac_voice_apr_data[mode]
+ * first, an out-of-bounds access for mode >= RTAC_VOICE_MODES.
+ */
+bool rtac_make_voice_callback(u32 mode, uint32_t *payload, u32 payload_size)
+{
+	if ((mode >= RTAC_VOICE_MODES) ||
+	    (atomic_read(&rtac_voice_apr_data[mode].cmd_state) != 1))
+		return false;
+
+	pr_debug("%s\n", __func__);
+	/* Offset data for in-band payload */
+	rtac_copy_voice_payload_to_user(payload, payload_size);
+	atomic_set(&rtac_voice_apr_data[mode].cmd_state, 0);
+	wake_up(&rtac_voice_apr_data[mode].cmd_wait);
+	return true;
+}
+
+/*
+ * Stage the DSP reply into rtac_voice_buffer as [size][payload] for the
+ * later copy_to_user() in send_voice_apr().  An oversized reply still
+ * updates the size word but its payload is dropped.
+ */
+void rtac_copy_voice_payload_to_user(void *payload, u32 payload_size)
+{
+	pr_debug("%s\n", __func__);
+	rtac_voice_payload_size = payload_size;
+
+	memcpy(rtac_voice_buffer, &payload_size, sizeof(u32));
+	if (payload_size) {
+		if (payload_size > rtac_voice_user_buf_size) {
+			pr_err("%s: Buffer set not big enough for returned data, buf size = %d, ret data = %d\n",
+			  __func__, rtac_voice_user_buf_size, payload_size);
+			goto done;
+		}
+		memcpy(rtac_voice_buffer + sizeof(u32), payload, payload_size);
+	}
+done:
+	return;
+}
+
+/*
+ * Transfer one in-band voice (CVS/CVP) RTAC command between user space
+ * and the modem DSP.  User buffer layout: [0] total buffer size,
+ * [1] payload size, [2] destination port (CVS/CVP handle), [3..] payload.
+ * Blocks (up to TIMEOUT_MS) for the reply.  Returns bytes written (SET)
+ * or read (GET); 0 on any failure.
+ */
+u32 send_voice_apr(u32 mode, void *buf, u32 opcode)
+{
+	s32 result;
+	u32 count = 0;
+	u32 bytes_returned = 0;
+	u32 payload_size;
+	u32 dest_port;
+	struct apr_hdr voice_params;
+	pr_debug("%s\n", __func__);
+
+	if (copy_from_user(&count, (void *)buf, sizeof(count))) {
+		pr_err("%s: Copy to user failed! buf = 0x%x\n",
+			__func__, (unsigned int)buf);
+		result = -EFAULT;
+		goto done;
+	}
+
+	/* NOTE(review): count is u32, so "<= 0" only ever catches 0 */
+	if (count <= 0) {
+		pr_err("%s: Invalid buffer size = %d\n", __func__, count);
+		goto done;
+	}
+
+	if (copy_from_user(&payload_size, buf + sizeof(payload_size),
+						sizeof(payload_size))) {
+		pr_err("%s: Could not copy payload size from user buffer\n",
+			__func__);
+		goto done;
+	}
+
+	if (payload_size > MAX_PAYLOAD_SIZE) {
+		pr_err("%s: Invalid payload size = %d\n",
+			__func__, payload_size);
+		goto done;
+	}
+
+	if (copy_from_user(&dest_port, buf + 2 * sizeof(dest_port),
+						sizeof(dest_port))) {
+		pr_err("%s: Could not copy port id from user buffer\n",
+			__func__);
+		goto done;
+	}
+
+	/* mode doubles as the rtac_voice_apr_data[] index below */
+	if ((mode != RTAC_CVP) && (mode != RTAC_CVS)) {
+		pr_err("%s: Invalid Mode for APR, mode = %d\n",
+			__func__, mode);
+		goto done;
+	}
+
+	mutex_lock(&rtac_voice_apr_mutex);
+	if (rtac_voice_apr_data[mode].apr_handle == NULL) {
+		pr_err("%s: APR not initialized\n", __func__);
+		goto err;
+	}
+
+	/* Set globals for copy of returned payload */
+	rtac_voice_user_buf_size = count;
+
+	/* Copy buffer to in-band payload */
+	if (copy_from_user(rtac_voice_buffer + sizeof(voice_params),
+			buf + 3 * sizeof(u32), payload_size)) {
+		pr_err("%s: Could not copy payload from user buffer\n",
+			__func__);
+		goto err;
+	}
+
+	/* Pack header */
+	voice_params.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+		APR_HDR_LEN(20), APR_PKT_VER);
+	voice_params.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+		payload_size);
+	voice_params.src_svc = 0;
+	voice_params.src_domain = APR_DOMAIN_APPS;
+	voice_params.src_port = voice_session_id[
+		get_voice_index(mode, dest_port)];
+	voice_params.dest_svc = 0;
+	voice_params.dest_domain = APR_DOMAIN_MODEM;
+	voice_params.dest_port = (u16)dest_port;
+	voice_params.token = 0;
+	voice_params.opcode = opcode;
+
+	memcpy(rtac_voice_buffer, &voice_params, sizeof(voice_params));
+	/* arm the callback before sending so the reply is not missed */
+	atomic_set(&rtac_voice_apr_data[mode].cmd_state, 1);
+
+	pr_debug("%s: Sending RTAC command size = %d, opcode = %x\n",
+		__func__, voice_params.pkt_size, opcode);
+
+	result = apr_send_pkt(rtac_voice_apr_data[mode].apr_handle,
+				(uint32_t *)rtac_voice_buffer);
+	if (result < 0) {
+		pr_err("%s: apr_send_pkt failed opcode = %x\n",
+			__func__, opcode);
+		goto err;
+	}
+	/* Wait for the callback */
+	result = wait_event_timeout(rtac_voice_apr_data[mode].cmd_wait,
+		(atomic_read(&rtac_voice_apr_data[mode].cmd_state) == 0),
+		msecs_to_jiffies(TIMEOUT_MS));
+	mutex_unlock(&rtac_voice_apr_mutex);
+	if (!result) {
+		pr_err("%s: apr_send_pkt timed out opcode = %x\n",
+			__func__, opcode);
+		goto done;
+	}
+
+	if (rtac_voice_payload_size != 0) {
+		if (copy_to_user(buf, rtac_voice_buffer,
+				rtac_voice_payload_size + sizeof(u32))) {
+			pr_err("%s: Could not copy buffer to user,size = %d\n",
+				 __func__, payload_size);
+			goto done;
+		}
+	}
+
+	/* Return data written for SET & data read for GET */
+	if (opcode == VOICE_CMD_GET_PARAM)
+		bytes_returned = rtac_voice_payload_size;
+	else
+		bytes_returned = payload_size;
+done:
+	return bytes_returned;
+err:
+	mutex_unlock(&rtac_voice_apr_mutex);
+	return bytes_returned;
+}
+
+
+
+/*
+ * Unlocked ioctl entry point for /dev/msm_rtac.  arg is a user pointer:
+ * either a struct dump target (GET_*_INFO) or the in-band command buffer
+ * consumed by the send_*_apr() helpers.  Returns the transferred byte
+ * count, 0 if a send helper failed, or -EFAULT for a NULL arg.
+ */
+static long rtac_ioctl(struct file *f,
+		unsigned int cmd, unsigned long arg)
+{
+	s32 result = 0;
+	pr_debug("%s\n", __func__);
+
+	if (arg == 0) {
+		pr_err("%s: No data sent to driver!\n", __func__);
+		result = -EFAULT;
+		goto done;
+	}
+
+	switch (cmd) {
+	case AUDIO_GET_RTAC_ADM_INFO:
+		if (copy_to_user((void *)arg, &rtac_adm_data,
+						sizeof(rtac_adm_data)))
+			pr_err("%s: Could not copy to userspace!\n", __func__);
+		else
+			result = sizeof(rtac_adm_data);
+		break;
+	case AUDIO_GET_RTAC_VOICE_INFO:
+		if (copy_to_user((void *)arg, &rtac_voice_data,
+						sizeof(rtac_voice_data)))
+			pr_err("%s: Could not copy to userspace!\n", __func__);
+		else
+			result = sizeof(rtac_voice_data);
+		break;
+	case AUDIO_GET_RTAC_ADM_CAL:
+		result = send_adm_apr((void *)arg, ADM_CMD_GET_PARAMS);
+		break;
+	case AUDIO_SET_RTAC_ADM_CAL:
+		result = send_adm_apr((void *)arg, ADM_CMD_SET_PARAMS);
+		break;
+	case AUDIO_GET_RTAC_ASM_CAL:
+		result = send_rtac_asm_apr((void *)arg,
+			ASM_STREAM_CMD_GET_PP_PARAMS);
+		break;
+	case AUDIO_SET_RTAC_ASM_CAL:
+		result = send_rtac_asm_apr((void *)arg,
+			ASM_STREAM_CMD_SET_PP_PARAMS);
+		break;
+	case AUDIO_GET_RTAC_CVS_CAL:
+		result = send_voice_apr(RTAC_CVS, (void *)arg,
+			VOICE_CMD_GET_PARAM);
+		break;
+	case AUDIO_SET_RTAC_CVS_CAL:
+		result = send_voice_apr(RTAC_CVS, (void *)arg,
+			VOICE_CMD_SET_PARAM);
+		break;
+	case AUDIO_GET_RTAC_CVP_CAL:
+		result = send_voice_apr(RTAC_CVP, (void *)arg,
+			VOICE_CMD_GET_PARAM);
+		break;
+	case AUDIO_SET_RTAC_CVP_CAL:
+		result = send_voice_apr(RTAC_CVP, (void *)arg,
+			VOICE_CMD_SET_PARAM);
+		break;
+	default:
+		pr_err("%s: Invalid IOCTL, command = %d!\n",
+		       __func__, cmd);
+	}
+done:
+	return result;
+}
+
+
+/* file ops for /dev/msm_rtac: all real work happens in rtac_ioctl() */
+static const struct file_operations rtac_fops = {
+	.owner = THIS_MODULE,
+	.open = rtac_open,
+	.release = rtac_release,
+	.unlocked_ioctl = rtac_ioctl,
+};
+
+/* dynamically-numbered misc device exposing the RTAC interface */
+struct miscdevice rtac_misc = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "msm_rtac",
+	.fops = &rtac_fops,
+};
+
+/*
+ * Module init: zero the ADM/ASM/voice tables, prime each APR channel's
+ * wait state, allocate the three in-band staging buffers, and register
+ * /dev/msm_rtac.  Fixed: the staging buffers are no longer leaked when
+ * misc_register() fails.
+ */
+static int __init rtac_init(void)
+{
+	int i = 0;
+	int result;
+	pr_debug("%s\n", __func__);
+
+	/* ADM */
+	memset(&rtac_adm_data, 0, sizeof(rtac_adm_data));
+	rtac_adm_apr_data.apr_handle = NULL;
+	atomic_set(&rtac_adm_apr_data.cmd_state, 0);
+	init_waitqueue_head(&rtac_adm_apr_data.cmd_wait);
+	mutex_init(&rtac_adm_mutex);
+	mutex_init(&rtac_adm_apr_mutex);
+
+	rtac_adm_buffer = kzalloc(RTAC_BUF_SIZE, GFP_KERNEL);
+	if (rtac_adm_buffer == NULL) {
+		pr_err("%s: Could not allocate payload of size = %d\n",
+			__func__, RTAC_BUF_SIZE);
+		goto nomem;
+	}
+
+	/* ASM */
+	for (i = 0; i < SESSION_MAX+1; i++) {
+		rtac_asm_apr_data[i].apr_handle = NULL;
+		atomic_set(&rtac_asm_apr_data[i].cmd_state, 0);
+		init_waitqueue_head(&rtac_asm_apr_data[i].cmd_wait);
+	}
+	mutex_init(&rtac_asm_apr_mutex);
+
+	rtac_asm_buffer = kzalloc(RTAC_BUF_SIZE, GFP_KERNEL);
+	if (rtac_asm_buffer == NULL) {
+		pr_err("%s: Could not allocate payload of size = %d\n",
+			__func__, RTAC_BUF_SIZE);
+		kzfree(rtac_adm_buffer);
+		goto nomem;
+	}
+
+	/* Voice */
+	memset(&rtac_voice_data, 0, sizeof(rtac_voice_data));
+	for (i = 0; i < RTAC_VOICE_MODES; i++) {
+		rtac_voice_apr_data[i].apr_handle = NULL;
+		atomic_set(&rtac_voice_apr_data[i].cmd_state, 0);
+		init_waitqueue_head(&rtac_voice_apr_data[i].cmd_wait);
+	}
+	mutex_init(&rtac_voice_mutex);
+	mutex_init(&rtac_voice_apr_mutex);
+
+	rtac_voice_buffer = kzalloc(RTAC_BUF_SIZE, GFP_KERNEL);
+	if (rtac_voice_buffer == NULL) {
+		pr_err("%s: Could not allocate payload of size = %d\n",
+			__func__, RTAC_BUF_SIZE);
+		kzfree(rtac_adm_buffer);
+		kzfree(rtac_asm_buffer);
+		goto nomem;
+	}
+
+	result = misc_register(&rtac_misc);
+	if (result != 0) {
+		/* don't leak the staging buffers on registration failure */
+		kzfree(rtac_adm_buffer);
+		kzfree(rtac_asm_buffer);
+		kzfree(rtac_voice_buffer);
+	}
+	return result;
+nomem:
+	return -ENOMEM;
+}
+
+module_init(rtac_init);
+
+MODULE_DESCRIPTION("MSM 8x60 Real-Time Audio Calibration driver");
+MODULE_LICENSE("GPL v2");
+
+#endif
diff --git a/sound/soc/qcom/qdsp6/q6adm.c b/sound/soc/qcom/qdsp6/q6adm.c
new file mode 100644
index 0000000000000..b565411a5f7cd
--- /dev/null
+++ b/sound/soc/qcom/qdsp6/q6adm.c
@@ -0,0 +1,1241 @@
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
+#include <linux/uaccess.h>
+#include <linux/atomic.h>
+#include <linux/err.h>
+
+#include <sound/qdsp6v2/audio_dev_ctl.h>
+#include <sound/qdsp6v2/audio_acdb.h>
+#include <sound/qdsp6v2/rtac.h>
+
+#include <sound/apr_audio.h>
+#include <sound/q6afe.h>
+
+#define TIMEOUT_MS 1000
+#define AUDIO_RX 0x0
+#define AUDIO_TX 0x1
+
+#define ASM_MAX_SESSION 0x8 /* To do: define in a header */
+#define RESET_COPP_ID 99
+#define INVALID_COPP_ID 0xFF
+
+/*
+ * Global state for the ADM (Audio Device Manager) APR client.
+ * One COPP id / refcount / completion slot is kept per AFE port index.
+ */
+struct adm_ctl {
+ void *apr; /* handle from apr_register(); NULL until first command */
+ atomic_t copp_id[AFE_MAX_PORTS]; /* COPP id the DSP reported per port */
+ atomic_t copp_cnt[AFE_MAX_PORTS]; /* open refcount per port */
+ atomic_t copp_stat[AFE_MAX_PORTS]; /* set to 1 when a DSP ack arrives */
+ wait_queue_head_t wait; /* woken by adm_callback() on responses */
+ int ec_ref_rx; /* pending echo-ref RX endpoint, consumed by adm_open() */
+};
+
+/* Cached calibration blocks, indexed by ACDB path; see send_adm_cal(). */
+static struct acdb_cal_block mem_addr_audproc[MAX_AUDPROC_TYPES];
+static struct acdb_cal_block mem_addr_audvol[MAX_AUDPROC_TYPES];
+
+/* Singleton driver state. */
+static struct adm_ctl this_adm;
+
+
+/*
+ * srs_trumedia_open() - send one SRS TruMedia post-processing parameter
+ * block to the COPP attached to @port_id via ADM_CMD_SET_PARAMS.
+ * @port_id:     AFE port whose COPP receives the parameters
+ * @srs_tech_id: selects which srs_trumedia_params_* struct @srs_params is
+ * @srs_params:  caller-owned parameter block, copied into the command
+ *
+ * Returns 0 on success or a negative errno.  Fixes vs. original:
+ *  - every kzalloc() result is checked before use (the original
+ *    dereferenced a possibly-NULL pointer on allocation failure);
+ *  - an unknown srs_tech_id now returns -EINVAL instead of 0;
+ *  - the success path returns 0 instead of leaking the positive
+ *    wait_event_timeout() return value to the caller.
+ */
+int srs_trumedia_open(int port_id, int srs_tech_id, void *srs_params)
+{
+ struct asm_pp_params_command *open = NULL;
+ int ret = 0, sz = 0;
+ int index;
+
+ pr_debug("SRS - %s", __func__);
+
+ index = afe_get_port_index(port_id);
+
+ if (IS_ERR_VALUE(index)) {
+ pr_err("%s: invald port id\n", __func__);
+ return index;
+ }
+
+ switch (srs_tech_id) {
+ case SRS_ID_GLOBAL: {
+ struct srs_trumedia_params_GLOBAL *glb_params = NULL;
+ sz = sizeof(struct asm_pp_params_command) +
+ sizeof(struct srs_trumedia_params_GLOBAL);
+ open = kzalloc(sz, GFP_KERNEL);
+ if (!open)
+ return -ENOMEM;
+ open->payload_size = sizeof(struct srs_trumedia_params_GLOBAL) +
+ sizeof(struct asm_pp_param_data_hdr);
+ open->params.param_id = SRS_TRUMEDIA_PARAMS;
+ open->params.param_size =
+ sizeof(struct srs_trumedia_params_GLOBAL);
+ glb_params = (struct srs_trumedia_params_GLOBAL *)((u8 *)open +
+ sizeof(struct asm_pp_params_command));
+ memcpy(glb_params, srs_params,
+ sizeof(struct srs_trumedia_params_GLOBAL));
+ pr_debug("SRS - %s: Global params - 1 = %x, 2 = %x, 3 = %x,"
+ " 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x\n",
+ __func__, (int)glb_params->v1,
+ (int)glb_params->v2, (int)glb_params->v3,
+ (int)glb_params->v4, (int)glb_params->v5,
+ (int)glb_params->v6, (int)glb_params->v7,
+ (int)glb_params->v8);
+ break;
+ }
+ case SRS_ID_WOWHD: {
+ struct srs_trumedia_params_WOWHD *whd_params = NULL;
+ sz = sizeof(struct asm_pp_params_command) +
+ sizeof(struct srs_trumedia_params_WOWHD);
+ open = kzalloc(sz, GFP_KERNEL);
+ if (!open)
+ return -ENOMEM;
+ open->payload_size = sizeof(struct srs_trumedia_params_WOWHD) +
+ sizeof(struct asm_pp_param_data_hdr);
+ open->params.param_id = SRS_TRUMEDIA_PARAMS_WOWHD;
+ open->params.param_size =
+ sizeof(struct srs_trumedia_params_WOWHD);
+ whd_params = (struct srs_trumedia_params_WOWHD *)((u8 *)open +
+ sizeof(struct asm_pp_params_command));
+ memcpy(whd_params, srs_params,
+ sizeof(struct srs_trumedia_params_WOWHD));
+ pr_debug("SRS - %s: WOWHD params - 1 = %x, 2 = %x, 3 = %x,"
+ " 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x, 9 = %x,"
+ " 10 = %x, 11 = %x\n", __func__, (int)whd_params->v1,
+ (int)whd_params->v2, (int)whd_params->v3,
+ (int)whd_params->v4, (int)whd_params->v5,
+ (int)whd_params->v6, (int)whd_params->v7,
+ (int)whd_params->v8, (int)whd_params->v9,
+ (int)whd_params->v10, (int)whd_params->v11);
+ break;
+ }
+ case SRS_ID_CSHP: {
+ struct srs_trumedia_params_CSHP *chp_params = NULL;
+ sz = sizeof(struct asm_pp_params_command) +
+ sizeof(struct srs_trumedia_params_CSHP);
+ open = kzalloc(sz, GFP_KERNEL);
+ if (!open)
+ return -ENOMEM;
+ open->payload_size = sizeof(struct srs_trumedia_params_CSHP) +
+ sizeof(struct asm_pp_param_data_hdr);
+ open->params.param_id = SRS_TRUMEDIA_PARAMS_CSHP;
+ open->params.param_size =
+ sizeof(struct srs_trumedia_params_CSHP);
+ chp_params = (struct srs_trumedia_params_CSHP *)((u8 *)open +
+ sizeof(struct asm_pp_params_command));
+ memcpy(chp_params, srs_params,
+ sizeof(struct srs_trumedia_params_CSHP));
+ pr_debug("SRS - %s: CSHP params - 1 = %x, 2 = %x, 3 = %x,"
+ " 4 = %x, 5 = %x, 6 = %x, 7 = %x, 8 = %x,"
+ " 9 = %x\n", __func__, (int)chp_params->v1,
+ (int)chp_params->v2, (int)chp_params->v3,
+ (int)chp_params->v4, (int)chp_params->v5,
+ (int)chp_params->v6, (int)chp_params->v7,
+ (int)chp_params->v8, (int)chp_params->v9);
+ break;
+ }
+ case SRS_ID_HPF: {
+ struct srs_trumedia_params_HPF *hpf_params = NULL;
+ sz = sizeof(struct asm_pp_params_command) +
+ sizeof(struct srs_trumedia_params_HPF);
+ open = kzalloc(sz, GFP_KERNEL);
+ if (!open)
+ return -ENOMEM;
+ open->payload_size = sizeof(struct srs_trumedia_params_HPF) +
+ sizeof(struct asm_pp_param_data_hdr);
+ open->params.param_id = SRS_TRUMEDIA_PARAMS_HPF;
+ open->params.param_size =
+ sizeof(struct srs_trumedia_params_HPF);
+ hpf_params = (struct srs_trumedia_params_HPF *)((u8 *)open +
+ sizeof(struct asm_pp_params_command));
+ memcpy(hpf_params, srs_params,
+ sizeof(struct srs_trumedia_params_HPF));
+ pr_debug("SRS - %s: HPF params - 1 = %x\n", __func__,
+ (int)hpf_params->v1);
+ break;
+ }
+ case SRS_ID_PEQ: {
+ struct srs_trumedia_params_PEQ *peq_params = NULL;
+ sz = sizeof(struct asm_pp_params_command) +
+ sizeof(struct srs_trumedia_params_PEQ);
+ open = kzalloc(sz, GFP_KERNEL);
+ if (!open)
+ return -ENOMEM;
+ open->payload_size = sizeof(struct srs_trumedia_params_PEQ) +
+ sizeof(struct asm_pp_param_data_hdr);
+ open->params.param_id = SRS_TRUMEDIA_PARAMS_PEQ;
+ open->params.param_size =
+ sizeof(struct srs_trumedia_params_PEQ);
+ peq_params = (struct srs_trumedia_params_PEQ *)((u8 *)open +
+ sizeof(struct asm_pp_params_command));
+ memcpy(peq_params, srs_params,
+ sizeof(struct srs_trumedia_params_PEQ));
+ pr_debug("SRS - %s: PEQ params - 1 = %x 2 = %x, 3 = %x,"
+ " 4 = %x\n", __func__, (int)peq_params->v1,
+ (int)peq_params->v2, (int)peq_params->v3,
+ (int)peq_params->v4);
+ break;
+ }
+ case SRS_ID_HL: {
+ struct srs_trumedia_params_HL *hl_params = NULL;
+ sz = sizeof(struct asm_pp_params_command) +
+ sizeof(struct srs_trumedia_params_HL);
+ open = kzalloc(sz, GFP_KERNEL);
+ if (!open)
+ return -ENOMEM;
+ open->payload_size = sizeof(struct srs_trumedia_params_HL) +
+ sizeof(struct asm_pp_param_data_hdr);
+ open->params.param_id = SRS_TRUMEDIA_PARAMS_HL;
+ open->params.param_size = sizeof(struct srs_trumedia_params_HL);
+ hl_params = (struct srs_trumedia_params_HL *)((u8 *)open +
+ sizeof(struct asm_pp_params_command));
+ memcpy(hl_params, srs_params,
+ sizeof(struct srs_trumedia_params_HL));
+ pr_debug("SRS - %s: HL params - 1 = %x, 2 = %x, 3 = %x, 4 = %x,"
+ " 5 = %x, 6 = %x, 7 = %x\n", __func__,
+ (int)hl_params->v1, (int)hl_params->v2,
+ (int)hl_params->v3, (int)hl_params->v4,
+ (int)hl_params->v5, (int)hl_params->v6,
+ (int)hl_params->v7);
+ break;
+ }
+ default:
+ pr_err("SRS - %s: unknown srs_tech_id %d\n", __func__,
+ srs_tech_id);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+
+ open->payload = NULL;
+ open->params.module_id = SRS_TRUMEDIA_MODULE_ID;
+ open->params.reserved = 0;
+ open->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ open->hdr.pkt_size = sz;
+ open->hdr.src_svc = APR_SVC_ADM;
+ open->hdr.src_domain = APR_DOMAIN_APPS;
+ open->hdr.src_port = port_id;
+ open->hdr.dest_svc = APR_SVC_ADM;
+ open->hdr.dest_domain = APR_DOMAIN_ADSP;
+ open->hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
+ open->hdr.token = port_id;
+ open->hdr.opcode = ADM_CMD_SET_PARAMS;
+ pr_debug("SRS - %s: Command was sent now check Q6 - port id = %d,"
+ " size %d, module id %x, param id %x.\n", __func__,
+ open->hdr.dest_port, open->payload_size,
+ open->params.module_id, open->params.param_id);
+
+ ret = apr_send_pkt(this_adm.apr, (uint32_t *)open);
+ if (ret < 0) {
+ pr_err("SRS - %s: ADM enable for port %d failed\n", __func__,
+ port_id);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ /* Wait for the callback with copp id.
+ * NOTE(review): the wait condition is the constant 1, so this
+ * returns immediately without sleeping; kept as-is to preserve
+ * the original timing behaviour.
+ */
+ ret = wait_event_timeout(this_adm.wait, 1,
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("SRS - %s: ADM open failed for port %d\n", __func__,
+ port_id);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ ret = 0;
+
+fail_cmd:
+ kfree(open);
+ return ret;
+}
+
+/*
+ * adm_callback() - APR response handler for the ADM service.
+ *
+ * On RESET_EVENTS (DSP restart): resets the APR handle, clears all
+ * per-port COPP id/refcount/status slots and the cached calibration
+ * blocks.  For basic command responses it marks the port's copp_stat
+ * and wakes waiters; COPP-open responses additionally latch the
+ * DSP-assigned COPP id.  Always returns 0.
+ */
+static int32_t adm_callback(struct apr_client_data *data, void *priv)
+{
+ uint32_t *payload;
+ int i, index;
+ payload = data->payload;
+
+ if (data->opcode == RESET_EVENTS) {
+ pr_debug("adm_callback: Reset event is received: %d %d apr[%p]\n",
+ data->reset_event, data->reset_proc,
+ this_adm.apr);
+ if (this_adm.apr) {
+ apr_reset(this_adm.apr);
+ for (i = 0; i < AFE_MAX_PORTS; i++) {
+ atomic_set(&this_adm.copp_id[i],
+ RESET_COPP_ID);
+ atomic_set(&this_adm.copp_cnt[i], 0);
+ atomic_set(&this_adm.copp_stat[i], 0);
+ }
+ this_adm.apr = NULL;
+ }
+ pr_debug("Resetting calibration blocks");
+ for (i = 0; i < MAX_AUDPROC_TYPES; i++) {
+ /* Device calibration */
+ mem_addr_audproc[i].cal_size = 0;
+ mem_addr_audproc[i].cal_kvaddr = 0;
+ mem_addr_audproc[i].cal_paddr = 0;
+
+ /* Volume calibration */
+ mem_addr_audvol[i].cal_size = 0;
+ mem_addr_audvol[i].cal_kvaddr = 0;
+ mem_addr_audvol[i].cal_paddr = 0;
+ }
+ return 0;
+ }
+
+ pr_debug("%s: code = 0x%x %x %x size = %d\n", __func__,
+ data->opcode, payload[0], payload[1],
+ data->payload_size);
+
+ if (data->payload_size) {
+ /* token carries the AFE port id the command was sent for */
+ index = afe_get_port_index(data->token);
+ pr_debug("%s: Port ID %d, index %d\n", __func__,
+ data->token, index);
+ if (index < 0 || index >= AFE_MAX_PORTS) {
+ pr_err("%s: invalid port idx %d token %d\n",
+ __func__, index, data->token);
+ return 0;
+ }
+ if (data->opcode == APR_BASIC_RSP_RESULT) {
+ pr_debug("APR_BASIC_RSP_RESULT id %x\n", payload[0]);
+ switch (payload[0]) {
+ case ADM_CMD_SET_PARAMS:
+ if (rtac_make_adm_callback(payload,
+ data->payload_size))
+ break;
+ /* fallthrough: a non-RTAC SET_PARAMS ack is
+ * handled like any other plain completion */
+ case ADM_CMD_COPP_CLOSE:
+ case ADM_CMD_MEMORY_MAP:
+ case ADM_CMD_MEMORY_UNMAP:
+ case ADM_CMD_MEMORY_MAP_REGIONS:
+ case ADM_CMD_MEMORY_UNMAP_REGIONS:
+ case ADM_CMD_MATRIX_MAP_ROUTINGS:
+ case ADM_CMD_CONNECT_AFE_PORT:
+ case ADM_CMD_DISCONNECT_AFE_PORT:
+ atomic_set(&this_adm.copp_stat[index], 1);
+ wake_up(&this_adm.wait);
+ break;
+ default:
+ pr_err("%s: Unknown Cmd: 0x%x\n", __func__,
+ payload[0]);
+ break;
+ }
+ return 0;
+ }
+
+ switch (data->opcode) {
+ case ADM_CMDRSP_COPP_OPEN:
+ case ADM_CMDRSP_MULTI_CHANNEL_COPP_OPEN:
+ case ADM_CMDRSP_MULTI_CHANNEL_COPP_OPEN_V3: {
+ struct adm_copp_open_respond *open = data->payload;
+ if (open->copp_id == INVALID_COPP_ID) {
+ pr_err("%s: invalid coppid rxed %d\n",
+ __func__, open->copp_id);
+ atomic_set(&this_adm.copp_stat[index], 1);
+ wake_up(&this_adm.wait);
+ break;
+ }
+ /* remember the DSP-assigned COPP id for this port */
+ atomic_set(&this_adm.copp_id[index], open->copp_id);
+ atomic_set(&this_adm.copp_stat[index], 1);
+ pr_debug("%s: coppid rxed=%d\n", __func__,
+ open->copp_id);
+ wake_up(&this_adm.wait);
+ }
+ break;
+ case ADM_CMDRSP_GET_PARAMS:
+ pr_debug("%s: ADM_CMDRSP_GET_PARAMS\n", __func__);
+ rtac_make_adm_callback(payload,
+ data->payload_size);
+ break;
+ default:
+ pr_err("%s: Unknown cmd:0x%x\n", __func__,
+ data->opcode);
+ break;
+ }
+ }
+ return 0;
+}
+
+/*
+ * send_adm_cal_block() - push one ACDB calibration block to the COPP on
+ * @port_id with ADM_CMD_SET_PARAMS and wait for the DSP ack.
+ * @aud_cal must already be mapped; its physical address is sent out-of-band.
+ *
+ * Returns 0 on success, -EINVAL for missing/empty cal data, a failed
+ * send, or an ack timeout.  An out-of-range port index returns 0 and
+ * silently skips the send.
+ */
+static int send_adm_cal_block(int port_id, struct acdb_cal_block *aud_cal)
+{
+ s32 result = 0;
+ struct adm_set_params_command adm_params;
+ int index = afe_get_port_index(port_id);
+ if (index < 0 || index >= AFE_MAX_PORTS) {
+ pr_err("%s: invalid port idx %d portid %d\n",
+ __func__, index, port_id);
+ return 0;
+ }
+
+ pr_debug("%s: Port id %d, index %d\n", __func__, port_id, index);
+
+ if (!aud_cal || aud_cal->cal_size == 0) {
+ pr_debug("%s: No ADM cal to send for port_id = %d!\n",
+ __func__, port_id);
+ result = -EINVAL;
+ goto done;
+ }
+
+ adm_params.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(20), APR_PKT_VER);
+ adm_params.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(adm_params));
+ adm_params.hdr.src_svc = APR_SVC_ADM;
+ adm_params.hdr.src_domain = APR_DOMAIN_APPS;
+ adm_params.hdr.src_port = port_id;
+ adm_params.hdr.dest_svc = APR_SVC_ADM;
+ adm_params.hdr.dest_domain = APR_DOMAIN_ADSP;
+ adm_params.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
+ adm_params.hdr.token = port_id;
+ adm_params.hdr.opcode = ADM_CMD_SET_PARAMS;
+ /* payload is the physical address of the pre-mapped cal block */
+ adm_params.payload = aud_cal->cal_paddr;
+ adm_params.payload_size = aud_cal->cal_size;
+
+ atomic_set(&this_adm.copp_stat[index], 0);
+ pr_debug("%s: Sending SET_PARAMS payload = 0x%x, size = %d\n",
+ __func__, adm_params.payload, adm_params.payload_size);
+ result = apr_send_pkt(this_adm.apr, (uint32_t *)&adm_params);
+ if (result < 0) {
+ pr_err("%s: Set params failed port = %d payload = 0x%x\n",
+ __func__, port_id, aud_cal->cal_paddr);
+ result = -EINVAL;
+ goto done;
+ }
+ /* Wait for the callback */
+ result = wait_event_timeout(this_adm.wait,
+ atomic_read(&this_adm.copp_stat[index]),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!result) {
+ pr_err("%s: Set params timed out port = %d, payload = 0x%x\n",
+ __func__, port_id, aud_cal->cal_paddr);
+ result = -EINVAL;
+ goto done;
+ }
+
+ result = 0;
+done:
+ return result;
+}
+
+/*
+ * send_adm_cal() - fetch, (re)map and send the audproc and audvol
+ * calibration blocks for @port_id on audio path @path.
+ *
+ * The ACDB buffers are remapped only when the physical address changed
+ * or the new block is larger than the cached mapping; the successful
+ * mapping is cached in mem_addr_audproc[]/mem_addr_audvol[].
+ * Failures are logged but not propagated (best-effort).
+ */
+static void send_adm_cal(int port_id, int path)
+{
+ int result = 0;
+ s32 acdb_path;
+ struct acdb_cal_block aud_cal;
+
+ pr_debug("%s\n", __func__);
+
+ /* Maps audio_dev_ctrl path definition to ACDB definition */
+ acdb_path = path - 1;
+
+ pr_debug("%s: Sending audproc cal\n", __func__);
+ get_audproc_cal(acdb_path, &aud_cal);
+
+ /* map & cache buffers used */
+ if (((mem_addr_audproc[acdb_path].cal_paddr != aud_cal.cal_paddr) &&
+ (aud_cal.cal_size > 0)) ||
+ (aud_cal.cal_size > mem_addr_audproc[acdb_path].cal_size)) {
+
+ if (mem_addr_audproc[acdb_path].cal_paddr != 0)
+ adm_memory_unmap_regions(
+ &mem_addr_audproc[acdb_path].cal_paddr,
+ &mem_addr_audproc[acdb_path].cal_size, 1);
+
+ result = adm_memory_map_regions(&aud_cal.cal_paddr, 0,
+ &aud_cal.cal_size, 1);
+ if (result < 0)
+ pr_err("ADM audproc mmap did not work! path = %d, "
+ "addr = 0x%x, size = %d\n", acdb_path,
+ aud_cal.cal_paddr, aud_cal.cal_size);
+ else
+ mem_addr_audproc[acdb_path] = aud_cal;
+ }
+
+ if (!send_adm_cal_block(port_id, &aud_cal))
+ pr_debug("%s: Audproc cal sent for port id: %d, path %d\n",
+ __func__, port_id, acdb_path);
+ else
+ pr_debug("%s: Audproc cal not sent for port id: %d, path %d\n",
+ __func__, port_id, acdb_path);
+
+ pr_debug("%s: Sending audvol cal\n", __func__);
+ get_audvol_cal(acdb_path, &aud_cal);
+
+ /* map & cache buffers used */
+ if (((mem_addr_audvol[acdb_path].cal_paddr != aud_cal.cal_paddr) &&
+ (aud_cal.cal_size > 0)) ||
+ (aud_cal.cal_size > mem_addr_audvol[acdb_path].cal_size)) {
+ if (mem_addr_audvol[acdb_path].cal_paddr != 0)
+ adm_memory_unmap_regions(
+ &mem_addr_audvol[acdb_path].cal_paddr,
+ &mem_addr_audvol[acdb_path].cal_size, 1);
+
+ result = adm_memory_map_regions(&aud_cal.cal_paddr, 0,
+ &aud_cal.cal_size, 1);
+ if (result < 0)
+ pr_err("ADM audvol mmap did not work! path = %d, "
+ "addr = 0x%x, size = %d\n", acdb_path,
+ aud_cal.cal_paddr, aud_cal.cal_size);
+ else
+ mem_addr_audvol[acdb_path] = aud_cal;
+ }
+
+ if (!send_adm_cal_block(port_id, &aud_cal))
+ pr_debug("%s: Audvol cal sent for port id: %d, path %d\n",
+ __func__, port_id, acdb_path);
+ else
+ pr_debug("%s: Audvol cal not sent for port id: %d, path %d\n",
+ __func__, port_id, acdb_path);
+}
+
+/*
+ * adm_connect_afe_port() - connect ASM session @session_id to AFE port
+ * @port_id via ADM_CMD_CONNECT_AFE_PORT, registering the ADM APR client
+ * on first use, then wait for the DSP ack and bump the port's COPP
+ * refcount.
+ *
+ * Returns 0 on success, -ENODEV for an invalid port or failed APR
+ * registration, -EINVAL on a failed send or ack timeout.
+ */
+int adm_connect_afe_port(int mode, int session_id, int port_id)
+{
+ struct adm_cmd_connect_afe_port cmd;
+ int ret = 0;
+ int index;
+
+ pr_debug("%s: port %d session id:%d mode:%d\n", __func__,
+ port_id, session_id, mode);
+
+ port_id = afe_convert_virtual_to_portid(port_id);
+
+ if (afe_validate_port(port_id) < 0) {
+ pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
+ return -ENODEV;
+ }
+ if (this_adm.apr == NULL) {
+ /* lazy one-time registration of the ADM APR client */
+ this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
+ 0xFFFFFFFF, &this_adm);
+ if (this_adm.apr == NULL) {
+ pr_err("%s: Unable to register ADM\n", __func__);
+ ret = -ENODEV;
+ return ret;
+ }
+ rtac_set_adm_handle(this_adm.apr);
+ }
+ index = afe_get_port_index(port_id);
+ pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);
+
+ cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ cmd.hdr.pkt_size = sizeof(cmd);
+ cmd.hdr.src_svc = APR_SVC_ADM;
+ cmd.hdr.src_domain = APR_DOMAIN_APPS;
+ cmd.hdr.src_port = port_id;
+ cmd.hdr.dest_svc = APR_SVC_ADM;
+ cmd.hdr.dest_domain = APR_DOMAIN_ADSP;
+ cmd.hdr.dest_port = port_id;
+ cmd.hdr.token = port_id;
+ cmd.hdr.opcode = ADM_CMD_CONNECT_AFE_PORT;
+
+ cmd.mode = mode;
+ cmd.session_id = session_id;
+ cmd.afe_port_id = port_id;
+
+ atomic_set(&this_adm.copp_stat[index], 0);
+ ret = apr_send_pkt(this_adm.apr, (uint32_t *)&cmd);
+ if (ret < 0) {
+ pr_err("%s:ADM enable for port %d failed\n",
+ __func__, port_id);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ /* Wait for the callback with copp id */
+ ret = wait_event_timeout(this_adm.wait,
+ atomic_read(&this_adm.copp_stat[index]),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s ADM connect AFE failed for port %d\n", __func__,
+ port_id);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ atomic_inc(&this_adm.copp_cnt[index]);
+ return 0;
+
+fail_cmd:
+
+ return ret;
+}
+
+/*
+ * adm_disconnect_afe_port() - disconnect ASM session @session_id from
+ * AFE port @port_id via ADM_CMD_DISCONNECT_AFE_PORT, wait for the DSP
+ * ack, and drop the port's COPP refcount.
+ *
+ * Returns 0 on success, -ENODEV for an invalid port or failed APR
+ * registration, -EINVAL on a failed send or ack timeout.
+ * Fix vs. original: the error messages on this path said "connect"
+ * (and "idi"), which misattributed failures to the connect path in logs.
+ */
+int adm_disconnect_afe_port(int mode, int session_id, int port_id)
+{
+ struct adm_cmd_connect_afe_port cmd;
+ int ret = 0;
+ int index;
+
+ pr_debug("%s: port %d session id:%d mode:%d\n", __func__,
+ port_id, session_id, mode);
+
+ port_id = afe_convert_virtual_to_portid(port_id);
+
+ if (afe_validate_port(port_id) < 0) {
+ pr_err("%s port id[%d] is invalid\n", __func__, port_id);
+ return -ENODEV;
+ }
+ if (this_adm.apr == NULL) {
+ /* lazy one-time registration of the ADM APR client */
+ this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
+ 0xFFFFFFFF, &this_adm);
+ if (this_adm.apr == NULL) {
+ pr_err("%s: Unable to register ADM\n", __func__);
+ ret = -ENODEV;
+ return ret;
+ }
+ rtac_set_adm_handle(this_adm.apr);
+ }
+ index = afe_get_port_index(port_id);
+ pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);
+
+ cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ cmd.hdr.pkt_size = sizeof(cmd);
+ cmd.hdr.src_svc = APR_SVC_ADM;
+ cmd.hdr.src_domain = APR_DOMAIN_APPS;
+ cmd.hdr.src_port = port_id;
+ cmd.hdr.dest_svc = APR_SVC_ADM;
+ cmd.hdr.dest_domain = APR_DOMAIN_ADSP;
+ cmd.hdr.dest_port = port_id;
+ cmd.hdr.token = port_id;
+ cmd.hdr.opcode = ADM_CMD_DISCONNECT_AFE_PORT;
+
+ cmd.mode = mode;
+ cmd.session_id = session_id;
+ cmd.afe_port_id = port_id;
+
+ atomic_set(&this_adm.copp_stat[index], 0);
+ ret = apr_send_pkt(this_adm.apr, (uint32_t *)&cmd);
+ if (ret < 0) {
+ pr_err("%s:ADM disable for port %d failed\n",
+ __func__, port_id);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ /* Wait for the callback with copp id */
+ ret = wait_event_timeout(this_adm.wait,
+ atomic_read(&this_adm.copp_stat[index]),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s ADM disconnect AFE failed for port %d\n", __func__,
+ port_id);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ atomic_dec(&this_adm.copp_cnt[index]);
+ return 0;
+
+fail_cmd:
+
+ return ret;
+}
+
+/*
+ * adm_open() - open a COPP on @port_id (or just bump its refcount when
+ * one is already open), picking the topology from ACDB or falling back
+ * to @topology, then wait for the DSP's COPP-open response.
+ *
+ * Returns 0 on success, -ENODEV for an invalid port or failed APR
+ * registration, -EINVAL on a failed send or response timeout.
+ * Fix vs. original: open.endpoint_id2 is now always initialised;
+ * previously it was left as uninitialised stack garbage (and sent to
+ * the DSP) when ec_ref_rx was set but path == 1.
+ */
+int adm_open(int port_id, int path, int rate, int channel_mode, int topology)
+{
+ struct adm_copp_open_command open;
+ int ret = 0;
+ int index;
+
+ pr_debug("%s: port %d path:%d rate:%d mode:%d\n", __func__,
+ port_id, path, rate, channel_mode);
+
+ port_id = afe_convert_virtual_to_portid(port_id);
+
+ if (afe_validate_port(port_id) < 0) {
+ pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
+ return -ENODEV;
+ }
+
+ index = afe_get_port_index(port_id);
+ pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);
+
+ if (this_adm.apr == NULL) {
+ this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
+ 0xFFFFFFFF, &this_adm);
+ if (this_adm.apr == NULL) {
+ pr_err("%s: Unable to register ADM\n", __func__);
+ ret = -ENODEV;
+ return ret;
+ }
+ rtac_set_adm_handle(this_adm.apr);
+ }
+
+
+ /* Create a COPP if port id are not enabled */
+ if (atomic_read(&this_adm.copp_cnt[index]) == 0) {
+
+ open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ open.hdr.pkt_size = sizeof(open);
+ open.hdr.src_svc = APR_SVC_ADM;
+ open.hdr.src_domain = APR_DOMAIN_APPS;
+ open.hdr.src_port = port_id;
+ open.hdr.dest_svc = APR_SVC_ADM;
+ open.hdr.dest_domain = APR_DOMAIN_ADSP;
+ open.hdr.dest_port = port_id;
+ open.hdr.token = port_id;
+ open.hdr.opcode = ADM_CMD_COPP_OPEN;
+
+ open.mode = path;
+ open.endpoint_id1 = port_id;
+
+ /* default: no secondary endpoint; consume a pending
+ * echo-reference RX endpoint only on non-playback paths */
+ open.endpoint_id2 = 0xFFFF;
+ if (this_adm.ec_ref_rx && (path != 1)) {
+ open.endpoint_id2 = this_adm.ec_ref_rx;
+ this_adm.ec_ref_rx = 0;
+ }
+
+ pr_debug("%s open.endpoint_id1:%d open.endpoint_id2:%d",
+ __func__, open.endpoint_id1, open.endpoint_id2);
+ /* convert path to acdb path */
+ if (path == ADM_PATH_PLAYBACK)
+ open.topology_id = get_adm_rx_topology();
+ else {
+ open.topology_id = get_adm_tx_topology();
+ if ((open.topology_id ==
+ VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
+ (open.topology_id ==
+ VPM_TX_DM_FLUENCE_COPP_TOPOLOGY))
+ rate = 16000;
+ }
+
+ if ((open.topology_id == 0) || (port_id == VOICE_RECORD_RX) || (port_id == VOICE_RECORD_TX))
+ open.topology_id = topology;
+
+ open.channel_config = channel_mode & 0x00FF;
+ open.rate = rate;
+
+ pr_debug("%s: channel_config=%d port_id=%d rate=%d"
+ "topology_id=0x%X\n", __func__, open.channel_config,\
+ open.endpoint_id1, open.rate,\
+ open.topology_id);
+
+ atomic_set(&this_adm.copp_stat[index], 0);
+
+ ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
+ if (ret < 0) {
+ pr_err("%s:ADM enable for port %d failed\n",
+ __func__, port_id);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ /* Wait for the callback with copp id */
+ ret = wait_event_timeout(this_adm.wait,
+ atomic_read(&this_adm.copp_stat[index]),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s ADM open failed for port %d\n", __func__,
+ port_id);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ }
+ atomic_inc(&this_adm.copp_cnt[index]);
+ return 0;
+
+fail_cmd:
+
+ return ret;
+}
+
+
+/*
+ * adm_multi_ch_copp_open() - open a multi-channel COPP on @port_id (or
+ * just bump its refcount), filling the channel map for 1/2/4/6/8
+ * channels, optionally in low-latency ("perf") mode, then wait for the
+ * DSP's COPP-open response.
+ *
+ * Returns 0 on success, -ENODEV for an invalid port or failed APR
+ * registration, -EINVAL for an unsupported channel count, a failed
+ * send, or a response timeout.
+ * Fix vs. original: open.endpoint_id2 is now always initialised;
+ * previously it was left as uninitialised stack garbage (and sent to
+ * the DSP) when ec_ref_rx was set but path == 1.
+ */
+int adm_multi_ch_copp_open(int port_id, int path, int rate, int channel_mode,
+ int topology, int perfmode)
+{
+ struct adm_multi_ch_copp_open_command open;
+ int ret = 0;
+ int index;
+
+ pr_debug("%s: port %d path:%d rate:%d channel :%d\n", __func__,
+ port_id, path, rate, channel_mode);
+
+ port_id = afe_convert_virtual_to_portid(port_id);
+
+ if (afe_validate_port(port_id) < 0) {
+ pr_err("%s port idi[%d] is invalid\n", __func__, port_id);
+ return -ENODEV;
+ }
+
+ index = afe_get_port_index(port_id);
+ pr_debug("%s: Port ID %d, index %d\n", __func__, port_id, index);
+
+ if (this_adm.apr == NULL) {
+ this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
+ 0xFFFFFFFF, &this_adm);
+ if (this_adm.apr == NULL) {
+ pr_err("%s: Unable to register ADM\n", __func__);
+ ret = -ENODEV;
+ return ret;
+ }
+ rtac_set_adm_handle(this_adm.apr);
+ }
+
+ /* Create a COPP if port id are not enabled */
+ if (atomic_read(&this_adm.copp_cnt[index]) == 0) {
+
+ open.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+
+ open.hdr.pkt_size =
+ sizeof(struct adm_multi_ch_copp_open_command);
+
+ if (perfmode) {
+ pr_debug("%s Performance mode", __func__);
+ open.hdr.opcode = ADM_CMD_MULTI_CHANNEL_COPP_OPEN_V3;
+ open.flags = ADM_MULTI_CH_COPP_OPEN_PERF_MODE_BIT;
+ open.reserved = PCM_BITS_PER_SAMPLE;
+ } else {
+ open.hdr.opcode = ADM_CMD_MULTI_CHANNEL_COPP_OPEN;
+ open.reserved = 0;
+ }
+
+ memset(open.dev_channel_mapping, 0, 8);
+
+ if (channel_mode == 1) {
+ open.dev_channel_mapping[0] = PCM_CHANNEL_FC;
+ } else if (channel_mode == 2) {
+ open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
+ open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
+ } else if (channel_mode == 4) {
+ open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
+ open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
+ open.dev_channel_mapping[2] = PCM_CHANNEL_RB;
+ open.dev_channel_mapping[3] = PCM_CHANNEL_LB;
+ } else if (channel_mode == 6) {
+ open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
+ open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
+ open.dev_channel_mapping[2] = PCM_CHANNEL_LFE;
+ open.dev_channel_mapping[3] = PCM_CHANNEL_FC;
+ open.dev_channel_mapping[4] = PCM_CHANNEL_LB;
+ open.dev_channel_mapping[5] = PCM_CHANNEL_RB;
+ } else if (channel_mode == 8) {
+ open.dev_channel_mapping[0] = PCM_CHANNEL_FL;
+ open.dev_channel_mapping[1] = PCM_CHANNEL_FR;
+ open.dev_channel_mapping[2] = PCM_CHANNEL_LFE;
+ open.dev_channel_mapping[3] = PCM_CHANNEL_FC;
+ open.dev_channel_mapping[4] = PCM_CHANNEL_LB;
+ open.dev_channel_mapping[5] = PCM_CHANNEL_RB;
+ open.dev_channel_mapping[6] = PCM_CHANNEL_FLC;
+ open.dev_channel_mapping[7] = PCM_CHANNEL_FRC;
+ } else {
+ pr_err("%s invalid num_chan %d\n", __func__,
+ channel_mode);
+ return -EINVAL;
+ }
+ open.hdr.src_svc = APR_SVC_ADM;
+ open.hdr.src_domain = APR_DOMAIN_APPS;
+ open.hdr.src_port = port_id;
+ open.hdr.dest_svc = APR_SVC_ADM;
+ open.hdr.dest_domain = APR_DOMAIN_ADSP;
+ open.hdr.dest_port = port_id;
+ open.hdr.token = port_id;
+
+ open.mode = path;
+ open.endpoint_id1 = port_id;
+
+ /* default: no secondary endpoint; consume a pending
+ * echo-reference RX endpoint only on non-playback paths */
+ open.endpoint_id2 = 0xFFFF;
+ if (this_adm.ec_ref_rx && (path != 1)) {
+ open.endpoint_id2 = this_adm.ec_ref_rx;
+ this_adm.ec_ref_rx = 0;
+ }
+
+ pr_debug("%s open.endpoint_id1:%d open.endpoint_id2:%d",
+ __func__, open.endpoint_id1, open.endpoint_id2);
+ /* convert path to acdb path */
+ if (path == ADM_PATH_PLAYBACK)
+ open.topology_id = get_adm_rx_topology();
+ else {
+ open.topology_id = get_adm_tx_topology();
+ if ((open.topology_id ==
+ VPM_TX_SM_ECNS_COPP_TOPOLOGY) ||
+ (open.topology_id ==
+ VPM_TX_DM_FLUENCE_COPP_TOPOLOGY))
+ rate = 16000;
+ }
+
+ if ((open.topology_id == 0) || (port_id == VOICE_RECORD_RX) || (port_id == VOICE_RECORD_TX))
+ open.topology_id = topology;
+
+ open.channel_config = channel_mode & 0x00FF;
+ open.rate = rate;
+
+ pr_debug("%s: channel_config=%d port_id=%d rate=%d"
+ " topology_id=0x%X\n", __func__, open.channel_config,
+ open.endpoint_id1, open.rate,
+ open.topology_id);
+
+ atomic_set(&this_adm.copp_stat[index], 0);
+
+ ret = apr_send_pkt(this_adm.apr, (uint32_t *)&open);
+ if (ret < 0) {
+ pr_err("%s:ADM enable for port %d failed\n",
+ __func__, port_id);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ /* Wait for the callback with copp id */
+ ret = wait_event_timeout(this_adm.wait,
+ atomic_read(&this_adm.copp_stat[index]),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s ADM open failed for port %d\n", __func__,
+ port_id);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ }
+ atomic_inc(&this_adm.copp_cnt[index]);
+ return 0;
+
+fail_cmd:
+
+ return ret;
+}
+
+/*
+ * adm_matrix_map() - route ASM session @session_id to up to
+ * ADM_MAX_COPPS COPPs (one per entry of @port_id) with
+ * ADM_CMD_MATRIX_MAP_ROUTINGS, then push calibration and register each
+ * routed COPP with RTAC.
+ *
+ * @copp_id is used for the command header / completion tracking.
+ * Returns 0 on success (or for an invalid @copp_id index, which skips
+ * the whole operation), -EINVAL on a failed send or ack timeout.
+ */
+int adm_matrix_map(int session_id, int path, int num_copps,
+ unsigned int *port_id, int copp_id)
+{
+ struct adm_routings_command route;
+ int ret = 0, i = 0;
+ /* Assumes port_ids have already been validated during adm_open */
+ int index = afe_get_port_index(copp_id);
+ int copp_cnt;
+
+ if (index < 0 || index >= AFE_MAX_PORTS) {
+ pr_err("%s: invalid port idx %d token %d\n",
+ __func__, index, copp_id);
+ return 0;
+ }
+
+ pr_debug("%s: session 0x%x path:%d num_copps:%d port_id[0]:%d\n",
+ __func__, session_id, path, num_copps, port_id[0]);
+
+ route.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ route.hdr.pkt_size = sizeof(route);
+ route.hdr.src_svc = 0;
+ route.hdr.src_domain = APR_DOMAIN_APPS;
+ route.hdr.src_port = copp_id;
+ route.hdr.dest_svc = APR_SVC_ADM;
+ route.hdr.dest_domain = APR_DOMAIN_ADSP;
+ route.hdr.dest_port = atomic_read(&this_adm.copp_id[index]);
+ route.hdr.token = copp_id;
+ route.hdr.opcode = ADM_CMD_MATRIX_MAP_ROUTINGS;
+ route.num_sessions = 1;
+ route.session[0].id = session_id;
+
+ if (num_copps < ADM_MAX_COPPS) {
+ copp_cnt = num_copps;
+ } else {
+ copp_cnt = ADM_MAX_COPPS;
+ /* print out warning for now as playback/capture to/from
+ * COPPs more than maximum allowed is extremely unlikely
+ */
+ pr_warn("%s: max out routable COPPs\n", __func__);
+ }
+
+ route.session[0].num_copps = copp_cnt;
+ for (i = 0; i < copp_cnt; i++) {
+ int tmp;
+ port_id[i] = afe_convert_virtual_to_portid(port_id[i]);
+
+ tmp = afe_get_port_index(port_id[i]);
+
+ pr_debug("%s: port_id[%d]: %d, index: %d\n", __func__, i,
+ port_id[i], tmp);
+
+ if (tmp >= 0 && tmp < AFE_MAX_PORTS)
+ route.session[0].copp_id[i] =
+ atomic_read(&this_adm.copp_id[tmp]);
+ }
+
+ /* pad the copp id list to an even length */
+ if (copp_cnt % 2)
+ route.session[0].copp_id[i] = 0;
+
+ switch (path) {
+ case 0x1:
+ route.path = AUDIO_RX;
+ break;
+ case 0x2:
+ case 0x3:
+ route.path = AUDIO_TX;
+ break;
+ default:
+ pr_err("%s: Wrong path set[%d]\n", __func__, path);
+ break;
+ }
+ atomic_set(&this_adm.copp_stat[index], 0);
+
+ ret = apr_send_pkt(this_adm.apr, (uint32_t *)&route);
+ if (ret < 0) {
+ pr_err("%s: ADM routing for port %d failed\n",
+ __func__, port_id[0]);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ ret = wait_event_timeout(this_adm.wait,
+ atomic_read(&this_adm.copp_stat[index]),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: ADM cmd Route failed for port %d\n",
+ __func__, port_id[0]);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+
+ /* push calibration to every routed port */
+ for (i = 0; i < num_copps; i++)
+ send_adm_cal(port_id[i], path);
+
+ /* register each routed COPP with the RTAC layer */
+ for (i = 0; i < num_copps; i++) {
+ int tmp;
+ tmp = afe_get_port_index(port_id[i]);
+ if (tmp >= 0 && tmp < AFE_MAX_PORTS)
+ rtac_add_adm_device(port_id[i],
+ atomic_read(&this_adm.copp_id[tmp]),
+ path, session_id);
+ else
+ pr_debug("%s: Invalid port index %d",
+ __func__, tmp);
+ }
+ return 0;
+
+fail_cmd:
+
+ return ret;
+}
+
+/*
+ * adm_memory_map_regions() - map @bufcnt physical buffers (@buf_add[i]
+ * of size @bufsz[i]) into the DSP via ADM_CMD_MEMORY_MAP_REGIONS and
+ * wait (up to 5 s) for the ack.
+ *
+ * Registers the ADM APR client on first use.  Returns 0 on success,
+ * -ENODEV on failed registration, -ENOMEM on allocation failure,
+ * -EINVAL on a failed send or ack timeout.
+ * NOTE(review): completion is tracked via copp_stat[0] regardless of
+ * buffer owner — assumes no concurrent map/unmap on other ports.
+ */
+int adm_memory_map_regions(uint32_t *buf_add, uint32_t mempool_id,
+ uint32_t *bufsz, uint32_t bufcnt)
+{
+ struct adm_cmd_memory_map_regions *mmap_regions = NULL;
+ struct adm_memory_map_regions *mregions = NULL;
+ void *mmap_region_cmd = NULL;
+ void *payload = NULL;
+ int ret = 0;
+ int i = 0;
+ int cmd_size = 0;
+
+ pr_debug("%s\n", __func__);
+ if (this_adm.apr == NULL) {
+ this_adm.apr = apr_register("ADSP", "ADM", adm_callback,
+ 0xFFFFFFFF, &this_adm);
+ if (this_adm.apr == NULL) {
+ pr_err("%s: Unable to register ADM\n", __func__);
+ ret = -ENODEV;
+ return ret;
+ }
+ rtac_set_adm_handle(this_adm.apr);
+ }
+
+ /* header followed by one region descriptor per buffer */
+ cmd_size = sizeof(struct adm_cmd_memory_map_regions)
+ + sizeof(struct adm_memory_map_regions) * bufcnt;
+
+ mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
+ if (!mmap_region_cmd) {
+ pr_err("%s: allocate mmap_region_cmd failed\n", __func__);
+ return -ENOMEM;
+ }
+ mmap_regions = (struct adm_cmd_memory_map_regions *)mmap_region_cmd;
+ mmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ mmap_regions->hdr.pkt_size = cmd_size;
+ mmap_regions->hdr.src_port = 0;
+ mmap_regions->hdr.dest_port = 0;
+ mmap_regions->hdr.token = 0;
+ mmap_regions->hdr.opcode = ADM_CMD_MEMORY_MAP_REGIONS;
+ mmap_regions->mempool_id = mempool_id & 0x00ff;
+ mmap_regions->nregions = bufcnt & 0x00ff;
+ pr_debug("%s: map_regions->nregions = %d\n", __func__,
+ mmap_regions->nregions);
+ payload = ((u8 *) mmap_region_cmd +
+ sizeof(struct adm_cmd_memory_map_regions));
+ mregions = (struct adm_memory_map_regions *)payload;
+
+ for (i = 0; i < bufcnt; i++) {
+ mregions->phys = buf_add[i];
+ mregions->buf_size = bufsz[i];
+ ++mregions;
+ }
+
+ atomic_set(&this_adm.copp_stat[0], 0);
+ ret = apr_send_pkt(this_adm.apr, (uint32_t *) mmap_region_cmd);
+ if (ret < 0) {
+ pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
+ mmap_regions->hdr.opcode, ret);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+
+ ret = wait_event_timeout(this_adm.wait,
+ atomic_read(&this_adm.copp_stat[0]), 5 * HZ);
+ if (!ret) {
+ pr_err("%s: timeout. waited for memory_map\n", __func__);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+fail_cmd:
+ kfree(mmap_region_cmd);
+ return ret;
+}
+
+/*
+ * adm_memory_unmap_regions() - unmap @bufcnt previously mapped buffers
+ * (@buf_add[i]) from the DSP via ADM_CMD_MEMORY_UNMAP_REGIONS and wait
+ * (up to 5 s) for the ack.  @bufsz is accepted for interface symmetry
+ * with adm_memory_map_regions() but is not sent to the DSP.
+ *
+ * Returns 0 on success, -EINVAL if the APR client was never
+ * registered, on a failed send, or on ack timeout; -ENOMEM on
+ * allocation failure.
+ */
+int adm_memory_unmap_regions(uint32_t *buf_add, uint32_t *bufsz,
+ uint32_t bufcnt)
+{
+ struct adm_cmd_memory_unmap_regions *unmap_regions = NULL;
+ struct adm_memory_unmap_regions *mregions = NULL;
+ void *unmap_region_cmd = NULL;
+ void *payload = NULL;
+ int ret = 0;
+ int i = 0;
+ int cmd_size = 0;
+
+ pr_debug("%s\n", __func__);
+
+ if (this_adm.apr == NULL) {
+ pr_err("%s APR handle NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ /* header followed by one region descriptor per buffer */
+ cmd_size = sizeof(struct adm_cmd_memory_unmap_regions)
+ + sizeof(struct adm_memory_unmap_regions) * bufcnt;
+
+ unmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
+ if (!unmap_region_cmd) {
+ pr_err("%s: allocate unmap_region_cmd failed\n", __func__);
+ return -ENOMEM;
+ }
+ unmap_regions = (struct adm_cmd_memory_unmap_regions *)
+ unmap_region_cmd;
+ unmap_regions->hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE),
+ APR_PKT_VER);
+ unmap_regions->hdr.pkt_size = cmd_size;
+ unmap_regions->hdr.src_port = 0;
+ unmap_regions->hdr.dest_port = 0;
+ unmap_regions->hdr.token = 0;
+ unmap_regions->hdr.opcode = ADM_CMD_MEMORY_UNMAP_REGIONS;
+ unmap_regions->nregions = bufcnt & 0x00ff;
+ unmap_regions->reserved = 0;
+ pr_debug("%s: unmap_regions->nregions = %d\n", __func__,
+ unmap_regions->nregions);
+ payload = ((u8 *) unmap_region_cmd +
+ sizeof(struct adm_cmd_memory_unmap_regions));
+ mregions = (struct adm_memory_unmap_regions *)payload;
+
+ for (i = 0; i < bufcnt; i++) {
+ mregions->phys = buf_add[i];
+ ++mregions;
+ }
+ atomic_set(&this_adm.copp_stat[0], 0);
+ ret = apr_send_pkt(this_adm.apr, (uint32_t *) unmap_region_cmd);
+ if (ret < 0) {
+ pr_err("%s: mmap_regions op[0x%x]rc[%d]\n", __func__,
+ unmap_regions->hdr.opcode, ret);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+
+ ret = wait_event_timeout(this_adm.wait,
+ atomic_read(&this_adm.copp_stat[0]), 5 * HZ);
+ if (!ret) {
+ pr_err("%s: timeout. waited for memory_unmap\n", __func__);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+fail_cmd:
+ kfree(unmap_region_cmd);
+ return ret;
+}
+
+int adm_get_copp_id(int port_index)
+{
+ pr_debug("%s\n", __func__);
+
+ if (port_index < 0) {
+ pr_err("%s: invalid port_id = %d\n", __func__, port_index);
+ return -EINVAL;
+ }
+
+ return atomic_read(&this_adm.copp_id[port_index]);
+}
+
/* Cache the RX port to be used as the echo-canceller reference stream. */
void adm_ec_ref_rx_id(int port_id)
{
	this_adm.ec_ref_rx = port_id;
	pr_debug("%s ec_ref_rx:%d", __func__, this_adm.ec_ref_rx);
}
+
+int adm_close(int port_id)
+{
+ struct apr_hdr close;
+
+ int ret = 0;
+ int index = 0;
+
+ port_id = afe_convert_virtual_to_portid(port_id);
+
+ index = afe_get_port_index(port_id);
+ if (afe_validate_port(port_id) < 0)
+ return -EINVAL;
+
+ pr_debug("%s port_id=%d index %d\n", __func__, port_id, index);
+
+ if (!(atomic_read(&this_adm.copp_cnt[index]))) {
+ pr_err("%s: copp count for port[%d]is 0\n", __func__, port_id);
+
+ goto fail_cmd;
+ }
+ atomic_dec(&this_adm.copp_cnt[index]);
+ if (!(atomic_read(&this_adm.copp_cnt[index]))) {
+
+ close.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ close.pkt_size = sizeof(close);
+ close.src_svc = APR_SVC_ADM;
+ close.src_domain = APR_DOMAIN_APPS;
+ close.src_port = port_id;
+ close.dest_svc = APR_SVC_ADM;
+ close.dest_domain = APR_DOMAIN_ADSP;
+ close.dest_port = atomic_read(&this_adm.copp_id[index]);
+ close.token = port_id;
+ close.opcode = ADM_CMD_COPP_CLOSE;
+
+ atomic_set(&this_adm.copp_id[index], RESET_COPP_ID);
+ atomic_set(&this_adm.copp_stat[index], 0);
+
+
+ pr_debug("%s:coppid %d portid=%d index=%d coppcnt=%d\n",
+ __func__,
+ atomic_read(&this_adm.copp_id[index]),
+ port_id, index,
+ atomic_read(&this_adm.copp_cnt[index]));
+
+ ret = apr_send_pkt(this_adm.apr, (uint32_t *)&close);
+ if (ret < 0) {
+ pr_err("%s ADM close failed\n", __func__);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+
+ ret = wait_event_timeout(this_adm.wait,
+ atomic_read(&this_adm.copp_stat[index]),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: ADM cmd Route failed for port %d\n",
+ __func__, port_id);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+
+ rtac_remove_adm_device(port_id);
+ }
+
+fail_cmd:
+ return ret;
+}
+
+static int __init adm_init(void)
+{
+ int i = 0;
+ init_waitqueue_head(&this_adm.wait);
+ this_adm.apr = NULL;
+
+ for (i = 0; i < AFE_MAX_PORTS; i++) {
+ atomic_set(&this_adm.copp_id[i], RESET_COPP_ID);
+ atomic_set(&this_adm.copp_cnt[i], 0);
+ atomic_set(&this_adm.copp_stat[i], 0);
+ }
+ return 0;
+}
+
+device_initcall(adm_init);
diff --git a/sound/soc/qcom/qdsp6/q6afe.c b/sound/soc/qcom/qdsp6/q6afe.c
new file mode 100644
index 0000000000000..5935ff3c90b71
--- /dev/null
+++ b/sound/soc/qcom/qdsp6/q6afe.c
@@ -0,0 +1,1826 @@
+/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <sound/qdsp6v2/audio_acdb.h>
+#include <sound/apr_audio.h>
+#include <sound/q6afe.h>
+
/* Global driver state for the Q6 AFE (audio front end) APR client. */
struct afe_ctl {
	void *apr;		/* APR service handle; NULL until registered */
	atomic_t state;		/* 1 while a command awaits its DSP ack */
	atomic_t status;	/* set to -1 when the DSP reports an error */
	wait_queue_head_t wait;	/* woken by afe_callback() on acks */
	struct task_struct *task;	/* task that last started a port; signalled on DSP reset */
	/* RT proxy TX event callback (registered by the proxy driver) */
	void (*tx_cb) (uint32_t opcode,
		uint32_t token, uint32_t *payload, void *priv);
	/* RT proxy RX event callback */
	void (*rx_cb) (uint32_t opcode,
		uint32_t token, uint32_t *payload, void *priv);
	void *tx_private_data;
	void *rx_private_data;
};
+
static struct afe_ctl this_afe;

/* Address/size of the last AFE calibration block mapped, per cal path. */
static struct acdb_cal_block afe_cal_addr[MAX_AUDPROC_TYPES];
/* Open counts for the PCM and proxy AFE DAI pairs, indexed by (id & 0x1). */
static int pcm_afe_instance[2];
static int proxy_afe_instance[2];
/* NOTE(review): not static -- presumably shared with the DAI driver; confirm. */
bool afe_close_done[2] = {true, true};

#define TIMEOUT_MS 1000		/* ack wait for most AFE commands */
#define Q6AFE_MAX_VOLUME 0x3FFF

/* Size of an AFE config packet: APR header + port id (u16) + payload. */
#define SIZEOF_CFG_CMD(y) \
	(sizeof(struct apr_hdr) + sizeof(u16) + (sizeof(struct y)))
+
/*
 * afe_callback() - APR receive handler for the AFE service.
 *
 * Handles three classes of events:
 *  - RESET_EVENTS: DSP restarted; drop the APR handle and signal the
 *    task that last started a port with SIGUSR1.
 *  - APR_BASIC_RSP_RESULT: command acks; for most opcodes clear
 *    this_afe.state and wake the waiter.  RTPORT read/write acks select
 *    the RT proxy port to notify below.
 *  - AFE_EVENT_RT_PROXY_PORT_STATUS: port status, routed to the
 *    registered proxy callback for that port.
 *
 * Always returns 0.
 */
static int32_t afe_callback(struct apr_client_data *data, void *priv)
{
	if (data->opcode == RESET_EVENTS) {
		pr_debug("q6afe: reset event = %d %d apr[%p]\n",
			data->reset_event, data->reset_proc, this_afe.apr);
		if (this_afe.apr) {
			apr_reset(this_afe.apr);
			atomic_set(&this_afe.state, 0);
			this_afe.apr = NULL;
		}
		/* send info to user */
		pr_debug("task_name = %s pid = %d\n",
			this_afe.task->comm, this_afe.task->pid);
		send_sig(SIGUSR1, this_afe.task, 0);
		return 0;
	}
	if (data->payload_size) {
		uint32_t *payload;
		uint16_t port_id = 0;
		payload = data->payload;
		pr_debug("%s:opcode = 0x%x cmd = 0x%x status = 0x%x\n",
			__func__, data->opcode,
			payload[0], payload[1]);
		/* payload[1] contains the error status for response */
		if (payload[1] != 0) {
			atomic_set(&this_afe.status, -1);
			pr_err("%s: cmd = 0x%x returned error = 0x%x\n",
				__func__, payload[0], payload[1]);
		}
		if (data->opcode == APR_BASIC_RSP_RESULT) {
			switch (payload[0]) {
			/* plain command acks: release the waiter */
			case AFE_PORT_AUDIO_IF_CONFIG:
			case AFE_PORT_CMD_I2S_CONFIG:
			case AFE_PORT_MULTI_CHAN_HDMI_AUDIO_IF_CONFIG:
			case AFE_PORT_AUDIO_SLIM_SCH_CONFIG:
			case AFE_PORT_CMD_STOP:
			case AFE_PORT_CMD_START:
			case AFE_PORT_CMD_LOOPBACK:
			case AFE_PORT_CMD_SIDETONE_CTL:
			case AFE_PORT_CMD_SET_PARAM:
			case AFE_PSEUDOPORT_CMD_START:
			case AFE_PSEUDOPORT_CMD_STOP:
			case AFE_PORT_CMD_APPLY_GAIN:
			case AFE_SERVICE_CMD_MEMORY_MAP:
			case AFE_SERVICE_CMD_MEMORY_UNMAP:
			case AFE_SERVICE_CMD_UNREG_RTPORT:
				atomic_set(&this_afe.state, 0);
				wake_up(&this_afe.wait);
				break;
			case AFE_SERVICE_CMD_REG_RTPORT:
				break;
			/* RTPORT acks: forward to the proxy callbacks */
			case AFE_SERVICE_CMD_RTPORT_WR:
				port_id = RT_PROXY_PORT_001_TX;
				break;
			case AFE_SERVICE_CMD_RTPORT_RD:
				port_id = RT_PROXY_PORT_001_RX;
				break;
			default:
				pr_err("Unknown cmd 0x%x\n",
						payload[0]);
				break;
			}
		} else if (data->opcode == AFE_EVENT_RT_PROXY_PORT_STATUS) {
			/* low 16 bits of payload[0] carry the port id */
			port_id = (uint16_t)(0x0000FFFF & payload[0]);
		}
		pr_debug("%s:port_id = %x\n", __func__, port_id);
		switch (port_id) {
		case RT_PROXY_PORT_001_TX: {
			if (this_afe.tx_cb) {
				this_afe.tx_cb(data->opcode, data->token,
					data->payload,
					this_afe.tx_private_data);
			}
			break;
		}
		case RT_PROXY_PORT_001_RX: {
			if (this_afe.rx_cb) {
				this_afe.rx_cb(data->opcode, data->token,
					data->payload,
					this_afe.rx_private_data);
			}
			break;
		}
		default:
			break;
		}
	}
	return 0;
}
+
/*
 * afe_get_port_type() - classify an AFE port as RX or TX.
 *
 * Returns MSM_AFE_PORT_TYPE_RX or MSM_AFE_PORT_TYPE_TX, or -EINVAL for
 * an unknown port id.
 *
 * NOTE(review): VOICE_PLAYBACK_TX is listed under RX and VOICE_RECORD_RX
 * under TX -- presumably the direction is taken from the DSP's
 * perspective for the voice ports; confirm before "fixing".
 */
int afe_get_port_type(u16 port_id)
{
	int ret;

	switch (port_id) {
	case PRIMARY_I2S_RX:
	case PCM_RX:
	case SECONDARY_PCM_RX:
	case SECONDARY_I2S_RX:
	case MI2S_RX:
	case HDMI_RX:
	case SLIMBUS_0_RX:
	case SLIMBUS_1_RX:
	case SLIMBUS_2_RX:
	case SLIMBUS_3_RX:
	case INT_BT_SCO_RX:
	case INT_BT_A2DP_RX:
	case INT_FM_RX:
	case VOICE_PLAYBACK_TX:
	case RT_PROXY_PORT_001_RX:
	case SLIMBUS_4_RX:
		ret = MSM_AFE_PORT_TYPE_RX;
		break;

	case PRIMARY_I2S_TX:
	case PCM_TX:
	case SECONDARY_PCM_TX:
	case SECONDARY_I2S_TX:
	case MI2S_TX:
	case DIGI_MIC_TX:
	case VOICE_RECORD_TX:
	case SLIMBUS_0_TX:
	case SLIMBUS_1_TX:
	case SLIMBUS_2_TX:
	case SLIMBUS_3_TX:
	case INT_FM_TX:
	case VOICE_RECORD_RX:
	case INT_BT_SCO_TX:
	case RT_PROXY_PORT_001_TX:
	case SLIMBUS_4_TX:
		ret = MSM_AFE_PORT_TYPE_TX;
		break;

	default:
		pr_err("%s: invalid port id %d\n", __func__, port_id);
		ret = -EINVAL;
	}

	return ret;
}
+
+int afe_validate_port(u16 port_id)
+{
+ int ret;
+
+ switch (port_id) {
+ case PRIMARY_I2S_RX:
+ case PRIMARY_I2S_TX:
+ case PCM_RX:
+ case PCM_TX:
+ case SECONDARY_PCM_RX:
+ case SECONDARY_PCM_TX:
+ case SECONDARY_I2S_RX:
+ case SECONDARY_I2S_TX:
+ case MI2S_RX:
+ case MI2S_TX:
+ case HDMI_RX:
+ case RSVD_2:
+ case RSVD_3:
+ case DIGI_MIC_TX:
+ case VOICE_RECORD_RX:
+ case VOICE_RECORD_TX:
+ case VOICE_PLAYBACK_TX:
+ case SLIMBUS_0_RX:
+ case SLIMBUS_0_TX:
+ case SLIMBUS_1_RX:
+ case SLIMBUS_1_TX:
+ case SLIMBUS_2_RX:
+ case SLIMBUS_2_TX:
+ case SLIMBUS_3_RX:
+ case SLIMBUS_3_TX:
+ case INT_BT_SCO_RX:
+ case INT_BT_SCO_TX:
+ case INT_BT_A2DP_RX:
+ case INT_FM_RX:
+ case INT_FM_TX:
+ case RT_PROXY_PORT_001_RX:
+ case RT_PROXY_PORT_001_TX:
+ case SLIMBUS_4_RX:
+ case SLIMBUS_4_TX:
+ {
+ ret = 0;
+ break;
+ }
+
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int afe_convert_virtual_to_portid(u16 port_id)
+{
+ int ret;
+
+ /* if port_id is virtual, convert to physical..
+ * if port_id is already physical, return physical
+ */
+ if (afe_validate_port(port_id) < 0) {
+ if (port_id == RT_PROXY_DAI_001_RX ||
+ port_id == RT_PROXY_DAI_001_TX ||
+ port_id == RT_PROXY_DAI_002_RX ||
+ port_id == RT_PROXY_DAI_002_TX)
+ ret = VIRTUAL_ID_TO_PORTID(port_id);
+ else
+ ret = -EINVAL;
+ } else
+ ret = port_id;
+
+ return ret;
+}
+
/*
 * afe_get_port_index() - map a physical AFE port id to the dense IDX_*
 * slot used to index per-port bookkeeping arrays (size AFE_MAX_PORTS).
 *
 * Returns the IDX_* value, or -EINVAL for an unknown port id.
 */
int afe_get_port_index(u16 port_id)
{
	switch (port_id) {
	case PRIMARY_I2S_RX: return IDX_PRIMARY_I2S_RX;
	case PRIMARY_I2S_TX: return IDX_PRIMARY_I2S_TX;
	case PCM_RX: return IDX_PCM_RX;
	case PCM_TX: return IDX_PCM_TX;
	case SECONDARY_PCM_RX: return IDX_SECONDARY_PCM_RX;
	case SECONDARY_PCM_TX: return IDX_SECONDARY_PCM_TX;
	case SECONDARY_I2S_RX: return IDX_SECONDARY_I2S_RX;
	case SECONDARY_I2S_TX: return IDX_SECONDARY_I2S_TX;
	case MI2S_RX: return IDX_MI2S_RX;
	case MI2S_TX: return IDX_MI2S_TX;
	case HDMI_RX: return IDX_HDMI_RX;
	case RSVD_2: return IDX_RSVD_2;
	case RSVD_3: return IDX_RSVD_3;
	case DIGI_MIC_TX: return IDX_DIGI_MIC_TX;
	case VOICE_RECORD_RX: return IDX_VOICE_RECORD_RX;
	case VOICE_RECORD_TX: return IDX_VOICE_RECORD_TX;
	case VOICE_PLAYBACK_TX: return IDX_VOICE_PLAYBACK_TX;
	case SLIMBUS_0_RX: return IDX_SLIMBUS_0_RX;
	case SLIMBUS_0_TX: return IDX_SLIMBUS_0_TX;
	case SLIMBUS_1_RX: return IDX_SLIMBUS_1_RX;
	case SLIMBUS_1_TX: return IDX_SLIMBUS_1_TX;
	case SLIMBUS_2_RX: return IDX_SLIMBUS_2_RX;
	case SLIMBUS_2_TX: return IDX_SLIMBUS_2_TX;
	case SLIMBUS_3_RX: return IDX_SLIMBUS_3_RX;
	case SLIMBUS_3_TX: return IDX_SLIMBUS_3_TX;
	case INT_BT_SCO_RX: return IDX_INT_BT_SCO_RX;
	case INT_BT_SCO_TX: return IDX_INT_BT_SCO_TX;
	case INT_BT_A2DP_RX: return IDX_INT_BT_A2DP_RX;
	case INT_FM_RX: return IDX_INT_FM_RX;
	case INT_FM_TX: return IDX_INT_FM_TX;
	case RT_PROXY_PORT_001_RX: return IDX_RT_PROXY_PORT_001_RX;
	case RT_PROXY_PORT_001_TX: return IDX_RT_PROXY_PORT_001_TX;
	case SLIMBUS_4_RX: return IDX_SLIMBUS_4_RX;
	case SLIMBUS_4_TX: return IDX_SLIMBUS_4_TX;

	default: return -EINVAL;
	}
}
+
/*
 * afe_sizeof_cfg_cmd() - size in bytes of the AFE config packet for a
 * port: APR header + port id + port-type-specific payload (see
 * SIZEOF_CFG_CMD()).  Unknown ids fall back to the PCM config size.
 */
int afe_sizeof_cfg_cmd(u16 port_id)
{
	int ret_size;
	switch (port_id) {
	case PRIMARY_I2S_RX:
	case PRIMARY_I2S_TX:
	case SECONDARY_I2S_RX:
	case SECONDARY_I2S_TX:
	case MI2S_RX:
	case MI2S_TX:
		ret_size = SIZEOF_CFG_CMD(afe_port_mi2s_cfg);
		break;
	case HDMI_RX:
		ret_size = SIZEOF_CFG_CMD(afe_port_hdmi_multi_ch_cfg);
		break;
	case SLIMBUS_0_RX:
	case SLIMBUS_0_TX:
	case SLIMBUS_1_RX:
	case SLIMBUS_1_TX:
	case SLIMBUS_2_RX:
	case SLIMBUS_2_TX:
	case SLIMBUS_3_RX:
	case SLIMBUS_3_TX:
	case SLIMBUS_4_RX:
	case SLIMBUS_4_TX:
		ret_size = SIZEOF_CFG_CMD(afe_port_slimbus_sch_cfg);
		break;
	case RT_PROXY_PORT_001_RX:
	case RT_PROXY_PORT_001_TX:
		ret_size = SIZEOF_CFG_CMD(afe_port_rtproxy_cfg);
		break;
	/* PCM ports and anything unknown use the PCM config layout. */
	case PCM_RX:
	case PCM_TX:
	case SECONDARY_PCM_RX:
	case SECONDARY_PCM_TX:
	default:
		ret_size = SIZEOF_CFG_CMD(afe_port_pcm_cfg);
		break;
	}
	return ret_size;
}
+
+int afe_q6_interface_prepare(void)
+{
+ int ret = 0;
+
+ pr_debug("%s:", __func__);
+
+ if (this_afe.apr == NULL) {
+ this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
+ 0xFFFFFFFF, &this_afe);
+ pr_debug("%s: Register AFE\n", __func__);
+ if (this_afe.apr == NULL) {
+ pr_err("%s: Unable to register AFE\n", __func__);
+ ret = -ENODEV;
+ }
+ }
+ return ret;
+}
+
/*
 * afe_send_cal_block() - push the ACDB AFE calibration for @path to the
 * DSP.
 * @path:    calibration path (TX_CAL or RX_CAL)
 * @port_id: AFE port the calibration applies to
 *
 * When the ACDB calibration address or size changed since the last send,
 * the old region is unmapped and the new one mapped first.  Then an
 * AFE_PORT_CMD_SET_PARAM referencing the physical calibration buffer is
 * sent and acked.  Failures are logged but not propagated (void return).
 */
static void afe_send_cal_block(int32_t path, u16 port_id)
{
	int result = 0;
	struct acdb_cal_block cal_block;
	struct afe_port_cmd_set_param_no_payload afe_cal;
	pr_debug("%s: path %d\n", __func__, path);

	get_afe_cal(path, &cal_block);
	if (cal_block.cal_size <= 0) {
		pr_debug("%s: No AFE cal to send!\n", __func__);
		goto done;
	}

	/* Remap only when the cal buffer moved or grew. */
	if ((afe_cal_addr[path].cal_paddr != cal_block.cal_paddr) ||
		(cal_block.cal_size > afe_cal_addr[path].cal_size)) {
		if (afe_cal_addr[path].cal_paddr != 0)
			afe_cmd_memory_unmap(
				afe_cal_addr[path].cal_paddr);

		afe_cmd_memory_map(cal_block.cal_paddr, cal_block.cal_size);
		afe_cal_addr[path].cal_paddr = cal_block.cal_paddr;
		afe_cal_addr[path].cal_size = cal_block.cal_size;
	}

	afe_cal.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	afe_cal.hdr.pkt_size = sizeof(afe_cal);
	afe_cal.hdr.src_port = 0;
	afe_cal.hdr.dest_port = 0;
	afe_cal.hdr.token = 0;
	afe_cal.hdr.opcode = AFE_PORT_CMD_SET_PARAM;
	afe_cal.port_id = port_id;
	/* payload lives in the mapped cal region, not in this packet */
	afe_cal.payload_size = cal_block.cal_size;
	afe_cal.payload_address = cal_block.cal_paddr;

	pr_debug("%s: AFE cal sent for device port = %d, path = %d, "
		"cal size = %d, cal addr = 0x%x\n", __func__,
		port_id, path, cal_block.cal_size, cal_block.cal_paddr);

	atomic_set(&this_afe.state, 1);
	result = apr_send_pkt(this_afe.apr, (uint32_t *) &afe_cal);
	if (result < 0) {
		pr_err("%s: AFE cal for port %d failed\n",
			__func__, port_id);
	}

	result = wait_event_timeout(this_afe.wait,
		(atomic_read(&this_afe.state) == 0),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!result) {
		pr_err("%s: wait_event timeout SET AFE CAL\n", __func__);
		goto done;
	}

	pr_debug("%s: AFE cal sent for path %d device!\n", __func__, path);
done:
	return;
}
+
+void afe_send_cal(u16 port_id)
+{
+ pr_debug("%s\n", __func__);
+
+ if (afe_get_port_type(port_id) == MSM_AFE_PORT_TYPE_TX)
+ afe_send_cal_block(TX_CAL, port_id);
+ else if (afe_get_port_type(port_id) == MSM_AFE_PORT_TYPE_RX)
+ afe_send_cal_block(RX_CAL, port_id);
+}
+
/*
 * afe_port_start() - configure and start an AFE port.
 * @port_id:    physical or virtual (RT proxy DAI) AFE port id
 * @afe_config: port configuration union matching the port type
 * @rate:       sample rate in Hz
 *
 * This function sends the multi-channel HDMI configuration command and
 * AFE calibration, which is only supported by QDSP6 on 8960 and onward.
 * For the PCM-side proxy DAIs only a refcount is bumped here; for the
 * proxy-side DAIs a stale PCM session may be closed and reconfigured
 * first.
 *
 * Returns 0 on success, negative errno on failure.
 */
int afe_port_start(u16 port_id, union afe_port_config *afe_config,
	u32 rate)
{
	struct afe_port_start_command start;
	struct afe_audioif_config_command config;
	int ret;

	if (!afe_config) {
		pr_err("%s: Error, no configuration data\n", __func__);
		ret = -EINVAL;
		return ret;
	}
	pr_debug("%s: %d %d\n", __func__, port_id, rate);

	/* PCM-side proxy DAIs: just count the open and return. */
	if ((port_id == RT_PROXY_DAI_001_RX) ||
		(port_id == RT_PROXY_DAI_002_TX)) {
		pr_debug("%s: before incrementing pcm_afe_instance %d"\
			" port_id %d\n", __func__,
			pcm_afe_instance[port_id & 0x1], port_id);
		port_id = VIRTUAL_ID_TO_PORTID(port_id);
		pcm_afe_instance[port_id & 0x1]++;
		return 0;
	}
	/* Proxy-side DAIs: close any lingering PCM session first. */
	if ((port_id == RT_PROXY_DAI_002_RX) ||
		(port_id == RT_PROXY_DAI_001_TX)) {
		pr_debug("%s: before incrementing proxy_afe_instance %d"\
			" port_id %d\n", __func__,
			proxy_afe_instance[port_id & 0x1], port_id);
		if (!afe_close_done[port_id & 0x1]) {
			/*close pcm dai corresponding to the proxy dai*/
			afe_close(port_id - 0x10);
			pcm_afe_instance[port_id & 0x1]++;
			pr_debug("%s: reconfigure afe port again\n", __func__);
		}
		proxy_afe_instance[port_id & 0x1]++;
		afe_close_done[port_id & 0x1] = false;
		port_id = VIRTUAL_ID_TO_PORTID(port_id);
	}

	ret = afe_q6_interface_prepare();
	if (IS_ERR_VALUE(ret))
		return ret;

	/* Build the port-type-specific config header. */
	if (port_id == HDMI_RX) {
		config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		config.hdr.pkt_size = afe_sizeof_cfg_cmd(port_id);
		config.hdr.src_port = 0;
		config.hdr.dest_port = 0;
		config.hdr.token = 0;
		config.hdr.opcode = AFE_PORT_MULTI_CHAN_HDMI_AUDIO_IF_CONFIG;
	} else {

		config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
			APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
		config.hdr.pkt_size = afe_sizeof_cfg_cmd(port_id);
		config.hdr.src_port = 0;
		config.hdr.dest_port = 0;
		config.hdr.token = 0;
		switch (port_id) {
		case SLIMBUS_0_RX:
		case SLIMBUS_0_TX:
		case SLIMBUS_1_RX:
		case SLIMBUS_1_TX:
		case SLIMBUS_2_RX:
		case SLIMBUS_2_TX:
		case SLIMBUS_3_RX:
		case SLIMBUS_3_TX:
		case SLIMBUS_4_RX:
		case SLIMBUS_4_TX:
			config.hdr.opcode = AFE_PORT_AUDIO_SLIM_SCH_CONFIG;
			break;
		case MI2S_TX:
		case MI2S_RX:
		case SECONDARY_I2S_RX:
		case SECONDARY_I2S_TX:
		case PRIMARY_I2S_RX:
		case PRIMARY_I2S_TX:
			/* AFE_PORT_CMD_I2S_CONFIG command is not supported
			 * in the LPASS EL 1.0. So we have to distiguish
			 * which AFE command, AFE_PORT_CMD_I2S_CONFIG or
			 * AFE_PORT_AUDIO_IF_CONFIG to use. If the format
			 * is L-PCM, the AFE_PORT_AUDIO_IF_CONFIG is used
			 * to make the backward compatible.
			 */
			pr_debug("%s: afe_config->mi2s.format = %d\n", __func__,
				afe_config->mi2s.format);
			if (afe_config->mi2s.format == MSM_AFE_I2S_FORMAT_LPCM)
				config.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG;
			else
				config.hdr.opcode = AFE_PORT_CMD_I2S_CONFIG;
			break;
		default:
			config.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG;
			break;
		}
	}

	if (afe_validate_port(port_id) < 0) {

		pr_err("%s: Failed : Invalid Port id = %d\n", __func__,
			port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	config.port_id = port_id;
	config.port = *afe_config;

	/* Send config and wait for the ack (state cleared by callback). */
	atomic_set(&this_afe.state, 1);
	atomic_set(&this_afe.status, 0);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
	if (ret < 0) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
			port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_afe.wait,
		(atomic_read(&this_afe.state) == 0),
		msecs_to_jiffies(TIMEOUT_MS));

	if (!ret) {
		pr_err("%s: wait_event timeout IF CONFIG\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
	/* status is set to -1 by the callback on a DSP-side error */
	if (atomic_read(&this_afe.status) != 0) {
		pr_err("%s: config cmd failed\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}

	/* send AFE cal */
	afe_send_cal(port_id);

	/* Now start the port at the requested rate. */
	start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	start.hdr.pkt_size = sizeof(start);
	start.hdr.src_port = 0;
	start.hdr.dest_port = 0;
	start.hdr.token = 0;
	start.hdr.opcode = AFE_PORT_CMD_START;
	start.port_id = port_id;
	start.gain = 0x2000;
	start.sample_rate = rate;

	atomic_set(&this_afe.state, 1);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start);

	if (IS_ERR_VALUE(ret)) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
			port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_afe.wait,
		(atomic_read(&this_afe.state) == 0),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout PORT START\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}

	/* Remember the caller so afe_callback can signal it on DSP reset. */
	if (this_afe.task != current)
		this_afe.task = current;

	pr_debug("task_name = %s pid = %d\n",
		this_afe.task->comm, this_afe.task->pid);
	return 0;

fail_cmd:
	return ret;
}
+
/*
 * afe_open() - configure and start an AFE port (legacy path).
 * This function should be used by 8660 exclusively.
 * @port_id:    physical or virtual (RT proxy DAI) AFE port id
 * @afe_config: port configuration union matching the port type
 * @rate:       sample rate in Hz
 *
 * Like afe_port_start() but without HDMI multi-channel config, AFE
 * calibration, or proxy refcounting.
 *
 * Returns 0 on success, negative errno on failure.
 */
int afe_open(u16 port_id, union afe_port_config *afe_config, int rate)
{
	struct afe_port_start_command start;
	struct afe_audioif_config_command config;
	int ret = 0;

	if (!afe_config) {
		pr_err("%s: Error, no configuration data\n", __func__);
		ret = -EINVAL;
		return ret;
	}

	pr_debug("%s: %d %d\n", __func__, port_id, rate);

	/* PCM-side proxy DAIs need no AFE work on this path. */
	if ((port_id == RT_PROXY_DAI_001_RX) ||
		(port_id == RT_PROXY_DAI_002_TX))
		return 0;
	if ((port_id == RT_PROXY_DAI_002_RX) ||
		(port_id == RT_PROXY_DAI_001_TX))
		port_id = VIRTUAL_ID_TO_PORTID(port_id);

	ret = afe_q6_interface_prepare();
	if (ret != 0)
		return ret;

	config.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	config.hdr.pkt_size = afe_sizeof_cfg_cmd(port_id);
	config.hdr.src_port = 0;
	config.hdr.dest_port = 0;
	config.hdr.token = 0;
	switch (port_id) {
	case SLIMBUS_0_RX:
	case SLIMBUS_0_TX:
	case SLIMBUS_1_RX:
	case SLIMBUS_1_TX:
	case SLIMBUS_2_RX:
	case SLIMBUS_2_TX:
	case SLIMBUS_3_RX:
	case SLIMBUS_3_TX:
	case SLIMBUS_4_RX:
	case SLIMBUS_4_TX:
		config.hdr.opcode = AFE_PORT_AUDIO_SLIM_SCH_CONFIG;
		break;
	case MI2S_TX:
	case MI2S_RX:
	case SECONDARY_I2S_RX:
	case SECONDARY_I2S_TX:
	case PRIMARY_I2S_RX:
	case PRIMARY_I2S_TX:
		/* AFE_PORT_CMD_I2S_CONFIG command is not supported
		 * in the LPASS EL 1.0. So we have to distiguish
		 * which AFE command, AFE_PORT_CMD_I2S_CONFIG or
		 * AFE_PORT_AUDIO_IF_CONFIG to use. If the format
		 * is L-PCM, the AFE_PORT_AUDIO_IF_CONFIG is used
		 * to make the backward compatible.
		 */
		pr_debug("%s: afe_config->mi2s.format = %d\n", __func__,
			afe_config->mi2s.format);
		if (afe_config->mi2s.format == MSM_AFE_I2S_FORMAT_LPCM)
			config.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG;
		else
			config.hdr.opcode = AFE_PORT_CMD_I2S_CONFIG;
		break;
	default:
		config.hdr.opcode = AFE_PORT_AUDIO_IF_CONFIG;
		break;
	}

	if (afe_validate_port(port_id) < 0) {

		pr_err("%s: Failed : Invalid Port id = %d\n", __func__,
			port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	config.port_id = port_id;
	config.port = *afe_config;

	/* Send config and wait for the DSP ack. */
	atomic_set(&this_afe.state, 1);
	atomic_set(&this_afe.status, 0);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &config);
	if (ret < 0) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
			port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}

	ret = wait_event_timeout(this_afe.wait,
		(atomic_read(&this_afe.state) == 0),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
	/* status is set to -1 by afe_callback on a DSP-side error */
	if (atomic_read(&this_afe.status) != 0) {
		pr_err("%s: config cmd failed\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}
	/* Now start the port. */
	start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	start.hdr.pkt_size = sizeof(start);
	start.hdr.src_port = 0;
	start.hdr.dest_port = 0;
	start.hdr.token = 0;
	start.hdr.opcode = AFE_PORT_CMD_START;
	start.port_id = port_id;
	start.gain = 0x2000;
	start.sample_rate = rate;

	atomic_set(&this_afe.state, 1);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start);
	if (ret < 0) {
		pr_err("%s: AFE enable for port %d failed\n", __func__,
			port_id);
		ret = -EINVAL;
		goto fail_cmd;
	}
	ret = wait_event_timeout(this_afe.wait,
		(atomic_read(&this_afe.state) == 0),
		msecs_to_jiffies(TIMEOUT_MS));
	if (!ret) {
		pr_err("%s: wait_event timeout\n", __func__);
		ret = -EINVAL;
		goto fail_cmd;
	}

	/* Remember the caller so afe_callback can signal it on DSP reset. */
	if (this_afe.task != current)
		this_afe.task = current;

	pr_debug("task_name = %s pid = %d\n",
		this_afe.task->comm, this_afe.task->pid);
	return 0;
fail_cmd:
	return ret;
}
+
+int afe_loopback(u16 enable, u16 dst_port, u16 src_port)
+{
+ struct afe_loopback_command lb_cmd;
+ int ret = 0;
+
+ ret = afe_q6_interface_prepare();
+ if (ret != 0)
+ return ret;
+
+ if ((afe_get_port_type(dst_port) == MSM_AFE_PORT_TYPE_RX) &&
+ (afe_get_port_type(src_port) == MSM_AFE_PORT_TYPE_RX))
+ return afe_loopback_cfg(enable, dst_port, src_port,
+ LB_MODE_EC_REF_VOICE_AUDIO);
+
+ lb_cmd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(20), APR_PKT_VER);
+ lb_cmd.hdr.pkt_size = APR_PKT_SIZE(APR_HDR_SIZE,
+ sizeof(lb_cmd) - APR_HDR_SIZE);
+ lb_cmd.hdr.src_port = 0;
+ lb_cmd.hdr.dest_port = 0;
+ lb_cmd.hdr.token = 0;
+ lb_cmd.hdr.opcode = AFE_PORT_CMD_LOOPBACK;
+ lb_cmd.tx_port_id = src_port;
+ lb_cmd.rx_port_id = dst_port;
+ lb_cmd.mode = 0xFFFF;
+ lb_cmd.enable = (enable ? 1 : 0);
+ atomic_set(&this_afe.state, 1);
+
+ ret = apr_send_pkt(this_afe.apr, (uint32_t *) &lb_cmd);
+ if (ret < 0) {
+ pr_err("%s: AFE loopback failed\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+ ret = wait_event_timeout(this_afe.wait,
+ (atomic_read(&this_afe.state) == 0),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ ret = -EINVAL;
+ }
+done:
+ return ret;
+}
+
+int afe_loopback_cfg(u16 enable, u16 dst_port, u16 src_port, u16 mode)
+{
+ struct afe_port_cmd_set_param lp_cfg;
+ int ret = 0;
+
+ ret = afe_q6_interface_prepare();
+ if (ret != 0)
+ return ret;
+
+ pr_debug("%s: src_port %d, dst_port %d\n",
+ __func__, src_port, dst_port);
+
+ lp_cfg.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ lp_cfg.hdr.pkt_size = sizeof(lp_cfg);
+ lp_cfg.hdr.src_port = 0;
+ lp_cfg.hdr.dest_port = 0;
+ lp_cfg.hdr.token = 0;
+ lp_cfg.hdr.opcode = AFE_PORT_CMD_SET_PARAM;
+
+ lp_cfg.port_id = src_port;
+ lp_cfg.payload_size = sizeof(struct afe_param_payload_base) +
+ sizeof(struct afe_param_loopback_cfg);
+ lp_cfg.payload_address = 0;
+
+ lp_cfg.payload.base.module_id = AFE_MODULE_LOOPBACK;
+ lp_cfg.payload.base.param_id = AFE_PARAM_ID_LOOPBACK_CONFIG;
+ lp_cfg.payload.base.param_size = sizeof(struct afe_param_loopback_cfg);
+ lp_cfg.payload.base.reserved = 0;
+
+ lp_cfg.payload.param.loopback_cfg.loopback_cfg_minor_version =
+ AFE_API_VERSION_LOOPBACK_CONFIG;
+ lp_cfg.payload.param.loopback_cfg.dst_port_id = dst_port;
+ lp_cfg.payload.param.loopback_cfg.routing_mode = mode;
+ lp_cfg.payload.param.loopback_cfg.enable = enable;
+ lp_cfg.payload.param.loopback_cfg.reserved = 0;
+
+ atomic_set(&this_afe.state, 1);
+ ret = apr_send_pkt(this_afe.apr, (uint32_t *) &lp_cfg);
+ if (ret < 0) {
+ pr_err("%s: AFE loopback config failed for src_port %d, dst_port %d\n",
+ __func__, src_port, dst_port);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+
+ ret = wait_event_timeout(this_afe.wait,
+ (atomic_read(&this_afe.state) == 0),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (ret < 0) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return ret;
+}
+
+int afe_loopback_gain(u16 port_id, u16 volume)
+{
+ struct afe_port_cmd_set_param set_param;
+ int ret = 0;
+
+ if (this_afe.apr == NULL) {
+ this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
+ 0xFFFFFFFF, &this_afe);
+ pr_debug("%s: Register AFE\n", __func__);
+ if (this_afe.apr == NULL) {
+ pr_err("%s: Unable to register AFE\n", __func__);
+ ret = -ENODEV;
+ return ret;
+ }
+ }
+
+ if (afe_validate_port(port_id) < 0) {
+
+ pr_err("%s: Failed : Invalid Port id = %d\n", __func__,
+ port_id);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+
+ /* RX ports numbers are even .TX ports numbers are odd. */
+ if (port_id % 2 == 0) {
+ pr_err("%s: Failed : afe loopback gain only for TX ports."
+ " port_id %d\n", __func__, port_id);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+
+ pr_debug("%s: %d %hX\n", __func__, port_id, volume);
+
+ set_param.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ set_param.hdr.pkt_size = sizeof(set_param);
+ set_param.hdr.src_port = 0;
+ set_param.hdr.dest_port = 0;
+ set_param.hdr.token = 0;
+ set_param.hdr.opcode = AFE_PORT_CMD_SET_PARAM;
+
+ set_param.port_id = port_id;
+ set_param.payload_size = sizeof(struct afe_param_payload_base) +
+ sizeof(struct afe_param_loopback_gain);
+ set_param.payload_address = 0;
+
+ set_param.payload.base.module_id = AFE_MODULE_ID_PORT_INFO;
+ set_param.payload.base.param_id = AFE_PARAM_ID_LOOPBACK_GAIN;
+ set_param.payload.base.param_size =
+ sizeof(struct afe_param_loopback_gain);
+ set_param.payload.base.reserved = 0;
+
+ set_param.payload.param.loopback_gain.gain = volume;
+ set_param.payload.param.loopback_gain.reserved = 0;
+
+ atomic_set(&this_afe.state, 1);
+ ret = apr_send_pkt(this_afe.apr, (uint32_t *) &set_param);
+ if (ret < 0) {
+ pr_err("%s: AFE param set failed for port %d\n",
+ __func__, port_id);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+
+ ret = wait_event_timeout(this_afe.wait,
+ (atomic_read(&this_afe.state) == 0),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (ret < 0) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return ret;
+}
+
+int afe_apply_gain(u16 port_id, u16 gain)
+{
+ struct afe_port_gain_command set_gain;
+ int ret = 0;
+
+ if (this_afe.apr == NULL) {
+ pr_err("%s: AFE is not opened\n", __func__);
+ ret = -EPERM;
+ goto fail_cmd;
+ }
+
+ if (afe_validate_port(port_id) < 0) {
+ pr_err("%s: Failed : Invalid Port id = %d\n", __func__,
+ port_id);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+
+ /* RX ports numbers are even .TX ports numbers are odd. */
+ if (port_id % 2 == 0) {
+ pr_err("%s: Failed : afe apply gain only for TX ports."
+ " port_id %d\n", __func__, port_id);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+
+ pr_debug("%s: %d %hX\n", __func__, port_id, gain);
+
+ set_gain.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ set_gain.hdr.pkt_size = sizeof(set_gain);
+ set_gain.hdr.src_port = 0;
+ set_gain.hdr.dest_port = 0;
+ set_gain.hdr.token = 0;
+ set_gain.hdr.opcode = AFE_PORT_CMD_APPLY_GAIN;
+
+ set_gain.port_id = port_id;
+ set_gain.gain = gain;
+
+ atomic_set(&this_afe.state, 1);
+ ret = apr_send_pkt(this_afe.apr, (uint32_t *) &set_gain);
+ if (ret < 0) {
+ pr_err("%s: AFE Gain set failed for port %d\n",
+ __func__, port_id);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+
+ ret = wait_event_timeout(this_afe.wait,
+ (atomic_read(&this_afe.state) == 0),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (ret < 0) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return ret;
+}
+
/*
 * afe_pseudo_port_start_nowait() - send AFE_PSEUDOPORT_CMD_START
 * without blocking for the DSP ack.
 *
 * Returns 0 on success, -ENODEV if APR is not registered, -EINVAL on a
 * send failure.
 *
 * NOTE(review): this_afe.state is set to 1 here but nothing in this
 * function waits on it -- presumably the ack handler simply clears it;
 * confirm no caller depends on the state afterwards.
 */
int afe_pseudo_port_start_nowait(u16 port_id)
{
	int ret = 0;
	struct afe_pseudoport_start_command start;

	pr_debug("%s: port_id=%d\n", __func__, port_id);
	if (this_afe.apr == NULL) {
		pr_err("%s: AFE APR is not registered\n", __func__);
		return -ENODEV;
	}


	start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
		APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
	start.hdr.pkt_size = sizeof(start);
	start.hdr.src_port = 0;
	start.hdr.dest_port = 0;
	start.hdr.token = 0;
	start.hdr.opcode = AFE_PSEUDOPORT_CMD_START;
	start.port_id = port_id;
	start.timing = 1;

	atomic_set(&this_afe.state, 1);
	ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start);
	if (ret < 0) {
		pr_err("%s: AFE enable for port %d failed %d\n",
		       __func__, port_id, ret);
		return -EINVAL;
	}
	return 0;
}
+
+/*
+ * afe_start_pseudo_port - start an AFE pseudo port and wait for the DSP ack.
+ *
+ * @port_id: AFE pseudo port to start.
+ *
+ * Blocking variant of afe_pseudo_port_start_nowait(): waits up to TIMEOUT_MS
+ * for afe_callback to clear this_afe.state. Returns 0 on success, the
+ * afe_q6_interface_prepare() error code, or -EINVAL on send failure/timeout.
+ */
+int afe_start_pseudo_port(u16 port_id)
+{
+ int ret = 0;
+ struct afe_pseudoport_start_command start;
+
+ pr_debug("%s: port_id=%d\n", __func__, port_id);
+
+ ret = afe_q6_interface_prepare();
+ if (ret != 0)
+ return ret;
+
+ start.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ start.hdr.pkt_size = sizeof(start);
+ start.hdr.src_port = 0;
+ start.hdr.dest_port = 0;
+ start.hdr.token = 0;
+ start.hdr.opcode = AFE_PSEUDOPORT_CMD_START;
+ start.port_id = port_id;
+ start.timing = 1;
+
+ atomic_set(&this_afe.state, 1);
+ ret = apr_send_pkt(this_afe.apr, (uint32_t *) &start);
+ if (ret < 0) {
+ pr_err("%s: AFE enable for port %d failed %d\n",
+ __func__, port_id, ret);
+ return -EINVAL;
+ }
+
+ /* wait_event_timeout() returns 0 on timeout, >0 on wake-up */
+ ret = wait_event_timeout(this_afe.wait,
+ (atomic_read(&this_afe.state) == 0),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * afe_pseudo_port_stop_nowait - send AFE_PSEUDOPORT_CMD_STOP without
+ * blocking for the DSP acknowledgement.
+ *
+ * @port_id: AFE pseudo port to stop.
+ *
+ * Returns 0 on send success, -EINVAL if APR is closed or the send fails.
+ */
+int afe_pseudo_port_stop_nowait(u16 port_id)
+{
+ int ret = 0;
+ struct afe_pseudoport_stop_command stop;
+
+ pr_debug("%s: port_id=%d\n", __func__, port_id);
+
+ if (this_afe.apr == NULL) {
+ pr_err("%s: AFE is already closed\n", __func__);
+ return -EINVAL;
+ }
+
+ stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ stop.hdr.pkt_size = sizeof(stop);
+ stop.hdr.src_port = 0;
+ stop.hdr.dest_port = 0;
+ stop.hdr.token = 0;
+ stop.hdr.opcode = AFE_PSEUDOPORT_CMD_STOP;
+ stop.port_id = port_id;
+ stop.reserved = 0;
+
+ atomic_set(&this_afe.state, 1);
+ ret = apr_send_pkt(this_afe.apr, (uint32_t *) &stop);
+ if (ret < 0) {
+ pr_err("%s: AFE close failed %d\n", __func__, ret);
+ return -EINVAL;
+ }
+
+ return 0;
+
+}
+
+/*
+ * afe_stop_pseudo_port - stop an AFE pseudo port and wait for the DSP ack.
+ *
+ * @port_id: AFE pseudo port to stop.
+ *
+ * Blocking variant of afe_pseudo_port_stop_nowait(): waits up to TIMEOUT_MS
+ * for afe_callback to clear this_afe.state. Returns 0 on success, -EINVAL
+ * if APR is closed, the send fails, or the wait times out.
+ */
+int afe_stop_pseudo_port(u16 port_id)
+{
+ int ret = 0;
+ struct afe_pseudoport_stop_command stop;
+
+ pr_debug("%s: port_id=%d\n", __func__, port_id);
+
+ if (this_afe.apr == NULL) {
+ pr_err("%s: AFE is already closed\n", __func__);
+ return -EINVAL;
+ }
+
+ stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ stop.hdr.pkt_size = sizeof(stop);
+ stop.hdr.src_port = 0;
+ stop.hdr.dest_port = 0;
+ stop.hdr.token = 0;
+ stop.hdr.opcode = AFE_PSEUDOPORT_CMD_STOP;
+ stop.port_id = port_id;
+ stop.reserved = 0;
+
+ atomic_set(&this_afe.state, 1);
+ ret = apr_send_pkt(this_afe.apr, (uint32_t *) &stop);
+ if (ret < 0) {
+ pr_err("%s: AFE close failed %d\n", __func__, ret);
+ return -EINVAL;
+ }
+
+ /* 0 from wait_event_timeout() means the DSP ack never arrived */
+ ret = wait_event_timeout(this_afe.wait,
+ (atomic_read(&this_afe.state) == 0),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * afe_cmd_memory_map - map a physically contiguous buffer into the AFE
+ * service (AFE_SERVICE_CMD_MEMORY_MAP) and wait for the DSP ack.
+ *
+ * @dma_addr_p: physical address of the buffer to map.
+ * @dma_buf_sz: size of the buffer in bytes.
+ *
+ * Lazily registers the AFE APR handle on first use. Returns 0 on success,
+ * -ENODEV if APR registration fails, -EINVAL on send failure or timeout.
+ */
+int afe_cmd_memory_map(u32 dma_addr_p, u32 dma_buf_sz)
+{
+ int ret = 0;
+ struct afe_cmd_memory_map mregion;
+
+ pr_debug("%s:\n", __func__);
+
+ if (this_afe.apr == NULL) {
+ this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
+ 0xFFFFFFFF, &this_afe);
+ pr_debug("%s: Register AFE\n", __func__);
+ if (this_afe.apr == NULL) {
+ pr_err("%s: Unable to register AFE\n", __func__);
+ ret = -ENODEV;
+ return ret;
+ }
+ }
+
+ mregion.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ mregion.hdr.pkt_size = sizeof(mregion);
+ mregion.hdr.src_port = 0;
+ mregion.hdr.dest_port = 0;
+ mregion.hdr.token = 0;
+ mregion.hdr.opcode = AFE_SERVICE_CMD_MEMORY_MAP;
+ mregion.phy_addr = dma_addr_p;
+ mregion.mem_sz = dma_buf_sz;
+ mregion.mem_id = 0;
+ mregion.rsvd = 0;
+
+ atomic_set(&this_afe.state, 1);
+ ret = apr_send_pkt(this_afe.apr, (uint32_t *) &mregion);
+ if (ret < 0) {
+ pr_err("%s: AFE memory map cmd failed %d\n",
+ __func__, ret);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ret = wait_event_timeout(this_afe.wait,
+ (atomic_read(&this_afe.state) == 0),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * afe_cmd_memory_map_nowait - map a buffer into the AFE service without
+ * blocking for the DSP acknowledgement.
+ *
+ * @dma_addr_p: physical address of the buffer to map.
+ * @dma_buf_sz: size of the buffer in bytes.
+ *
+ * Lazily registers the AFE APR handle on first use. Returns 0 on send
+ * success, -ENODEV if APR registration fails, -EINVAL on send failure.
+ */
+int afe_cmd_memory_map_nowait(u32 dma_addr_p, u32 dma_buf_sz)
+{
+ int ret = 0;
+ struct afe_cmd_memory_map mregion;
+
+ pr_debug("%s:\n", __func__);
+
+ if (this_afe.apr == NULL) {
+ this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
+ 0xFFFFFFFF, &this_afe);
+ pr_debug("%s: Register AFE\n", __func__);
+ if (this_afe.apr == NULL) {
+ pr_err("%s: Unable to register AFE\n", __func__);
+ ret = -ENODEV;
+ return ret;
+ }
+ }
+
+ mregion.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ mregion.hdr.pkt_size = sizeof(mregion);
+ mregion.hdr.src_port = 0;
+ mregion.hdr.dest_port = 0;
+ mregion.hdr.token = 0;
+ mregion.hdr.opcode = AFE_SERVICE_CMD_MEMORY_MAP;
+ mregion.phy_addr = dma_addr_p;
+ mregion.mem_sz = dma_buf_sz;
+ mregion.mem_id = 0;
+ mregion.rsvd = 0;
+
+ ret = apr_send_pkt(this_afe.apr, (uint32_t *) &mregion);
+ if (ret < 0) {
+ pr_err("%s: AFE memory map cmd failed %d\n",
+ __func__, ret);
+ /* propagate the failure; previously this fell through to
+ * "return 0" and silently reported success to the caller */
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * afe_cmd_memory_unmap - unmap a previously mapped buffer from the AFE
+ * service (AFE_SERVICE_CMD_MEMORY_UNMAP) and wait for the DSP ack.
+ *
+ * @dma_addr_p: physical address that was passed to afe_cmd_memory_map().
+ *
+ * Returns 0 on success, -ENODEV if APR registration fails, -EINVAL on
+ * send failure or timeout.
+ */
+int afe_cmd_memory_unmap(u32 dma_addr_p)
+{
+ int ret = 0;
+ struct afe_cmd_memory_unmap mregion;
+
+ pr_debug("%s:\n", __func__);
+
+ if (this_afe.apr == NULL) {
+ this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
+ 0xFFFFFFFF, &this_afe);
+ pr_debug("%s: Register AFE\n", __func__);
+ if (this_afe.apr == NULL) {
+ pr_err("%s: Unable to register AFE\n", __func__);
+ ret = -ENODEV;
+ return ret;
+ }
+ }
+
+ mregion.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ mregion.hdr.pkt_size = sizeof(mregion);
+ mregion.hdr.src_port = 0;
+ mregion.hdr.dest_port = 0;
+ mregion.hdr.token = 0;
+ mregion.hdr.opcode = AFE_SERVICE_CMD_MEMORY_UNMAP;
+ mregion.phy_addr = dma_addr_p;
+
+ atomic_set(&this_afe.state, 1);
+ ret = apr_send_pkt(this_afe.apr, (uint32_t *) &mregion);
+ if (ret < 0) {
+ pr_err("%s: AFE memory unmap cmd failed %d\n",
+ __func__, ret);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ret = wait_event_timeout(this_afe.wait,
+ (atomic_read(&this_afe.state) == 0),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ ret = -EINVAL;
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * afe_cmd_memory_unmap_nowait - unmap a buffer from the AFE service
+ * without blocking for the DSP acknowledgement.
+ *
+ * @dma_addr_p: physical address that was passed to afe_cmd_memory_map().
+ *
+ * Returns 0 on send success, -ENODEV if APR registration fails, -EINVAL
+ * on send failure.
+ */
+int afe_cmd_memory_unmap_nowait(u32 dma_addr_p)
+{
+ int ret = 0;
+ struct afe_cmd_memory_unmap mregion;
+
+ pr_debug("%s:\n", __func__);
+
+ if (this_afe.apr == NULL) {
+ this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
+ 0xFFFFFFFF, &this_afe);
+ pr_debug("%s: Register AFE\n", __func__);
+ if (this_afe.apr == NULL) {
+ pr_err("%s: Unable to register AFE\n", __func__);
+ ret = -ENODEV;
+ return ret;
+ }
+ }
+
+ mregion.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ mregion.hdr.pkt_size = sizeof(mregion);
+ mregion.hdr.src_port = 0;
+ mregion.hdr.dest_port = 0;
+ mregion.hdr.token = 0;
+ mregion.hdr.opcode = AFE_SERVICE_CMD_MEMORY_UNMAP;
+ mregion.phy_addr = dma_addr_p;
+
+ ret = apr_send_pkt(this_afe.apr, (uint32_t *) &mregion);
+ if (ret < 0) {
+ pr_err("%s: AFE memory unmap cmd failed %d\n",
+ __func__, ret);
+ /* propagate the failure; previously this fell through to
+ * "return 0" and silently reported success to the caller */
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * afe_register_get_events - register a callback for RT-proxy port events.
+ *
+ * @port_id: must be RT_PROXY_DAI_002_RX or RT_PROXY_DAI_001_TX (virtual
+ *           IDs); anything else is rejected with -EINVAL.
+ * @cb:      callback invoked by afe_callback for events on this port.
+ * @private_data: opaque pointer handed back to @cb.
+ *
+ * Stores the callback in this_afe (tx or rx slot, chosen after the
+ * virtual-to-port-ID translation) and sends AFE_SERVICE_CMD_REG_RTPORT.
+ * Fire-and-forget: does not wait for a DSP ack.
+ */
+int afe_register_get_events(u16 port_id,
+ void (*cb) (uint32_t opcode,
+ uint32_t token, uint32_t *payload, void *priv),
+ void *private_data)
+{
+ int ret = 0;
+ struct afe_cmd_reg_rtport rtproxy;
+
+ pr_debug("%s:\n", __func__);
+
+ if (this_afe.apr == NULL) {
+ this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
+ 0xFFFFFFFF, &this_afe);
+ pr_debug("%s: Register AFE\n", __func__);
+ if (this_afe.apr == NULL) {
+ pr_err("%s: Unable to register AFE\n", __func__);
+ ret = -ENODEV;
+ return ret;
+ }
+ }
+ if ((port_id == RT_PROXY_DAI_002_RX) ||
+ (port_id == RT_PROXY_DAI_001_TX))
+ port_id = VIRTUAL_ID_TO_PORTID(port_id);
+ else
+ return -EINVAL;
+
+ if (port_id == RT_PROXY_PORT_001_TX) {
+ this_afe.tx_cb = cb;
+ this_afe.tx_private_data = private_data;
+ } else if (port_id == RT_PROXY_PORT_001_RX) {
+ this_afe.rx_cb = cb;
+ this_afe.rx_private_data = private_data;
+ }
+
+ rtproxy.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ rtproxy.hdr.pkt_size = sizeof(rtproxy);
+ rtproxy.hdr.src_port = 1;
+ rtproxy.hdr.dest_port = 1;
+ rtproxy.hdr.token = 0;
+ rtproxy.hdr.opcode = AFE_SERVICE_CMD_REG_RTPORT;
+ rtproxy.port_id = port_id;
+ rtproxy.rsvd = 0;
+
+ ret = apr_send_pkt(this_afe.apr, (uint32_t *) &rtproxy);
+ if (ret < 0) {
+ pr_err("%s: AFE reg. rtproxy_event failed %d\n",
+ __func__, ret);
+ ret = -EINVAL;
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * afe_unregister_get_events - unregister the RT-proxy event callback.
+ *
+ * @port_id: must be RT_PROXY_DAI_002_RX or RT_PROXY_DAI_001_TX (virtual
+ *           IDs); anything else is rejected with -EINVAL.
+ *
+ * Clears the stored callback, sends AFE_SERVICE_CMD_UNREG_RTPORT and,
+ * unlike afe_register_get_events(), waits up to TIMEOUT_MS for the ack.
+ */
+int afe_unregister_get_events(u16 port_id)
+{
+ int ret = 0;
+ struct afe_cmd_unreg_rtport rtproxy;
+
+ pr_debug("%s:\n", __func__);
+
+ if (this_afe.apr == NULL) {
+ this_afe.apr = apr_register("ADSP", "AFE", afe_callback,
+ 0xFFFFFFFF, &this_afe);
+ pr_debug("%s: Register AFE\n", __func__);
+ if (this_afe.apr == NULL) {
+ pr_err("%s: Unable to register AFE\n", __func__);
+ ret = -ENODEV;
+ return ret;
+ }
+ }
+ if ((port_id == RT_PROXY_DAI_002_RX) ||
+ (port_id == RT_PROXY_DAI_001_TX))
+ port_id = VIRTUAL_ID_TO_PORTID(port_id);
+ else
+ return -EINVAL;
+
+ rtproxy.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ rtproxy.hdr.pkt_size = sizeof(rtproxy);
+ rtproxy.hdr.src_port = 0;
+ rtproxy.hdr.dest_port = 0;
+ rtproxy.hdr.token = 0;
+ rtproxy.hdr.opcode = AFE_SERVICE_CMD_UNREG_RTPORT;
+ rtproxy.port_id = port_id;
+ rtproxy.rsvd = 0;
+
+ if (port_id == RT_PROXY_PORT_001_TX) {
+ this_afe.tx_cb = NULL;
+ this_afe.tx_private_data = NULL;
+ } else if (port_id == RT_PROXY_PORT_001_RX) {
+ this_afe.rx_cb = NULL;
+ this_afe.rx_private_data = NULL;
+ }
+
+ atomic_set(&this_afe.state, 1);
+ ret = apr_send_pkt(this_afe.apr, (uint32_t *) &rtproxy);
+ if (ret < 0) {
+ pr_err("%s: AFE enable Unreg. rtproxy_event failed %d\n",
+ __func__, ret);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ ret = wait_event_timeout(this_afe.wait,
+ (atomic_read(&this_afe.state) == 0),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ ret = -EINVAL;
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * afe_rt_proxy_port_write - tell the DSP that @bytes of data are available
+ * at @buf_addr_p for the RT-proxy TX port (AFE_SERVICE_CMD_RTPORT_WR).
+ *
+ * @buf_addr_p: physical address of the (previously mapped) data buffer.
+ * @bytes:      number of valid bytes at that address.
+ *
+ * Fire-and-forget. Returns 0 on send success, -ENODEV if APR is not
+ * registered, -EINVAL on send failure.
+ */
+int afe_rt_proxy_port_write(u32 buf_addr_p, int bytes)
+{
+ int ret = 0;
+ struct afe_cmd_rtport_wr afecmd_wr;
+
+ if (this_afe.apr == NULL) {
+ pr_err("%s:register to AFE is not done\n", __func__);
+ ret = -ENODEV;
+ return ret;
+ }
+ pr_debug("%s: buf_addr_p = 0x%08x bytes = %d\n", __func__,
+ buf_addr_p, bytes);
+
+ afecmd_wr.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ afecmd_wr.hdr.pkt_size = sizeof(afecmd_wr);
+ afecmd_wr.hdr.src_port = 0;
+ afecmd_wr.hdr.dest_port = 0;
+ afecmd_wr.hdr.token = 0;
+ afecmd_wr.hdr.opcode = AFE_SERVICE_CMD_RTPORT_WR;
+ afecmd_wr.buf_addr = (uint32_t)buf_addr_p;
+ afecmd_wr.port_id = RT_PROXY_PORT_001_TX;
+ afecmd_wr.bytes_avail = bytes;
+ afecmd_wr.rsvd = 0;
+
+ ret = apr_send_pkt(this_afe.apr, (uint32_t *) &afecmd_wr);
+ if (ret < 0) {
+ pr_err("%s: AFE rtproxy write to port 0x%x failed %d\n",
+ __func__, afecmd_wr.port_id, ret);
+ ret = -EINVAL;
+ return ret;
+ }
+ return 0;
+
+}
+
+/*
+ * afe_rt_proxy_port_read - ask the DSP to fill @bytes of data into the
+ * buffer at @buf_addr_p for the RT-proxy RX port (AFE_SERVICE_CMD_RTPORT_RD).
+ *
+ * @buf_addr_p: physical address of the (previously mapped) destination.
+ * @bytes:      room available at that address.
+ *
+ * Fire-and-forget. Returns 0 on send success, -ENODEV if APR is not
+ * registered, -EINVAL on send failure.
+ */
+int afe_rt_proxy_port_read(u32 buf_addr_p, int bytes)
+{
+ int ret = 0;
+ struct afe_cmd_rtport_rd afecmd_rd;
+
+ if (this_afe.apr == NULL) {
+ pr_err("%s: register to AFE is not done\n", __func__);
+ ret = -ENODEV;
+ return ret;
+ }
+ pr_debug("%s: buf_addr_p = 0x%08x bytes = %d\n", __func__,
+ buf_addr_p, bytes);
+
+ afecmd_rd.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ afecmd_rd.hdr.pkt_size = sizeof(afecmd_rd);
+ afecmd_rd.hdr.src_port = 0;
+ afecmd_rd.hdr.dest_port = 0;
+ afecmd_rd.hdr.token = 0;
+ afecmd_rd.hdr.opcode = AFE_SERVICE_CMD_RTPORT_RD;
+ afecmd_rd.buf_addr = (uint32_t)buf_addr_p;
+ afecmd_rd.port_id = RT_PROXY_PORT_001_RX;
+ afecmd_rd.bytes_avail = bytes;
+ afecmd_rd.rsvd = 0;
+
+ ret = apr_send_pkt(this_afe.apr, (uint32_t *) &afecmd_rd);
+ if (ret < 0) {
+ pr_err("%s: AFE rtproxy read cmd to port 0x%x failed %d\n",
+ __func__, afecmd_rd.port_id, ret);
+ ret = -EINVAL;
+ return ret;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *debugfs_afelb;
+static struct dentry *debugfs_afelb_gain;
+
+/*
+ * afe_debug_open - debugfs open; stashes the node's name string
+ * ("afe_loopback" or "afe_loopback_gain") so afe_debug_write() can tell
+ * which control file was written.
+ */
+static int afe_debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ pr_info("debug intf %s\n", (char *) file->private_data);
+ return 0;
+}
+
+/*
+ * afe_get_parameters - parse @num_of_par space-separated integers from
+ * @buf into @param1. A "0x"/"0X" prefix selects base 16, otherwise base 10.
+ *
+ * Returns 0 on success, -EINVAL if a token is missing or unparsable.
+ *
+ * NOTE(review): token[1] is probed before the token length is checked; for
+ * an empty token this reads one byte past its terminator (still inside the
+ * caller's lbuf in practice) — confirm and consider a strlen guard.
+ * NOTE(review): param1 is long* but kstrtoul() expects unsigned long* —
+ * verify this passes type checking on all configs.
+ */
+static int afe_get_parameters(char *buf, long int *param1, int num_of_par)
+{
+ char *token;
+ int base, cnt;
+
+ token = strsep(&buf, " ");
+
+ for (cnt = 0; cnt < num_of_par; cnt++) {
+ if (token != NULL) {
+ if ((token[1] == 'x') || (token[1] == 'X'))
+ base = 16;
+ else
+ base = 10;
+
+ if (kstrtoul(token, base, &param1[cnt]) != 0)
+ return -EINVAL;
+
+ token = strsep(&buf, " ");
+ } else
+ return -EINVAL;
+ }
+ return 0;
+}
+#define AFE_LOOPBACK_ON (1)
+#define AFE_LOOPBACK_OFF (0)
+/*
+ * afe_debug_write - debugfs command parser for the AFE loopback controls.
+ *
+ * "afe_loopback" expects: <on|off> <port1> <port2>
+ * "afe_loopback_gain" expects: <port> <volume 0..100>, where the volume
+ * percentage is rescaled to Q6AFE_MAX_VOLUME before afe_loopback_gain().
+ *
+ * Returns @cnt on success, negative errno on parse/validation failure.
+ *
+ * NOTE(review): if private_data matches neither string, rc stays 0 (from
+ * copy_from_user) and the write is reported as success without doing
+ * anything — confirm this fall-through is intentional.
+ */
+static ssize_t afe_debug_write(struct file *filp,
+ const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ char *lb_str = filp->private_data;
+ char lbuf[32];
+ int rc;
+ unsigned long param[5];
+
+ if (cnt > sizeof(lbuf) - 1)
+ return -EINVAL;
+
+ rc = copy_from_user(lbuf, ubuf, cnt);
+ if (rc)
+ return -EFAULT;
+
+ lbuf[cnt] = '\0';
+
+ if (!strcmp(lb_str, "afe_loopback")) {
+ rc = afe_get_parameters(lbuf, param, 3);
+ if (!rc) {
+ pr_info("%s %lu %lu %lu\n", lb_str, param[0], param[1],
+ param[2]);
+
+ if ((param[0] != AFE_LOOPBACK_ON) && (param[0] !=
+ AFE_LOOPBACK_OFF)) {
+ pr_err("%s: Error, parameter 0 incorrect\n",
+ __func__);
+ rc = -EINVAL;
+ goto afe_error;
+ }
+ /* invalid ports are only logged here, not rejected */
+ if ((afe_validate_port(param[1]) < 0) ||
+ (afe_validate_port(param[2])) < 0) {
+ pr_err("%s: Error, invalid afe port\n",
+ __func__);
+ }
+ if (this_afe.apr == NULL) {
+ pr_err("%s: Error, AFE not opened\n", __func__);
+ rc = -EINVAL;
+ } else {
+ rc = afe_loopback(param[0], param[1], param[2]);
+ }
+ } else {
+ pr_err("%s: Error, invalid parameters\n", __func__);
+ rc = -EINVAL;
+ }
+
+ } else if (!strcmp(lb_str, "afe_loopback_gain")) {
+ rc = afe_get_parameters(lbuf, param, 2);
+ if (!rc) {
+ pr_info("%s %lu %lu\n", lb_str, param[0], param[1]);
+
+ if (afe_validate_port(param[0]) < 0) {
+ pr_err("%s: Error, invalid afe port\n",
+ __func__);
+ rc = -EINVAL;
+ goto afe_error;
+ }
+
+ if (param[1] > 100) {
+ pr_err("%s: Error, volume shoud be 0 to 100"
+ " percentage param = %lu\n",
+ __func__, param[1]);
+ rc = -EINVAL;
+ goto afe_error;
+ }
+
+ param[1] = (Q6AFE_MAX_VOLUME * param[1]) / 100;
+
+ if (this_afe.apr == NULL) {
+ pr_err("%s: Error, AFE not opened\n", __func__);
+ rc = -EINVAL;
+ } else {
+ rc = afe_loopback_gain(param[0], param[1]);
+ }
+ } else {
+ pr_err("%s: Error, invalid parameters\n", __func__);
+ rc = -EINVAL;
+ }
+ }
+
+afe_error:
+ if (rc == 0)
+ rc = cnt;
+ else
+ pr_err("%s: rc = %d\n", __func__, rc);
+
+ return rc;
+}
+
+static const struct file_operations afe_debug_fops = {
+ .open = afe_debug_open,
+ .write = afe_debug_write
+};
+#endif
+/*
+ * afe_sidetone - enable/disable sidetone between a TX and an RX port
+ * (AFE_PORT_CMD_SIDETONE_CTL) and wait for the DSP ack.
+ *
+ * @tx_port_id: capture-side AFE port.
+ * @rx_port_id: playback-side AFE port.
+ * @enable:     1 to enable, 0 to disable.
+ * @gain:       sidetone gain passed through to the DSP.
+ *
+ * Returns 0 on success, -EINVAL on send failure or timeout.
+ */
+int afe_sidetone(u16 tx_port_id, u16 rx_port_id, u16 enable, uint16_t gain)
+{
+ struct afe_port_sidetone_command cmd_sidetone;
+ int ret = 0;
+
+ pr_info("%s: tx_port_id:%d rx_port_id:%d enable:%d gain:%d\n", __func__,
+ tx_port_id, rx_port_id, enable, gain);
+ cmd_sidetone.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ cmd_sidetone.hdr.pkt_size = sizeof(cmd_sidetone);
+ cmd_sidetone.hdr.src_port = 0;
+ cmd_sidetone.hdr.dest_port = 0;
+ cmd_sidetone.hdr.token = 0;
+ cmd_sidetone.hdr.opcode = AFE_PORT_CMD_SIDETONE_CTL;
+ cmd_sidetone.tx_port_id = tx_port_id;
+ cmd_sidetone.rx_port_id = rx_port_id;
+ cmd_sidetone.gain = gain;
+ cmd_sidetone.enable = enable;
+
+ atomic_set(&this_afe.state, 1);
+ ret = apr_send_pkt(this_afe.apr, (uint32_t *) &cmd_sidetone);
+ if (ret < 0) {
+ pr_err("%s: AFE sidetone failed for tx_port:%d rx_port:%d\n",
+ __func__, tx_port_id, rx_port_id);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+
+ ret = wait_event_timeout(this_afe.wait,
+ (atomic_read(&this_afe.state) == 0),
+ msecs_to_jiffies(TIMEOUT_MS));
+ /*
+ * Fix: wait_event_timeout() returns 0 on timeout and never a
+ * negative value, so the old "ret < 0" check could not detect a
+ * missing ack. Use "!ret" like the other waits in this file.
+ */
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return ret;
+}
+
+/*
+ * afe_port_stop_nowait - send AFE_PORT_CMD_STOP for a port without
+ * blocking for the DSP acknowledgement.
+ *
+ * @port_id: AFE port (virtual IDs are translated first).
+ *
+ * Returns 0 on send success, -EINVAL if APR is closed or the send fails;
+ * on -ENETRESET it deregisters the APR handle and returns that result.
+ */
+int afe_port_stop_nowait(int port_id)
+{
+ struct afe_port_stop_command stop;
+ int ret = 0;
+
+ if (this_afe.apr == NULL) {
+ pr_err("AFE is already closed\n");
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ pr_debug("%s: port_id=%d\n", __func__, port_id);
+ port_id = afe_convert_virtual_to_portid(port_id);
+
+ stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ stop.hdr.pkt_size = sizeof(stop);
+ stop.hdr.src_port = 0;
+ stop.hdr.dest_port = 0;
+ stop.hdr.token = 0;
+ stop.hdr.opcode = AFE_PORT_CMD_STOP;
+ stop.port_id = port_id;
+ stop.reserved = 0;
+
+ ret = apr_send_pkt(this_afe.apr, (uint32_t *) &stop);
+
+ /* ADSP went down: drop the APR handle so it can be re-registered */
+ if (ret == -ENETRESET) {
+ pr_info("%s: Need to reset, calling APR deregister", __func__);
+ return apr_deregister(this_afe.apr);
+ } else if (IS_ERR_VALUE(ret)) {
+ pr_err("%s: AFE close failed\n", __func__);
+ ret = -EINVAL;
+ }
+
+fail_cmd:
+ return ret;
+
+}
+
+/*
+ * afe_close - stop an AFE port, with reference counting for the shared
+ * RT-proxy ports.
+ *
+ * @port_id: AFE port (virtual RT-proxy IDs are translated below).
+ *
+ * For the RT-proxy DAIs the PCM and proxy sides share one physical port:
+ * the matching pcm_afe_instance/proxy_afe_instance counter is decremented
+ * and the AFE_PORT_CMD_STOP is only sent once both counts reach zero
+ * (afe_close_done records that the real close happened). Waits up to
+ * TIMEOUT_MS for the ack. Returns 0 on success, -EINVAL on failure or
+ * timeout; on -ENETRESET it deregisters the APR handle.
+ */
+int afe_close(int port_id)
+{
+ struct afe_port_stop_command stop;
+ int ret = 0;
+
+ if (this_afe.apr == NULL) {
+ pr_err("AFE is already closed\n");
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+ pr_debug("%s: port_id=%d\n", __func__, port_id);
+
+ if ((port_id == RT_PROXY_DAI_001_RX) ||
+ (port_id == RT_PROXY_DAI_002_TX)) {
+ pr_debug("%s: before decrementing pcm_afe_instance %d\n",
+ __func__, pcm_afe_instance[port_id & 0x1]);
+ port_id = VIRTUAL_ID_TO_PORTID(port_id);
+ pcm_afe_instance[port_id & 0x1]--;
+ if (!(pcm_afe_instance[port_id & 0x1] == 0 &&
+ proxy_afe_instance[port_id & 0x1] == 0))
+ return 0;
+ else
+ afe_close_done[port_id & 0x1] = true;
+ }
+
+ if ((port_id == RT_PROXY_DAI_002_RX) ||
+ (port_id == RT_PROXY_DAI_001_TX)) {
+ pr_debug("%s: before decrementing proxy_afe_instance %d\n",
+ __func__, proxy_afe_instance[port_id & 0x1]);
+ port_id = VIRTUAL_ID_TO_PORTID(port_id);
+ proxy_afe_instance[port_id & 0x1]--;
+ if (!(pcm_afe_instance[port_id & 0x1] == 0 &&
+ proxy_afe_instance[port_id & 0x1] == 0))
+ return 0;
+ else
+ afe_close_done[port_id & 0x1] = true;
+ }
+
+ port_id = afe_convert_virtual_to_portid(port_id);
+
+ stop.hdr.hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD,
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ stop.hdr.pkt_size = sizeof(stop);
+ stop.hdr.src_port = 0;
+ stop.hdr.dest_port = 0;
+ stop.hdr.token = 0;
+ stop.hdr.opcode = AFE_PORT_CMD_STOP;
+ stop.port_id = port_id;
+ stop.reserved = 0;
+
+ atomic_set(&this_afe.state, 1);
+ ret = apr_send_pkt(this_afe.apr, (uint32_t *) &stop);
+
+ /* ADSP went down: drop the APR handle so it can be re-registered */
+ if (ret == -ENETRESET) {
+ pr_info("%s: Need to reset, calling APR deregister", __func__);
+ return apr_deregister(this_afe.apr);
+ }
+
+ if (ret < 0) {
+ pr_err("%s: AFE close failed\n", __func__);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+
+ ret = wait_event_timeout(this_afe.wait,
+ (atomic_read(&this_afe.state) == 0),
+ msecs_to_jiffies(TIMEOUT_MS));
+ if (!ret) {
+ pr_err("%s: wait_event timeout\n", __func__);
+ ret = -EINVAL;
+ goto fail_cmd;
+ }
+fail_cmd:
+ return ret;
+}
+
+/*
+ * afe_init - module init: reset the this_afe state (APR handle is
+ * registered lazily on first command) and create the write-only debugfs
+ * loopback control files.
+ */
+static int __init afe_init(void)
+{
+ init_waitqueue_head(&this_afe.wait);
+ atomic_set(&this_afe.state, 0);
+ atomic_set(&this_afe.status, 0);
+ this_afe.apr = NULL;
+#ifdef CONFIG_DEBUG_FS
+ debugfs_afelb = debugfs_create_file("afe_loopback",
+ 0220, NULL, (void *) "afe_loopback",
+ &afe_debug_fops);
+
+ debugfs_afelb_gain = debugfs_create_file("afe_loopback_gain",
+ 0220, NULL, (void *) "afe_loopback_gain",
+ &afe_debug_fops);
+
+
+#endif
+ return 0;
+}
+
+/*
+ * afe_exit - module exit: tear down the debugfs files and unmap any
+ * calibration regions still registered in afe_cal_addr (nowait, since the
+ * results cannot be acted on during teardown).
+ */
+static void __exit afe_exit(void)
+{
+ int i;
+#ifdef CONFIG_DEBUG_FS
+ if (debugfs_afelb)
+ debugfs_remove(debugfs_afelb);
+ if (debugfs_afelb_gain)
+ debugfs_remove(debugfs_afelb_gain);
+#endif
+ for (i = 0; i < MAX_AUDPROC_TYPES; i++) {
+ if (afe_cal_addr[i].cal_paddr != 0)
+ afe_cmd_memory_unmap_nowait(
+ afe_cal_addr[i].cal_paddr);
+ }
+}
+
+device_initcall(afe_init);
+__exitcall(afe_exit);
diff --git a/sound/soc/qcom/qdsp6/q6asm.c b/sound/soc/qcom/qdsp6/q6asm.c
new file mode 100644
index 0000000000000..7fc06ae03af65
--- /dev/null
+++ b/sound/soc/qcom/qdsp6/q6asm.c
@@ -0,0 +1,3841 @@
+/*
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/wait.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/dma-mapping.h>
+#include <linux/miscdevice.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/msm_audio.h>
+#include <linux/debugfs.h>
+#include <linux/time.h>
+#include <linux/atomic.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/ioctls.h>
+
+#include <sound/qdsp6v2/audio_acdb.h>
+#include <sound/qdsp6v2/rtac.h>
+
+#include <sound/apr_audio.h>
+#include <sound/q6asm.h>
+
+
+#define TRUE 0x01
+#define FALSE 0x00
+/* Indices into the ASM READDONE event payload words */
+#define READDONE_IDX_STATUS 0
+#define READDONE_IDX_BUFFER 1
+#define READDONE_IDX_SIZE 2
+#define READDONE_IDX_OFFSET 3
+#define READDONE_IDX_MSW_TS 4
+#define READDONE_IDX_LSW_TS 5
+#define READDONE_IDX_FLAGS 6
+#define READDONE_IDX_NUMFRAMES 7
+#define READDONE_IDX_ID 8
+#ifdef CONFIG_DEBUG_FS
+#define OUT_BUFFER_SIZE 56
+#define IN_BUFFER_SIZE 24
+#endif
+/* Protects the session[] table below */
+static DEFINE_MUTEX(session_lock);
+
+/* session id: 0 reserved */
+static struct audio_client *session[SESSION_MAX+1];
+static int32_t q6asm_mmapcallback(struct apr_client_data *data, void *priv);
+static int32_t q6asm_callback(struct apr_client_data *data, void *priv);
+static void q6asm_add_hdr(struct audio_client *ac, struct apr_hdr *hdr,
+ uint32_t pkt_size, uint32_t cmd_flg);
+static void q6asm_add_hdr_async(struct audio_client *ac, struct apr_hdr *hdr,
+ uint32_t pkt_size, uint32_t cmd_flg);
+static int q6asm_memory_map_regions(struct audio_client *ac, int dir,
+ uint32_t bufsz, uint32_t bufcnt);
+static int q6asm_memory_unmap_regions(struct audio_client *ac, int dir,
+ uint32_t bufsz, uint32_t bufcnt);
+
+static void q6asm_reset_buf_state(struct audio_client *ac);
+
+#ifdef CONFIG_DEBUG_FS
+static struct timeval out_cold_tv;
+static struct timeval out_warm_tv;
+static struct timeval out_cont_tv;
+static struct timeval in_cont_tv;
+static long out_enable_flag;
+static long in_enable_flag;
+static struct dentry *out_dentry;
+static struct dentry *in_dentry;
+static int in_cont_index;
+/*This var is used to keep track of first write done for cold output latency */
+static int out_cold_index;
+static char *out_buffer;
+static char *in_buffer;
+/* debugfs open for the output-latency node; keeps the inode's private data */
+static int audio_output_latency_dbgfs_open(struct inode *inode,
+ struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+/*
+ * Report the recorded cold/warm/continuous output latency timestamps as a
+ * comma-separated "sec,usec" list.
+ */
+static ssize_t audio_output_latency_dbgfs_read(struct file *file,
+ char __user *buf, size_t count, loff_t *ppos)
+{
+ snprintf(out_buffer, OUT_BUFFER_SIZE, "%ld,%ld,%ld,%ld,%ld,%ld,",\
+ out_cold_tv.tv_sec, out_cold_tv.tv_usec, out_warm_tv.tv_sec,\
+ out_warm_tv.tv_usec, out_cont_tv.tv_sec, out_cont_tv.tv_usec);
+ return simple_read_from_buffer(buf, OUT_BUFFER_SIZE, ppos,
+ out_buffer, OUT_BUFFER_SIZE);
+}
+/*
+ * Parse a small decimal flag written to the debugfs node into
+ * out_enable_flag and reset out_cold_index.
+ *
+ * Returns @count on success, -EINVAL on bad input, -EFAULT on copy failure.
+ */
+static ssize_t audio_output_latency_dbgfs_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ char *temp;
+
+ if (count > 2*sizeof(char))
+ return -EINVAL;
+
+ /* Fix: +1 byte and explicit terminator — kstrtol() requires a
+ * NUL-terminated string, and the old code also copied a fixed two
+ * bytes even when the user supplied only one. */
+ temp = kmalloc(2*sizeof(char) + 1, GFP_KERNEL);
+
+ out_cold_index = 0;
+
+ if (temp) {
+ if (copy_from_user(temp, buf, count)) {
+ kfree(temp);
+ return -EFAULT;
+ }
+ temp[count] = '\0';
+ if (!kstrtol(temp, 10, &out_enable_flag)) {
+ kfree(temp);
+ return count;
+ }
+ kfree(temp);
+ }
+ return -EINVAL;
+}
+/* debugfs ops for the output-latency measurement node */
+static const struct file_operations audio_output_latency_debug_fops = {
+ .open = audio_output_latency_dbgfs_open,
+ .read = audio_output_latency_dbgfs_read,
+ .write = audio_output_latency_dbgfs_write
+};
+
+/* debugfs open for the input-latency node; keeps the inode's private data */
+static int audio_input_latency_dbgfs_open(struct inode *inode,
+ struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+/* Report the recorded continuous input latency timestamp as "sec,usec," */
+static ssize_t audio_input_latency_dbgfs_read(struct file *file,
+ char __user *buf, size_t count, loff_t *ppos)
+{
+ snprintf(in_buffer, IN_BUFFER_SIZE, "%ld,%ld,",\
+ in_cont_tv.tv_sec, in_cont_tv.tv_usec);
+ return simple_read_from_buffer(buf, IN_BUFFER_SIZE, ppos,
+ in_buffer, IN_BUFFER_SIZE);
+}
+/*
+ * Parse a small decimal flag written to the debugfs node into
+ * in_enable_flag.
+ *
+ * Returns @count on success, -EINVAL on bad input, -EFAULT on copy failure.
+ */
+static ssize_t audio_input_latency_dbgfs_write(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ char *temp;
+
+ if (count > 2*sizeof(char))
+ return -EINVAL;
+
+ /* Fix: +1 byte and explicit terminator — kstrtol() requires a
+ * NUL-terminated string, and the old code also copied a fixed two
+ * bytes even when the user supplied only one. */
+ temp = kmalloc(2*sizeof(char) + 1, GFP_KERNEL);
+ if (temp) {
+ if (copy_from_user(temp, buf, count)) {
+ kfree(temp);
+ return -EFAULT;
+ }
+ temp[count] = '\0';
+ if (!kstrtol(temp, 10, &in_enable_flag)) {
+ kfree(temp);
+ return count;
+ }
+ kfree(temp);
+ }
+ return -EINVAL;
+}
+/* debugfs ops for the input-latency measurement node */
+static const struct file_operations audio_input_latency_debug_fops = {
+ .open = audio_input_latency_dbgfs_open,
+ .read = audio_input_latency_dbgfs_read,
+ .write = audio_input_latency_dbgfs_write
+};
+#endif
+/*
+ * State for the single shared ASM memory-map APR port: refcounted across
+ * all audio clients (see q6asm_audio_client_alloc/free), with a
+ * cmd_state/cmd_wait pair used to block for mmap command acks.
+ */
+struct asm_mmap {
+ atomic_t ref_cnt;
+ atomic_t cmd_state;
+ wait_queue_head_t cmd_wait;
+ void *apr;
+};
+
+static struct asm_mmap this_mmap;
+
+/*
+ * Claim the first free slot in the session table for @ac.
+ * Returns the session id (1..SESSION_MAX) or -ENOMEM if all are in use.
+ */
+static int q6asm_session_alloc(struct audio_client *ac)
+{
+ int id;
+
+ mutex_lock(&session_lock);
+ for (id = 1; id <= SESSION_MAX; id++) {
+ if (session[id])
+ continue;
+ session[id] = ac;
+ mutex_unlock(&session_lock);
+ return id;
+ }
+ mutex_unlock(&session_lock);
+ return -ENOMEM;
+}
+
+/*
+ * Release @ac's session slot: detach it from the RTAC ADM device list,
+ * clear the session table entry, and reset the client's session fields.
+ */
+static void q6asm_session_free(struct audio_client *ac)
+{
+ pr_debug("%s: sessionid[%d]\n", __func__, ac->session);
+ rtac_remove_popp_from_adm_devices(ac->session);
+ mutex_lock(&session_lock);
+ session[ac->session] = 0;
+ mutex_unlock(&session_lock);
+ ac->session = 0;
+ ac->perf_mode = false;
+ return;
+}
+
+/*
+ * q6asm_audio_client_buf_free - free all per-direction audio buffers of a
+ * SYNC_IO_MODE client: unmap the regions from the DSP, then release each
+ * DMA allocation and finally the audio_buffer array itself.
+ *
+ * @dir: IN or OUT.
+ * @ac:  the owning audio client.
+ *
+ * No-op (returns 0) when no buffers were allocated or the client is not in
+ * SYNC_IO_MODE. Serialised by ac->cmd_lock.
+ */
+int q6asm_audio_client_buf_free(unsigned int dir,
+ struct audio_client *ac)
+{
+ struct audio_port_data *port;
+ int cnt = 0;
+ int rc = 0;
+ pr_debug("%s: Session id %d\n", __func__, ac->session);
+ mutex_lock(&ac->cmd_lock);
+ if (ac->io_mode & SYNC_IO_MODE) {
+ port = &ac->port[dir];
+ if (!port->buf) {
+ mutex_unlock(&ac->cmd_lock);
+ return 0;
+ }
+ cnt = port->max_buf_cnt - 1;
+
+ /* unmap from the DSP before freeing the backing memory */
+ if (cnt >= 0) {
+ rc = q6asm_memory_unmap_regions(ac, dir,
+ port->buf[0].size,
+ port->max_buf_cnt);
+ if (rc < 0)
+ pr_err("%s CMD Memory_unmap_regions failed\n",
+ __func__);
+ }
+
+ while (cnt >= 0) {
+ if (port->buf[cnt].data) {
+ pr_debug("%s:data[%p]phys[%p][%p] cnt[%d] mem_buffer[%p]\n",
+ __func__, (void *)port->buf[cnt].data,
+ (void *)port->buf[cnt].phys,
+ (void *)&port->buf[cnt].phys, cnt,
+ (void *)port->buf[cnt].mem_buffer);
+
+ dma_free_writecombine(NULL, port->buf[cnt].size, port->buf[cnt].data, port->buf[cnt].phys);
+
+ port->buf[cnt].data = NULL;
+ port->buf[cnt].phys = 0;
+ --(port->max_buf_cnt);
+ }
+ --cnt;
+ }
+ kfree(port->buf);
+ port->buf = NULL;
+ }
+ mutex_unlock(&ac->cmd_lock);
+ return 0;
+}
+
+/*
+ * q6asm_audio_client_buf_free_contiguous - free a buffer set that was
+ * allocated as one contiguous DMA region (buf[0] holds the allocation;
+ * the other entries are sub-buffer views into it).
+ *
+ * @dir: IN or OUT.
+ * @ac:  the owning audio client.
+ *
+ * Unmaps via the single-region q6asm_memory_unmap(), frees the one DMA
+ * block, clears every entry, and releases the array. Serialised by
+ * ac->cmd_lock.
+ */
+int q6asm_audio_client_buf_free_contiguous(unsigned int dir,
+ struct audio_client *ac)
+{
+ struct audio_port_data *port;
+ int cnt = 0;
+ int rc = 0;
+ pr_debug("%s: Session id %d\n", __func__, ac->session);
+ mutex_lock(&ac->cmd_lock);
+ port = &ac->port[dir];
+ if (!port->buf) {
+ mutex_unlock(&ac->cmd_lock);
+ return 0;
+ }
+ cnt = port->max_buf_cnt - 1;
+
+ if (cnt >= 0) {
+ rc = q6asm_memory_unmap(ac, port->buf[0].phys, dir);
+ if (rc < 0)
+ pr_err("%s CMD Memory_unmap_regions failed\n",
+ __func__);
+ }
+
+ if (port->buf[0].data) {
+ pr_debug("%s:data[%p]phys[%p][%p] mem_buffer[%p]\n",
+ __func__,
+ (void *)port->buf[0].data,
+ (void *)port->buf[0].phys,
+ (void *)&port->buf[0].phys,
+ (void *)port->buf[0].mem_buffer);
+
+
+ dma_free_writecombine(NULL, port->max_buf_cnt * port->buf[0].size,
+ port->buf[0].data, port->buf[0].phys);
+ }
+
+ while (cnt >= 0) {
+ port->buf[cnt].data = NULL;
+ port->buf[cnt].phys = 0;
+ cnt--;
+ }
+ port->max_buf_cnt = 0;
+ kfree(port->buf);
+ port->buf = NULL;
+ mutex_unlock(&ac->cmd_lock);
+ return 0;
+}
+
+/*
+ * q6asm_audio_client_free - tear down an audio client: free its buffers
+ * (SYNC_IO_MODE only), deregister its APR port, release its session slot,
+ * drop a reference on the shared mmap APR port (deregistering it when the
+ * count reaches zero), and free the client itself.
+ *
+ * Safe to call with a NULL or session-less client (no-op).
+ */
+void q6asm_audio_client_free(struct audio_client *ac)
+{
+ int loopcnt;
+ struct audio_port_data *port;
+ if (!ac || !ac->session)
+ return;
+ pr_debug("%s: Session id %d\n", __func__, ac->session);
+ if (ac->io_mode & SYNC_IO_MODE) {
+ for (loopcnt = 0; loopcnt <= OUT; loopcnt++) {
+ port = &ac->port[loopcnt];
+ if (!port->buf)
+ continue;
+ pr_debug("%s:loopcnt = %d\n", __func__, loopcnt);
+ q6asm_audio_client_buf_free(loopcnt, ac);
+ }
+ }
+
+ apr_deregister(ac->apr);
+ q6asm_session_free(ac);
+
+ pr_debug("%s: APR De-Register\n", __func__);
+ if (atomic_read(&this_mmap.ref_cnt) <= 0) {
+ pr_err("%s: APR Common Port Already Closed\n", __func__);
+ goto done;
+ }
+
+ atomic_dec(&this_mmap.ref_cnt);
+ if (atomic_read(&this_mmap.ref_cnt) == 0) {
+ apr_deregister(this_mmap.apr);
+ pr_debug("%s:APR De-Register common port\n", __func__);
+ }
+done:
+ kfree(ac);
+ return;
+}
+
+/*
+ * Switch the client between SYNC_IO_MODE and ASYNC_IO_MODE (the two bits
+ * are kept mutually exclusive in ac->io_mode).
+ * Returns 0 on success, -EINVAL for a NULL client or unknown mode.
+ */
+int q6asm_set_io_mode(struct audio_client *ac, uint32_t mode)
+{
+ if (ac == NULL) {
+ pr_err("%s APR handle NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (mode) {
+ case ASYNC_IO_MODE:
+ ac->io_mode &= ~SYNC_IO_MODE;
+ ac->io_mode |= ASYNC_IO_MODE;
+ break;
+ case SYNC_IO_MODE:
+ ac->io_mode &= ~ASYNC_IO_MODE;
+ ac->io_mode |= SYNC_IO_MODE;
+ break;
+ default:
+ pr_err("%s:Not an valid IO Mode:%d\n", __func__, ac->io_mode);
+ return -EINVAL;
+ }
+
+ pr_debug("%s:Set Mode to %d\n", __func__, ac->io_mode);
+ return 0;
+}
+
+/*
+ * q6asm_audio_client_alloc - create a new ASM audio client.
+ *
+ * @cb:   event callback for this client's session.
+ * @priv: opaque pointer handed back to @cb.
+ *
+ * Allocates a session id, registers a per-session APR port (dest port is
+ * session<<8|1), registers the shared mmap APR port on first use and bumps
+ * its refcount, then initialises the client's waitqueues, locks and state.
+ * Returns the client or NULL on failure (all partial state undone).
+ */
+struct audio_client *q6asm_audio_client_alloc(app_cb cb, void *priv)
+{
+ struct audio_client *ac;
+ int n;
+ int lcnt = 0;
+
+ ac = kzalloc(sizeof(struct audio_client), GFP_KERNEL);
+ if (!ac)
+ return NULL;
+ n = q6asm_session_alloc(ac);
+ if (n <= 0)
+ goto fail_session;
+ ac->session = n;
+ ac->cb = cb;
+ ac->priv = priv;
+ ac->io_mode = SYNC_IO_MODE;
+ ac->perf_mode = false;
+ ac->apr = apr_register("ADSP", "ASM", \
+ (apr_fn)q6asm_callback,\
+ ((ac->session) << 8 | 0x0001),\
+ ac);
+
+ if (ac->apr == NULL) {
+ pr_err("%s Registration with APR failed\n", __func__);
+ goto fail;
+ }
+ rtac_set_asm_handle(n, ac->apr);
+
+ pr_debug("%s Registering the common port with APR\n", __func__);
+ if (atomic_read(&this_mmap.ref_cnt) == 0) {
+ this_mmap.apr = apr_register("ADSP", "ASM", \
+ (apr_fn)q6asm_mmapcallback,\
+ 0x0FFFFFFFF, &this_mmap);
+ if (this_mmap.apr == NULL) {
+ pr_debug("%s Unable to register APR ASM common port\n",
+ __func__);
+ goto fail;
+ }
+ }
+
+ atomic_inc(&this_mmap.ref_cnt);
+ init_waitqueue_head(&ac->cmd_wait);
+ init_waitqueue_head(&ac->time_wait);
+ atomic_set(&ac->time_flag, 1);
+ mutex_init(&ac->cmd_lock);
+ for (lcnt = 0; lcnt <= OUT; lcnt++) {
+ mutex_init(&ac->port[lcnt].lock);
+ spin_lock_init(&ac->port[lcnt].dsp_lock);
+ }
+ atomic_set(&ac->cmd_state, 0);
+ atomic_set(&ac->cmd_response, 0);
+
+ pr_debug("%s: session[%d]\n", __func__, ac->session);
+
+ return ac;
+fail:
+ q6asm_audio_client_free(ac);
+ return NULL;
+fail_session:
+ kfree(ac);
+ return NULL;
+}
+
+/*
+ * Look up the audio client registered for @session_id.
+ * Returns NULL (with an error log) for an out-of-range or inactive id.
+ */
+struct audio_client *q6asm_get_audio_client(int session_id)
+{
+ struct audio_client *ac = NULL;
+
+ if (session_id <= 0 || session_id > SESSION_MAX)
+ pr_err("%s: invalid session: %d\n", __func__, session_id);
+ else if (!session[session_id])
+ pr_err("%s: session not active: %d\n", __func__, session_id);
+ else
+ ac = session[session_id];
+
+ return ac;
+}
+
+/*
+ * q6asm_audio_client_buf_alloc - allocate @bufcnt DMA buffers of @bufsz
+ * bytes each for one direction of a SYNC_IO_MODE client and map them to
+ * the DSP.
+ *
+ * @dir:    IN or OUT.
+ * @ac:     the owning audio client.
+ * @bufsz:  size of each buffer in bytes.
+ * @bufcnt: number of buffers.
+ *
+ * Returns 0 on success (also when buffers already exist, or the client is
+ * not in SYNC_IO_MODE), -EINVAL on any failure (partial allocations are
+ * released via q6asm_audio_client_buf_free()).
+ */
+int q6asm_audio_client_buf_alloc(unsigned int dir,
+ struct audio_client *ac,
+ unsigned int bufsz,
+ unsigned int bufcnt)
+{
+ int cnt = 0;
+ int rc = 0;
+ struct audio_buffer *buf;
+
+ if (!(ac) || ((dir != IN) && (dir != OUT)))
+ return -EINVAL;
+
+ pr_debug("%s: session[%d]bufsz[%d]bufcnt[%d]\n", __func__, ac->session,
+ bufsz, bufcnt);
+
+ if (ac->session <= 0 || ac->session > 8)
+ goto fail;
+
+ if (ac->io_mode & SYNC_IO_MODE) {
+ if (ac->port[dir].buf) {
+ pr_debug("%s: buffer already allocated\n", __func__);
+ return 0;
+ }
+ mutex_lock(&ac->cmd_lock);
+ buf = kzalloc(((sizeof(struct audio_buffer))*bufcnt),
+ GFP_KERNEL);
+
+ if (!buf) {
+ mutex_unlock(&ac->cmd_lock);
+ goto fail;
+ }
+
+ ac->port[dir].buf = buf;
+
+ while (cnt < bufcnt) {
+ if (bufsz > 0) {
+ if (!buf[cnt].data) {
+ buf[cnt].size = bufsz;
+ buf[cnt].data = dma_alloc_writecombine(NULL, bufsz,
+ &buf[cnt].phys, GFP_KERNEL);
+ /* Fix: check the buffer just allocated
+ * (buf[cnt], not buf[0]), and drop
+ * cmd_lock before the fail path —
+ * q6asm_audio_client_buf_free() takes
+ * the same mutex and would deadlock. */
+ if (WARN_ON(IS_ERR_OR_NULL(buf[cnt].data))) {
+ pr_err("%s: allocation failed\n", __func__);
+ mutex_unlock(&ac->cmd_lock);
+ goto fail;
+ }
+
+
+ buf[cnt].used = 1;
+ buf[cnt].size = bufsz;
+ buf[cnt].actual_size = bufsz;
+ pr_debug("%s data[%p]phys[%p][%p]\n",
+ __func__,
+ (void *)buf[cnt].data,
+ (void *)buf[cnt].phys,
+ (void *)&buf[cnt].phys);
+ cnt++;
+ }
+ }
+ }
+ ac->port[dir].max_buf_cnt = cnt;
+
+ mutex_unlock(&ac->cmd_lock);
+ rc = q6asm_memory_map_regions(ac, dir, bufsz, cnt);
+ if (rc < 0) {
+ pr_err("%s:CMD Memory_map_regions failed\n", __func__);
+ goto fail;
+ }
+ }
+ return 0;
+fail:
+ q6asm_audio_client_buf_free(dir, ac);
+ return -EINVAL;
+}
+
+/*
+ * Allocate one physically contiguous DMA region of bufsz * bufcnt
+ * bytes for @dir, carve it into @bufcnt buffers, and map it with
+ * the DSP.  Returns 0 on success or -EINVAL on failure (the fail
+ * path frees any partial allocation).
+ */
+int q6asm_audio_client_buf_alloc_contiguous(unsigned int dir,
+ struct audio_client *ac,
+ unsigned int bufsz,
+ unsigned int bufcnt)
+{
+ int cnt = 0;
+ int rc = 0;
+ struct audio_buffer *buf;
+
+ if (!(ac) || ((dir != IN) && (dir != OUT)))
+ return -EINVAL;
+
+ pr_debug("%s: session[%d]bufsz[%d]bufcnt[%d]\n",
+ __func__, ac->session,
+ bufsz, bufcnt);
+
+ if (ac->session <= 0 || ac->session > 8)
+ goto fail;
+
+ if (ac->port[dir].buf) {
+ pr_debug("%s: buffer already allocated\n", __func__);
+ return 0;
+ }
+ mutex_lock(&ac->cmd_lock);
+ buf = kzalloc(((sizeof(struct audio_buffer))*bufcnt),
+ GFP_KERNEL);
+
+ if (!buf) {
+ mutex_unlock(&ac->cmd_lock);
+ goto fail;
+ }
+
+ ac->port[dir].buf = buf;
+
+ buf[0].size = bufsz * bufcnt;
+ buf[0].data = dma_alloc_writecombine(NULL, bufsz *bufcnt,
+ &buf[0].phys, GFP_KERNEL);
+ if (WARN_ON(IS_ERR_OR_NULL(buf[0].data))) {
+ pr_err("%s: allocation failed\n", __func__);
+ /* must not reach the cleanup path with cmd_lock held */
+ mutex_unlock(&ac->cmd_lock);
+ goto fail;
+ }
+
+ /* used: OUT buffers start "free" (dir ^ 1), see is_cpu_buf_avail */
+ buf[0].used = dir ^ 1;
+ buf[0].size = bufsz;
+ buf[0].actual_size = bufsz;
+ cnt = 1;
+ while (cnt < bufcnt) {
+ if (bufsz > 0) {
+ /* remaining buffers alias into the region owned by buf[0] */
+ buf[cnt].data = buf[0].data + (cnt * bufsz);
+ buf[cnt].phys = buf[0].phys + (cnt * bufsz);
+ buf[cnt].used = dir ^ 1;
+ buf[cnt].size = bufsz;
+ buf[cnt].actual_size = bufsz;
+ pr_debug("%s data[%p]phys[%p][%p]\n", __func__,
+ (void *)buf[cnt].data,
+ (void *)buf[cnt].phys,
+ (void *)&buf[cnt].phys);
+ }
+ cnt++;
+ }
+ ac->port[dir].max_buf_cnt = cnt;
+
+ pr_debug("%s ac->port[%d].max_buf_cnt[%d]\n", __func__, dir,
+ ac->port[dir].max_buf_cnt);
+ mutex_unlock(&ac->cmd_lock);
+ rc = q6asm_memory_map(ac, buf[0].phys, dir, bufsz, cnt);
+ if (rc < 0) {
+ pr_err("%s:CMD Memory_map_regions failed\n", __func__);
+ goto fail;
+ }
+ return 0;
+fail:
+ q6asm_audio_client_buf_free_contiguous(dir, ac);
+ return -EINVAL;
+}
+
+/*
+ * APR callback for the shared (session-less) memory-map port.
+ * On RESET_EVENTS the common APR handle is torn down; for basic
+ * responses to map/unmap commands the waiter on this_mmap.cmd_wait
+ * is woken.  Always returns 0.
+ */
+static int32_t q6asm_mmapcallback(struct apr_client_data *data, void *priv)
+{
+ uint32_t token;
+ uint32_t *payload = data->payload;
+
+ if (data->opcode == RESET_EVENTS) {
+ pr_debug("%s: Reset event is received: %d %d apr[%p]\n",
+ __func__,
+ data->reset_event,
+ data->reset_proc,
+ this_mmap.apr);
+ apr_reset(this_mmap.apr);
+ this_mmap.apr = NULL;
+ atomic_set(&this_mmap.cmd_state, 0);
+ return 0;
+ }
+
+ pr_debug("%s:ptr0[0x%x]ptr1[0x%x]opcode[0x%x] token[0x%x]payload_s[%d] src[%d] dest[%d]\n",
+ __func__, payload[0], payload[1], data->opcode, data->token,
+ data->payload_size, data->src_port, data->dest_port);
+
+ if (data->opcode == APR_BASIC_RSP_RESULT) {
+ /* NOTE(review): token is captured but never used here */
+ token = data->token;
+ switch (payload[0]) {
+ case ASM_SESSION_CMD_MEMORY_MAP:
+ case ASM_SESSION_CMD_MEMORY_UNMAP:
+ case ASM_SESSION_CMD_MEMORY_MAP_REGIONS:
+ case ASM_SESSION_CMD_MEMORY_UNMAP_REGIONS:
+ pr_debug("%s:command[0x%x]success [0x%x]\n",
+ __func__, payload[0], payload[1]);
+ /* wake the thread blocked in a map/unmap command */
+ if (atomic_read(&this_mmap.cmd_state)) {
+ atomic_set(&this_mmap.cmd_state, 0);
+ wake_up(&this_mmap.cmd_wait);
+ }
+ break;
+ default:
+ pr_debug("%s:command[0x%x] not expecting rsp\n",
+ __func__, payload[0]);
+ break;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Return 1 when @opcode (with cmd_type[0] as the original command id
+ * for basic responses) is the reply to a command issued via the
+ * no-wait path, 0 otherwise.
+ */
+static int32_t is_no_wait_cmd_rsp(uint32_t opcode, uint32_t *cmd_type)
+{
+ if (opcode == ASM_DATA_CMDRSP_EOS)
+ return 1;
+
+ if (opcode != APR_BASIC_RSP_RESULT)
+ return 0;
+
+ if (cmd_type == NULL) {
+ pr_err("%s: null pointer!", __func__);
+ return 0;
+ }
+
+ switch (cmd_type[0]) {
+ case ASM_SESSION_CMD_RUN:
+ case ASM_SESSION_CMD_PAUSE:
+ case ASM_DATA_CMD_EOS:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * Per-session APR callback for ASM events.  Dispatches basic command
+ * responses (waking waiters on cmd_wait), write/read-done buffer
+ * bookkeeping, EOS acks, session-time responses and sample-rate
+ * change notifications, then forwards the event to the client's
+ * registered callback (ac->cb) if any.
+ */
+static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
+{
+ int i = 0;
+ struct audio_client *ac = (struct audio_client *)priv;
+ uint32_t token;
+ unsigned long dsp_flags;
+ uint32_t *payload;
+ uint32_t wakeup_flag = 1;
+
+
+ if ((ac == NULL) || (data == NULL)) {
+ pr_err("ac or priv NULL\n");
+ return -EINVAL;
+ }
+ if (ac->session <= 0 || ac->session > 8) {
+ pr_err("%s:Session ID is invalid, session = %d\n", __func__,
+ ac->session);
+ return -EINVAL;
+ }
+
+ payload = data->payload;
+ /* responses to no-wait commands must not wake the cmd waiter */
+ if ((atomic_read(&ac->nowait_cmd_cnt) > 0) &&
+ is_no_wait_cmd_rsp(data->opcode, payload)) {
+ pr_debug("%s: nowait_cmd_cnt %d\n",
+ __func__,
+ atomic_read(&ac->nowait_cmd_cnt));
+ atomic_dec(&ac->nowait_cmd_cnt);
+ wakeup_flag = 0;
+ }
+
+ if (data->opcode == RESET_EVENTS) {
+ pr_debug("q6asm_callback: Reset event is received: %d %d apr[%p]\n",
+ data->reset_event, data->reset_proc, ac->apr);
+ if (ac->cb)
+ ac->cb(data->opcode, data->token,
+ (uint32_t *)data->payload, ac->priv);
+ apr_reset(ac->apr);
+ return 0;
+ }
+
+ pr_debug("%s: session[%d]opcode[0x%x] token[0x%x]payload_s[%d] src[%d] dest[%d]\n",
+ __func__,
+ ac->session, data->opcode,
+ data->token, data->payload_size, data->src_port,
+ data->dest_port);
+
+ if (data->opcode == APR_BASIC_RSP_RESULT) {
+ token = data->token;
+ pr_debug("%s payload[0]:%x", __func__, payload[0]);
+ switch (payload[0]) {
+ case ASM_STREAM_CMD_SET_PP_PARAMS:
+ if (rtac_make_asm_callback(ac->session, payload,
+ data->payload_size))
+ break;
+ /* fallthrough when RTAC did not consume the event */
+ case ASM_SESSION_CMD_PAUSE:
+ case ASM_DATA_CMD_EOS:
+ case ASM_STREAM_CMD_CLOSE:
+ case ASM_STREAM_CMD_FLUSH:
+ case ASM_SESSION_CMD_RUN:
+ case ASM_SESSION_CMD_REGISTER_FOR_TX_OVERFLOW_EVENTS:
+ case ASM_STREAM_CMD_FLUSH_READBUFS:
+ pr_debug("%s:Payload = [0x%x]\n", __func__, payload[0]);
+ if (token != ac->session) {
+ pr_err("%s:Invalid session[%d] rxed expected[%d]",
+ __func__, token, ac->session);
+ return -EINVAL;
+ }
+ /* fallthrough into the common wake-up handling */
+ case ASM_STREAM_CMD_OPEN_READ:
+ case ASM_STREAM_CMD_OPEN_READ_V2_1:
+ case ASM_STREAM_CMD_OPEN_WRITE:
+ case ASM_STREAM_CMD_OPEN_WRITE_V2_1:
+ case ASM_STREAM_CMD_OPEN_READWRITE:
+ case ASM_STREAM_CMD_OPEN_LOOPBACK:
+ case ASM_DATA_CMD_MEDIA_FORMAT_UPDATE:
+ case ASM_STREAM_CMD_SET_ENCDEC_PARAM:
+ case ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED:
+ case ASM_STREAM_CMD_OPEN_READ_COMPRESSED:
+ if (payload[0] == ASM_STREAM_CMD_CLOSE) {
+ atomic_set(&ac->cmd_close_state, 0);
+ wake_up(&ac->cmd_wait);
+ } else if (atomic_read(&ac->cmd_state) &&
+ wakeup_flag) {
+ atomic_set(&ac->cmd_state, 0);
+ /* record unsupported-format responses so the
+ * waiter can fail the open */
+ if (payload[1] == ADSP_EUNSUPPORTED) {
+ pr_debug("paload[1]:%d unsupported",
+ payload[1]);
+ atomic_set(&ac->cmd_response, 1);
+ }
+ else
+ atomic_set(&ac->cmd_response, 0);
+ wake_up(&ac->cmd_wait);
+ }
+ if (ac->cb)
+ ac->cb(data->opcode, data->token,
+ (uint32_t *)data->payload, ac->priv);
+ break;
+ default:
+ pr_debug("%s:command[0x%x] not expecting rsp\n",
+ __func__, payload[0]);
+ break;
+ }
+ return 0;
+ }
+
+ switch (data->opcode) {
+ case ASM_DATA_EVENT_WRITE_DONE:{
+ struct audio_port_data *port = &ac->port[IN];
+ pr_debug("%s: Rxed opcode[0x%x] status[0x%x] token[%d]",
+ __func__, payload[0], payload[1],
+ data->token);
+ if (ac->io_mode & SYNC_IO_MODE) {
+ if (port->buf == NULL) {
+ pr_err("%s: Unexpected Write Done\n",
+ __func__);
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&port->dsp_lock, dsp_flags);
+ /* token indexes the buffer that was written */
+ if (port->buf[data->token].phys !=
+ payload[0]) {
+ pr_err("Buf expected[%p]rxed[%p]\n",\
+ (void *)port->buf[data->token].phys,\
+ (void *)payload[0]);
+ spin_unlock_irqrestore(&port->dsp_lock,
+ dsp_flags);
+ return -EINVAL;
+ }
+ token = data->token;
+ port->buf[token].used = 1;
+ spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
+#ifdef CONFIG_DEBUG_FS
+ if (out_enable_flag) {
+ /* For first Write done log the time and reset
+ out_cold_index*/
+ if (out_cold_index != 1) {
+ do_gettimeofday(&out_cold_tv);
+ pr_debug("COLD: apr_send_pkt at %ld sec %ld microsec\n",
+ out_cold_tv.tv_sec,
+ out_cold_tv.tv_usec);
+ out_cold_index = 1;
+ }
+ pr_debug("out_enable_flag %ld",\
+ out_enable_flag);
+ }
+#endif
+ for (i = 0; i < port->max_buf_cnt; i++)
+ pr_debug("%d ", port->buf[i].used);
+
+ }
+ break;
+ }
+ case ASM_STREAM_CMDRSP_GET_PP_PARAMS:
+ rtac_make_asm_callback(ac->session, payload,
+ data->payload_size);
+ break;
+ case ASM_DATA_EVENT_READ_DONE:{
+
+ struct audio_port_data *port = &ac->port[OUT];
+#ifdef CONFIG_DEBUG_FS
+ if (in_enable_flag) {
+ /* when in_cont_index == 7, DSP would be
+ * writing into the 8th 512 byte buffer and this
+ * timestamp is tapped here.Once done it then writes
+ * to 9th 512 byte buffer.These two buffers(8th, 9th)
+ * reach the test application in 5th iteration and that
+ * timestamp is tapped at user level. The difference
+ * of these two timestamps gives us the time between
+ * the time at which dsp started filling the sample
+ * required and when it reached the test application.
+ * Hence continuous input latency
+ */
+ if (in_cont_index == 7) {
+ do_gettimeofday(&in_cont_tv);
+ pr_err("In_CONT:previous read buffer done at %ld sec %ld microsec\n",
+ in_cont_tv.tv_sec, in_cont_tv.tv_usec);
+ }
+ }
+#endif
+ pr_debug("%s:R-D: status=%d buff_add=%x act_size=%d offset=%d\n",
+ __func__, payload[READDONE_IDX_STATUS],
+ payload[READDONE_IDX_BUFFER],
+ payload[READDONE_IDX_SIZE],
+ payload[READDONE_IDX_OFFSET]);
+ pr_debug("%s:R-D:msw_ts=%d lsw_ts=%d flags=%d id=%d num=%d\n",
+ __func__, payload[READDONE_IDX_MSW_TS],
+ payload[READDONE_IDX_LSW_TS],
+ payload[READDONE_IDX_FLAGS],
+ payload[READDONE_IDX_ID],
+ payload[READDONE_IDX_NUMFRAMES]);
+#ifdef CONFIG_DEBUG_FS
+ if (in_enable_flag)
+ in_cont_index++;
+#endif
+ if (ac->io_mode & SYNC_IO_MODE) {
+ if (port->buf == NULL) {
+ pr_err("%s: Unexpected Write Done\n", __func__);
+ return -EINVAL;
+ }
+ spin_lock_irqsave(&port->dsp_lock, dsp_flags);
+ token = data->token;
+ /* mark buffer free for the CPU, record actual bytes */
+ port->buf[token].used = 0;
+ if (port->buf[token].phys !=
+ payload[READDONE_IDX_BUFFER]) {
+ pr_err("Buf expected[%p]rxed[%p]\n",\
+ (void *)port->buf[token].phys,\
+ (void *)payload[READDONE_IDX_BUFFER]);
+ spin_unlock_irqrestore(&port->dsp_lock,
+ dsp_flags);
+ break;
+ }
+ port->buf[token].actual_size =
+ payload[READDONE_IDX_SIZE];
+ spin_unlock_irqrestore(&port->dsp_lock, dsp_flags);
+ }
+ break;
+ }
+ case ASM_DATA_EVENT_EOS:
+ case ASM_DATA_CMDRSP_EOS:
+ pr_debug("%s:EOS ACK received: rxed opcode[0x%x]\n",
+ __func__, data->opcode);
+ break;
+ case ASM_STREAM_CMDRSP_GET_ENCDEC_PARAM:
+ break;
+ case ASM_SESSION_EVENT_TX_OVERFLOW:
+ pr_err("ASM_SESSION_EVENT_TX_OVERFLOW\n");
+ break;
+ case ASM_SESSION_CMDRSP_GET_SESSION_TIME:
+ pr_debug("%s: ASM_SESSION_CMDRSP_GET_SESSION_TIME, payload[0] = %d, payload[1] = %d, payload[2] = %d\n",
+ __func__,
+ payload[0], payload[1], payload[2]);
+ /* 64-bit session time split across payload[1]/[2] */
+ ac->time_stamp = (uint64_t)(((uint64_t)payload[1] << 32) |
+ payload[2]);
+ if (atomic_read(&ac->time_flag)) {
+ atomic_set(&ac->time_flag, 0);
+ wake_up(&ac->time_wait);
+ }
+ break;
+ case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY:
+ case ASM_DATA_EVENT_ENC_SR_CM_NOTIFY:
+ pr_debug("%s: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, payload[0] = %d, payload[1] = %d, payload[2] = %d, payload[3] = %d\n",
+ __func__,
+ payload[0], payload[1], payload[2],
+ payload[3]);
+ break;
+ }
+ /* forward every non-basic event to the client callback */
+ if (ac->cb)
+ ac->cb(data->opcode, data->token,
+ data->payload, ac->priv);
+
+ return 0;
+}
+
+/*
+ * Return the next CPU-owned buffer for @dir (filling *size and
+ * *index), mark it busy and advance the ring index, or NULL when no
+ * buffer is available.  Only acts in SYNC_IO_MODE; takes the port
+ * lock for the whole operation.
+ */
+void *q6asm_is_cpu_buf_avail(int dir, struct audio_client *ac, uint32_t *size,
+ uint32_t *index)
+{
+ void *data;
+ unsigned char idx;
+ struct audio_port_data *port;
+
+ if (!ac || ((dir != IN) && (dir != OUT)))
+ return NULL;
+
+ if (ac->io_mode & SYNC_IO_MODE) {
+ port = &ac->port[dir];
+
+ mutex_lock(&port->lock);
+ idx = port->cpu_buf;
+ if (port->buf == NULL) {
+ pr_debug("%s:Buffer pointer null\n", __func__);
+ mutex_unlock(&port->lock);
+ return NULL;
+ }
+ /* dir 0: used = 0 means buf in use
+ dir 1: used = 1 means buf in use */
+ if (port->buf[idx].used == dir) {
+ /* To make it more robust, we could loop and get the
+ next avail buf, its risky though */
+ pr_debug("%s:Next buf idx[0x%x] not available,dir[%d]\n",
+ __func__, idx, dir);
+ mutex_unlock(&port->lock);
+ return NULL;
+ }
+ *size = port->buf[idx].actual_size;
+ *index = port->cpu_buf;
+ data = port->buf[idx].data;
+ pr_debug("%s:session[%d]index[%d] data[%p]size[%d]\n",
+ __func__,
+ ac->session,
+ port->cpu_buf,
+ data, *size);
+ /* By default increase the cpu_buf cnt
+ user accesses this function,increase cpu
+ buf(to avoid another api)*/
+ port->buf[idx].used = dir;
+ /* ring advance; assumes max_buf_cnt is a power of two */
+ port->cpu_buf = ((port->cpu_buf + 1) & (port->max_buf_cnt - 1));
+ mutex_unlock(&port->lock);
+ return data;
+ }
+ return NULL;
+}
+
+/*
+ * Lockless variant of q6asm_is_cpu_buf_avail(): return the next
+ * CPU-owned buffer for @dir (filling *size and *index), mark it busy
+ * and advance the ring index, or NULL when no buffer is available.
+ * The caller is responsible for serialization.
+ */
+void *q6asm_is_cpu_buf_avail_nolock(int dir, struct audio_client *ac,
+ uint32_t *size, uint32_t *index)
+{
+ struct audio_port_data *port;
+ unsigned char cur;
+ void *vaddr;
+
+ if (!ac || ((dir != IN) && (dir != OUT)))
+ return NULL;
+
+ port = &ac->port[dir];
+ cur = port->cpu_buf;
+
+ if (port->buf == NULL) {
+ pr_debug("%s:Buffer pointer null\n", __func__);
+ return NULL;
+ }
+
+ /* dir 0: used == 0 means in use; dir 1: used == 1 means in use */
+ if (port->buf[cur].used == dir) {
+ pr_debug("%s:Next buf idx[0x%x] not available, dir[%d]\n",
+ __func__, cur, dir);
+ return NULL;
+ }
+
+ *size = port->buf[cur].actual_size;
+ *index = port->cpu_buf;
+ vaddr = port->buf[cur].data;
+ pr_debug("%s:session[%d]index[%d] data[%p]size[%d]\n",
+ __func__, ac->session, port->cpu_buf,
+ vaddr, *size);
+
+ /* Mark the slot busy and step to the next ring entry
+ * (max_buf_cnt is assumed to be a power of two). */
+ port->buf[cur].used = dir;
+ port->cpu_buf = ((port->cpu_buf + 1) & (port->max_buf_cnt - 1));
+ return vaddr;
+}
+
+/*
+ * Check whether the next DSP buffer for @dir (OUT only) is free.
+ * Returns 0 when a buffer is available, -1 otherwise (also -1 for
+ * bad arguments or non-sync IO mode).
+ */
+int q6asm_is_dsp_buf_avail(int dir, struct audio_client *ac)
+{
+ int ret = -1;
+ struct audio_port_data *port;
+ uint32_t idx;
+
+ if (!ac || (dir != OUT))
+ return ret;
+
+ if (ac->io_mode & SYNC_IO_MODE) {
+ port = &ac->port[dir];
+
+ mutex_lock(&port->lock);
+ idx = port->dsp_buf;
+
+ /* used == dir^1 means the slot still belongs to the CPU */
+ if (port->buf[idx].used == (dir ^ 1)) {
+ /* To make it more robust, we could loop and get the
+ next avail buf, its risky though */
+ pr_err("Next buf idx[0x%x] not available, dir[%d]\n",
+ idx, dir);
+ mutex_unlock(&port->lock);
+ return ret;
+ }
+ pr_debug("%s: session[%d]dsp_buf=%d cpu_buf=%d\n", __func__,
+ ac->session, port->dsp_buf, port->cpu_buf);
+ /* available only while the DSP index trails the CPU index */
+ ret = ((port->dsp_buf != port->cpu_buf) ? 0 : -1);
+ mutex_unlock(&port->lock);
+ }
+ return ret;
+}
+
+/*
+ * Fill in a session-scoped APR header for an ASM command.  When
+ * @cmd_flg is set, the session id is used as the token and
+ * cmd_state is armed so the sender can wait for the DSP ack.
+ */
+static void q6asm_add_hdr(struct audio_client *ac, struct apr_hdr *hdr,
+ uint32_t pkt_size, uint32_t cmd_flg)
+{
+ /* argument order fixed to match the format string */
+ pr_debug("%s:session=%d pkt size=%d cmd_flg=%d\n", __func__,
+ ac->session, pkt_size, cmd_flg);
+ mutex_lock(&ac->cmd_lock);
+ hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
+ APR_HDR_LEN(sizeof(struct apr_hdr)),\
+ APR_PKT_VER);
+ hdr->src_svc = ((struct apr_svc *)ac->apr)->id;
+ hdr->src_domain = APR_DOMAIN_APPS;
+ hdr->dest_svc = APR_SVC_ASM;
+ hdr->dest_domain = APR_DOMAIN_ADSP;
+ /* ports encode the session in the high byte, stream id 1 low */
+ hdr->src_port = ((ac->session << 8) & 0xFF00) | 0x01;
+ hdr->dest_port = ((ac->session << 8) & 0xFF00) | 0x01;
+ if (cmd_flg) {
+ hdr->token = ac->session;
+ atomic_set(&ac->cmd_state, 1);
+ }
+ hdr->pkt_size = pkt_size;
+ mutex_unlock(&ac->cmd_lock);
+ return;
+}
+
+/* Fill in an APR header for the shared mmap port (no session). */
+static void q6asm_add_mmaphdr(struct apr_hdr *hdr, uint32_t pkt_size,
+ uint32_t cmd_flg)
+{
+ pr_debug("%s:pkt size=%d cmd_flg=%d\n", __func__, pkt_size, cmd_flg);
+ hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
+ APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER);
+ hdr->src_port = 0;
+ hdr->dest_port = 0;
+ hdr->pkt_size = pkt_size;
+ if (!cmd_flg)
+ return;
+ /* arm the shared command state so the sender can wait for the ack */
+ hdr->token = 0;
+ atomic_set(&this_mmap.cmd_state, 1);
+}
+
+/*
+ * Open an ASM read (capture) stream on the DSP for @format and wait
+ * for the ack.  Returns 0 on success, -EINVAL on failure/timeout.
+ */
+int q6asm_open_read(struct audio_client *ac,
+ uint32_t format)
+{
+ int rc = 0x00;
+ struct asm_stream_cmd_open_read open;
+#ifdef CONFIG_DEBUG_FS
+ in_cont_index = 0;
+#endif
+ if ((ac == NULL) || (ac->apr == NULL)) {
+ pr_err("%s: APR handle NULL\n", __func__);
+ return -EINVAL;
+ }
+ pr_debug("%s:session[%d]", __func__, ac->session);
+
+ q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+ open.hdr.opcode = ASM_STREAM_CMD_OPEN_READ;
+ /* Stream prio : High, provide meta info with encoded frames */
+ open.src_endpoint = ASM_END_POINT_DEVICE_MATRIX;
+
+ open.pre_proc_top = get_asm_topology();
+ if (open.pre_proc_top == 0)
+ open.pre_proc_top = DEFAULT_POPP_TOPOLOGY;
+
+ switch (format) {
+ case FORMAT_LINEAR_PCM:
+ open.uMode = STREAM_PRIORITY_HIGH;
+ open.format = LINEAR_PCM;
+ break;
+ case FORMAT_MULTI_CHANNEL_LINEAR_PCM:
+ open.uMode = STREAM_PRIORITY_HIGH;
+ open.format = MULTI_CHANNEL_PCM;
+ break;
+ case FORMAT_MPEG4_AAC:
+ open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH;
+ open.format = MPEG4_AAC;
+ break;
+ case FORMAT_V13K:
+ open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH;
+ open.format = V13K_FS;
+ break;
+ case FORMAT_EVRC:
+ open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH;
+ open.format = EVRC_FS;
+ break;
+ case FORMAT_AMRNB:
+ open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH;
+ open.format = AMRNB_FS;
+ break;
+ case FORMAT_AMRWB:
+ open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH;
+ open.format = AMRWB_FS;
+ break;
+ default:
+ pr_err("Invalid format[%d]\n", format);
+ goto fail_cmd;
+ }
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
+ if (rc < 0) {
+ pr_err("open failed op[0x%x]rc[%d]\n", \
+ open.hdr.opcode, rc);
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ /* message fixed: this is the OPEN_READ path */
+ pr_err("%s: timeout. waited for OPEN_READ rc[%d]\n", __func__,
+ rc);
+ goto fail_cmd;
+ }
+
+ ac->io_mode |= TUN_READ_IO_MODE;
+
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
+/*
+ * Open a v2.1 ASM read (capture) stream for @format and wait for
+ * the ack.  Returns 0 on success, -EINVAL on failure/timeout.
+ */
+int q6asm_open_read_v2_1(struct audio_client *ac,
+ uint32_t format)
+{
+ int rc = 0x00;
+ struct asm_stream_cmd_open_read_v2_1 open;
+#ifdef CONFIG_DEBUG_FS
+ in_cont_index = 0;
+#endif
+ if ((ac == NULL) || (ac->apr == NULL)) {
+ pr_err("%s: APR handle NULL\n", __func__);
+ return -EINVAL;
+ }
+ pr_debug("%s:session[%d]", __func__, ac->session);
+
+ q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+ open.hdr.opcode = ASM_STREAM_CMD_OPEN_READ_V2_1;
+ open.src_endpoint = ASM_END_POINT_DEVICE_MATRIX;
+ open.pre_proc_top = get_asm_topology();
+ if (open.pre_proc_top == 0)
+ open.pre_proc_top = DEFAULT_POPP_TOPOLOGY;
+
+ switch (format) {
+ case FORMAT_LINEAR_PCM:
+ open.uMode = STREAM_PRIORITY_HIGH;
+ open.format = LINEAR_PCM;
+ break;
+ case FORMAT_MULTI_CHANNEL_LINEAR_PCM:
+ open.uMode = STREAM_PRIORITY_HIGH;
+ open.format = MULTI_CHANNEL_PCM;
+ break;
+ case FORMAT_MPEG4_AAC:
+ open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH;
+ open.format = MPEG4_AAC;
+ break;
+ case FORMAT_V13K:
+ open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH;
+ open.format = V13K_FS;
+ break;
+ case FORMAT_EVRC:
+ open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH;
+ open.format = EVRC_FS;
+ break;
+ case FORMAT_AMRNB:
+ open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH;
+ open.format = AMRNB_FS;
+ break;
+ case FORMAT_AMRWB:
+ open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_HIGH;
+ open.format = AMRWB_FS;
+ break;
+ default:
+ pr_err("Invalid format[%d]\n", format);
+ goto fail_cmd;
+ }
+ /* NOTE(review): this unconditionally discards the uMode flags
+ * chosen in the switch above (BUFFER_META_ENABLE etc.); looks
+ * intentional for the perf-mode path but should be confirmed. */
+ open.uMode = ASM_OPEN_READ_PERF_MODE_BIT;
+ open.bits_per_sample = PCM_BITS_PER_SAMPLE;
+ open.reserved = 0;
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
+ if (rc < 0) {
+ pr_err("open failed op[0x%x]rc[%d]\n", \
+ open.hdr.opcode, rc);
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ /* message fixed: this is the OPEN_READ_V2_1 path */
+ pr_err("%s: timeout. waited for OPEN_READ rc[%d]\n", __func__,
+ rc);
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
+
+/*
+ * Open a compressed-read stream with @frames_per_buffer frames per
+ * buffer and the given meta-data mode, then wait for the DSP ack.
+ * Returns 0 on success, -EINVAL on failure/timeout.
+ */
+int q6asm_open_read_compressed(struct audio_client *ac,
+ uint32_t frames_per_buffer, uint32_t meta_data_mode)
+{
+ struct asm_stream_cmd_open_read_compressed open;
+ int rc;
+#ifdef CONFIG_DEBUG_FS
+ in_cont_index = 0;
+#endif
+ if ((ac == NULL) || (ac->apr == NULL)) {
+ pr_err("%s: APR handle NULL\n", __func__);
+ return -EINVAL;
+ }
+ pr_debug("%s:session[%d]", __func__, ac->session);
+
+ q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+ open.hdr.opcode = ASM_STREAM_CMD_OPEN_READ_COMPRESSED;
+ /* hardcoded as following*/
+ open.frame_per_buf = frames_per_buffer;
+ open.uMode = meta_data_mode;
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
+ if (rc < 0) {
+ pr_err("open failed op[0x%x]rc[%d]\n", open.hdr.opcode, rc);
+ return -EINVAL;
+ }
+
+ /* cmd_state is cleared by the APR callback on the DSP ack */
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s: timeout. waited for OPEN_READ_COMPRESSED rc[%d]\n",
+ __func__, rc);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Open a compressed-write (tunnel playback) stream for @format and
+ * wait for the DSP ack.  Returns 0 on success, -EINVAL on
+ * unsupported format, send failure, or timeout.
+ */
+int q6asm_open_write_compressed(struct audio_client *ac, uint32_t format)
+{
+ int rc = 0x00;
+ struct asm_stream_cmd_open_write_compressed open;
+
+ if ((ac == NULL) || (ac->apr == NULL)) {
+ pr_err("%s: APR handle NULL\n", __func__);
+ return -EINVAL;
+ }
+ pr_debug("%s: session[%d] wr_format[0x%x]", __func__, ac->session,
+ format);
+
+ q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+
+ open.hdr.opcode = ASM_STREAM_CMD_OPEN_WRITE_COMPRESSED;
+
+ /* map the driver format id to the DSP decoder id */
+ switch (format) {
+ case FORMAT_AC3:
+ open.format = AC3_DECODER;
+ break;
+ case FORMAT_EAC3:
+ open.format = EAC3_DECODER;
+ break;
+ case FORMAT_MP3:
+ open.format = MP3;
+ break;
+ case FORMAT_DTS:
+ open.format = DTS;
+ break;
+ case FORMAT_DTS_LBR:
+ open.format = DTS_LBR;
+ break;
+ case FORMAT_AAC:
+ open.format = MPEG4_AAC;
+ break;
+ case FORMAT_ATRAC:
+ open.format = ATRAC;
+ break;
+ case FORMAT_WMA_V10PRO:
+ open.format = WMA_V10PRO;
+ break;
+ case FORMAT_MAT:
+ open.format = MAT;
+ break;
+ default:
+ pr_err("%s: Invalid format[%d]\n", __func__, format);
+ goto fail_cmd;
+ }
+ /*Below flag indicates the DSP that Compressed audio input
+ stream is not IEC 61937 or IEC 60958 packetizied*/
+ open.flags = 0x00000000;
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
+ if (rc < 0) {
+ pr_err("%s: open failed op[0x%x]rc[%d]\n", \
+ __func__, open.hdr.opcode, rc);
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s: timeout. waited for OPEN_WRITE rc[%d]\n", __func__,
+ rc);
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
+/*
+ * Open an ASM write (playback) stream for @format, using the
+ * low-latency V2.1 opcode when ac->perf_mode is set, and wait for
+ * the DSP ack.  Returns 0 on success, -EINVAL on unsupported
+ * format, send failure, timeout, or a negative DSP response.
+ */
+int q6asm_open_write(struct audio_client *ac, uint32_t format)
+{
+ int rc = 0x00;
+ struct asm_stream_cmd_open_write open;
+
+ if ((ac == NULL) || (ac->apr == NULL)) {
+ pr_err("%s: APR handle NULL\n", __func__);
+ return -EINVAL;
+ }
+ pr_debug("%s: session[%d] wr_format[0x%x]", __func__, ac->session,
+ format);
+
+ q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+
+ if (ac->perf_mode) {
+ pr_debug("%s In Performance/lowlatency mode", __func__);
+ open.hdr.opcode = ASM_STREAM_CMD_OPEN_WRITE_V2_1;
+ open.uMode = ASM_OPEN_WRITE_PERF_MODE_BIT;
+ /* source endpoint : matrix */
+ open.sink_endpoint = ASM_END_POINT_DEVICE_MATRIX;
+ open.stream_handle = PCM_BITS_PER_SAMPLE;
+ } else {
+ open.hdr.opcode = ASM_STREAM_CMD_OPEN_WRITE;
+ open.uMode = STREAM_PRIORITY_HIGH;
+ /* source endpoint : matrix */
+ open.sink_endpoint = ASM_END_POINT_DEVICE_MATRIX;
+ open.stream_handle = 0x00;
+ }
+ open.post_proc_top = get_asm_topology();
+ if (open.post_proc_top == 0)
+ open.post_proc_top = DEFAULT_POPP_TOPOLOGY;
+
+ /* map the driver format id to the DSP decoder id */
+ switch (format) {
+ case FORMAT_LINEAR_PCM:
+ open.format = LINEAR_PCM;
+ break;
+ case FORMAT_MULTI_CHANNEL_LINEAR_PCM:
+ open.format = MULTI_CHANNEL_PCM;
+ break;
+ case FORMAT_MPEG4_AAC:
+ open.format = MPEG4_AAC;
+ break;
+ case FORMAT_MPEG4_MULTI_AAC:
+ open.format = MPEG4_MULTI_AAC;
+ break;
+ case FORMAT_WMA_V9:
+ open.format = WMA_V9;
+ break;
+ case FORMAT_WMA_V10PRO:
+ open.format = WMA_V10PRO;
+ break;
+ case FORMAT_MP3:
+ open.format = MP3;
+ break;
+ case FORMAT_DTS:
+ open.format = DTS;
+ break;
+ case FORMAT_DTS_LBR:
+ open.format = DTS_LBR;
+ break;
+ case FORMAT_AMRWB:
+ open.format = AMRWB_FS;
+ pr_debug("q6asm_open_write FORMAT_AMRWB");
+ break;
+ case FORMAT_AMR_WB_PLUS:
+ open.format = AMR_WB_PLUS;
+ pr_debug("q6asm_open_write FORMAT_AMR_WB_PLUS");
+ break;
+ default:
+ pr_err("%s: Invalid format[%d]\n", __func__, format);
+ goto fail_cmd;
+ }
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
+ if (rc < 0) {
+ pr_err("%s: open failed op[0x%x]rc[%d]\n", \
+ __func__, open.hdr.opcode, rc);
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s: timeout. waited for OPEN_WRITE rc[%d]\n", __func__,
+ rc);
+ goto fail_cmd;
+ }
+ /* cmd_response is set by the callback on ADSP_EUNSUPPORTED */
+ if (atomic_read(&ac->cmd_response)) {
+ pr_err("%s: format = %x not supported\n", __func__, format);
+ goto fail_cmd;
+ }
+
+ ac->io_mode |= TUN_WRITE_IO_MODE;
+
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
+/*
+ * Open a bidirectional (non-tunnel) ASM stream with @wr_format for
+ * the write side and @rd_format for the read side, then wait for
+ * the DSP ack.  Returns 0 on success, -EINVAL on unsupported
+ * format, send failure, or timeout.
+ */
+int q6asm_open_read_write(struct audio_client *ac,
+ uint32_t rd_format,
+ uint32_t wr_format)
+{
+ int rc = 0x00;
+ struct asm_stream_cmd_open_read_write open;
+
+ if ((ac == NULL) || (ac->apr == NULL)) {
+ pr_err("APR handle NULL\n");
+ return -EINVAL;
+ }
+ pr_debug("%s: session[%d]", __func__, ac->session);
+ pr_debug("wr_format[0x%x]rd_format[0x%x]",
+ wr_format, rd_format);
+
+ q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+ open.hdr.opcode = ASM_STREAM_CMD_OPEN_READWRITE;
+
+ open.uMode = BUFFER_META_ENABLE | STREAM_PRIORITY_NORMAL;
+ /* source endpoint : matrix */
+ open.post_proc_top = get_asm_topology();
+ if (open.post_proc_top == 0)
+ open.post_proc_top = DEFAULT_POPP_TOPOLOGY;
+
+ /* map the driver write-format id to the DSP decoder id */
+ switch (wr_format) {
+ case FORMAT_LINEAR_PCM:
+ open.write_format = LINEAR_PCM;
+ break;
+ case FORMAT_MPEG4_AAC:
+ open.write_format = MPEG4_AAC;
+ break;
+ case FORMAT_MPEG4_MULTI_AAC:
+ open.write_format = MPEG4_MULTI_AAC;
+ break;
+ case FORMAT_WMA_V9:
+ open.write_format = WMA_V9;
+ break;
+ case FORMAT_WMA_V10PRO:
+ open.write_format = WMA_V10PRO;
+ break;
+ case FORMAT_AMRNB:
+ open.write_format = AMRNB_FS;
+ break;
+ case FORMAT_AMRWB:
+ open.write_format = AMRWB_FS;
+ break;
+ case FORMAT_AMR_WB_PLUS:
+ open.write_format = AMR_WB_PLUS;
+ break;
+ case FORMAT_V13K:
+ open.write_format = V13K_FS;
+ break;
+ case FORMAT_EVRC:
+ open.write_format = EVRC_FS;
+ break;
+ case FORMAT_EVRCB:
+ open.write_format = EVRCB_FS;
+ break;
+ case FORMAT_EVRCWB:
+ open.write_format = EVRCWB_FS;
+ break;
+ case FORMAT_MP3:
+ open.write_format = MP3;
+ break;
+ default:
+ pr_err("Invalid format[%d]\n", wr_format);
+ goto fail_cmd;
+ }
+
+ /* map the driver read-format id to the DSP encoder id */
+ switch (rd_format) {
+ case FORMAT_LINEAR_PCM:
+ open.read_format = LINEAR_PCM;
+ break;
+ case FORMAT_MPEG4_AAC:
+ open.read_format = MPEG4_AAC;
+ break;
+ case FORMAT_V13K:
+ open.read_format = V13K_FS;
+ break;
+ case FORMAT_EVRC:
+ open.read_format = EVRC_FS;
+ break;
+ case FORMAT_AMRNB:
+ open.read_format = AMRNB_FS;
+ break;
+ case FORMAT_AMRWB:
+ open.read_format = AMRWB_FS;
+ break;
+ default:
+ pr_err("Invalid format[%d]\n", rd_format);
+ goto fail_cmd;
+ }
+ pr_debug("%s:rdformat[0x%x]wrformat[0x%x]\n", __func__,
+ open.read_format, open.write_format);
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
+ if (rc < 0) {
+ pr_err("open failed op[0x%x]rc[%d]\n", \
+ open.hdr.opcode, rc);
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("timeout. waited for OPEN_WRITE rc[%d]\n", rc);
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
+/*
+ * Open an ASM loopback stream and wait for the DSP ack.
+ * Returns 0 on success, -EINVAL on failure/timeout.
+ * NOTE(review): the exported name is misspelled ("loopack") but
+ * cannot be changed without touching all callers.
+ */
+int q6asm_open_loopack(struct audio_client *ac)
+{
+ int rc = 0x00;
+ struct asm_stream_cmd_open_loopback open;
+
+ if ((ac == NULL) || (ac->apr == NULL)) {
+ pr_err("APR handle NULL\n");
+ return -EINVAL;
+ }
+ pr_debug("%s: session[%d]", __func__, ac->session);
+
+ q6asm_add_hdr(ac, &open.hdr, sizeof(open), TRUE);
+ open.hdr.opcode = ASM_STREAM_CMD_OPEN_LOOPBACK;
+
+ open.mode_flags = 0;
+ open.src_endpointype = 0;
+ open.sink_endpointype = 0;
+ /* source endpoint : matrix */
+ open.postprocopo_id = get_asm_topology();
+ if (open.postprocopo_id == 0)
+ open.postprocopo_id = DEFAULT_POPP_TOPOLOGY;
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &open);
+ if (rc < 0) {
+ pr_err("open failed op[0x%x]rc[%d]\n", \
+ open.hdr.opcode, rc);
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("timeout. waited for OPEN_WRITE rc[%d]\n", rc);
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
+/*
+ * Send ASM_SESSION_CMD_RUN with the given flags and 64-bit start
+ * timestamp (msw/lsw) and wait for the DSP ack.  Returns 0 on
+ * success, -EINVAL on failure/timeout.
+ */
+int q6asm_run(struct audio_client *ac, uint32_t flags,
+ uint32_t msw_ts, uint32_t lsw_ts)
+{
+ struct asm_stream_cmd_run run;
+ int rc;
+ if (!ac || ac->apr == NULL) {
+ pr_err("APR handle NULL\n");
+ return -EINVAL;
+ }
+ pr_debug("%s session[%d]", __func__, ac->session);
+ q6asm_add_hdr(ac, &run.hdr, sizeof(run), TRUE);
+
+ run.hdr.opcode = ASM_SESSION_CMD_RUN;
+ run.flags = flags;
+ run.msw_ts = msw_ts;
+ run.lsw_ts = lsw_ts;
+#ifdef CONFIG_DEBUG_FS
+ /* latency instrumentation: stamp the moment RUN is issued */
+ if (out_enable_flag) {
+ do_gettimeofday(&out_cold_tv);
+ pr_debug("COLD: apr_send_pkt at %ld sec %ld microsec\n",\
+ out_cold_tv.tv_sec, out_cold_tv.tv_usec);
+ }
+#endif
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &run);
+ if (rc < 0) {
+ pr_err("Commmand run failed[%d]", rc);
+ goto fail_cmd;
+ }
+
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("timeout. waited for run success rc[%d]", rc);
+ goto fail_cmd;
+ }
+
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
+/*
+ * Issue ASM_SESSION_CMD_RUN without blocking for the DSP ack; the
+ * pending ack is accounted in nowait_cmd_cnt and consumed by
+ * q6asm_callback().  Returns 0 on success, -EINVAL on failure.
+ */
+int q6asm_run_nowait(struct audio_client *ac, uint32_t flags,
+ uint32_t msw_ts, uint32_t lsw_ts)
+{
+ struct asm_stream_cmd_run run;
+ int rc;
+
+ if (!ac || ac->apr == NULL) {
+ pr_err("%s:APR handle NULL\n", __func__);
+ return -EINVAL;
+ }
+ pr_debug("session[%d]", ac->session);
+
+ q6asm_add_hdr_async(ac, &run.hdr, sizeof(run), TRUE);
+ run.hdr.opcode = ASM_SESSION_CMD_RUN;
+ run.flags = flags;
+ run.msw_ts = msw_ts;
+ run.lsw_ts = lsw_ts;
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &run);
+ if (rc < 0) {
+ pr_err("%s:Commmand run failed[%d]", __func__, rc);
+ return -EINVAL;
+ }
+ atomic_inc(&ac->nowait_cmd_cnt);
+ return 0;
+}
+
+
+/*
+ * Configure the AAC encoder block (sample rate, channels, bitrate,
+ * encoder mode and bitstream format) and wait for the DSP ack.
+ * Returns 0 on success, -EINVAL on failure/timeout.
+ */
+int q6asm_enc_cfg_blk_aac(struct audio_client *ac,
+ uint32_t frames_per_buf,
+ uint32_t sample_rate, uint32_t channels,
+ uint32_t bit_rate, uint32_t mode, uint32_t format)
+{
+ struct asm_stream_cmd_encdec_cfg_blk enc_cfg;
+ int rc = 0;
+
+ pr_debug("%s:session[%d]frames[%d]SR[%d]ch[%d]bitrate[%d]mode[%d] format[%d]",
+ __func__, ac->session, frames_per_buf,
+ sample_rate, channels, bit_rate, mode, format);
+
+ q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+
+ enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+ enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID;
+ enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk);
+ enc_cfg.enc_blk.frames_per_buf = frames_per_buf;
+ enc_cfg.enc_blk.format_id = MPEG4_AAC;
+ enc_cfg.enc_blk.cfg_size = sizeof(struct asm_aac_read_cfg);
+ enc_cfg.enc_blk.cfg.aac.bitrate = bit_rate;
+ enc_cfg.enc_blk.cfg.aac.enc_mode = mode;
+ enc_cfg.enc_blk.cfg.aac.format = format;
+ enc_cfg.enc_blk.cfg.aac.ch_cfg = channels;
+ enc_cfg.enc_blk.cfg.aac.sample_rate = sample_rate;
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+ if (rc < 0) {
+ /* typo fixed: "Comamnd" -> "Command" */
+ pr_err("Command %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("timeout. waited for FORMAT_UPDATE\n");
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
+/*
+ * q6asm_enc_cfg_blk_pcm() - configure the capture path for 16-bit
+ * signed interleaved linear PCM at the given @rate/@channels, one
+ * frame per buffer.  Sends ASM_STREAM_CMD_SET_ENCDEC_PARAM and waits
+ * up to 5s for the DSP ack.
+ * Returns 0 on success, -EINVAL on send failure or timeout.
+ */
+int q6asm_enc_cfg_blk_pcm(struct audio_client *ac,
+ uint32_t rate, uint32_t channels)
+{
+ struct asm_stream_cmd_encdec_cfg_blk enc_cfg;
+
+ int rc = 0;
+
+ pr_debug("%s: Session %d, rate = %d, channels = %d\n", __func__,
+ ac->session, rate, channels);
+
+ q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+
+ enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+ enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID;
+ enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk);
+ enc_cfg.enc_blk.frames_per_buf = 1;
+ enc_cfg.enc_blk.format_id = LINEAR_PCM;
+ enc_cfg.enc_blk.cfg_size = sizeof(struct asm_pcm_cfg);
+ enc_cfg.enc_blk.cfg.pcm.ch_cfg = channels;
+ enc_cfg.enc_blk.cfg.pcm.bits_per_sample = 16;
+ enc_cfg.enc_blk.cfg.pcm.sample_rate = rate;
+ enc_cfg.enc_blk.cfg.pcm.is_signed = 1;
+ enc_cfg.enc_blk.cfg.pcm.interleaved = 1;
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+ if (rc < 0) {
+ pr_err("Comamnd open failed\n");
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("timeout opcode[0x%x] ", enc_cfg.hdr.opcode);
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
+/*
+ * q6asm_enc_cfg_blk_pcm_native() - like q6asm_enc_cfg_blk_pcm(), but
+ * programs rate and channel count as 0 so the DSP uses its native
+ * values; the @rate/@channels arguments are only logged.
+ * Returns 0 on success, -EINVAL on send failure or timeout.
+ */
+int q6asm_enc_cfg_blk_pcm_native(struct audio_client *ac,
+ uint32_t rate, uint32_t channels)
+{
+ struct asm_stream_cmd_encdec_cfg_blk enc_cfg;
+
+ int rc = 0;
+
+ pr_debug("%s: Session %d, rate = %d, channels = %d, setting the rate and channels to 0 for native\n",
+ __func__, ac->session, rate, channels);
+
+ q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+
+ enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+ enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID;
+ enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk);
+ enc_cfg.enc_blk.frames_per_buf = 1;
+ enc_cfg.enc_blk.format_id = LINEAR_PCM;
+ enc_cfg.enc_blk.cfg_size = sizeof(struct asm_pcm_cfg);
+ enc_cfg.enc_blk.cfg.pcm.ch_cfg = 0;/*channels;*/
+ enc_cfg.enc_blk.cfg.pcm.bits_per_sample = 16;
+ enc_cfg.enc_blk.cfg.pcm.sample_rate = 0;/*rate;*/
+ enc_cfg.enc_blk.cfg.pcm.is_signed = 1;
+ enc_cfg.enc_blk.cfg.pcm.interleaved = 1;
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+ if (rc < 0) {
+ pr_err("Comamnd open failed\n");
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("timeout opcode[0x%x] ", enc_cfg.hdr.opcode);
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
+/*
+ * q6asm_enc_cfg_blk_multi_ch_pcm() - configure multi-channel PCM
+ * capture (16-bit, signed, interleaved) with an explicit speaker map
+ * for 1/2/4/6/8 channels.  Other channel counts leave the map
+ * untouched (stack garbage) — the function still sends the command;
+ * NOTE(review): consider rejecting unsupported counts up front.
+ * Returns 0 on success, -EINVAL on send failure or timeout.
+ */
+int q6asm_enc_cfg_blk_multi_ch_pcm(struct audio_client *ac,
+ uint32_t rate, uint32_t channels)
+{
+ struct asm_stream_cmd_encdec_cfg_blk enc_cfg;
+
+ int rc = 0;
+
+ pr_debug("%s: Session %d, rate = %d, channels = %d\n", __func__,
+ ac->session, rate, channels);
+
+ q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+
+ enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+ enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID;
+ enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk);
+ enc_cfg.enc_blk.frames_per_buf = 1;
+ enc_cfg.enc_blk.format_id = MULTI_CHANNEL_PCM;
+ enc_cfg.enc_blk.cfg_size =
+ sizeof(struct asm_multi_channel_pcm_fmt_blk);
+ enc_cfg.enc_blk.cfg.mpcm.num_channels = channels;
+ enc_cfg.enc_blk.cfg.mpcm.bits_per_sample = 16;
+ enc_cfg.enc_blk.cfg.mpcm.sample_rate = rate;
+ enc_cfg.enc_blk.cfg.mpcm.is_signed = 1;
+ enc_cfg.enc_blk.cfg.mpcm.is_interleaved = 1;
+ if (channels == 1) {
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[0] = PCM_CHANNEL_FL;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[1] = 0;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[2] = 0;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[3] = 0;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[4] = 0;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[5] = 0;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[6] = 0;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[7] = 0;
+ } else if (channels == 2) {
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[0] = PCM_CHANNEL_FL;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[1] = PCM_CHANNEL_FR;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[2] = 0;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[3] = 0;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[4] = 0;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[5] = 0;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[6] = 0;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[7] = 0;
+ } else if (channels == 4) {
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[0] = PCM_CHANNEL_FL;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[1] = PCM_CHANNEL_FR;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[2] = PCM_CHANNEL_RB;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[3] = PCM_CHANNEL_LB;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[4] = 0;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[5] = 0;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[6] = 0;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[7] = 0;
+ } else if (channels == 6) {
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[0] = PCM_CHANNEL_FL;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[1] = PCM_CHANNEL_FR;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[2] = PCM_CHANNEL_LFE;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[3] = PCM_CHANNEL_FC;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[4] = PCM_CHANNEL_LB;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[5] = PCM_CHANNEL_RB;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[6] = 0;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[7] = 0;
+ } else if (channels == 8) {
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[0] = PCM_CHANNEL_FL;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[1] = PCM_CHANNEL_FR;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[2] = PCM_CHANNEL_LFE;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[3] = PCM_CHANNEL_FC;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[4] = PCM_CHANNEL_LB;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[5] = PCM_CHANNEL_RB;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[6] = PCM_CHANNEL_FLC;
+ enc_cfg.enc_blk.cfg.mpcm.channel_mapping[7] = PCM_CHANNEL_FRC;
+ }
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+ if (rc < 0) {
+ pr_err("Comamnd open failed\n");
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("timeout opcode[0x%x] ", enc_cfg.hdr.opcode);
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
+/*
+ * q6asm_enable_sbrps() - enable/disable AAC SBR+PS decoding via the
+ * ASM_ENABLE_SBR_PS encdec parameter; waits up to 5s for the DSP ack.
+ * @sbr_ps_enable: value written into the enable field (semantics of
+ * the value defined by the DSP interface, not visible here).
+ * Returns 0 on success, -EINVAL on send failure or timeout.
+ */
+int q6asm_enable_sbrps(struct audio_client *ac,
+ uint32_t sbr_ps_enable)
+{
+ struct asm_stream_cmd_encdec_sbr sbrps;
+
+ int rc = 0;
+
+ pr_debug("%s: Session %d\n", __func__, ac->session);
+
+ q6asm_add_hdr(ac, &sbrps.hdr, sizeof(sbrps), TRUE);
+
+ sbrps.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+ sbrps.param_id = ASM_ENABLE_SBR_PS;
+ sbrps.param_size = sizeof(struct asm_sbr_ps);
+ sbrps.sbr_ps.enable = sbr_ps_enable;
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &sbrps);
+ if (rc < 0) {
+ pr_err("Command opcode[0x%x]paramid[0x%x] failed\n",
+ ASM_STREAM_CMD_SET_ENCDEC_PARAM,
+ ASM_ENABLE_SBR_PS);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("timeout opcode[0x%x] ", sbrps.hdr.opcode);
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
+/*
+ * q6asm_cfg_dual_mono_aac() - select which single-channel elements
+ * (SCEs) feed the left and right outputs for dual-mono AAC playback
+ * (ASM_CONFIGURE_DUAL_MONO parameter).  Waits up to 5s for the ack.
+ * Returns 0 on success, -EINVAL on send failure or timeout.
+ */
+int q6asm_cfg_dual_mono_aac(struct audio_client *ac,
+ uint16_t sce_left, uint16_t sce_right)
+{
+ struct asm_stream_cmd_encdec_dualmono dual_mono;
+
+ int rc = 0;
+
+ pr_debug("%s: Session %d, sce_left = %d, sce_right = %d\n",
+ __func__, ac->session, sce_left, sce_right);
+
+ q6asm_add_hdr(ac, &dual_mono.hdr, sizeof(dual_mono), TRUE);
+
+ dual_mono.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+ dual_mono.param_id = ASM_CONFIGURE_DUAL_MONO;
+ dual_mono.param_size = sizeof(struct asm_dual_mono);
+ dual_mono.channel_map.sce_left = sce_left;
+ dual_mono.channel_map.sce_right = sce_right;
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &dual_mono);
+ if (rc < 0) {
+ pr_err("%s:Command opcode[0x%x]paramid[0x%x] failed\n",
+ __func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM,
+ ASM_CONFIGURE_DUAL_MONO);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s:timeout opcode[0x%x]\n", __func__,
+ dual_mono.hdr.opcode);
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
+/*
+ * q6asm_cfg_aac_sel_mix_coef() - set the AAC stereo mix coefficient
+ * selection flag (ASM_PARAM_ID_AAC_STEREO_MIX_COEFF_SELECTION_FLAG)
+ * and wait up to 5s for the DSP ack.
+ * Returns 0 on success, -EINVAL on send failure or timeout.
+ */
+int q6asm_cfg_aac_sel_mix_coef(struct audio_client *ac, uint32_t mix_coeff)
+{
+ struct asm_aac_stereo_mix_coeff_selection_param aac_mix_coeff;
+ int rc = 0;
+ q6asm_add_hdr(ac, &aac_mix_coeff.hdr, sizeof(aac_mix_coeff), TRUE);
+ aac_mix_coeff.hdr.opcode =
+ ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+ aac_mix_coeff.param_id =
+ ASM_PARAM_ID_AAC_STEREO_MIX_COEFF_SELECTION_FLAG;
+ aac_mix_coeff.param_size =
+ sizeof(struct asm_aac_stereo_mix_coeff_selection_param);
+ aac_mix_coeff.aac_stereo_mix_coeff_flag = mix_coeff;
+ pr_debug("%s, mix_coeff = %u", __func__, mix_coeff);
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &aac_mix_coeff);
+ if (rc < 0) {
+ pr_err("%s:Command opcode[0x%x]paramid[0x%x] failed\n",
+ __func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM,
+ ASM_PARAM_ID_AAC_STEREO_MIX_COEFF_SELECTION_FLAG);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s:timeout opcode[0x%x]\n", __func__,
+ aac_mix_coeff.hdr.opcode);
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
+/*
+ * q6asm_set_encdec_chan_map() - program the decoder output channel map
+ * (ASM_STREAM_CMD_SET_ENCDEC_PARAM / ASM_ENCDEC_DEC_CHAN_MAP) for
+ * 1/2/4/6/8 channels.  Slots beyond @num_channels keep the
+ * PCM_CHANNEL_NULL value from the memset below.
+ * Returns 0 on success, -EINVAL for an unsupported channel count, the
+ * APR error code on send failure, or -ETIMEDOUT if the DSP does not
+ * ack within 5s.
+ */
+int q6asm_set_encdec_chan_map(struct audio_client *ac,
+ uint32_t num_channels)
+{
+ struct asm_stream_cmd_encdec_channelmap chan_map;
+ u8 *channel_mapping;
+
+ int rc = 0;
+
+ pr_debug("%s: Session %d, num_channels = %d\n",
+ __func__, ac->session, num_channels);
+
+ q6asm_add_hdr(ac, &chan_map.hdr, sizeof(chan_map), TRUE);
+
+ chan_map.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+ chan_map.param_id = ASM_ENCDEC_DEC_CHAN_MAP;
+ chan_map.param_size = sizeof(struct asm_dec_chan_map);
+ chan_map.chan_map.num_channels = num_channels;
+
+ channel_mapping =
+ chan_map.chan_map.channel_mapping;
+
+ memset(channel_mapping, PCM_CHANNEL_NULL, MAX_CHAN_MAP_CHANNELS);
+ if (num_channels == 1) {
+ channel_mapping[0] = PCM_CHANNEL_FL;
+ } else if (num_channels == 2) {
+ channel_mapping[0] = PCM_CHANNEL_FL;
+ channel_mapping[1] = PCM_CHANNEL_FR;
+ } else if (num_channels == 4) {
+ channel_mapping[0] = PCM_CHANNEL_FL;
+ channel_mapping[1] = PCM_CHANNEL_FR;
+ /* Bugfix: the original assigned channel_mapping[1] three
+ * times, leaving slots 2/3 at PCM_CHANNEL_NULL and the map
+ * ending up FL/RB/NULL/NULL. Use distinct slots for LB/RB. */
+ channel_mapping[2] = PCM_CHANNEL_LB;
+ channel_mapping[3] = PCM_CHANNEL_RB;
+ } else if (num_channels == 6) {
+ channel_mapping[0] = PCM_CHANNEL_FC;
+ channel_mapping[1] = PCM_CHANNEL_FL;
+ channel_mapping[2] = PCM_CHANNEL_FR;
+ channel_mapping[3] = PCM_CHANNEL_LB;
+ channel_mapping[4] = PCM_CHANNEL_RB;
+ channel_mapping[5] = PCM_CHANNEL_LFE;
+ } else if (num_channels == 8) {
+ channel_mapping[0] = PCM_CHANNEL_FC;
+ channel_mapping[1] = PCM_CHANNEL_FL;
+ channel_mapping[2] = PCM_CHANNEL_FR;
+ channel_mapping[3] = PCM_CHANNEL_LB;
+ channel_mapping[4] = PCM_CHANNEL_RB;
+ channel_mapping[5] = PCM_CHANNEL_LFE;
+ channel_mapping[6] = PCM_CHANNEL_FLC;
+ channel_mapping[7] = PCM_CHANNEL_FRC;
+ } else {
+ pr_err("%s: ERROR.unsupported num_ch = %u\n", __func__,
+ num_channels);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &chan_map);
+ if (rc < 0) {
+ pr_err("%s:Command opcode[0x%x]paramid[0x%x] failed\n",
+ __func__, ASM_STREAM_CMD_SET_ENCDEC_PARAM,
+ ASM_ENCDEC_DEC_CHAN_MAP);
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s:timeout opcode[0x%x]\n", __func__,
+ chan_map.hdr.opcode);
+ rc = -ETIMEDOUT;
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return rc;
+}
+
+/*
+ * q6asm_enc_cfg_blk_qcelp() - program the QCELP-13K (V13K) encoder
+ * configuration block and wait up to 5s for the DSP ack.
+ * Returns 0 on success, -EINVAL on send failure or timeout.
+ */
+int q6asm_enc_cfg_blk_qcelp(struct audio_client *ac, uint32_t frames_per_buf,
+ uint16_t min_rate, uint16_t max_rate,
+ uint16_t reduced_rate_level, uint16_t rate_modulation_cmd)
+{
+ struct asm_stream_cmd_encdec_cfg_blk enc_cfg;
+ int rc = 0;
+
+ pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] reduced_rate_level[0x%4x]rate_modulation_cmd[0x%4x]",
+ __func__,
+ ac->session, frames_per_buf, min_rate, max_rate,
+ reduced_rate_level, rate_modulation_cmd);
+
+ q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+
+ enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+
+ enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID;
+ enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk);
+
+ enc_cfg.enc_blk.frames_per_buf = frames_per_buf;
+ enc_cfg.enc_blk.format_id = V13K_FS;
+ enc_cfg.enc_blk.cfg_size = sizeof(struct asm_qcelp13_read_cfg);
+ enc_cfg.enc_blk.cfg.qcelp13.min_rate = min_rate;
+ enc_cfg.enc_blk.cfg.qcelp13.max_rate = max_rate;
+ enc_cfg.enc_blk.cfg.qcelp13.reduced_rate_level = reduced_rate_level;
+ enc_cfg.enc_blk.cfg.qcelp13.rate_modulation_cmd = rate_modulation_cmd;
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+ if (rc < 0) {
+ pr_err("Comamnd %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM);
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("timeout. waited for FORMAT_UPDATE\n");
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
+/*
+ * q6asm_enc_cfg_blk_evrc() - program the EVRC encoder configuration
+ * block and wait up to 5s for the DSP ack.
+ * Returns 0 on success, -EINVAL on send failure or timeout.
+ */
+int q6asm_enc_cfg_blk_evrc(struct audio_client *ac, uint32_t frames_per_buf,
+ uint16_t min_rate, uint16_t max_rate,
+ uint16_t rate_modulation_cmd)
+{
+ struct asm_stream_cmd_encdec_cfg_blk enc_cfg;
+ int rc = 0;
+
+ pr_debug("%s:session[%d]frames[%d]min_rate[0x%4x]max_rate[0x%4x] rate_modulation_cmd[0x%4x]",
+ __func__, ac->session,
+ frames_per_buf, min_rate, max_rate, rate_modulation_cmd);
+
+ q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+
+ enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+
+ enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID;
+ enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk);
+
+ enc_cfg.enc_blk.frames_per_buf = frames_per_buf;
+ enc_cfg.enc_blk.format_id = EVRC_FS;
+ enc_cfg.enc_blk.cfg_size = sizeof(struct asm_evrc_read_cfg);
+ enc_cfg.enc_blk.cfg.evrc.min_rate = min_rate;
+ enc_cfg.enc_blk.cfg.evrc.max_rate = max_rate;
+ enc_cfg.enc_blk.cfg.evrc.rate_modulation_cmd = rate_modulation_cmd;
+ enc_cfg.enc_blk.cfg.evrc.reserved = 0;
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+ if (rc < 0) {
+ pr_err("Comamnd %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM);
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("timeout. waited for FORMAT_UPDATE\n");
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
+/*
+ * q6asm_enc_cfg_blk_amrnb() - program the AMR-NB encoder configuration
+ * block (band mode + DTX) and wait up to 5s for the DSP ack.
+ * Returns 0 on success, -EINVAL on send failure or timeout.
+ */
+int q6asm_enc_cfg_blk_amrnb(struct audio_client *ac, uint32_t frames_per_buf,
+ uint16_t band_mode, uint16_t dtx_enable)
+{
+ struct asm_stream_cmd_encdec_cfg_blk enc_cfg;
+ int rc = 0;
+
+ pr_debug("%s:session[%d]frames[%d]band_mode[0x%4x]dtx_enable[0x%4x]",
+ __func__, ac->session, frames_per_buf, band_mode, dtx_enable);
+
+ q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+
+ enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+
+ enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID;
+ enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk);
+
+ enc_cfg.enc_blk.frames_per_buf = frames_per_buf;
+ enc_cfg.enc_blk.format_id = AMRNB_FS;
+ enc_cfg.enc_blk.cfg_size = sizeof(struct asm_amrnb_read_cfg);
+ enc_cfg.enc_blk.cfg.amrnb.mode = band_mode;
+ enc_cfg.enc_blk.cfg.amrnb.dtx_mode = dtx_enable;
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+ if (rc < 0) {
+ pr_err("Comamnd %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM);
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("timeout. waited for FORMAT_UPDATE\n");
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
+/*
+ * q6asm_enc_cfg_blk_amrwb() - program the AMR-WB encoder configuration
+ * block (band mode + DTX) and wait up to 5s for the DSP ack.
+ * Mirrors q6asm_enc_cfg_blk_amrnb() with the AMRWB_FS format id.
+ * Returns 0 on success, -EINVAL on send failure or timeout.
+ */
+int q6asm_enc_cfg_blk_amrwb(struct audio_client *ac, uint32_t frames_per_buf,
+ uint16_t band_mode, uint16_t dtx_enable)
+{
+ struct asm_stream_cmd_encdec_cfg_blk enc_cfg;
+ int rc = 0;
+
+ pr_debug("%s:session[%d]frames[%d]band_mode[0x%4x]dtx_enable[0x%4x]",
+ __func__, ac->session, frames_per_buf, band_mode, dtx_enable);
+
+ q6asm_add_hdr(ac, &enc_cfg.hdr, sizeof(enc_cfg), TRUE);
+
+ enc_cfg.hdr.opcode = ASM_STREAM_CMD_SET_ENCDEC_PARAM;
+
+ enc_cfg.param_id = ASM_ENCDEC_CFG_BLK_ID;
+ enc_cfg.param_size = sizeof(struct asm_encode_cfg_blk);
+
+ enc_cfg.enc_blk.frames_per_buf = frames_per_buf;
+ enc_cfg.enc_blk.format_id = AMRWB_FS;
+ enc_cfg.enc_blk.cfg_size = sizeof(struct asm_amrwb_read_cfg);
+ enc_cfg.enc_blk.cfg.amrwb.mode = band_mode;
+ enc_cfg.enc_blk.cfg.amrwb.dtx_mode = dtx_enable;
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &enc_cfg);
+ if (rc < 0) {
+ pr_err("Comamnd %d failed\n", ASM_STREAM_CMD_SET_ENCDEC_PARAM);
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("timeout. waited for FORMAT_UPDATE\n");
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
+/*
+ * q6asm_media_format_block_pcm() - send an
+ * ASM_DATA_CMD_MEDIA_FORMAT_UPDATE describing 16-bit signed
+ * interleaved linear PCM playback data; waits up to 5s for the ack.
+ * Returns 0 on success, -EINVAL on send failure or timeout.
+ */
+int q6asm_media_format_block_pcm(struct audio_client *ac,
+ uint32_t rate, uint32_t channels)
+{
+ struct asm_stream_media_format_update fmt;
+ int rc = 0;
+
+ pr_debug("%s:session[%d]rate[%d]ch[%d]\n", __func__, ac->session, rate,
+ channels);
+
+ q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+
+ fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FORMAT_UPDATE;
+
+ fmt.format = LINEAR_PCM;
+ fmt.cfg_size = sizeof(struct asm_pcm_cfg);
+ fmt.write_cfg.pcm_cfg.ch_cfg = channels;
+ fmt.write_cfg.pcm_cfg.bits_per_sample = 16;
+ fmt.write_cfg.pcm_cfg.sample_rate = rate;
+ fmt.write_cfg.pcm_cfg.is_signed = 1;
+ fmt.write_cfg.pcm_cfg.interleaved = 1;
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+ if (rc < 0) {
+ pr_err("%s:Comamnd open failed\n", __func__);
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s:timeout. waited for FORMAT_UPDATE\n", __func__);
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
+/*
+ * q6asm_media_format_block_multi_ch_pcm() - send an
+ * ASM_DATA_CMD_MEDIA_FORMAT_UPDATE for multi-channel 16-bit signed
+ * interleaved PCM playback, including an explicit speaker map for
+ * 1/2/4/6/8 channels (other counts are rejected with -EINVAL).
+ * Waits up to 5s for the DSP ack.
+ * Returns 0 on success, -EINVAL on unsupported channel count, send
+ * failure or timeout.
+ */
+int q6asm_media_format_block_multi_ch_pcm(struct audio_client *ac,
+ uint32_t rate, uint32_t channels)
+{
+ struct asm_stream_media_format_update fmt;
+ u8 *channel_mapping;
+ int rc = 0;
+
+ pr_debug("%s:session[%d]rate[%d]ch[%d]\n", __func__, ac->session, rate,
+ channels);
+
+ q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+
+ fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FORMAT_UPDATE;
+
+ fmt.format = MULTI_CHANNEL_PCM;
+ fmt.cfg_size = sizeof(struct asm_multi_channel_pcm_fmt_blk);
+ fmt.write_cfg.multi_ch_pcm_cfg.num_channels = channels;
+ fmt.write_cfg.multi_ch_pcm_cfg.bits_per_sample = 16;
+ fmt.write_cfg.multi_ch_pcm_cfg.sample_rate = rate;
+ fmt.write_cfg.multi_ch_pcm_cfg.is_signed = 1;
+ fmt.write_cfg.multi_ch_pcm_cfg.is_interleaved = 1;
+ channel_mapping =
+ fmt.write_cfg.multi_ch_pcm_cfg.channel_mapping;
+
+ memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL);
+
+ if (channels == 1) {
+ channel_mapping[0] = PCM_CHANNEL_FL;
+ } else if (channels == 2) {
+ channel_mapping[0] = PCM_CHANNEL_FL;
+ channel_mapping[1] = PCM_CHANNEL_FR;
+ } else if (channels == 4) {
+ channel_mapping[0] = PCM_CHANNEL_FL;
+ channel_mapping[1] = PCM_CHANNEL_FR;
+ /* Bugfix: the original assigned channel_mapping[1] three
+ * times, so slots 2/3 stayed 0 and the map came out
+ * FL/RB/0/0. Write LB/RB into their own slots. */
+ channel_mapping[2] = PCM_CHANNEL_LB;
+ channel_mapping[3] = PCM_CHANNEL_RB;
+ } else if (channels == 6) {
+ channel_mapping[0] = PCM_CHANNEL_FL;
+ channel_mapping[1] = PCM_CHANNEL_FR;
+ channel_mapping[2] = PCM_CHANNEL_FC;
+ channel_mapping[3] = PCM_CHANNEL_LFE;
+ channel_mapping[4] = PCM_CHANNEL_LB;
+ channel_mapping[5] = PCM_CHANNEL_RB;
+ } else if (channels == 8) {
+ channel_mapping[0] = PCM_CHANNEL_FC;
+ channel_mapping[1] = PCM_CHANNEL_FL;
+ channel_mapping[2] = PCM_CHANNEL_FR;
+ channel_mapping[3] = PCM_CHANNEL_LB;
+ channel_mapping[4] = PCM_CHANNEL_RB;
+ channel_mapping[5] = PCM_CHANNEL_LFE;
+ channel_mapping[6] = PCM_CHANNEL_FLC;
+ channel_mapping[7] = PCM_CHANNEL_FRC;
+ } else {
+ pr_err("%s: ERROR.unsupported num_ch = %u\n", __func__,
+ channels);
+ return -EINVAL;
+ }
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+ if (rc < 0) {
+ pr_err("%s:Comamnd open failed\n", __func__);
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s:timeout. waited for FORMAT_UPDATE\n", __func__);
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
+/*
+ * q6asm_media_format_block_aac() - send an
+ * ASM_DATA_CMD_MEDIA_FORMAT_UPDATE for MPEG4_AAC playback, copying
+ * every field of @cfg into the command payload; waits up to 5s for
+ * the DSP ack.  Returns 0 on success, -EINVAL on failure/timeout.
+ */
+int q6asm_media_format_block_aac(struct audio_client *ac,
+ struct asm_aac_cfg *cfg)
+{
+ struct asm_stream_media_format_update fmt;
+ int rc = 0;
+
+ pr_debug("%s:session[%d]rate[%d]ch[%d]\n", __func__, ac->session,
+ cfg->sample_rate, cfg->ch_cfg);
+
+ q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+
+ fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FORMAT_UPDATE;
+
+ fmt.format = MPEG4_AAC;
+ fmt.cfg_size = sizeof(struct asm_aac_cfg);
+ fmt.write_cfg.aac_cfg.format = cfg->format;
+ fmt.write_cfg.aac_cfg.aot = cfg->aot;
+ fmt.write_cfg.aac_cfg.ep_config = cfg->ep_config;
+ fmt.write_cfg.aac_cfg.section_data_resilience =
+ cfg->section_data_resilience;
+ fmt.write_cfg.aac_cfg.scalefactor_data_resilience =
+ cfg->scalefactor_data_resilience;
+ fmt.write_cfg.aac_cfg.spectral_data_resilience =
+ cfg->spectral_data_resilience;
+ fmt.write_cfg.aac_cfg.ch_cfg = cfg->ch_cfg;
+ fmt.write_cfg.aac_cfg.sample_rate = cfg->sample_rate;
+ pr_info("%s:format=%x cfg_size=%d aac-cfg=%x aot=%d ch=%d sr=%d\n",
+ __func__, fmt.format, fmt.cfg_size,
+ fmt.write_cfg.aac_cfg.format,
+ fmt.write_cfg.aac_cfg.aot,
+ fmt.write_cfg.aac_cfg.ch_cfg,
+ fmt.write_cfg.aac_cfg.sample_rate);
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+ if (rc < 0) {
+ pr_err("%s:Comamnd open failed\n", __func__);
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s:timeout. waited for FORMAT_UPDATE\n", __func__);
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
+/*
+ * q6asm_media_format_block_amrwbplus() - send an
+ * ASM_DATA_CMD_MEDIA_FORMAT_UPDATE for AMR-WB+ playback; cfg_size is
+ * taken from @cfg->size_bytes rather than a struct sizeof.  Waits up
+ * to 5s for the DSP ack.  Returns 0 on success, -EINVAL otherwise.
+ */
+int q6asm_media_format_block_amrwbplus(struct audio_client *ac,
+ struct asm_amrwbplus_cfg *cfg)
+{
+ struct asm_stream_media_format_update fmt;
+ int rc = 0;
+ pr_debug("q6asm_media_format_block_amrwbplus");
+
+ pr_debug("%s:session[%d]band-mode[%d]frame-fmt[%d]ch[%d]\n",
+ __func__,
+ ac->session,
+ cfg->amr_band_mode,
+ cfg->amr_frame_fmt,
+ cfg->num_channels);
+
+ q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+
+ fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FORMAT_UPDATE;
+
+ fmt.format = AMR_WB_PLUS;
+ fmt.cfg_size = cfg->size_bytes;
+
+ fmt.write_cfg.amrwbplus_cfg.size_bytes = cfg->size_bytes;
+ fmt.write_cfg.amrwbplus_cfg.version = cfg->version;
+ fmt.write_cfg.amrwbplus_cfg.num_channels = cfg->num_channels;
+ fmt.write_cfg.amrwbplus_cfg.amr_band_mode = cfg->amr_band_mode;
+ fmt.write_cfg.amrwbplus_cfg.amr_dtx_mode = cfg->amr_dtx_mode;
+ fmt.write_cfg.amrwbplus_cfg.amr_frame_fmt = cfg->amr_frame_fmt;
+ fmt.write_cfg.amrwbplus_cfg.amr_lsf_idx = cfg->amr_lsf_idx;
+
+ pr_debug("%s: num_channels=%x amr_band_mode=%d amr_frame_fmt=%d\n",
+ __func__,
+ cfg->num_channels,
+ cfg->amr_band_mode,
+ cfg->amr_frame_fmt);
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+ if (rc < 0) {
+ pr_err("%s:Comamnd media format update failed..\n", __func__);
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s:timeout. waited for FORMAT_UPDATE\n", __func__);
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+/*
+ * q6asm_media_format_block_multi_aac() - identical to
+ * q6asm_media_format_block_aac() except the format id is
+ * MPEG4_MULTI_AAC (multi-channel AAC decode).  Waits up to 5s for the
+ * DSP ack.  Returns 0 on success, -EINVAL on failure/timeout.
+ */
+int q6asm_media_format_block_multi_aac(struct audio_client *ac,
+ struct asm_aac_cfg *cfg)
+{
+ struct asm_stream_media_format_update fmt;
+ int rc = 0;
+
+ pr_debug("%s:session[%d]rate[%d]ch[%d]\n", __func__, ac->session,
+ cfg->sample_rate, cfg->ch_cfg);
+
+ q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+
+ fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FORMAT_UPDATE;
+
+ fmt.format = MPEG4_MULTI_AAC;
+ fmt.cfg_size = sizeof(struct asm_aac_cfg);
+ fmt.write_cfg.aac_cfg.format = cfg->format;
+ fmt.write_cfg.aac_cfg.aot = cfg->aot;
+ fmt.write_cfg.aac_cfg.ep_config = cfg->ep_config;
+ fmt.write_cfg.aac_cfg.section_data_resilience =
+ cfg->section_data_resilience;
+ fmt.write_cfg.aac_cfg.scalefactor_data_resilience =
+ cfg->scalefactor_data_resilience;
+ fmt.write_cfg.aac_cfg.spectral_data_resilience =
+ cfg->spectral_data_resilience;
+ fmt.write_cfg.aac_cfg.ch_cfg = cfg->ch_cfg;
+ fmt.write_cfg.aac_cfg.sample_rate = cfg->sample_rate;
+ pr_info("%s:format=%x cfg_size=%d aac-cfg=%x aot=%d ch=%d sr=%d\n",
+ __func__, fmt.format, fmt.cfg_size,
+ fmt.write_cfg.aac_cfg.format,
+ fmt.write_cfg.aac_cfg.aot,
+ fmt.write_cfg.aac_cfg.ch_cfg,
+ fmt.write_cfg.aac_cfg.sample_rate);
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+ if (rc < 0) {
+ pr_err("%s:Comamnd open failed\n", __func__);
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s:timeout. waited for FORMAT_UPDATE\n", __func__);
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
+
+
+/*
+ * q6asm_media_format_block() - send a config-less
+ * ASM_DATA_CMD_MEDIA_FORMAT_UPDATE (cfg_size = 0) for formats whose
+ * driver-level FORMAT_* id maps 1:1 to a DSP format id (V13K, EVRC,
+ * AMR-NB/WB/WB+, MP3, DTS, DTS-LBR).  Waits up to 5s for the ack.
+ * Returns 0 on success, -EINVAL on unknown format, send failure or
+ * timeout.
+ */
+int q6asm_media_format_block(struct audio_client *ac, uint32_t format)
+{
+
+ struct asm_stream_media_format_update fmt;
+ int rc = 0;
+
+ pr_debug("%s:session[%d] format[0x%x]\n", __func__,
+ ac->session, format);
+
+ q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+ fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FORMAT_UPDATE;
+ switch (format) {
+ case FORMAT_V13K:
+ fmt.format = V13K_FS;
+ break;
+ case FORMAT_EVRC:
+ fmt.format = EVRC_FS;
+ break;
+ case FORMAT_AMRWB:
+ fmt.format = AMRWB_FS;
+ break;
+ case FORMAT_AMR_WB_PLUS:
+ fmt.format = AMR_WB_PLUS;
+ break;
+ case FORMAT_AMRNB:
+ fmt.format = AMRNB_FS;
+ break;
+ case FORMAT_MP3:
+ fmt.format = MP3;
+ break;
+ case FORMAT_DTS:
+ fmt.format = DTS;
+ break;
+ case FORMAT_DTS_LBR:
+ fmt.format = DTS_LBR;
+ break;
+ default:
+ pr_err("Invalid format[%d]\n", format);
+ goto fail_cmd;
+ }
+ fmt.cfg_size = 0;
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+ if (rc < 0) {
+ pr_err("%s:Comamnd open failed\n", __func__);
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s:timeout. waited for FORMAT_UPDATE\n", __func__);
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
+/*
+ * q6asm_media_format_block_wma() - send an
+ * ASM_DATA_CMD_MEDIA_FORMAT_UPDATE for WMA v9 playback.  @cfg is an
+ * opaque pointer cast to struct asm_wma_cfg; the DRC and advanced
+ * encode-option fields are zeroed.  Waits up to 5s for the DSP ack.
+ * Returns 0 on success, -EINVAL on send failure or timeout.
+ */
+int q6asm_media_format_block_wma(struct audio_client *ac,
+ void *cfg)
+{
+ struct asm_stream_media_format_update fmt;
+ struct asm_wma_cfg *wma_cfg = (struct asm_wma_cfg *)cfg;
+ int rc = 0;
+
+ pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d], balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x]\n",
+ ac->session, wma_cfg->format_tag, wma_cfg->sample_rate,
+ wma_cfg->ch_cfg, wma_cfg->avg_bytes_per_sec,
+ wma_cfg->block_align, wma_cfg->valid_bits_per_sample,
+ wma_cfg->ch_mask, wma_cfg->encode_opt);
+
+ q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+
+ fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FORMAT_UPDATE;
+
+ fmt.format = WMA_V9;
+ fmt.cfg_size = sizeof(struct asm_wma_cfg);
+ fmt.write_cfg.wma_cfg.format_tag = wma_cfg->format_tag;
+ fmt.write_cfg.wma_cfg.ch_cfg = wma_cfg->ch_cfg;
+ fmt.write_cfg.wma_cfg.sample_rate = wma_cfg->sample_rate;
+ fmt.write_cfg.wma_cfg.avg_bytes_per_sec = wma_cfg->avg_bytes_per_sec;
+ fmt.write_cfg.wma_cfg.block_align = wma_cfg->block_align;
+ fmt.write_cfg.wma_cfg.valid_bits_per_sample =
+ wma_cfg->valid_bits_per_sample;
+ fmt.write_cfg.wma_cfg.ch_mask = wma_cfg->ch_mask;
+ fmt.write_cfg.wma_cfg.encode_opt = wma_cfg->encode_opt;
+ fmt.write_cfg.wma_cfg.adv_encode_opt = 0;
+ fmt.write_cfg.wma_cfg.adv_encode_opt2 = 0;
+ fmt.write_cfg.wma_cfg.drc_peak_ref = 0;
+ fmt.write_cfg.wma_cfg.drc_peak_target = 0;
+ fmt.write_cfg.wma_cfg.drc_ave_ref = 0;
+ fmt.write_cfg.wma_cfg.drc_ave_target = 0;
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+ if (rc < 0) {
+ pr_err("%s:Comamnd open failed\n", __func__);
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s:timeout. waited for FORMAT_UPDATE\n", __func__);
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
+/*
+ * q6asm_media_format_block_wmapro() - send an
+ * ASM_DATA_CMD_MEDIA_FORMAT_UPDATE for WMA v10 Pro playback.  Unlike
+ * the v9 variant above, the advanced encode options come from @cfg;
+ * the DRC fields are still zeroed.  Waits up to 5s for the DSP ack.
+ * Returns 0 on success, -EINVAL on send failure or timeout.
+ */
+int q6asm_media_format_block_wmapro(struct audio_client *ac,
+ void *cfg)
+{
+ struct asm_stream_media_format_update fmt;
+ struct asm_wmapro_cfg *wmapro_cfg = (struct asm_wmapro_cfg *)cfg;
+ int rc = 0;
+
+ pr_debug("session[%d]format_tag[0x%4x] rate[%d] ch[0x%4x] bps[%d], balign[0x%4x], bit_sample[0x%4x], ch_msk[%d], enc_opt[0x%4x], adv_enc_opt[0x%4x], adv_enc_opt2[0x%8x]\n",
+ ac->session, wmapro_cfg->format_tag, wmapro_cfg->sample_rate,
+ wmapro_cfg->ch_cfg, wmapro_cfg->avg_bytes_per_sec,
+ wmapro_cfg->block_align, wmapro_cfg->valid_bits_per_sample,
+ wmapro_cfg->ch_mask, wmapro_cfg->encode_opt,
+ wmapro_cfg->adv_encode_opt, wmapro_cfg->adv_encode_opt2);
+
+ q6asm_add_hdr(ac, &fmt.hdr, sizeof(fmt), TRUE);
+
+ fmt.hdr.opcode = ASM_DATA_CMD_MEDIA_FORMAT_UPDATE;
+
+ fmt.format = WMA_V10PRO;
+ fmt.cfg_size = sizeof(struct asm_wmapro_cfg);
+ fmt.write_cfg.wmapro_cfg.format_tag = wmapro_cfg->format_tag;
+ fmt.write_cfg.wmapro_cfg.ch_cfg = wmapro_cfg->ch_cfg;
+ fmt.write_cfg.wmapro_cfg.sample_rate = wmapro_cfg->sample_rate;
+ fmt.write_cfg.wmapro_cfg.avg_bytes_per_sec =
+ wmapro_cfg->avg_bytes_per_sec;
+ fmt.write_cfg.wmapro_cfg.block_align = wmapro_cfg->block_align;
+ fmt.write_cfg.wmapro_cfg.valid_bits_per_sample =
+ wmapro_cfg->valid_bits_per_sample;
+ fmt.write_cfg.wmapro_cfg.ch_mask = wmapro_cfg->ch_mask;
+ fmt.write_cfg.wmapro_cfg.encode_opt = wmapro_cfg->encode_opt;
+ fmt.write_cfg.wmapro_cfg.adv_encode_opt = wmapro_cfg->adv_encode_opt;
+ fmt.write_cfg.wmapro_cfg.adv_encode_opt2 = wmapro_cfg->adv_encode_opt2;
+ fmt.write_cfg.wmapro_cfg.drc_peak_ref = 0;
+ fmt.write_cfg.wmapro_cfg.drc_peak_target = 0;
+ fmt.write_cfg.wmapro_cfg.drc_ave_ref = 0;
+ fmt.write_cfg.wmapro_cfg.drc_ave_target = 0;
+
+ rc = apr_send_pkt(ac->apr, (uint32_t *) &fmt);
+ if (rc < 0) {
+ pr_err("%s:Comamnd open failed\n", __func__);
+ goto fail_cmd;
+ }
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s:timeout. waited for FORMAT_UPDATE\n", __func__);
+ goto fail_cmd;
+ }
+ return 0;
+fail_cmd:
+ return -EINVAL;
+}
+
+/*
+ * q6asm_memory_map() - map a contiguous buffer region
+ * (@buf_add, @bufsz * @bufcnt bytes) into the DSP via
+ * ASM_SESSION_CMD_MEMORY_MAP on the shared this_mmap APR port, and
+ * wait up to 5s for the ack on this_mmap.cmd_wait.
+ * @dir is accepted but unused here.
+ * Returns 0 on success, -EINVAL on NULL handles, send failure or
+ * timeout.
+ */
+int q6asm_memory_map(struct audio_client *ac, uint32_t buf_add, int dir,
+ uint32_t bufsz, uint32_t bufcnt)
+{
+ struct asm_stream_cmd_memory_map mem_map;
+ int rc = 0;
+
+ if (!ac || ac->apr == NULL || this_mmap.apr == NULL) {
+ pr_err("APR handle NULL\n");
+ return -EINVAL;
+ }
+
+ pr_debug("%s: Session[%d]\n", __func__, ac->session);
+
+ mem_map.hdr.opcode = ASM_SESSION_CMD_MEMORY_MAP;
+
+ mem_map.buf_add = buf_add;
+ mem_map.buf_size = bufsz * bufcnt;
+ mem_map.mempool_id = 0; /* EBI */
+ mem_map.reserved = 0;
+
+ /* NOTE(review): the mmap header is filled in after the opcode is
+ * set above; presumably q6asm_add_mmaphdr() does not touch
+ * hdr.opcode — confirm against its definition. */
+ q6asm_add_mmaphdr(&mem_map.hdr,
+ sizeof(struct asm_stream_cmd_memory_map), TRUE);
+
+ pr_debug("buf add[%x] buf_add_parameter[%x]\n",
+ mem_map.buf_add, buf_add);
+
+ rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_map);
+ if (rc < 0) {
+ pr_err("mem_map op[0x%x]rc[%d]\n",
+ mem_map.hdr.opcode, rc);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+
+ rc = wait_event_timeout(this_mmap.cmd_wait,
+ (atomic_read(&this_mmap.cmd_state) == 0), 5 * HZ);
+ if (!rc) {
+ pr_err("timeout. waited for memory_map\n");
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+ rc = 0;
+fail_cmd:
+ return rc;
+}
+
+/*
+ * q6asm_memory_unmap() - unmap a previously mapped buffer
+ * (ASM_SESSION_CMD_MEMORY_UNMAP on the shared this_mmap port) and
+ * wait up to 5s for the ack.  @dir is accepted but unused here.
+ * Returns 0 on success, -EINVAL on NULL handles, send failure or
+ * timeout.  NOTE(review): the timeout message says "memory_map" —
+ * copy/paste from q6asm_memory_map(); log string left untouched.
+ */
+int q6asm_memory_unmap(struct audio_client *ac, uint32_t buf_add, int dir)
+{
+ struct asm_stream_cmd_memory_unmap mem_unmap;
+ int rc = 0;
+
+ if (!ac || ac->apr == NULL || this_mmap.apr == NULL) {
+ pr_err("APR handle NULL\n");
+ return -EINVAL;
+ }
+ pr_debug("%s: Session[%d]\n", __func__, ac->session);
+
+ q6asm_add_mmaphdr(&mem_unmap.hdr,
+ sizeof(struct asm_stream_cmd_memory_unmap), TRUE);
+ mem_unmap.hdr.opcode = ASM_SESSION_CMD_MEMORY_UNMAP;
+ mem_unmap.buf_add = buf_add;
+
+ rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_unmap);
+ if (rc < 0) {
+ pr_err("mem_unmap op[0x%x]rc[%d]\n",
+ mem_unmap.hdr.opcode, rc);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+
+ rc = wait_event_timeout(this_mmap.cmd_wait,
+ (atomic_read(&this_mmap.cmd_state) == 0), 5 * HZ);
+ if (!rc) {
+ pr_err("timeout. waited for memory_map\n");
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+ rc = 0;
+fail_cmd:
+ return rc;
+}
+
+/*
+ * q6asm_set_lrgain() - set independent left/right channel gains via
+ * the volume-control postproc module (ASM_STREAM_CMD_SET_PP_PARAMS,
+ * VOLUME_CONTROL_MODULE_ID / L_R_CHANNEL_GAIN_PARAM_ID).  Allocates
+ * one buffer holding the command followed by the in-band gain
+ * payload, sends it, and waits up to 5s for the DSP ack.
+ * Returns 0 on success, -EINVAL on allocation/send failure or
+ * timeout.  The buffer is freed on every exit path.
+ */
+int q6asm_set_lrgain(struct audio_client *ac, int left_gain, int right_gain)
+{
+ void *vol_cmd = NULL;
+ void *payload = NULL;
+ struct asm_pp_params_command *cmd = NULL;
+ struct asm_lrchannel_gain_params *lrgain = NULL;
+ int sz = 0;
+ int rc = 0;
+
+ sz = sizeof(struct asm_pp_params_command) +
+ + sizeof(struct asm_lrchannel_gain_params);
+ vol_cmd = kzalloc(sz, GFP_KERNEL);
+ if (vol_cmd == NULL) {
+ pr_err("%s[%d]: Mem alloc failed\n", __func__, ac->session);
+ rc = -EINVAL;
+ return rc;
+ }
+ cmd = (struct asm_pp_params_command *)vol_cmd;
+ q6asm_add_hdr_async(ac, &cmd->hdr, sz, TRUE);
+ cmd->hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS;
+ /* NULL payload pointer => parameter data is in-band, appended
+ * right after the command struct below. */
+ cmd->payload = NULL;
+ cmd->payload_size = sizeof(struct asm_pp_param_data_hdr) +
+ sizeof(struct asm_lrchannel_gain_params);
+ cmd->params.module_id = VOLUME_CONTROL_MODULE_ID;
+ cmd->params.param_id = L_R_CHANNEL_GAIN_PARAM_ID;
+ cmd->params.param_size = sizeof(struct asm_lrchannel_gain_params);
+ cmd->params.reserved = 0;
+
+ /* void-pointer arithmetic: GCC extension, standard in kernel code */
+ payload = (u8 *)(vol_cmd + sizeof(struct asm_pp_params_command));
+ lrgain = (struct asm_lrchannel_gain_params *)payload;
+
+ lrgain->left_gain = left_gain;
+ lrgain->right_gain = right_gain;
+ rc = apr_send_pkt(ac->apr, (uint32_t *) vol_cmd);
+ if (rc < 0) {
+ pr_err("%s: Volume Command failed\n", __func__);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+
+ rc = wait_event_timeout(ac->cmd_wait,
+ (atomic_read(&ac->cmd_state) == 0), 5*HZ);
+ if (!rc) {
+ pr_err("%s: timeout in sending volume command to apr\n",
+ __func__);
+ rc = -EINVAL;
+ goto fail_cmd;
+ }
+ rc = 0;
+fail_cmd:
+ kfree(vol_cmd);
+ return rc;
+}
+
+/*
+ * q6asm_memory_map_regions - map every buffer of one port with the DSP.
+ * @ac:     audio client (session) owning the buffers
+ * @dir:    port direction (IN/OUT) whose buffer array is mapped
+ * @bufsz:  nominal buffer size (unused here; each region carries its own
+ *          audio_buffer size)
+ * @bufcnt: number of buffers / region descriptors to map
+ *
+ * Builds a variable-length ASM_SESSION_CMD_MEMORY_MAP_REGIONS packet with
+ * one asm_memory_map_regions entry per buffer and waits up to 5s for the
+ * DSP ack on this_mmap.cmd_wait.  Returns 0 on success, -EINVAL otherwise.
+ */
+static int q6asm_memory_map_regions(struct audio_client *ac, int dir,
+				uint32_t bufsz, uint32_t bufcnt)
+{
+	struct asm_stream_cmd_memory_map_regions *mmap_regions = NULL;
+	struct asm_memory_map_regions *mregions = NULL;
+	struct audio_port_data *port = NULL;
+	struct audio_buffer *ab = NULL;
+	void *mmap_region_cmd = NULL;
+	void *payload = NULL;
+	int rc = 0;
+	int i = 0;
+	int cmd_size = 0;
+
+	if (!ac || ac->apr == NULL || this_mmap.apr == NULL) {
+		pr_err("APR handle NULL\n");
+		return -EINVAL;
+	}
+	pr_debug("%s: Session[%d]\n", __func__, ac->session);
+
+	/* fixed header followed by one region descriptor per buffer */
+	cmd_size = sizeof(struct asm_stream_cmd_memory_map_regions)
+			+ sizeof(struct asm_memory_map_regions) * bufcnt;
+
+	mmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
+	if (mmap_region_cmd == NULL) {
+		pr_err("%s: Mem alloc failed\n", __func__);
+		rc = -EINVAL;
+		return rc;
+	}
+	mmap_regions = (struct asm_stream_cmd_memory_map_regions *)
+							mmap_region_cmd;
+	q6asm_add_mmaphdr(&mmap_regions->hdr, cmd_size, TRUE);
+	mmap_regions->hdr.opcode = ASM_SESSION_CMD_MEMORY_MAP_REGIONS;
+	mmap_regions->mempool_id = 0;
+	/* protocol field is 8 bits wide; counts above 255 are truncated */
+	mmap_regions->nregions = bufcnt & 0x00ff;
+	pr_debug("map_regions->nregions = %d\n", mmap_regions->nregions);
+	payload = ((u8 *) mmap_region_cmd +
+			sizeof(struct asm_stream_cmd_memory_map_regions));
+	mregions = (struct asm_memory_map_regions *)payload;
+
+	port = &ac->port[dir];
+	for (i = 0; i < bufcnt; i++) {
+		ab = &port->buf[i];
+		mregions->phys = ab->phys;
+		mregions->buf_size = ab->size;
+		++mregions;
+	}
+
+	rc = apr_send_pkt(this_mmap.apr, (uint32_t *) mmap_region_cmd);
+	if (rc < 0) {
+		pr_err("mmap_regions op[0x%x]rc[%d]\n",
+					mmap_regions->hdr.opcode, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	/* cmd_state is cleared by the APR callback when the DSP acks */
+	rc = wait_event_timeout(this_mmap.cmd_wait,
+			(atomic_read(&this_mmap.cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("timeout. waited for memory_map\n");
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	kfree(mmap_region_cmd);
+	return rc;
+}
+
+/*
+ * q6asm_memory_unmap_regions - unmap every buffer of one port from the DSP.
+ * @ac:     audio client (session) owning the buffers
+ * @dir:    port direction (IN/OUT) whose buffer array is unmapped
+ * @bufsz:  nominal buffer size (unused by the unmap command)
+ * @bufcnt: number of buffers / region descriptors to unmap
+ *
+ * Counterpart of q6asm_memory_map_regions().  Waits up to 5s for the DSP
+ * ack on this_mmap.cmd_wait.  Returns 0 on success, -EINVAL otherwise.
+ */
+static int q6asm_memory_unmap_regions(struct audio_client *ac, int dir,
+				uint32_t bufsz, uint32_t bufcnt)
+{
+	struct asm_stream_cmd_memory_unmap_regions *unmap_regions = NULL;
+	struct asm_memory_unmap_regions *mregions = NULL;
+	struct audio_port_data *port = NULL;
+	struct audio_buffer *ab = NULL;
+	void *unmap_region_cmd = NULL;
+	void *payload = NULL;
+	int rc = 0;
+	int i = 0;
+	int cmd_size = 0;
+
+	if (!ac || ac->apr == NULL || this_mmap.apr == NULL) {
+		pr_err("APR handle NULL\n");
+		return -EINVAL;
+	}
+	pr_debug("%s: Session[%d]\n", __func__, ac->session);
+
+	cmd_size = sizeof(struct asm_stream_cmd_memory_unmap_regions) +
+			sizeof(struct asm_memory_unmap_regions) * bufcnt;
+
+	unmap_region_cmd = kzalloc(cmd_size, GFP_KERNEL);
+	if (unmap_region_cmd == NULL) {
+		pr_err("%s: Mem alloc failed\n", __func__);
+		rc = -EINVAL;
+		return rc;
+	}
+	unmap_regions = (struct asm_stream_cmd_memory_unmap_regions *)
+							unmap_region_cmd;
+	q6asm_add_mmaphdr(&unmap_regions->hdr, cmd_size, TRUE);
+	unmap_regions->hdr.opcode = ASM_SESSION_CMD_MEMORY_UNMAP_REGIONS;
+	/* protocol field is 8 bits wide; counts above 255 are truncated */
+	unmap_regions->nregions = bufcnt & 0x00ff;
+	pr_debug("unmap_regions->nregions = %d\n", unmap_regions->nregions);
+	payload = ((u8 *) unmap_region_cmd +
+			sizeof(struct asm_stream_cmd_memory_unmap_regions));
+	mregions = (struct asm_memory_unmap_regions *)payload;
+	port = &ac->port[dir];
+	for (i = 0; i < bufcnt; i++) {
+		ab = &port->buf[i];
+		mregions->phys = ab->phys;
+		++mregions;
+	}
+
+	rc = apr_send_pkt(this_mmap.apr, (uint32_t *) unmap_region_cmd);
+	if (rc < 0) {
+		pr_err("mmap_regions op[0x%x]rc[%d]\n",
+					unmap_regions->hdr.opcode, rc);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(this_mmap.cmd_wait,
+			(atomic_read(&this_mmap.cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("timeout. waited for memory_unmap\n");
+		/*
+		 * bug fix: wait_event_timeout() returns 0 on timeout, so
+		 * without this the function fell through to "return rc;"
+		 * and reported success.  Mirror q6asm_memory_map_regions().
+		 */
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = 0;
+
+fail_cmd:
+	kfree(unmap_region_cmd);
+	return rc;
+}
+
+/*
+ * q6asm_set_mute - mute or unmute the session via the volume PP module.
+ * @ac:       audio client for the target ASM session
+ * @muteflag: mute state forwarded verbatim in asm_mute_params
+ *
+ * Sends ASM_STREAM_CMD_SET_PP_PARAMS with an in-band asm_mute_params
+ * payload and blocks up to 5s for the DSP ack.
+ * Returns 0 on success, -EINVAL on alloc/send/timeout failure.
+ */
+int q6asm_set_mute(struct audio_client *ac, int muteflag)
+{
+	void *vol_cmd = NULL;
+	void *payload = NULL;
+	struct asm_pp_params_command *cmd = NULL;
+	struct asm_mute_params *mute = NULL;
+	int sz = 0;
+	int rc = 0;
+
+	/* header + in-band payload (doubled '+' is a harmless unary plus) */
+	sz = sizeof(struct asm_pp_params_command) +
+		+ sizeof(struct asm_mute_params);
+	vol_cmd = kzalloc(sz, GFP_KERNEL);
+	if (vol_cmd == NULL) {
+		pr_err("%s[%d]: Mem alloc failed\n", __func__, ac->session);
+		rc = -EINVAL;
+		return rc;
+	}
+	cmd = (struct asm_pp_params_command *)vol_cmd;
+	q6asm_add_hdr_async(ac, &cmd->hdr, sz, TRUE);
+	cmd->hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS;
+	cmd->payload = NULL;
+	cmd->payload_size = sizeof(struct asm_pp_param_data_hdr) +
+		sizeof(struct asm_mute_params);
+	cmd->params.module_id = VOLUME_CONTROL_MODULE_ID;
+	cmd->params.param_id = MUTE_CONFIG_PARAM_ID;
+	cmd->params.param_size = sizeof(struct asm_mute_params);
+	cmd->params.reserved = 0;
+
+	/* in-band payload directly after the command header */
+	payload = (u8 *)(vol_cmd + sizeof(struct asm_pp_params_command));
+	mute = (struct asm_mute_params *)payload;
+
+	mute->muteflag = muteflag;
+	rc = apr_send_pkt(ac->apr, (uint32_t *) vol_cmd);
+	if (rc < 0) {
+		pr_err("%s: Mute Command failed\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout in sending mute command to apr\n",
+			__func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	kfree(vol_cmd);
+	return rc;
+}
+
+/*
+ * q6asm_set_volume - apply a master gain on the session's volume module.
+ * @ac:     audio client for the target ASM session
+ * @volume: master gain value forwarded in asm_master_gain_params
+ *
+ * Sends ASM_STREAM_CMD_SET_PP_PARAMS with an in-band master-gain payload
+ * and blocks up to 5s for the DSP ack.
+ * Returns 0 on success, -EINVAL otherwise.
+ */
+int q6asm_set_volume(struct audio_client *ac, int volume)
+{
+	struct asm_pp_params_command *pkt;
+	struct asm_master_gain_params *gain;
+	int pkt_size;
+	int ret;
+
+	/* one allocation: command header immediately followed by payload */
+	pkt_size = sizeof(*pkt) + sizeof(*gain);
+	pkt = kzalloc(pkt_size, GFP_KERNEL);
+	if (!pkt) {
+		pr_err("%s[%d]: Mem alloc failed\n", __func__, ac->session);
+		return -EINVAL;
+	}
+
+	q6asm_add_hdr_async(ac, &pkt->hdr, pkt_size, TRUE);
+	pkt->hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS;
+	pkt->payload = NULL;
+	pkt->payload_size = sizeof(struct asm_pp_param_data_hdr) +
+			sizeof(struct asm_master_gain_params);
+	pkt->params.module_id = VOLUME_CONTROL_MODULE_ID;
+	pkt->params.param_id = MASTER_GAIN_PARAM_ID;
+	pkt->params.param_size = sizeof(struct asm_master_gain_params);
+	pkt->params.reserved = 0;
+
+	/* in-band payload lives right behind the command structure */
+	gain = (struct asm_master_gain_params *)(pkt + 1);
+	gain->master_gain = volume;
+	gain->padding = 0x00;
+
+	ret = apr_send_pkt(ac->apr, (uint32_t *) pkt);
+	if (ret < 0) {
+		pr_err("%s: Volume Command failed\n", __func__);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!ret) {
+		pr_err("%s: timeout in sending volume command to apr\n",
+				__func__);
+		ret = -EINVAL;
+		goto done;
+	}
+	ret = 0;
+done:
+	kfree(pkt);
+	return ret;
+}
+
+/*
+ * q6asm_set_softpause - configure soft-pause ramping on the session.
+ * @ac:          audio client for the target ASM session
+ * @pause_param: enable flag, ramp period/step and ramping curve to apply
+ *
+ * Sends ASM_STREAM_CMD_SET_PP_PARAMS with an in-band asm_softpause_params
+ * payload and blocks up to 5s for the DSP ack.
+ * Returns 0 on success, -EINVAL on alloc/send/timeout failure.
+ */
+int q6asm_set_softpause(struct audio_client *ac,
+			struct asm_softpause_params *pause_param)
+{
+	void *vol_cmd = NULL;
+	void *payload = NULL;
+	struct asm_pp_params_command *cmd = NULL;
+	struct asm_softpause_params *params = NULL;
+	int sz = 0;
+	int rc = 0;
+
+	/* header + in-band payload (doubled '+' is a harmless unary plus) */
+	sz = sizeof(struct asm_pp_params_command) +
+		+ sizeof(struct asm_softpause_params);
+	vol_cmd = kzalloc(sz, GFP_KERNEL);
+	if (vol_cmd == NULL) {
+		pr_err("%s[%d]: Mem alloc failed\n", __func__, ac->session);
+		rc = -EINVAL;
+		return rc;
+	}
+	cmd = (struct asm_pp_params_command *)vol_cmd;
+	q6asm_add_hdr_async(ac, &cmd->hdr, sz, TRUE);
+	cmd->hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS;
+	cmd->payload = NULL;
+	cmd->payload_size = sizeof(struct asm_pp_param_data_hdr) +
+		sizeof(struct asm_softpause_params);
+	cmd->params.module_id = VOLUME_CONTROL_MODULE_ID;
+	cmd->params.param_id = SOFT_PAUSE_PARAM_ID;
+	cmd->params.param_size = sizeof(struct asm_softpause_params);
+	cmd->params.reserved = 0;
+
+	/* in-band payload directly after the command header */
+	payload = (u8 *)(vol_cmd + sizeof(struct asm_pp_params_command));
+	params = (struct asm_softpause_params *)payload;
+
+	params->enable = pause_param->enable;
+	params->period = pause_param->period;
+	params->step = pause_param->step;
+	params->rampingcurve = pause_param->rampingcurve;
+	pr_debug("%s: soft Pause Command: enable = %d, period = %d, step = %d, curve = %d\n",
+			__func__, params->enable,
+			params->period, params->step, params->rampingcurve);
+	rc = apr_send_pkt(ac->apr, (uint32_t *) vol_cmd);
+	if (rc < 0) {
+		pr_err("%s: Volume Command(soft_pause) failed\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout in sending volume command(soft_pause) to apr\n",
+			__func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	kfree(vol_cmd);
+	return rc;
+}
+
+/*
+ * q6asm_set_softvolume - configure soft-volume ramping on the session.
+ * @ac:            audio client for the target ASM session
+ * @softvol_param: ramp period/step and ramping curve to apply
+ *
+ * Sends ASM_STREAM_CMD_SET_PP_PARAMS with an in-band asm_softvolume_params
+ * payload and blocks up to 5s for the DSP ack.
+ * Returns 0 on success, -EINVAL on alloc/send/timeout failure.
+ */
+int q6asm_set_softvolume(struct audio_client *ac,
+			struct asm_softvolume_params *softvol_param)
+{
+	void *vol_cmd = NULL;
+	void *payload = NULL;
+	struct asm_pp_params_command *cmd = NULL;
+	struct asm_softvolume_params *params = NULL;
+	int sz = 0;
+	int rc = 0;
+
+	/* header + in-band payload (doubled '+' is a harmless unary plus) */
+	sz = sizeof(struct asm_pp_params_command) +
+		+ sizeof(struct asm_softvolume_params);
+	vol_cmd = kzalloc(sz, GFP_KERNEL);
+	if (vol_cmd == NULL) {
+		pr_err("%s[%d]: Mem alloc failed\n", __func__, ac->session);
+		rc = -EINVAL;
+		return rc;
+	}
+	cmd = (struct asm_pp_params_command *)vol_cmd;
+	q6asm_add_hdr_async(ac, &cmd->hdr, sz, TRUE);
+	cmd->hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS;
+	cmd->payload = NULL;
+	cmd->payload_size = sizeof(struct asm_pp_param_data_hdr) +
+		sizeof(struct asm_softvolume_params);
+	cmd->params.module_id = VOLUME_CONTROL_MODULE_ID;
+	cmd->params.param_id = SOFT_VOLUME_PARAM_ID;
+	cmd->params.param_size = sizeof(struct asm_softvolume_params);
+	cmd->params.reserved = 0;
+
+	/* in-band payload directly after the command header */
+	payload = (u8 *)(vol_cmd + sizeof(struct asm_pp_params_command));
+	params = (struct asm_softvolume_params *)payload;
+
+	params->period = softvol_param->period;
+	params->step = softvol_param->step;
+	params->rampingcurve = softvol_param->rampingcurve;
+	pr_debug("%s: soft Volume:opcode = %d,payload_sz =%d,module_id =%d, param_id = %d, param_sz = %d\n",
+			__func__,
+			cmd->hdr.opcode, cmd->payload_size,
+			cmd->params.module_id, cmd->params.param_id,
+			cmd->params.param_size);
+	pr_debug("%s: soft Volume Command: period = %d, step = %d, curve = %d\n",
+			__func__, params->period,
+			params->step, params->rampingcurve);
+	rc = apr_send_pkt(ac->apr, (uint32_t *) vol_cmd);
+	if (rc < 0) {
+		pr_err("%s: Volume Command(soft_volume) failed\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout in sending volume command(soft_volume) to apr\n",
+			__func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	kfree(vol_cmd);
+	return rc;
+}
+
+/*
+ * q6asm_equalizer - program the per-session equalizer PP module.
+ * @ac: audio client for the target ASM session
+ * @eq: caller-supplied struct msm_audio_eq_stream_config (passed as void *)
+ *
+ * Copies enable flag and per-band settings into an in-band
+ * asm_equalizer_params payload and blocks up to 5s for the DSP ack.
+ * Returns 0 on success, -EINVAL on alloc/send/timeout failure.
+ *
+ * NOTE(review): eq_params->num_bands is used as a loop bound without
+ * being validated against the capacity of equalizer->eq_bands[]; if the
+ * value originates from userspace this can overflow the allocation --
+ * verify the caller clamps it.
+ */
+int q6asm_equalizer(struct audio_client *ac, void *eq)
+{
+	void *eq_cmd = NULL;
+	void *payload = NULL;
+	struct asm_pp_params_command *cmd = NULL;
+	struct asm_equalizer_params *equalizer = NULL;
+	struct msm_audio_eq_stream_config *eq_params = NULL;
+	int i = 0;
+	int sz = 0;
+	int rc = 0;
+
+	/* header + in-band payload (doubled '+' is a harmless unary plus) */
+	sz = sizeof(struct asm_pp_params_command) +
+		+ sizeof(struct asm_equalizer_params);
+	eq_cmd = kzalloc(sz, GFP_KERNEL);
+	if (eq_cmd == NULL) {
+		pr_err("%s[%d]: Mem alloc failed\n", __func__, ac->session);
+		rc = -EINVAL;
+		/* kfree(NULL) below is a harmless no-op */
+		goto fail_cmd;
+	}
+	eq_params = (struct msm_audio_eq_stream_config *) eq;
+	cmd = (struct asm_pp_params_command *)eq_cmd;
+	q6asm_add_hdr(ac, &cmd->hdr, sz, TRUE);
+	cmd->hdr.opcode = ASM_STREAM_CMD_SET_PP_PARAMS;
+	cmd->payload = NULL;
+	cmd->payload_size = sizeof(struct asm_pp_param_data_hdr) +
+		sizeof(struct asm_equalizer_params);
+	cmd->params.module_id = EQUALIZER_MODULE_ID;
+	cmd->params.param_id = EQUALIZER_PARAM_ID;
+	cmd->params.param_size = sizeof(struct asm_equalizer_params);
+	cmd->params.reserved = 0;
+	/* in-band payload directly after the command header */
+	payload = (u8 *)(eq_cmd + sizeof(struct asm_pp_params_command));
+	equalizer = (struct asm_equalizer_params *)payload;
+
+	equalizer->enable = eq_params->enable;
+	equalizer->num_bands = eq_params->num_bands;
+	pr_debug("%s: enable:%d numbands:%d\n", __func__, eq_params->enable,
+			eq_params->num_bands);
+	/* copy each band's filter description verbatim */
+	for (i = 0; i < eq_params->num_bands; i++) {
+		equalizer->eq_bands[i].band_idx =
+					eq_params->eq_bands[i].band_idx;
+		equalizer->eq_bands[i].filter_type =
+					eq_params->eq_bands[i].filter_type;
+		equalizer->eq_bands[i].center_freq_hz =
+					eq_params->eq_bands[i].center_freq_hz;
+		equalizer->eq_bands[i].filter_gain =
+					eq_params->eq_bands[i].filter_gain;
+		equalizer->eq_bands[i].q_factor =
+					eq_params->eq_bands[i].q_factor;
+		pr_debug("%s: filter_type:%u bandnum:%d\n", __func__,
+				eq_params->eq_bands[i].filter_type, i);
+		pr_debug("%s: center_freq_hz:%u bandnum:%d\n", __func__,
+				eq_params->eq_bands[i].center_freq_hz, i);
+		pr_debug("%s: filter_gain:%d bandnum:%d\n", __func__,
+				eq_params->eq_bands[i].filter_gain, i);
+		pr_debug("%s: q_factor:%d bandnum:%d\n", __func__,
+				eq_params->eq_bands[i].q_factor, i);
+	}
+	rc = apr_send_pkt(ac->apr, (uint32_t *) eq_cmd);
+	if (rc < 0) {
+		pr_err("%s: Equalizer Command failed\n", __func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+
+	rc = wait_event_timeout(ac->cmd_wait,
+			(atomic_read(&ac->cmd_state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout in sending equalizer command to apr\n",
+			__func__);
+		rc = -EINVAL;
+		goto fail_cmd;
+	}
+	rc = 0;
+fail_cmd:
+	kfree(eq_cmd);
+	return rc;
+}
+
+/*
+ * q6asm_read - hand the next capture buffer to the DSP (sync I/O mode).
+ * @ac: audio client whose OUT port supplies the buffer ring
+ *
+ * Picks the current dsp_buf of the OUT port under port->lock, issues
+ * ASM_DATA_CMD_READ for it and advances dsp_buf.  Only active when the
+ * client is in SYNC_IO_MODE; otherwise falls through and returns -EINVAL.
+ * Returns 0 on successful send, -EINVAL on failure.
+ */
+int q6asm_read(struct audio_client *ac)
+{
+	struct asm_stream_cmd_read read;
+	struct audio_buffer *ab;
+	int dsp_buf;
+	struct audio_port_data *port;
+	int rc;
+	if (!ac || ac->apr == NULL) {
+		pr_err("APR handle NULL\n");
+		return -EINVAL;
+	}
+	if (ac->io_mode & SYNC_IO_MODE) {
+		port = &ac->port[OUT];
+
+		q6asm_add_hdr(ac, &read.hdr, sizeof(read), FALSE);
+
+		/* lock protects dsp_buf selection and advancement */
+		mutex_lock(&port->lock);
+
+		dsp_buf = port->dsp_buf;
+		ab = &port->buf[dsp_buf];
+
+		pr_debug("%s:session[%d]dsp-buf[%d][%p]cpu_buf[%d][%p]\n",
+					__func__,
+					ac->session,
+					dsp_buf,
+					(void *)port->buf[dsp_buf].data,
+					port->cpu_buf,
+					(void *)port->buf[port->cpu_buf].phys);
+
+		read.hdr.opcode = ASM_DATA_CMD_READ;
+		read.buf_add = ab->phys;
+		read.buf_size = ab->size;
+		/* buffer index doubles as uid and APR token so the
+		 * completion can be matched back to this buffer */
+		read.uid = port->dsp_buf;
+		read.hdr.token = port->dsp_buf;
+
+		/* ring advance by mask: assumes max_buf_cnt is a power of
+		 * two -- TODO confirm at buffer allocation */
+		port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1);
+		mutex_unlock(&port->lock);
+		pr_debug("%s:buf add[0x%x] token[%d] uid[%d]\n", __func__,
+						read.buf_add,
+						read.hdr.token,
+						read.uid);
+		rc = apr_send_pkt(ac->apr, (uint32_t *) &read);
+		if (rc < 0) {
+			pr_err("read op[0x%x]rc[%d]\n", read.hdr.opcode, rc);
+			goto fail_cmd;
+		}
+		return 0;
+	}
+fail_cmd:
+	return -EINVAL;
+}
+
+/*
+ * q6asm_read_nolock - lockless variant of q6asm_read().
+ * @ac: audio client whose OUT port supplies the buffer ring
+ *
+ * Same ASM_DATA_CMD_READ flow as q6asm_read() but does not take
+ * port->lock and uses the async header helper; the caller presumably
+ * already serializes access to the port -- confirm at call sites.
+ * Returns 0 on successful send, -EINVAL on failure.
+ */
+int q6asm_read_nolock(struct audio_client *ac)
+{
+	struct asm_stream_cmd_read read;
+	struct audio_buffer *ab;
+	int dsp_buf;
+	struct audio_port_data *port;
+	int rc;
+	if (!ac || ac->apr == NULL) {
+		pr_err("APR handle NULL\n");
+		return -EINVAL;
+	}
+	if (ac->io_mode & SYNC_IO_MODE) {
+		port = &ac->port[OUT];
+
+		q6asm_add_hdr_async(ac, &read.hdr, sizeof(read), FALSE);
+
+		dsp_buf = port->dsp_buf;
+		ab = &port->buf[dsp_buf];
+
+		pr_debug("%s:session[%d]dsp-buf[%d][%p]cpu_buf[%d][%p]\n",
+					__func__,
+					ac->session,
+					dsp_buf,
+					(void *)port->buf[dsp_buf].data,
+					port->cpu_buf,
+					(void *)port->buf[port->cpu_buf].phys);
+
+		read.hdr.opcode = ASM_DATA_CMD_READ;
+		read.buf_add = ab->phys;
+		read.buf_size = ab->size;
+		read.uid = port->dsp_buf;
+		read.hdr.token = port->dsp_buf;
+
+		port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1);
+		/*
+		 * consistency fix: this per-buffer trace was pr_info(),
+		 * spamming the log on every queued read; q6asm_read() uses
+		 * pr_debug() for the identical message.
+		 */
+		pr_debug("%s:buf add[0x%x] token[%d] uid[%d]\n", __func__,
+						read.buf_add,
+						read.hdr.token,
+						read.uid);
+		rc = apr_send_pkt(ac->apr, (uint32_t *) &read);
+		if (rc < 0) {
+			pr_err("read op[0x%x]rc[%d]\n", read.hdr.opcode, rc);
+			goto fail_cmd;
+		}
+		return 0;
+	}
+fail_cmd:
+	return -EINVAL;
+}
+
+
+/*
+ * q6asm_add_hdr_async - fill an APR header without touching any locks.
+ * @ac:       audio client the command belongs to
+ * @hdr:      header to populate
+ * @pkt_size: total packet size recorded in the header
+ * @cmd_flg:  when true, tag the packet with the session token and arm
+ *            ac->cmd_state so a waiter can detect the ack
+ *
+ * src/dest ports encode the session number in the upper byte with a
+ * fixed 0x01 stream id in the lower byte.
+ */
+static void q6asm_add_hdr_async(struct audio_client *ac, struct apr_hdr *hdr,
+			uint32_t pkt_size, uint32_t cmd_flg)
+{
+	/* bug fix: arguments were in the wrong order for the format
+	 * string (session was printed as pkt size, and so on) */
+	pr_debug("session=%d pkt size=%d cmd_flg=%d\n", ac->session,
+			pkt_size, cmd_flg);
+	hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \
+				APR_HDR_LEN(sizeof(struct apr_hdr)),\
+				APR_PKT_VER);
+	hdr->src_svc = ((struct apr_svc *)ac->apr)->id;
+	hdr->src_domain = APR_DOMAIN_APPS;
+	hdr->dest_svc = APR_SVC_ASM;
+	hdr->dest_domain = APR_DOMAIN_ADSP;
+	hdr->src_port = ((ac->session << 8) & 0xFF00) | 0x01;
+	hdr->dest_port = ((ac->session << 8) & 0xFF00) | 0x01;
+	if (cmd_flg) {
+		hdr->token = ac->session;
+		atomic_set(&ac->cmd_state, 1);
+	}
+	hdr->pkt_size = pkt_size;
+	return;
+}
+
+/*
+ * q6asm_async_write - queue one AIO playback buffer to the DSP.
+ * @ac:    audio client for the target ASM session
+ * @param: caller-described buffer (phys addr, length, uid, timestamps,
+ *         flags)
+ *
+ * Fire-and-forget: sends ASM_DATA_CMD_WRITE and returns without waiting;
+ * completion arrives via the APR callback, matched by the uid token.
+ * Returns 0 on successful send, -EINVAL on failure.
+ */
+int q6asm_async_write(struct audio_client *ac,
+					struct audio_aio_write_param *param)
+{
+	int rc = 0;
+	struct asm_stream_cmd_write write;
+
+	if (!ac || ac->apr == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	q6asm_add_hdr_async(ac, &write.hdr, sizeof(write), FALSE);
+
+	/* Pass physical address as token for AIO scheme */
+	write.hdr.token = param->uid;
+	write.hdr.opcode = ASM_DATA_CMD_WRITE;
+	write.buf_add = param->paddr;
+	write.avail_bytes = param->len;
+	write.uid = param->uid;
+	write.msw_ts = param->msw_ts;
+	write.lsw_ts = param->lsw_ts;
+	/* Use 0xFF00 for disabling timestamps */
+	if (param->flags == 0xFF00)
+		write.uflags = (0x00000000 | (param->flags & 0x800000FF));
+	else
+		write.uflags = (0x80000000 | param->flags);
+
+	pr_debug("%s: session[%d] bufadd[0x%x]len[0x%x]", __func__, ac->session,
+		write.buf_add, write.avail_bytes);
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &write);
+	if (rc < 0) {
+		pr_debug("[%s] write op[0x%x]rc[%d]\n", __func__,
+			write.hdr.opcode, rc);
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+/*
+ * q6asm_async_read - queue one AIO capture buffer to the DSP.
+ * @ac:    audio client for the target ASM session
+ * @param: caller-described buffer (phys addr, length, uid)
+ *
+ * Fire-and-forget: the buffer's physical address doubles as the APR
+ * token so the completion can be matched back to the request.
+ * Returns 0 on successful send, -EINVAL on failure.
+ */
+int q6asm_async_read(struct audio_client *ac,
+					struct audio_aio_read_param *param)
+{
+	struct asm_stream_cmd_read cmd_read;
+	int ret;
+
+	if (!ac || ac->apr == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	q6asm_add_hdr_async(ac, &cmd_read.hdr, sizeof(cmd_read), FALSE);
+
+	/* Pass physical address as token for AIO scheme */
+	cmd_read.hdr.token = param->paddr;
+	cmd_read.hdr.opcode = ASM_DATA_CMD_READ;
+	cmd_read.buf_add = param->paddr;
+	cmd_read.buf_size = param->len;
+	cmd_read.uid = param->uid;
+
+	pr_debug("%s: session[%d] bufadd[0x%x]len[0x%x]", __func__, ac->session,
+		cmd_read.buf_add, cmd_read.buf_size);
+
+	ret = apr_send_pkt(ac->apr, (uint32_t *) &cmd_read);
+	if (ret >= 0)
+		return 0;
+
+	pr_debug("[%s] read op[0x%x]rc[%d]\n", __func__,
+		cmd_read.hdr.opcode, ret);
+	return -EINVAL;
+}
+
+/*
+ * q6asm_async_read_compressed - queue one AIO buffer for compressed capture.
+ * @ac:    audio client for the target ASM session
+ * @param: caller-described buffer (phys addr, length, uid)
+ *
+ * Identical to q6asm_async_read() except the opcode is
+ * ASM_DATA_CMD_READ_COMPRESSED.  Fire-and-forget; completion is matched
+ * via the physical-address token.  Returns 0 on send, -EINVAL on failure.
+ */
+int q6asm_async_read_compressed(struct audio_client *ac,
+					struct audio_aio_read_param *param)
+{
+	int rc = 0;
+	struct asm_stream_cmd_read read;
+
+	if (!ac || ac->apr == NULL) {
+		pr_err("%s: APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	q6asm_add_hdr_async(ac, &read.hdr, sizeof(read), FALSE);
+
+	/* Pass physical address as token for AIO scheme */
+	read.hdr.token = param->paddr;
+	read.hdr.opcode = ASM_DATA_CMD_READ_COMPRESSED;
+	read.buf_add = param->paddr;
+	read.buf_size = param->len;
+	read.uid = param->uid;
+
+	pr_debug("%s: session[%d] bufadd[0x%x]len[0x%x]", __func__, ac->session,
+		read.buf_add, read.buf_size);
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &read);
+	if (rc < 0) {
+		pr_debug("[%s] read op[0x%x]rc[%d]\n", __func__,
+			read.hdr.opcode, rc);
+		goto fail_cmd;
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+/*
+ * q6asm_write - hand the next playback buffer to the DSP (sync I/O mode).
+ * @ac:     audio client whose IN port supplies the buffer ring
+ * @len:    number of valid bytes in the current buffer
+ * @msw_ts: most-significant word of the buffer timestamp
+ * @lsw_ts: least-significant word of the buffer timestamp
+ * @flags:  timestamp flags; the magic 0xFF00 disables timestamping
+ *
+ * Selects the current dsp_buf of the IN port under port->lock, builds
+ * an ASM_DATA_CMD_WRITE and advances dsp_buf.  Only active when the
+ * client is in SYNC_IO_MODE; otherwise returns -EINVAL.
+ * Returns 0 on successful send, -EINVAL on failure.
+ */
+int q6asm_write(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
+		uint32_t lsw_ts, uint32_t flags)
+{
+	int rc = 0;
+	struct asm_stream_cmd_write write;
+	struct audio_port_data *port;
+	struct audio_buffer *ab;
+	int dsp_buf = 0;
+
+	if (!ac || ac->apr == NULL) {
+		pr_err("APR handle NULL\n");
+		return -EINVAL;
+	}
+	pr_debug("%s: session[%d] len=%d", __func__, ac->session, len);
+	if (ac->io_mode & SYNC_IO_MODE) {
+		port = &ac->port[IN];
+
+		q6asm_add_hdr(ac, &write.hdr, sizeof(write),
+				FALSE);
+		/* lock protects dsp_buf selection and advancement */
+		mutex_lock(&port->lock);
+
+		dsp_buf = port->dsp_buf;
+		ab = &port->buf[dsp_buf];
+
+		/* buffer index doubles as uid and APR token so the
+		 * completion can be matched back to this buffer */
+		write.hdr.token = port->dsp_buf;
+		write.hdr.opcode = ASM_DATA_CMD_WRITE;
+		write.buf_add = ab->phys;
+		write.avail_bytes = len;
+		write.uid = port->dsp_buf;
+		write.msw_ts = msw_ts;
+		write.lsw_ts = lsw_ts;
+		/* Use 0xFF00 for disabling timestamps */
+		if (flags == 0xFF00)
+			write.uflags = (0x00000000 | (flags & 0x800000FF));
+		else
+			write.uflags = (0x80000000 | flags);
+		/* ring advance by mask: assumes max_buf_cnt is a power of
+		 * two -- TODO confirm at buffer allocation */
+		port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1);
+
+		pr_debug("%s:ab->phys[0x%x]bufadd[0x%x]token[0x%x]buf_id[0x%x]"
+							, __func__,
+							ab->phys,
+							write.buf_add,
+							write.hdr.token,
+							write.uid);
+		mutex_unlock(&port->lock);
+#ifdef CONFIG_DEBUG_FS
+		/* latency instrumentation: timestamp the first "warm" and
+		 * "cont" test patterns seen in the outgoing PCM data */
+		if (out_enable_flag) {
+			char zero_pattern[2] = {0x00, 0x00};
+			/* If First two byte is non zero and last two byte
+			is zero then it is warm output pattern */
+			if ((strncmp(((char *)ab->data), zero_pattern, 2)) &&
+			(!strncmp(((char *)ab->data + 2), zero_pattern, 2))) {
+				do_gettimeofday(&out_warm_tv);
+				pr_debug("WARM:apr_send_pkt at %ld sec %ld microsec\n",
+				out_warm_tv.tv_sec,\
+				out_warm_tv.tv_usec);
+				pr_debug("Warm Pattern Matched");
+			}
+			/* If First two byte is zero and last two byte is
+			non zero then it is cont ouput pattern */
+			else if ((!strncmp(((char *)ab->data), zero_pattern, 2))
+			&& (strncmp(((char *)ab->data + 2), zero_pattern, 2))) {
+				do_gettimeofday(&out_cont_tv);
+				pr_debug("CONT:apr_send_pkt at %ld sec %ld microsec\n",
+				out_cont_tv.tv_sec,\
+				out_cont_tv.tv_usec);
+				pr_debug("Cont Pattern Matched");
+			}
+		}
+#endif
+		rc = apr_send_pkt(ac->apr, (uint32_t *) &write);
+		if (rc < 0) {
+			pr_err("write op[0x%x]rc[%d]\n", write.hdr.opcode, rc);
+			goto fail_cmd;
+		}
+		pr_debug("%s: WRITE SUCCESS\n", __func__);
+		return 0;
+	}
+fail_cmd:
+	return -EINVAL;
+}
+
+/*
+ * q6asm_write_nolock - lockless variant of q6asm_write().
+ * @ac:     audio client whose IN port supplies the buffer ring
+ * @len:    number of valid bytes in the current buffer
+ * @msw_ts: most-significant word of the buffer timestamp
+ * @lsw_ts: least-significant word of the buffer timestamp
+ * @flags:  timestamp flags; the magic 0xFF00 disables timestamping
+ *
+ * Same ASM_DATA_CMD_WRITE flow as q6asm_write() but without taking
+ * port->lock and without the debugfs latency instrumentation; the
+ * caller presumably already serializes access -- confirm at call sites.
+ * Returns 0 on successful send, -EINVAL on failure.
+ */
+int q6asm_write_nolock(struct audio_client *ac, uint32_t len, uint32_t msw_ts,
+			uint32_t lsw_ts, uint32_t flags)
+{
+	int rc = 0;
+	struct asm_stream_cmd_write write;
+	struct audio_port_data *port;
+	struct audio_buffer *ab;
+	int dsp_buf = 0;
+
+	if (!ac || ac->apr == NULL) {
+		pr_err("APR handle NULL\n");
+		return -EINVAL;
+	}
+	pr_debug("%s: session[%d] len=%d", __func__, ac->session, len);
+	if (ac->io_mode & SYNC_IO_MODE) {
+		port = &ac->port[IN];
+
+		q6asm_add_hdr_async(ac, &write.hdr, sizeof(write),
+				FALSE);
+
+		dsp_buf = port->dsp_buf;
+		ab = &port->buf[dsp_buf];
+
+		/* buffer index doubles as uid and APR token */
+		write.hdr.token = port->dsp_buf;
+		write.hdr.opcode = ASM_DATA_CMD_WRITE;
+		write.buf_add = ab->phys;
+		write.avail_bytes = len;
+		write.uid = port->dsp_buf;
+		write.msw_ts = msw_ts;
+		write.lsw_ts = lsw_ts;
+		/* Use 0xFF00 for disabling timestamps */
+		if (flags == 0xFF00)
+			write.uflags = (0x00000000 | (flags & 0x800000FF));
+		else
+			write.uflags = (0x80000000 | flags);
+		/* ring advance by mask: assumes max_buf_cnt is a power of
+		 * two -- TODO confirm at buffer allocation */
+		port->dsp_buf = (port->dsp_buf + 1) & (port->max_buf_cnt - 1);
+
+		pr_debug("%s:ab->phys[0x%x]bufadd[0x%x]token[0x%x]buf_id[0x%x]"
+							, __func__,
+							ab->phys,
+							write.buf_add,
+							write.hdr.token,
+							write.uid);
+
+		rc = apr_send_pkt(ac->apr, (uint32_t *) &write);
+		if (rc < 0) {
+			pr_err("write op[0x%x]rc[%d]\n", write.hdr.opcode, rc);
+			goto fail_cmd;
+		}
+		pr_debug("%s: WRITE SUCCESS\n", __func__);
+		return 0;
+	}
+fail_cmd:
+	return -EINVAL;
+}
+
+/*
+ * q6asm_get_session_time - fetch the DSP-side session timestamp.
+ * @ac:     audio client for the target ASM session
+ * @tstamp: out parameter receiving the 64-bit session time
+ *
+ * Sends ASM_SESSION_CMD_GET_SESSION_TIME and blocks up to 5s on
+ * ac->time_wait; the APR callback clears ac->time_flag and stores the
+ * value in ac->time_stamp.  Returns 0 on success, -EINVAL on failure.
+ */
+int q6asm_get_session_time(struct audio_client *ac, uint64_t *tstamp)
+{
+	struct apr_hdr hdr;
+	int rc;
+
+	if (!ac || ac->apr == NULL || tstamp == NULL) {
+		pr_err("APR handle or tstamp NULL\n");
+		return -EINVAL;
+	}
+	q6asm_add_hdr(ac, &hdr, sizeof(hdr), FALSE);
+	hdr.opcode = ASM_SESSION_CMD_GET_SESSION_TIME;
+	/* armed here, cleared by the APR response handler */
+	atomic_set(&ac->time_flag, 1);
+
+	pr_debug("%s: session[%d]opcode[0x%x]\n", __func__,
+			ac->session,
+			hdr.opcode);
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr);
+	if (rc < 0) {
+		pr_err("Commmand 0x%x failed\n", hdr.opcode);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->time_wait,
+			(atomic_read(&ac->time_flag) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("%s: timeout in getting session time from DSP\n",
+			__func__);
+		goto fail_cmd;
+	}
+
+	/* value was deposited by the response callback */
+	*tstamp = ac->time_stamp;
+	return 0;
+
+fail_cmd:
+	return -EINVAL;
+}
+
+/*
+ * q6asm_cmd - issue a blocking session/stream control command.
+ * @ac:  audio client for the target ASM session
+ * @cmd: one of CMD_PAUSE, CMD_FLUSH, CMD_OUT_FLUSH, CMD_EOS, CMD_CLOSE
+ *
+ * Maps the command to its ASM opcode, sends it and waits up to 5s for
+ * the matching state atomic to drop to zero.  After FLUSH the buffer
+ * ring state is reset; after CLOSE any buffers not returned by the DSP
+ * are logged.  Returns 0 on success, -EINVAL on failure.
+ */
+int q6asm_cmd(struct audio_client *ac, int cmd)
+{
+	struct apr_hdr hdr;
+	int rc;
+	atomic_t *state;
+	int cnt = 0;
+
+	if (!ac || ac->apr == NULL) {
+		pr_err("APR handle NULL\n");
+		return -EINVAL;
+	}
+	/* TRUE arms ac->cmd_state = 1 for the wait below */
+	q6asm_add_hdr(ac, &hdr, sizeof(hdr), TRUE);
+	switch (cmd) {
+	case CMD_PAUSE:
+		pr_debug("%s:CMD_PAUSE\n", __func__);
+		hdr.opcode = ASM_SESSION_CMD_PAUSE;
+		state = &ac->cmd_state;
+		break;
+	case CMD_FLUSH:
+		pr_debug("%s:CMD_FLUSH\n", __func__);
+		hdr.opcode = ASM_STREAM_CMD_FLUSH;
+		state = &ac->cmd_state;
+		break;
+	case CMD_OUT_FLUSH:
+		pr_debug("%s:CMD_OUT_FLUSH\n", __func__);
+		hdr.opcode = ASM_STREAM_CMD_FLUSH_READBUFS;
+		state = &ac->cmd_state;
+		break;
+	case CMD_EOS:
+		pr_debug("%s:CMD_EOS\n", __func__);
+		hdr.opcode = ASM_DATA_CMD_EOS;
+		/* cmd_state forced to 0, so the wait below returns
+		 * immediately; EOS completion is presumably reported via a
+		 * separate event -- confirm against the APR callback */
+		atomic_set(&ac->cmd_state, 0);
+		state = &ac->cmd_state;
+		break;
+	case CMD_CLOSE:
+		pr_debug("%s:CMD_CLOSE\n", __func__);
+		hdr.opcode = ASM_STREAM_CMD_CLOSE;
+		/* CLOSE uses its own dedicated completion flag */
+		atomic_set(&ac->cmd_close_state, 1);
+		state = &ac->cmd_close_state;
+		break;
+	default:
+		pr_err("Invalid format[%d]\n", cmd);
+		goto fail_cmd;
+	}
+	pr_debug("%s:session[%d]opcode[0x%x] ", __func__,
+			ac->session,
+			hdr.opcode);
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr);
+	if (rc < 0) {
+		pr_err("Commmand 0x%x failed\n", hdr.opcode);
+		goto fail_cmd;
+	}
+	rc = wait_event_timeout(ac->cmd_wait, (atomic_read(state) == 0), 5*HZ);
+	if (!rc) {
+		pr_err("timeout. waited for response opcode[0x%x]\n",
+							hdr.opcode);
+		goto fail_cmd;
+	}
+	if (cmd == CMD_FLUSH)
+		q6asm_reset_buf_state(ac);
+	if (cmd == CMD_CLOSE) {
+		/* check if DSP return all buffers */
+		if (ac->port[IN].buf) {
+			for (cnt = 0; cnt < ac->port[IN].max_buf_cnt;
+								cnt++) {
+				if (ac->port[IN].buf[cnt].used == IN) {
+					pr_debug("Write Buf[%d] not returned\n",
+									cnt);
+				}
+			}
+		}
+		if (ac->port[OUT].buf) {
+			for (cnt = 0; cnt < ac->port[OUT].max_buf_cnt; cnt++) {
+				if (ac->port[OUT].buf[cnt].used == OUT) {
+					pr_debug("Read Buf[%d] not returned\n",
+									cnt);
+				}
+			}
+		}
+	}
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+/*
+ * q6asm_cmd_nowait - issue a non-blocking session command.
+ * @ac:  audio client for the target ASM session
+ * @cmd: CMD_PAUSE or CMD_EOS (others are rejected)
+ *
+ * Fire-and-forget variant of q6asm_cmd(): sends the opcode and returns
+ * without waiting for the ack; ac->nowait_cmd_cnt counts the outstanding
+ * commands.  Returns 0 on successful send, -EINVAL otherwise.
+ */
+int q6asm_cmd_nowait(struct audio_client *ac, int cmd)
+{
+	struct apr_hdr hdr;
+	int rc;
+
+	if (!ac || ac->apr == NULL) {
+		pr_err("%s:APR handle NULL\n", __func__);
+		return -EINVAL;
+	}
+	q6asm_add_hdr_async(ac, &hdr, sizeof(hdr), TRUE);
+	switch (cmd) {
+	case CMD_PAUSE:
+		pr_debug("%s:CMD_PAUSE\n", __func__);
+		hdr.opcode = ASM_SESSION_CMD_PAUSE;
+		break;
+	case CMD_EOS:
+		pr_debug("%s:CMD_EOS\n", __func__);
+		hdr.opcode = ASM_DATA_CMD_EOS;
+		break;
+	default:
+		pr_err("%s:Invalid format[%d]\n", __func__, cmd);
+		goto fail_cmd;
+	}
+	pr_debug("%s:session[%d]opcode[0x%x] ", __func__,
+			ac->session,
+			hdr.opcode);
+
+	rc = apr_send_pkt(ac->apr, (uint32_t *) &hdr);
+	if (rc < 0) {
+		pr_err("%s:Commmand 0x%x failed\n", __func__, hdr.opcode);
+		goto fail_cmd;
+	}
+	/* track the outstanding fire-and-forget command */
+	atomic_inc(&ac->nowait_cmd_cnt);
+	return 0;
+fail_cmd:
+	return -EINVAL;
+}
+
+/*
+ * q6asm_reset_buf_state - reset both buffer rings after a flush.
+ * @ac: audio client whose IN/OUT ports are reset
+ *
+ * Rewinds dsp_buf/cpu_buf to 0 and marks every buffer with the
+ * direction-appropriate "used" value.  Only meaningful in
+ * SYNC_IO_MODE; runs under ac->cmd_lock.
+ */
+static void q6asm_reset_buf_state(struct audio_client *ac)
+{
+	int cnt = 0;
+	int loopcnt = 0;
+	int used;
+	struct audio_port_data *port = NULL;
+
+	if (ac->io_mode & SYNC_IO_MODE) {
+		used = (ac->io_mode & TUN_WRITE_IO_MODE ? 1 : 0);
+		mutex_lock(&ac->cmd_lock);
+		for (loopcnt = 0; loopcnt <= OUT; loopcnt++) {
+			port = &ac->port[loopcnt];
+			cnt = port->max_buf_cnt - 1;
+			port->dsp_buf = 0;
+			port->cpu_buf = 0;
+			/*
+			 * bug fix: this NULL check used to sit inside the
+			 * while loop and executed "continue" without
+			 * decrementing cnt, spinning forever (holding
+			 * cmd_lock) on a port with no buffer array.
+			 */
+			if (!port->buf)
+				continue;
+			while (cnt >= 0) {
+				port->buf[cnt].used = used;
+				cnt--;
+			}
+		}
+		mutex_unlock(&ac->cmd_lock);
+	}
+}
+
+/*
+ * q6asm_reg_tx_overflow - (un)register for TX overflow events.
+ * @ac:     audio client for the target ASM session
+ * @enable: non-zero to enable event delivery, zero to disable
+ *
+ * Sends ASM_SESSION_CMD_REGISTER_FOR_TX_OVERFLOW_EVENTS and blocks up
+ * to 5s for the DSP ack.  Returns 0 on success, -EINVAL on failure.
+ */
+int q6asm_reg_tx_overflow(struct audio_client *ac, uint16_t enable)
+{
+	struct asm_stream_cmd_reg_tx_overflow_event evt;
+	int ret;
+
+	if (!ac || ac->apr == NULL) {
+		pr_err("APR handle NULL\n");
+		return -EINVAL;
+	}
+	pr_debug("%s:session[%d]enable[%d]\n", __func__,
+			ac->session, enable);
+
+	q6asm_add_hdr(ac, &evt.hdr, sizeof(evt), TRUE);
+	evt.hdr.opcode = ASM_SESSION_CMD_REGISTER_FOR_TX_OVERFLOW_EVENTS;
+	/* tx overflow event: enable */
+	evt.enable = enable;
+
+	ret = apr_send_pkt(ac->apr, (uint32_t *) &evt);
+	if (ret < 0) {
+		pr_err("tx overflow op[0x%x]rc[%d]\n",
+				evt.hdr.opcode, ret);
+		return -EINVAL;
+	}
+
+	if (!wait_event_timeout(ac->cmd_wait,
+				(atomic_read(&ac->cmd_state) == 0), 5*HZ)) {
+		pr_err("timeout. waited for tx overflow\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * q6asm_get_apr_service_id - return the APR service id of a session.
+ * @session_id: index into the global session[] table
+ *
+ * Returns the service id on success, -EINVAL for an out-of-range or
+ * inactive session.
+ */
+int q6asm_get_apr_service_id(int session_id)
+{
+	pr_debug("%s\n", __func__);
+
+	if (session_id < 0 || session_id > SESSION_MAX) {
+		pr_err("%s: invalid session_id = %d\n", __func__, session_id);
+		return -EINVAL;
+	}
+	/*
+	 * bug fix: guard against a slot that was never opened (or already
+	 * freed); dereferencing it unconditionally was a NULL pointer
+	 * dereference waiting to happen.
+	 */
+	if (session[session_id] == NULL ||
+			session[session_id]->apr == NULL) {
+		pr_err("%s: session %d not active\n", __func__, session_id);
+		return -EINVAL;
+	}
+
+	return ((struct apr_svc *)session[session_id]->apr)->id;
+}
+
+
+/*
+ * q6asm_init - module init: prepare the shared mmap wait queue, clear the
+ * session table and (with CONFIG_DEBUG_FS) create the audio latency
+ * measurement debugfs nodes.
+ *
+ * NOTE(review): the kmalloc() results for out_buffer/in_buffer are not
+ * checked; a failed allocation would be dereferenced later by the
+ * debugfs handlers -- verify.
+ * NOTE(review): debugfs_create_file() historically returns NULL (not an
+ * ERR_PTR) on failure, so the IS_ERR() checks may miss failures --
+ * confirm against the kernel version this targets.
+ */
+static int __init q6asm_init(void)
+{
+	pr_debug("%s\n", __func__);
+	init_waitqueue_head(&this_mmap.cmd_wait);
+	memset(session, 0, sizeof(session));
+#ifdef CONFIG_DEBUG_FS
+	out_buffer = kmalloc(OUT_BUFFER_SIZE, GFP_KERNEL);
+	out_dentry = debugfs_create_file("audio_out_latency_measurement_node",\
+				0664,\
+				NULL, NULL, &audio_output_latency_debug_fops);
+	if (IS_ERR(out_dentry))
+		pr_err("debugfs_create_file failed\n");
+	in_buffer = kmalloc(IN_BUFFER_SIZE, GFP_KERNEL);
+	in_dentry = debugfs_create_file("audio_in_latency_measurement_node",\
+				0664,\
+				NULL, NULL, &audio_input_latency_debug_fops);
+	if (IS_ERR(in_dentry))
+		pr_err("debugfs_create_file failed\n");
+#endif
+	return 0;
+}
+
+/* run at device-initcall time, after core kernel services are up */
+device_initcall(q6asm_init);