author     Andrey Konovalov <andrey.konovalov@linaro.org>  2015-11-19 13:01:56 +0300
committer  Andrey Konovalov <andrey.konovalov@linaro.org>  2015-11-19 13:01:56 +0300
commit     4b131c9bf8af9bc7a25441251f60a8ca7fc69c07 (patch)
tree       ba0bc4861f2f606b088b09c53389f06e88a69894
parent     d4e209db57abe07163fe84b43b533a4b6246b0c3 (diff)
parent     08fad54a88981323247c7199ccbb8d42c596466a (diff)

Automatically merging tracking-integration-linux-qcomlt into merge-linux-linaro
(HEAD, ll-20151119.0, linux-linaro-4.3-2015.11, ll_20151119.0, linux-linaro)

Conflicting files:
-rw-r--r--Documentation/DMA-attributes.txt9
-rw-r--r--Documentation/devicetree/bindings/arm/cpus.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/msm/acc.txt19
-rw-r--r--Documentation/devicetree/bindings/arm/msm/ids.txt65
-rw-r--r--Documentation/devicetree/bindings/arm/msm/qcom,kpss-acc.txt7
-rw-r--r--Documentation/devicetree/bindings/arm/msm/qcom,kpss-gcc.txt28
-rw-r--r--Documentation/devicetree/bindings/arm/msm/qcom,pvs.txt38
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,a53cc22
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc.txt20
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,hfpll.txt40
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,krait-cc.txt22
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,rpmcc.txt35
-rw-r--r--Documentation/devicetree/bindings/dma/qcom_adm.txt16
-rw-r--r--Documentation/devicetree/bindings/drm/msm/dsi.txt13
-rw-r--r--Documentation/devicetree/bindings/gpio/qcom,smp2p.txt105
-rw-r--r--Documentation/devicetree/bindings/iommu/msm,iommu-v0.txt59
-rw-r--r--Documentation/devicetree/bindings/serial/qcom,msm-uartdm.txt6
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,smem.txt51
-rw-r--r--Documentation/devicetree/bindings/thermal/qcom-tsens.txt33
-rw-r--r--Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt6
-rw-r--r--arch/arm/boot/dts/Makefile1
-rw-r--r--arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts66
-rw-r--r--arch/arm/boot/dts/qcom-apq8064-eI_ERAGON600.dts427
-rw-r--r--arch/arm/boot/dts/qcom-apq8064-ifc6410.dts192
-rw-r--r--arch/arm/boot/dts/qcom-apq8064.dtsi947
-rw-r--r--arch/arm/boot/dts/qcom-apq8084.dtsi106
-rw-r--r--arch/arm/boot/dts/qcom-msm8960.dtsi54
-rw-r--r--arch/arm/boot/dts/qcom-msm8974.dtsi420
-rw-r--r--arch/arm/common/Kconfig3
-rw-r--r--arch/arm/common/Makefile1
-rw-r--r--arch/arm/common/krait-l2-accessors.c58
-rw-r--r--arch/arm/configs/multi_v7_defconfig59
-rw-r--r--arch/arm/configs/qcom_defconfig154
-rw-r--r--arch/arm/include/asm/krait-l2-accessors.h20
-rw-r--r--arch/arm/mach-qcom/Kconfig2
-rw-r--r--arch/arm/mm/dma-mapping.c9
-rw-r--r--arch/arm64/Kconfig26
-rw-r--r--arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi27
-rw-r--r--arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi64
-rw-r--r--arch/arm64/boot/dts/qcom/apq8016-sbc.dts3
-rw-r--r--arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi254
-rw-r--r--arch/arm64/boot/dts/qcom/msm-iommu-v2.dtsi238
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-coresight.dtsi254
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-iommu.dtsi21
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-mdss.dtsi122
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-mtp.dts3
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-mtp.dtsi39
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916-pins.dtsi319
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916.dtsi635
-rw-r--r--arch/arm64/boot/dts/qcom/pm8916.dtsi126
-rw-r--r--arch/arm64/configs/defconfig127
-rw-r--r--arch/arm64/configs/qcom_defconfig274
-rw-r--r--arch/arm64/include/asm/cacheflush.h6
-rw-r--r--arch/arm64/include/asm/cpu_ops.h5
-rw-r--r--arch/arm64/include/asm/device.h9
-rw-r--r--arch/arm64/include/asm/dma-iommu.h36
-rw-r--r--arch/arm64/include/asm/smp_plat.h2
-rw-r--r--arch/arm64/kernel/cpu_ops.c16
-rw-r--r--arch/arm64/kernel/psci.c1
-rw-r--r--arch/arm64/kernel/smp.c1
-rw-r--r--arch/arm64/kernel/smp_spin_table.c6
-rw-r--r--arch/arm64/mm/cache.S4
-rw-r--r--arch/arm64/mm/dma-mapping.c1039
-rw-r--r--drivers/amba/bus.c139
-rw-r--r--drivers/base/platform.c9
-rw-r--r--drivers/base/power/opp.c100
-rw-r--r--drivers/bluetooth/Kconfig11
-rw-r--r--drivers/bluetooth/Makefile1
-rw-r--r--drivers/bluetooth/btqcomsmd.c198
-rw-r--r--drivers/clk/clk-mux.c75
-rw-r--r--drivers/clk/clk.c100
-rw-r--r--drivers/clk/qcom/Kconfig68
-rw-r--r--drivers/clk/qcom/Makefile11
-rw-r--r--drivers/clk/qcom/clk-a53.c192
-rw-r--r--drivers/clk/qcom/clk-hfpll.c253
-rw-r--r--drivers/clk/qcom/clk-hfpll.h54
-rw-r--r--drivers/clk/qcom/clk-krait.c170
-rw-r--r--drivers/clk/qcom/clk-krait.h49
-rw-r--r--drivers/clk/qcom/clk-rcg.h4
-rw-r--r--drivers/clk/qcom/clk-rcg2.c159
-rw-r--r--drivers/clk/qcom/clk-regmap-mux-div.c295
-rw-r--r--drivers/clk/qcom/clk-regmap-mux-div.h63
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c261
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.h142
-rw-r--r--drivers/clk/qcom/common.c16
-rw-r--r--drivers/clk/qcom/common.h2
-rw-r--r--drivers/clk/qcom/gcc-apq8084.c38
-rw-r--r--drivers/clk/qcom/gcc-ipq806x.c83
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c573
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c198
-rw-r--r--drivers/clk/qcom/gcc-msm8974.c14
-rw-r--r--drivers/clk/qcom/gdsc.c228
-rw-r--r--drivers/clk/qcom/gdsc.h50
-rw-r--r--drivers/clk/qcom/hfpll.c109
-rw-r--r--drivers/clk/qcom/kpss-xcc.c95
-rw-r--r--drivers/clk/qcom/krait-cc.c352
-rw-r--r--drivers/clk/qcom/mmcc-apq8084.c74
-rw-r--r--drivers/clk/qcom/mmcc-msm8974.c65
-rw-r--r--drivers/clk/qcom/rpmcc-apq8064.c347
-rw-r--r--drivers/clk/qcom/rpmcc.c211
-rw-r--r--drivers/cpufreq/Kconfig.arm9
-rw-r--r--drivers/cpufreq/Makefile1
-rw-r--r--drivers/cpufreq/cpufreq-dt.c124
-rw-r--r--drivers/cpufreq/qcom-cpufreq.c204
-rw-r--r--drivers/cpuidle/Kconfig.arm7
-rw-r--r--drivers/dma/Kconfig19
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/qcom_adm.c856
-rw-r--r--drivers/firmware/Makefile3
-rw-r--r--drivers/firmware/qcom_scm-32.c418
-rw-r--r--drivers/firmware/qcom_scm-64.c863
-rw-r--r--drivers/firmware/qcom_scm.c175
-rw-r--r--drivers/firmware/qcom_scm.h72
-rw-r--r--drivers/gpio/Kconfig15
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/gpio-qcom-smp2p.c591
-rw-r--r--drivers/gpio/gpio-qcom-smsm.c328
-rw-r--r--drivers/gpu/drm/drm_edid.c12
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c161
-rw-r--r--drivers/gpu/drm/i2c/Makefile2
-rw-r--r--drivers/gpu/drm/i2c/adv7511.c592
-rw-r--r--drivers/gpu/drm/i2c/adv7511.h58
-rw-r--r--drivers/gpu/drm/i2c/adv7511_audio.c312
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c59
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c94
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h1
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c7
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c2
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c43
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c83
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hid/hid-ids.h4
-rw-r--r--drivers/hid/hid-multitouch.c5
-rw-r--r--drivers/i2c/busses/i2c-qup.c943
-rw-r--r--drivers/iommu/Kconfig3
-rw-r--r--drivers/iommu/Makefile3
-rw-r--r--drivers/iommu/msm_iommu.c536
-rw-r--r--drivers/iommu/msm_iommu.h73
-rw-r--r--drivers/iommu/msm_iommu_dev.c392
-rw-r--r--drivers/iommu/msm_iommu_hw-8xxx.h6
-rw-r--r--drivers/iommu/of_iommu.c29
-rw-r--r--drivers/iommu/qcom/Kconfig43
-rw-r--r--drivers/iommu/qcom/Makefile7
-rw-r--r--drivers/iommu/qcom/msm_iommu-v1.c1538
-rw-r--r--drivers/iommu/qcom/msm_iommu.c206
-rw-r--r--drivers/iommu/qcom/msm_iommu_dev-v1.c627
-rw-r--r--drivers/iommu/qcom/msm_iommu_hw-v1.h2320
-rw-r--r--drivers/iommu/qcom/msm_iommu_pagetable.c645
-rw-r--r--drivers/iommu/qcom/msm_iommu_pagetable.h33
-rw-r--r--drivers/iommu/qcom/msm_iommu_perfmon.h233
-rw-r--r--drivers/iommu/qcom/msm_iommu_priv.h71
-rw-r--r--drivers/iommu/qcom/msm_iommu_sec.c876
-rw-r--r--drivers/mfd/qcom_rpm.c36
-rw-r--r--drivers/mfd/ssbi.c7
-rw-r--r--drivers/mmc/Kconfig2
-rw-r--r--drivers/mmc/core/Kconfig8
-rw-r--r--drivers/mmc/core/Makefile4
-rw-r--r--drivers/mmc/core/pwrseq.c92
-rw-r--r--drivers/mmc/core/pwrseq.h25
-rw-r--r--drivers/mmc/core/pwrseq_emmc.c91
-rw-r--r--drivers/mmc/core/pwrseq_simple.c118
-rw-r--r--drivers/mmc/host/mmci.c10
-rw-r--r--drivers/mmc/host/sdhci-msm.c1
-rw-r--r--drivers/mmc/host/sdhci.c9
-rw-r--r--drivers/net/wireless/ath/wcn36xx/Makefile5
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c63
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.h1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h18
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c53
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c37
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h9
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx-msm.c314
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h44
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcnss_core.c295
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcnss_core.h99
-rw-r--r--drivers/of/address.c20
-rw-r--r--drivers/of/device.c77
-rw-r--r--drivers/of/of_pci.c3
-rw-r--r--drivers/of/platform.c16
-rw-r--r--drivers/pci/host/Kconfig5
-rw-r--r--drivers/pci/host/Makefile1
-rw-r--r--drivers/pci/host/pci-qcom.c967
-rw-r--r--drivers/power/avs/Kconfig14
-rw-r--r--drivers/power/avs/Makefile1
-rw-r--r--drivers/power/avs/qcom-cpr.c1988
-rw-r--r--drivers/regulator/Kconfig12
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/qcom_smd-regulator.c116
-rw-r--r--drivers/remoteproc/Kconfig16
-rw-r--r--drivers/remoteproc/Makefile2
-rw-r--r--drivers/remoteproc/qcom_q6v5_pil.c1075
-rw-r--r--drivers/remoteproc/qcom_tz_pil.c719
-rw-r--r--drivers/remoteproc/remoteproc_core.c44
-rw-r--r--drivers/soc/qcom/Makefile1
-rw-r--r--drivers/soc/qcom/cpu_ops.c343
-rw-r--r--drivers/soc/qcom/smd-rpm.c3
-rw-r--r--drivers/soc/qcom/smd.c396
-rw-r--r--drivers/soc/qcom/smem.c122
-rw-r--r--drivers/soc/qcom/spm.c172
-rw-r--r--drivers/thermal/Kconfig5
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/qcom/Kconfig10
-rw-r--r--drivers/thermal/qcom/Makefile2
-rw-r--r--drivers/thermal/qcom/tsens-8916.c107
-rw-r--r--drivers/thermal/qcom/tsens-8960.c291
-rw-r--r--drivers/thermal/qcom/tsens-8974.c239
-rw-r--r--drivers/thermal/qcom/tsens-common.c130
-rw-r--r--drivers/thermal/qcom/tsens.c206
-rw-r--r--drivers/thermal/qcom/tsens.h69
-rw-r--r--drivers/tty/serial/msm_serial.c618
-rw-r--r--drivers/tty/serial/msm_serial.h55
-rw-r--r--drivers/usb/chipidea/Kconfig1
-rw-r--r--drivers/usb/chipidea/ci_hdrc_msm.c3
-rw-r--r--drivers/usb/chipidea/core.c125
-rw-r--r--drivers/usb/chipidea/otg.c39
-rw-r--r--drivers/usb/host/ehci-msm.c4
-rw-r--r--drivers/usb/phy/phy-msm-usb.c52
-rw-r--r--include/drm/drm_crtc.h2
-rw-r--r--include/drm/drm_mipi_dsi.h9
-rw-r--r--include/dt-bindings/arm/qcom-ids.h33
-rw-r--r--include/dt-bindings/clock/qcom,gcc-apq8084.h6
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8916.h30
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8960.h2
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8974.h3
-rw-r--r--include/dt-bindings/clock/qcom,mmcc-apq8084.h8
-rw-r--r--include/dt-bindings/clock/qcom,mmcc-msm8974.h8
-rw-r--r--include/dt-bindings/clock/qcom,rpmcc-msm8916.h44
-rw-r--r--include/dt-bindings/msm/msm-bus-ids.h629
-rw-r--r--include/linux/clk-provider.h11
-rw-r--r--include/linux/cpufreq.h2
-rw-r--r--include/linux/dma-attrs.h2
-rw-r--r--include/linux/memblock.h1
-rw-r--r--include/linux/mfd/qcom_rpm.h1
-rw-r--r--include/linux/msm-bus-board.h198
-rw-r--r--include/linux/msm-bus.h139
-rw-r--r--include/linux/msm_iommu_domains.h239
-rw-r--r--include/linux/of_device.h14
-rw-r--r--include/linux/pm_opp.h10
-rw-r--r--include/linux/qcom_iommu.h388
-rw-r--r--include/linux/qcom_scm.h32
-rw-r--r--include/linux/regulator/qcom_smd-regulator.h30
-rw-r--r--include/linux/remoteproc.h4
-rw-r--r--include/linux/soc/qcom/smd.h67
-rw-r--r--include/linux/usb/chipidea.h23
-rw-r--r--include/net/bluetooth/hci.h1
-rw-r--r--include/soc/qcom/socinfo.h610
-rw-r--r--kernel/configs/distro.config230
-rw-r--r--mm/memblock.c6
-rw-r--r--mm/page_alloc.c9
-rw-r--r--sound/soc/codecs/Kconfig4
-rw-r--r--sound/soc/codecs/Makefile3
-rw-r--r--sound/soc/codecs/msm8x16-wcd-tables.c742
-rw-r--r--sound/soc/codecs/msm8x16-wcd.c1636
-rw-r--r--sound/soc/codecs/msm8x16-wcd.h234
-rw-r--r--sound/soc/codecs/msm8x16_wcd_registers.h518
255 files changed, 38925 insertions, 1704 deletions
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index 18dc52c4f2a0..9ac45c33c568 100644
--- a/Documentation/DMA-attributes.txt
+++ b/Documentation/DMA-attributes.txt
@@ -100,3 +100,12 @@ allocated by dma_alloc_attrs() function from individual pages if it can
be mapped as contiguous chunk into device dma address space. By
specifying this attribute the allocated buffer is forced to be contiguous
also in physical memory.
+
+DMA_ATTR_STRONGLY_ORDERED
+-------------------------
+
+DMA_ATTR_STRONGLY_ORDERED allocates memory with a very restrictive type
+of mapping (no unaligned accesses, no re-ordering, no write merging, no
+buffering, no pre-fetching). This has severe performance penalties and
+should not be used for general purpose DMA allocations. It should only
+be used if one of the restrictions on strongly ordered memory is required.
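A minimal C sketch of how a driver might request such a buffer, assuming the
DMA_ATTR_STRONGLY_ORDERED attribute introduced by this series and the
struct dma_attrs API of this kernel generation; the helper name is illustrative:

        #include <linux/dma-attrs.h>
        #include <linux/dma-mapping.h>
        #include <linux/gfp.h>

        /* Allocate a strongly ordered DMA buffer (illustrative helper). */
        static void *alloc_strongly_ordered(struct device *dev, size_t size,
                                            dma_addr_t *handle)
        {
                DEFINE_DMA_ATTRS(attrs);

                /* No reordering, buffering, write merging or prefetching. */
                dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);

                return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
        }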
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index 91e6e5c478d0..259c50f0a720 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -185,6 +185,8 @@ nodes to be present and contain the properties described below.
be one of:
"psci"
"spin-table"
+ "qcom,arm-cortex-acc"
+
# On ARM 32-bit systems this property is optional and
can be one of:
"allwinner,sun6i-a31"
diff --git a/Documentation/devicetree/bindings/arm/msm/acc.txt b/Documentation/devicetree/bindings/arm/msm/acc.txt
new file mode 100644
index 000000000000..ae2d7253b363
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/acc.txt
@@ -0,0 +1,19 @@
+Application Processor Sub-system (APSS) Application Clock Controller (ACC)
+
+The ACC provides clock, power domain, and reset control to a CPU. There is one ACC
+register region per CPU within the APSS remapped region as well as an alias register
+region that remaps accesses to the ACC associated with the CPU accessing the region.
+
+Required properties:
+- compatible: Must be "qcom,arm-cortex-acc"
+- reg: The first element specifies the base address and size of
+ the register region. An optional second element specifies
+ the base address and size of the alias register region.
+
+Example:
+
+ clock-controller@b088000 {
+ compatible = "qcom,arm-cortex-acc";
+ reg = <0x0b088000 0x1000>,
+ <0x0b008000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/msm/ids.txt b/Documentation/devicetree/bindings/arm/msm/ids.txt
new file mode 100644
index 000000000000..9ee8428f4670
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/ids.txt
@@ -0,0 +1,65 @@
+* MSM-ID
+
+The qcom,msm-id entry specifies the MSM chipset and hardware revision. It can
+optionally be an array of these to indicate multiple hardware that use the same
+device tree. It is expected that the bootloader will use this information at
+boot-up to decide which device tree to use when given multiple device trees,
+some of which may not be compatible with the actual hardware. It is the
+bootloader's responsibility to pass the correct device tree to the kernel.
+
+PROPERTIES
+
+- qcom,msm-id:
+ Usage: required
+ Value type: <prop-encoded-array> (<chipset_id, rev_id> [, <c2, r2> ..])
+ Definition:
+ The "chipset_id" consists of three fields as below:
+
+ bits 0-15 = The unique MSM chipset id.
+ bits 16-31 = Reserved. Should be 0
+
+ chipset_id is an exact match value
+
+ The "rev_id" is a chipset specific 32-bit id that represents
+ the version of the chipset.
+
+ The rev_id is a best match id. The bootloader will look for
+ the closest possible match.
+
+* BOARD-ID
+
+The qcom,board-id entry specifies the board type and revision information. It
+can optionally be an array of these to indicate multiple boards that use the
+same device tree. It is expected that the bootloader will use this information
+at boot-up to decide which device tree to use when given multiple device trees,
+some of which may not be compatible with the actual hardware. It is the
+bootloader's responsibility to pass the correct device tree to the kernel.
+
+PROPERTIES
+
+- qcom,board-id:
+ Usage: required
+ Value type: <prop-encoded-array> (<board_id, subtype_id> [, <b2, s2> ..])
+ Definition:
+ The "board_id" consists of three fields as below:
+
+ bits 31-24 = Unused.
+ bits 23-16 = Platform Version Major
+ bits 15-8 = Platform Version Minor
+ bits 7-0 = Platform Type
+
+ Platform Type field is an exact match value. The Platform
+ Major/Minor field is a best match. The bootloader will look
+ for the closest possible match.
+
+ The "subtype_id" is unique to a Platform Type/Chipset ID. For
+ a given Platform Type, there will typically only be a single
+ board and the subtype_id will be 0. However in some cases board
+ variants may need to be distinguished by different subtype_id
+ values.
+
+ subtype_id is an exact match value.
+
+EXAMPLE:
+ qcom,board-id = <15 2>;
+ qcom,msm-id = <0x1007e 0>;
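For illustration only, the bit layouts documented above can be unpacked with
plain shifts and masks; these helper names are hypothetical:

        #include <linux/types.h>

        /* Field layout as documented above (helpers are illustrative). */
        static inline u16 msm_chipset_id(u32 chipset_id)
        {
                return chipset_id & 0xffff;        /* bits 0-15 */
        }

        static inline u8 board_platform_type(u32 board_id)
        {
                return board_id & 0xff;            /* bits 7-0 */
        }

        static inline u8 board_version_minor(u32 board_id)
        {
                return (board_id >> 8) & 0xff;     /* bits 15-8 */
        }

        static inline u8 board_version_major(u32 board_id)
        {
                return (board_id >> 16) & 0xff;    /* bits 23-16 */
        }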
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,kpss-acc.txt b/Documentation/devicetree/bindings/arm/msm/qcom,kpss-acc.txt
index 1333db9acfee..382a574a5c55 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,kpss-acc.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,kpss-acc.txt
@@ -21,10 +21,17 @@ PROPERTIES
the register region. An optional second element specifies
the base address and size of the alias register region.
+- clock-output-names:
+ Usage: optional
+ Value type: <string>
+ Definition: Name of the output clock. Typically acpuX_aux where X is a
+ CPU number starting at 0.
+
Example:
clock-controller@2088000 {
compatible = "qcom,kpss-acc-v2";
reg = <0x02088000 0x1000>,
<0x02008000 0x1000>;
+ clock-output-names = "acpu0_aux";
};
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,kpss-gcc.txt b/Documentation/devicetree/bindings/arm/msm/qcom,kpss-gcc.txt
new file mode 100644
index 000000000000..d1e12f16a28c
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,kpss-gcc.txt
@@ -0,0 +1,28 @@
+Krait Processor Sub-system (KPSS) Global Clock Controller (GCC)
+
+PROPERTIES
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: should be one of:
+ "qcom,kpss-gcc"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: base address and size of the register region
+
+- clock-output-names:
+ Usage: required
+ Value type: <string>
+ Definition: Name of the output clock. Typically acpu_l2_aux indicating
+ an L2 cache auxiliary clock.
+
+Example:
+
+ l2cc: clock-controller@2011000 {
+ compatible = "qcom,kpss-gcc";
+ reg = <0x2011000 0x1000>;
+ clock-output-names = "acpu_l2_aux";
+ };
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,pvs.txt b/Documentation/devicetree/bindings/arm/msm/qcom,pvs.txt
new file mode 100644
index 000000000000..e7cb10426a3b
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,pvs.txt
@@ -0,0 +1,38 @@
+Qualcomm Process Voltage Scaling Tables
+
+The node name is required to be "qcom,pvs". There shall only be one
+such node present in the root of the tree.
+
+PROPERTIES
+
+- qcom,pvs-format-a or qcom,pvs-format-b:
+ Usage: required
+ Value type: <empty>
+ Definition: Indicates the format of qcom,speedX-pvsY-bin-vZ properties.
+ If qcom,pvs-format-a is used the table is two columns
+ (frequency and voltage in that order). If qcom,pvs-format-b is
+ used the table is three columns (frequency, voltage, and
+ current in that order).
+
+- qcom,speedX-pvsY-bin-vZ:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: The PVS table corresponding to the speed bin X, pvs bin Y,
+ and version Z.
+Example:
+
+ qcom,pvs {
+ qcom,pvs-format-a;
+ qcom,speed0-pvs0-bin-v0 =
+ < 384000000 950000 >,
+ < 486000000 975000 >,
+ < 594000000 1000000 >,
+ < 702000000 1025000 >,
+ < 810000000 1075000 >,
+ < 918000000 1100000 >,
+ < 1026000000 1125000 >,
+ < 1134000000 1175000 >,
+ < 1242000000 1200000 >,
+ < 1350000000 1225000 >,
+ < 1458000000 1237500 >,
+ < 1512000000 1250000 >;
+ };
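As a rough sketch of how such a table could be consumed (assuming a
qcom,pvs-format-a table, i.e. frequency/voltage pairs; the function name is
illustrative and error handling is trimmed):

        #include <linux/errno.h>
        #include <linux/of.h>
        #include <linux/printk.h>

        /* Walk a two-column (frequency, voltage) PVS table property. */
        static int read_pvs_format_a(struct device_node *np, const char *prop)
        {
                u32 freq, volt;
                int i, len;

                len = of_property_count_u32_elems(np, prop);
                if (len <= 0 || len % 2)
                        return -EINVAL;

                for (i = 0; i < len; i += 2) {
                        of_property_read_u32_index(np, prop, i, &freq);
                        of_property_read_u32_index(np, prop, i + 1, &volt);
                        pr_debug("PVS entry: %u Hz -> %u uV\n", freq, volt);
                }

                return 0;
        }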
diff --git a/Documentation/devicetree/bindings/clock/qcom,a53cc b/Documentation/devicetree/bindings/clock/qcom,a53cc
new file mode 100644
index 000000000000..209cae8afc1f
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,a53cc
@@ -0,0 +1,22 @@
+A53 Clock Controller
+
+Required properties :
+- compatible : shall contain:
+ "qcom,a53cc"
+- reg : shall contain base register location and length
+ of the A53 PLL
+- #clock-cells : shall contain 1
+- qcom,apcs : phandle of apcs syscon node
+
+Example:
+ apcs: syscon@b011000 {
+ compatible = "syscon";
+ reg = <0x0b011000 0x1000>;
+ };
+
+ a53cc: clock-controller@0b016000 {
+ compatible = "qcom,clock-a53-msm8916";
+ reg = <0x0b016000 0x40>;
+ #clock-cells = <1>;
+ qcom,apcs = <&apcs>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.txt b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
index 54c23f34f194..f1cf499c649f 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
@@ -18,6 +18,13 @@ Required properties :
- #clock-cells : shall contain 1
- #reset-cells : shall contain 1
+Optional properties:
+- Qualcomm TSENS (thermal sensor device) on some devices can
+be part of GCC and hence the TSENS properties can also be
+part of the GCC/clock-controller node.
+For more details on the TSENS properties, please refer to
+Documentation/devicetree/bindings/thermal/qcom-tsens.txt
+
Example:
clock-controller@900000 {
compatible = "qcom,gcc-msm8960";
@@ -25,3 +32,16 @@ Example:
#clock-cells = <1>;
#reset-cells = <1>;
};
+
+Example of GCC with TSENS properties:
+ clock-controller@900000 {
+ compatible = "qcom,gcc-apq8064";
+ reg = <0x00900000 0x4000>;
+ nvmem-cells = <&tsens_calib>, <&tsens_backup>;
+ nvmem-cell-names = "calib", "calib_backup";
+ qcom,tsens-slopes = <1176 1176 1154 1176 1111
+ 1132 1132 1199 1132 1199 1132>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #thermal-sensor-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,hfpll.txt b/Documentation/devicetree/bindings/clock/qcom,hfpll.txt
new file mode 100644
index 000000000000..fee92bb30344
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,hfpll.txt
@@ -0,0 +1,40 @@
+High-Frequency PLL (HFPLL)
+
+PROPERTIES
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be "qcom,hfpll"
+
+- reg:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: address and size of HFPLL registers. An optional second
+ element specifies the address and size of the alias
+ register region.
+
+- clock-output-names:
+ Usage: required
+ Value type: <string>
+ Definition: Name of the PLL. Typically hfpllX where X is a CPU number
+ starting at 0. Otherwise hfpll_Y where Y is more specific
+ such as "l2".
+
+Example:
+
+1) An HFPLL for the L2 cache.
+
+ clock-controller@f9016000 {
+ compatible = "qcom,hfpll";
+ reg = <0xf9016000 0x30>;
+ clock-output-names = "hfpll_l2";
+ };
+
+2) An HFPLL for CPU0. This HFPLL has the alias register region.
+
+ clock-controller@f908a000 {
+ compatible = "qcom,hfpll";
+ reg = <0xf908a000 0x30>, <0xf900a000 0x30>;
+ clock-output-names = "hfpll0";
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,krait-cc.txt b/Documentation/devicetree/bindings/clock/qcom,krait-cc.txt
new file mode 100644
index 000000000000..874138f88ec6
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,krait-cc.txt
@@ -0,0 +1,22 @@
+Krait Clock Controller
+
+PROPERTIES
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be one of:
+ "qcom,krait-cc-v1"
+ "qcom,krait-cc-v2"
+
+- #clock-cells:
+ Usage: required
+ Value type: <u32>
+ Definition: must be 1
+
+Example:
+
+ kraitcc: clock-controller {
+ compatible = "qcom,krait-cc-v1";
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt
new file mode 100644
index 000000000000..bd0fd0cd50dc
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt
@@ -0,0 +1,35 @@
+Qualcomm RPM Clock Controller Binding
+------------------------------------------------
+The RPM is a dedicated hardware engine for managing the shared
+SoC resources in order to keep the lowest power profile. It
+communicates with other hardware subsystems via shared memory
+and accepts clock requests, aggregates the requests and turns
+the clocks on/off or scales them on demand.
+
+Required properties :
+- compatible : shall contain only one of the following:
+
+ "qcom,rpmcc-msm8916"
+
+- #clock-cells : shall contain 1
+
+Example:
+ smd {
+ compatible = "qcom,smd";
+
+ rpm {
+ interrupts = <0 168 1>;
+ qcom,ipc = <&apcs 8 0>;
+ qcom,smd-edge = <15>;
+
+ rpm_requests {
+ compatible = "qcom,rpm-msm8916";
+ qcom,smd-channels = "rpm_requests";
+
+ rpmcc: qcom,rpmcc {
+ compatible = "qcom,rpmcc-msm8916";
+ #clock-cells = <1>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/dma/qcom_adm.txt b/Documentation/devicetree/bindings/dma/qcom_adm.txt
index 9bcab9115982..38d45f8a0dc8 100644
--- a/Documentation/devicetree/bindings/dma/qcom_adm.txt
+++ b/Documentation/devicetree/bindings/dma/qcom_adm.txt
@@ -4,8 +4,7 @@ Required properties:
- compatible: must contain "qcom,adm" for IPQ/APQ8064 and MSM8960
- reg: Address range for DMA registers
- interrupts: Should contain one interrupt shared by all channels
-- #dma-cells: must be <2>. First cell denotes the channel number. Second cell
- denotes CRCI (client rate control interface) flow control assignment.
+- #dma-cells: must be <1>. First cell denotes the channel number.
- clocks: Should contain the core clock and interface clock.
- clock-names: Must contain "core" for the core clock and "iface" for the
interface clock.
@@ -22,7 +21,7 @@ Example:
compatible = "qcom,adm";
reg = <0x18300000 0x100000>;
interrupts = <0 170 0>;
- #dma-cells = <2>;
+ #dma-cells = <1>;
clocks = <&gcc ADM0_CLK>, <&gcc ADM0_PBUS_CLK>;
clock-names = "core", "iface";
@@ -35,15 +34,12 @@ Example:
qcom,ee = <0>;
};
-DMA clients must use the format descripted in the dma.txt file, using a three
+DMA clients must use the format described in the dma.txt file, using a two
cell specifier for each channel.
-Each dmas request consists of 3 cells:
+Each dmas request consists of two cells:
1. phandle pointing to the DMA controller
2. channel number
- 3. CRCI assignment, if applicable. If no CRCI flow control is required, use 0.
- The CRCI is used for flow control. It identifies the peripheral device that
- is the source/destination for the transferred data.
Example:
@@ -56,7 +52,7 @@ Example:
cs-gpios = <&qcom_pinmux 20 0>;
- dmas = <&adm_dma 6 9>,
- <&adm_dma 5 10>;
+ dmas = <&adm_dma 6>,
+ <&adm_dma 5>;
dma-names = "rx", "tx";
};
diff --git a/Documentation/devicetree/bindings/drm/msm/dsi.txt b/Documentation/devicetree/bindings/drm/msm/dsi.txt
index d56923cd5590..febcc516200d 100644
--- a/Documentation/devicetree/bindings/drm/msm/dsi.txt
+++ b/Documentation/devicetree/bindings/drm/msm/dsi.txt
@@ -44,6 +44,17 @@ Optional properties:
- port: DSI controller output port. This contains one endpoint subnode, with its
remote-endpoint set to the phandle of the connected panel's endpoint.
See Documentation/devicetree/bindings/graph.txt for device graph info.
+- qcom,dsi-logical-lane-swap: Character string to swap logical lane to physical
+ lane mapping. Supported lane mappings:
+ "0123": Logic 0->Phys 0; Logic 1->Phys 1; Logic 2->Phys 2; Logic 3->Phys 3;
+ "3012": Logic 3->Phys 0; Logic 0->Phys 1; Logic 1->Phys 2; Logic 2->Phys 3;
+ "2301": Logic 2->Phys 0; Logic 3->Phys 1; Logic 0->Phys 2; Logic 1->Phys 3;
+ "1230": Logic 1->Phys 0; Logic 2->Phys 1; Logic 3->Phys 2; Logic 0->Phys 3;
+ "0321": Logic 0->Phys 0; Logic 3->Phys 1; Logic 2->Phys 2; Logic 1->Phys 3;
+ "1032": Logic 1->Phys 0; Logic 0->Phys 1; Logic 3->Phys 2; Logic 2->Phys 3;
+ "2103": Logic 2->Phys 0; Logic 1->Phys 1; Logic 0->Phys 2; Logic 3->Phys 3;
+ "3210": Logic 3->Phys 0; Logic 2->Phys 1; Logic 1->Phys 2; Logic 0->Phys 3;
+ Default value is "0123", which means no lane swap.
DSI PHY:
Required properties:
@@ -129,6 +140,8 @@ Example:
remote-endpoint = <&panel_in>;
};
};
+
+ qcom,dsi-logical-lane-swap = "0123";
};
mdss_dsi_phy0: qcom,mdss_dsi_phy@fd922a00 {
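A small C sketch of how the lane-swap string documented above could be decoded
into a per-physical-lane table (the function name is hypothetical; when the
property is absent the code falls back to the documented "0123" default):

        #include <linux/errno.h>
        #include <linux/of.h>

        /* lane_map[phys] = logical lane driven on physical lane 'phys'. */
        static int dsi_parse_lane_swap(struct device_node *np, u8 lane_map[4])
        {
                const char *map = "0123";        /* documented default */
                int i;

                of_property_read_string(np, "qcom,dsi-logical-lane-swap", &map);

                for (i = 0; i < 4; i++) {
                        if (map[i] < '0' || map[i] > '3')
                                return -EINVAL;
                        lane_map[i] = map[i] - '0';
                }

                return 0;
        }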
diff --git a/Documentation/devicetree/bindings/gpio/qcom,smp2p.txt b/Documentation/devicetree/bindings/gpio/qcom,smp2p.txt
new file mode 100644
index 000000000000..e758c8b5e3fc
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/qcom,smp2p.txt
@@ -0,0 +1,105 @@
+Qualcomm Shared Memory Point 2 Point binding
+
+The Shared Memory Point to Point (SMP2P) protocol facilitates communication of
+a single 32-bit value between two processors. Each value has a single writer
+(the local side) and a single reader (the remote side). Values are uniquely
+identified in the system by the directed edge (local processor ID to remote
+processor ID) and a string identifier. This document defines the binding for a
+driver that implements and exposes this protocol as a set of GPIO and interrupt
+controllers.
+
+- compatible:
+ Usage: required
+ Value type: <string>
+ Definition: must be one of:
+ "qcom,smp2p"
+
+- interrupts:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: one entry specifying the smp2p notification interrupt
+
+- qcom,ipc:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: three entries specifying the outgoing ipc bit used for
+ signaling the remote end of the smp2p edge:
+ - phandle to a syscon node representing the apcs registers
+ - u32 representing offset to the register within the syscon
+ - u32 representing the ipc bit within the register
+
+- qcom,smem:
+ Usage: required
+ Value type: <u32 array>
+ Definition: two identifiers of the inbound and outbound smem items used
+ for this edge
+
+- qcom,local-pid:
+ Usage: required
+ Value type: <u32>
+ Definition: specifies the identifier of the local endpoint of this edge
+
+- qcom,remote-pid:
+ Usage: required
+ Value type: <u32>
+ Definition: specifies the identifier of the remote endpoint of this edge
+
+= SUBNODES
+Each SMP2P pair contains a set of inbound and outbound entries; these are
+described in subnodes of the smp2p device node. The node names are not
+important.
+
+- qcom,entry-name:
+ Usage: required
+ Value type: <string>
+ Definition: specifies the name of this entry, for inbound entries this
+ will be used to match against the remotely allocated entry
+ and for outbound entries this name is used for allocating
+ entries
+
+- qcom,inbound:
+ Usage: optional
+ Value type: <empty>
+ Definition: marks the entry as inbound; the node should be specified
+ as a two cell interrupt-controller as defined in
+ "../interrupt-controller/interrupts.txt"
+
+- qcom,outbound:
+ Usage: optional
+ Value type: <empty>
+ Definition: marks the entry as outbound; the node should be specified
+ as a two cell gpio-controller as defined in "gpio.txt"
+
+
+= EXAMPLE
+The following example shows the SMP2P setup with the wireless processor,
+defined from the 8974 apps processor's point-of-view. It encompasses one
+inbound and one outbound entry:
+
+wcnss-smp2p {
+ compatible = "qcom,smp2p";
+ qcom,smem = <431>, <451>;
+
+ interrupts = <0 143 1>;
+
+ qcom,ipc = <&apcs 8 18>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <4>;
+
+ wcnss_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ qcom,outbound;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ wcnss_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ qcom,inbound;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/iommu/msm,iommu-v0.txt b/Documentation/devicetree/bindings/iommu/msm,iommu-v0.txt
new file mode 100644
index 000000000000..21bfbfcc5ae3
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/msm,iommu-v0.txt
@@ -0,0 +1,59 @@
+* QCOM IOMMU
+
+The QCOM IOMMU is an implementation compatible with the ARM VMSA short
+descriptor page tables. It provides address translation for bus masters outside
+of the CPU, each connected to the IOMMU through a port called micro-TLB.
+
+Required Properties:
+
+ - compatible: Must contain "qcom,iommu-v0".
+ - reg: Base address and size of the IOMMU registers.
+ - interrupts: Specifiers for the MMU fault interrupts. For instances that
+ support secure mode two interrupts must be specified, for non-secure and
+ secure mode, in that order. For instances that don't support secure mode a
+ single interrupt must be specified.
+ - #iommu-cells: The total number of stream ids that a master uses during
+ its transactions. The ids themselves are listed in the
+ master's iommus property described below.
+ - ncb: The total number of context banks in the IOMMU.
+ - clocks : List of clocks to be used during SMMU register access. See
+ Documentation/devicetree/bindings/clock/clock-bindings.txt
+ for information about the format. For each clock specified
+ here, there must be a corresponding entry in clock-names
+ (see below).
+
+ - clock-names : List of clock names corresponding to the clocks specified in
+ the "clocks" property (above). See
+ Documentation/devicetree/bindings/clock/clock-bindings.txt
+ for more info.
+
+Each bus master connected to an IOMMU must reference the IOMMU in its device
+node with the following property:
+
+ - iommus: A reference to the IOMMU in multiple cells. The first cell is a
+ phandle to the IOMMU and the remaining cells are the list of the
+ stream ids used by the device.
+
+Example: mdp iommu and its bus master
+
+ mdp_port0: qcom,iommu@7500000 {
+ compatible = "msm,iommu-v0";
+ #iommu-cells = <2>;
+ clock-names =
+ "smmu_pclk",
+ "iommu_clk";
+ clocks =
+ <&mmcc SMMU_AHB_CLK>,
+ <&mmcc MDP_AXI_CLK>;
+ reg = <0x07500000 0x100000>;
+ interrupts =
+ <GIC_SPI 63 0>,
+ <GIC_SPI 64 0>;
+ ncb = <2>;
+ };
+
+ mdp: qcom,mdp@5100000 {
+ compatible = "qcom,mdp";
+ ...
+ iommus = <&mdp_port0 0 2>;
+ };
diff --git a/Documentation/devicetree/bindings/serial/qcom,msm-uartdm.txt b/Documentation/devicetree/bindings/serial/qcom,msm-uartdm.txt
index a2114c217376..182777fac9a2 100644
--- a/Documentation/devicetree/bindings/serial/qcom,msm-uartdm.txt
+++ b/Documentation/devicetree/bindings/serial/qcom,msm-uartdm.txt
@@ -26,6 +26,12 @@ Required properties:
Optional properties:
- dmas: Should contain dma specifiers for transmit and receive channels
- dma-names: Should contain "tx" for transmit and "rx" for receive channels
+- qcom,tx-crci: Identifier <u32> for the Client Rate Control Interface to be
+ used with the TX DMA channel. Required when using DMA for transmission
+ with UARTDM v1.3 and below.
+- qcom,rx-crci: Identifier <u32> for the Client Rate Control Interface to be
+ used with the RX DMA channel. Required when using DMA for reception
+ with UARTDM v1.3 and below.
Note: Aliases may be defined to ensure the correct ordering of the UARTs.
The alias serialN will result in the UART being assigned port N. If any
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,smem.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,smem.txt
new file mode 100644
index 000000000000..19cad6cc427c
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,smem.txt
@@ -0,0 +1,51 @@
+Qualcomm Shared Memory Manager binding
+
+This binding describes the Qualcomm Shared Memory Manager, used to share data
+between various subsystems and OSes in Qualcomm platforms.
+
+- compatible:
+ Usage: required
+ Value type: <stringlist>
+ Definition: must be:
+ "qcom,smem"
+
+- memory-region:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: handle to memory reservation for main SMEM memory region.
+
+- reg:
+ Usage: optional
+ Value type: <prop-encoded-array>
+ Definition: base address and size pair for any additional memory areas
+ of the shared memory.
+
+- hwlocks:
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: reference to a hwspinlock used to protect allocations from
+ the shared memory
+
+= EXAMPLE
+The following example shows the SMEM setup for MSM8974, with a main SMEM region
+at 0xfa00000 and an auxiliary region at 0xfc428000:
+
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ smem_region: smem@fa00000 {
+ reg = <0xfa00000 0x200000>;
+ no-map;
+ };
+ };
+
+ smem@fa00000 {
+ compatible = "qcom,smem";
+
+ memory-region = <&smem_region>;
+ reg = <0xfc428000 0x4000>;
+
+ hwlocks = <&tcsr_mutex 3>;
+ };
diff --git a/Documentation/devicetree/bindings/thermal/qcom-tsens.txt b/Documentation/devicetree/bindings/thermal/qcom-tsens.txt
new file mode 100644
index 000000000000..8b1f26fa2552
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/qcom-tsens.txt
@@ -0,0 +1,33 @@
+* QCOM SoC Temperature Sensor (TSENS)
+
+Required properties:
+- compatible :
+ - "qcom,msm8916-tsens" : For 8916 Family of SoCs
+ - "qcom,msm8974-tsens" : For 8974 Family of SoCs
+
+- reg: Address range of the thermal registers
+- qcom,tsens-slopes : Must contain slope value for each of the sensors controlled
+ by this device
+- #thermal-sensor-cells : Should be 1. See ./thermal.txt for a description.
+- Refer to Documentation/devicetree/bindings/nvmem/nvmem.txt for details on how
+to specify nvmem cells
+
+Optional properties:
+- qcom,sensor-id: List of sensor instances used in a given SoC. A TSENS IP can
+ have a fixed number of sensors (like 11) but a given SoC can
+ use only 5 of these and they might not always the first 5. They
+ could be sensors 0, 1, 4, 8 and 9. This property is used to
+ describe the subset of the sensors used. If this property is
+ missing they are assumed to be the first 'n' sensors numbered
+ sequentially in which case the number of sensors defaults to
+ the number of slope values.
+
+Example:
+tsens: thermal-sensor@900000 {
+ compatible = "qcom,msm8916-tsens";
+ nvmem-cells = <&tsens_caldata>, <&tsens_calsel>;
+ nvmem-cell-names = "caldata", "calsel";
+ qcom,tsens-slopes = <3200 3200 3200 3200 3200>;
+ qcom,sensor-id = <0 1 2 4 5>;
+ #thermal-sensor-cells = <1>;
+ };
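The sensor-subset fallback described above could be implemented roughly as
follows (illustrative sketch; the helper name is made up, and the slope
property is only used to derive the default sensor count):

        #include <linux/errno.h>
        #include <linux/of.h>

        /* Return the sensor count and fill 'ids' with their instance numbers. */
        static int tsens_get_sensor_ids(struct device_node *np, u32 *ids, int max)
        {
                int i, n;

                n = of_property_count_u32_elems(np, "qcom,sensor-id");
                if (n > 0 && n <= max) {
                        if (of_property_read_u32_array(np, "qcom,sensor-id", ids, n))
                                return -EINVAL;
                        return n;
                }

                /* Default: the first 'n' sensors, one per slope value. */
                n = of_property_count_u32_elems(np, "qcom,tsens-slopes");
                if (n <= 0 || n > max)
                        return -EINVAL;

                for (i = 0; i < n; i++)
                        ids[i] = i;

                return n;
        }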
diff --git a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
index a057b75ba4b5..db48bad84228 100644
--- a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
+++ b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
@@ -46,6 +46,11 @@ Optional properties:
(4 bytes), This register represents the maximum length of a the burst
in 32-bit words while moving data from the USB bus to system memory,
changing this value takes effect only the SBUSCFG.AHBBRST is 0.
+- extcon: phandles to external connector devices. The first phandle should
+  point to the external connector that provides "USB" cable events, and the
+  second should point to the external connector device that provides
+  "USB-HOST" cable events. If one of the external connector devices is not
+  required, an empty <0> phandle should be specified.
Example:
@@ -62,4 +67,5 @@ Example:
ahb-burst-config = <0x0>;
tx-burst-size-dword = <0x10>; /* 64 bytes */
rx-burst-size-dword = <0x10>;
+ extcon = <0>, <&usb_id>;
};
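On the driver side, the two phandles might be picked up roughly as follows
(sketch only; the function and variable names are illustrative, and the empty
<0> case simply leaves the corresponding connector as NULL here):

        #include <linux/err.h>
        #include <linux/extcon.h>

        /* Fetch the optional "USB" (index 0) and "USB-HOST" (index 1) connectors. */
        static void ci_get_extcon(struct device *dev, struct extcon_dev **vbus,
                                  struct extcon_dev **id)
        {
                *vbus = extcon_get_edev_by_phandle(dev, 0);
                if (IS_ERR(*vbus))
                        *vbus = NULL;

                *id = extcon_get_edev_by_phandle(dev, 1);
                if (IS_ERR(*id))
                        *id = NULL;
        }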
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index bb8fa023d574..7bf0eee4deff 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -486,6 +486,7 @@ dtb-$(CONFIG_ARCH_ORION5X) += \
dtb-$(CONFIG_ARCH_PRIMA2) += \
prima2-evb.dtb
dtb-$(CONFIG_ARCH_QCOM) += \
+ qcom-apq8064-eI_ERAGON600.dtb \
qcom-apq8064-cm-qs600.dtb \
qcom-apq8064-ifc6410.dtb \
qcom-apq8074-dragonboard.dtb \
diff --git a/arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts b/arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts
index 47c0282bdfca..1ba01ee17934 100644
--- a/arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts
+++ b/arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts
@@ -1,4 +1,6 @@
#include "qcom-apq8064-v2.0.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
/ {
model = "CompuLab CM-QS600";
@@ -12,12 +14,27 @@
stdout-path = "serial0:115200n8";
};
+ pwrseq {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ compatible = "simple-bus";
+
+ sdcc4_pwrseq: sdcc4_pwrseq {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_default_gpios>;
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&pm8921_gpio 43 GPIO_ACTIVE_LOW>;
+ };
+ };
+
soc {
pinctrl@800000 {
- i2c1_pins: i2c1 {
+ card_detect: card_detect {
mux {
- pins = "gpio20", "gpio21";
- function = "gsbi1";
+ pins = "gpio26";
+ function = "gpio";
+ bias-disable;
};
};
};
@@ -86,9 +103,24 @@
regulator-max-microvolt = <1900000>;
bias-pull-down;
};
+
+ pm8921_lvs6: lvs6 {
+ bias-pull-down;
+ };
+
};
};
+ hdmi: qcom,hdmi-tx@4a00000 {
+ status = "okay";
+ core-vdda-supply = <&pm8921_hdmi_switch>;
+ hdmi-mux-supply = <&v3p3_fixed>;
+ };
+
+ mdp: qcom,mdp@5100000 {
+ status = "okay";
+ };
+
gsbi@12440000 {
status = "okay";
qcom,mode = <GSBI_PROT_I2C>;
@@ -163,6 +195,30 @@
regulator-always-on;
};
+ qcom,ssbi@500000 {
+ pmic@0 {
+ gpio@150 {
+ wlan_default_gpios: wlan-gpios {
+ pios {
+ pins = "gpio43";
+ function = "normal";
+ bias-disable;
+ power-source = <PM8921_GPIO_S4>;
+ };
+ };
+ };
+ };
+ };
+
+ pci@1b500000 {
+ status = "ok";
+ pcie-clk-supply = <&v3p3_fixed>;
+ avdd-supply = <&pm8921_s3>;
+ vdd-supply = <&pm8921_lvs6>;
+ qcom,external-phy-refclk;
+ reset-gpio = <&tlmm_pinmux 27 GPIO_ACTIVE_LOW>;
+ };
+
amba {
/* eMMC */
sdcc1: sdcc@12400000 {
@@ -175,12 +231,16 @@
sdcc3: sdcc@12180000 {
status = "okay";
vmmc-supply = <&v3p3_fixed>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&card_detect>;
+ cd-gpios = <&tlmm_pinmux 26 GPIO_ACTIVE_LOW>;
};
/* WLAN */
sdcc4: sdcc@121c0000 {
status = "okay";
vmmc-supply = <&v3p3_fixed>;
vqmmc-supply = <&v3p3_fixed>;
+ mmc-pwrseq = <&sdcc4_pwrseq>;
};
};
};
diff --git a/arch/arm/boot/dts/qcom-apq8064-eI_ERAGON600.dts b/arch/arm/boot/dts/qcom-apq8064-eI_ERAGON600.dts
new file mode 100644
index 000000000000..40cf6c8eee07
--- /dev/null
+++ b/arch/arm/boot/dts/qcom-apq8064-eI_ERAGON600.dts
@@ -0,0 +1,427 @@
+#include "qcom-apq8064-v2.0.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
+
+/ {
+ model = "Qualcomm APQ8064/ERAGON600";
+ compatible = "qcom,apq8064-eragon600", "qcom,apq8064";
+
+ aliases {
+ serial0 = &gsbi7_serial;
+ serial1 = &gsbi6_serial;
+ };
+
+ soc {
+ pinctrl@800000 {
+ card_detect: card_detect {
+ mux {
+ pins = "gpio26";
+ function = "gpio";
+ bias-disable;
+ };
+ };
+ };
+
+ rpm@108000 {
+ regulators {
+ vin_lvs1_3_6-supply = <&pm8921_s4>;
+ vin_lvs2-supply = <&pm8921_s1>;
+ vin_lvs4_5_7-supply = <&pm8921_s4>;
+
+ vdd_l1_l2_l12_l18-supply = <&pm8921_s4>;
+ vdd_l24-supply = <&pm8921_s1>;
+ vdd_l25-supply = <&pm8921_s1>;
+ vdd_l26-supply = <&pm8921_s7>;
+ vdd_l27-supply = <&pm8921_s7>;
+ vdd_l28-supply = <&pm8921_s7>;
+
+
+ /* Buck SMPS */
+ pm8921_s1: s1 {
+ regulator-always-on;
+ regulator-min-microvolt = <1225000>;
+ regulator-max-microvolt = <1225000>;
+ qcom,switch-mode-frequency = <3200000>;
+ bias-pull-down;
+ };
+
+ pm8921_s3: s3 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1400000>;
+ qcom,switch-mode-frequency = <4800000>;
+ };
+
+ pm8921_s4: s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ qcom,switch-mode-frequency = <3200000>;
+ qcom,force-mode = <3>;
+ };
+
+ pm8921_s7: s7 {
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <1300000>;
+ qcom,switch-mode-frequency = <3200000>;
+ };
+
+ pm8921_l2: l2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ bias-pull-down;
+ };
+
+ pm8921_l3: l3 {
+ regulator-min-microvolt = <3050000>;
+ regulator-max-microvolt = <3300000>;
+ bias-pull-down;
+ };
+
+ pm8921_l4: l4 {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1800000>;
+ bias-pull-down;
+ };
+
+ pm8921_l5: l5 {
+ regulator-min-microvolt = <2750000>;
+ regulator-max-microvolt = <3000000>;
+ bias-pull-down;
+ };
+
+ pm8921_l6: l6 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
+ bias-pull-down;
+ };
+
+ pm8921_l23: l23 {
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <1900000>;
+ bias-pull-down;
+ };
+
+ pm8921_lvs1: lvs1 {
+ bias-pull-down;
+ };
+
+ pm8921_lvs6: lvs6 {
+ bias-pull-down;
+ };
+
+ pm8921_lvs7: lvs7 {
+ bias-pull-down;
+ };
+ };
+ };
+
+ ext_3p3v: regulator-fixed@1 {
+ compatible = "regulator-fixed";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "ext_3p3v";
+ regulator-type = "voltage";
+ startup-delay-us = <0>;
+ gpio = <&tlmm_pinmux 77 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-boot-on;
+ };
+
+ hdmi: qcom,hdmi-tx@4a00000 {
+ status = "okay";
+ core-vdda-supply = <&pm8921_hdmi_switch>;
+ hdmi-mux-supply = <&ext_3p3v>;
+ };
+
+ mdp: qcom,mdp@5100000 {
+ status = "okay";
+ lvds-vccs-3p3v-supply = <&ext_3p3v>;
+ lvds-pll-vdda-supply = <&pm8921_l2>;
+ lvds-vdda-supply = <&pm8921_lvs7>;
+
+ port {
+ lvds_out: endpoint {
+ remote-endpoint = <&data_image_in>;
+ };
+ };
+ };
+
+ panel_3p3v: panel_3p3v {
+ pinctrl-0 = <&pwm_en_gpios>;
+ pinctrl-names = "default";
+ compatible = "regulator-fixed";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "panel_en_3p3v";
+ regulator-type = "voltage";
+ startup-delay-us = <0>;
+ gpio = <&pm8921_gpio 36 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-boot-on;
+ };
+
+ backlight: backlight{
+ pinctrl-0 = <&pwm_bl_gpios>;
+ pinctrl-names = "default";
+ compatible = "gpio-backlight";
+ gpios = <&pm8921_gpio 26 GPIO_ACTIVE_HIGH>;
+ default-on;
+ };
+
+ levelshifter: levelshifter{
+ pinctrl-0 = <&pwm_bl_gpios>;
+ pinctrl-names = "default";
+ compatible = "gpio-backlight";
+ gpios = <&tlmm_pinmux 85 GPIO_ACTIVE_HIGH>;
+ startup-delay-us = <2000>;
+ default-on;
+ };
+
+ panel: data_image,scf0700C48ggu21 {
+ status = "okay";
+ compatible = "data_image,scf0700C48ggu21";
+
+ ddc-i2c-bus = <&i2c3>;
+ backlight = <&backlight>;
+ power-supply = <&panel_3p3v>;
+ port {
+ data_image_in: endpoint {
+ remote-endpoint = <&lvds_out>;
+ };
+ };
+ };
+
+ gsbi3: gsbi@16200000 {
+ status = "okay";
+ qcom,mode = <GSBI_PROT_I2C>;
+ i2c3: i2c@16280000 {
+ status = "okay";
+ pinctrl-0 = <&i2c3_pins>;
+ pinctrl-names = "default";
+ };
+ };
+
+ gsbi@12440000 {
+ status = "okay";
+ qcom,mode = <GSBI_PROT_I2C>;
+
+ i2c@12460000 {
+ status = "okay";
+ clock-frequency = <200000>;
+ pinctrl-0 = <&i2c1_pins>;
+ pinctrl-names = "default";
+
+ eeprom: eeprom@52 {
+ compatible = "atmel,24c128";
+ reg = <0x52>;
+ pagesize = <32>;
+ };
+ };
+ };
+
+ gsbi@16500000 {
+ status = "ok";
+ qcom,mode = <GSBI_PROT_UART_W_FC>;
+
+ serial@16540000 {
+ status = "ok";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart_pins>;
+ };
+ };
+
+ gsbi@16600000 {
+ status = "ok";
+ qcom,mode = <GSBI_PROT_I2C_UART>;
+ serial@16640000 {
+ status = "ok";
+ };
+ };
+
+ adm: dma@18320000 {
+ status = "okay";
+ };
+
+ sata_phy0: phy@1b400000 {
+ status = "okay";
+ };
+
+ sata0: sata@29000000 {
+ status = "okay";
+ target-supply = <&pm8921_s4>;
+ };
+
+ /* OTG */
+ usb1_phy: phy@12500000 {
+ status = "okay";
+ vddcx-supply = <&pm8921_s3>;
+ v3p3-supply = <&pm8921_l3>;
+ v1p8-supply = <&pm8921_l4>;
+ };
+
+ usb3_phy: phy@12520000 {
+ status = "okay";
+ vddcx-supply = <&pm8921_s3>;
+ v3p3-supply = <&pm8921_l3>;
+ v1p8-supply = <&pm8921_l23>;
+ };
+
+ usb4_phy: phy@12530000 {
+ status = "okay";
+ vddcx-supply = <&pm8921_s3>;
+ v3p3-supply = <&pm8921_l3>;
+ v1p8-supply = <&pm8921_l23>;
+ };
+
+ gadget1: gadget@12500000 {
+ status = "okay";
+ };
+
+ /* OTG */
+ usb1: usb@12500000 {
+ status = "okay";
+ };
+
+ usb3: usb@12520000 {
+ status = "okay";
+ };
+
+ usb4: usb@12530000 {
+ status = "okay";
+ };
+
+ /* on board fixed 3.3v supply */
+ v3p3_pcieclk: v3p3-pcieclk {
+ compatible = "regulator-fixed";
+ regulator-name = "PCIE V3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ pci@1b500000 {
+ status = "ok";
+ pcie-clk-supply = <&v3p3_pcieclk>;
+ avdd-supply = <&pm8921_s3>;
+ vdd-supply = <&pm8921_lvs6>;
+ ext-3p3v-supply = <&ext_3p3v>;
+ qcom,external-phy-refclk;
+ reset-gpio = <&tlmm_pinmux 27 GPIO_ACTIVE_LOW>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&notify_led>;
+
+ led@1 {
+ label = "apq8064:green:user1";
+ gpios = <&pm8921_gpio 18 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ default-state = "on";
+ };
+ };
+
+ qcom,ssbi@500000 {
+ pmicintc: pmic@0 {
+ pm8921_gpio: gpio@150 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_default_gpios &bt_gpios>;
+
+ pwm_bl_gpios: pwm-bl-gpios {
+ pios {
+ pins = "gpio26";
+ bias-disable;
+ function = "normal";
+ qcom,drive-strength = <1>;
+ power-source = <PM8921_GPIO_S4>;
+ };
+ };
+
+ pwm_en_gpios: pwm-en-gpios {
+ pios {
+ pins = "gpio36";
+ bias-disable;
+ function = "normal";
+ qcom,drive-strength = <1>;
+ power-source = <PM8921_GPIO_S4>;
+ };
+ };
+
+ wlan_default_gpios: wlan-gpios {
+ pios {
+ pins = "gpio43";
+ function = "normal";
+ bias-disable;
+ power-source = <PM8921_GPIO_S4>;
+ };
+ };
+
+ bt_gpios: bt-gpio {
+ pios {
+ pins = "gpio44";
+ function = "normal";
+ bias-disable;
+ power-source = <PM8921_GPIO_S4>;
+ };
+ };
+
+ notify_led: nled {
+ pios {
+ pins = "gpio18";
+ function = "normal";
+ bias-disable;
+ power-source = <PM8921_GPIO_S4>;
+ };
+ };
+ };
+ };
+ };
+ sdcc4_pwrseq: pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&pm8921_gpio 43 GPIO_ACTIVE_LOW>,
+ <&pm8921_gpio 44 GPIO_ACTIVE_LOW>;
+ };
+
+ amba {
+ /* eMMC */
+ sdcc1: sdcc@12400000 {
+ status = "okay";
+ vmmc-supply = <&pm8921_l5>;
+ vqmmc-supply = <&pm8921_s4>;
+ };
+
+ /* External micro SD card */
+ sdcc3: sdcc@12180000 {
+ status = "okay";
+ vmmc-supply = <&pm8921_l6>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&card_detect>;
+ cd-gpios = <&tlmm_pinmux 26 GPIO_ACTIVE_LOW>;
+ };
+ /* WLAN */
+ sdcc4: sdcc@121c0000 {
+ status = "okay";
+ vmmc-supply = <&ext_3p3v>;
+ vqmmc-supply = <&pm8921_lvs1>;
+ mmc-pwrseq = <&sdcc4_pwrseq>;
+ };
+ };
+ };
+};
+
+&CPU0 {
+ cpu-supply = <&saw0>;
+};
+
+&CPU1 {
+ cpu-supply = <&saw1>;
+};
+
+&CPU2 {
+ cpu-supply = <&saw2>;
+};
+
+&CPU3 {
+ cpu-supply = <&saw3>;
+};
diff --git a/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts b/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts
index f3100da082b2..19b16218e3c8 100644
--- a/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts
+++ b/arch/arm/boot/dts/qcom-apq8064-ifc6410.dts
@@ -1,5 +1,6 @@
#include "qcom-apq8064-v2.0.dtsi"
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
/ {
model = "Qualcomm APQ8064/IFC6410";
@@ -14,6 +15,18 @@
stdout-path = "serial0:115200n8";
};
+ pwrseq {
+ compatible = "simple-bus";
+
+ sdcc4_pwrseq: sdcc4_pwrseq {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_default_gpios &bt_gpios>;
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&pm8921_gpio 43 GPIO_ACTIVE_LOW>,
+ <&pm8921_gpio 44 GPIO_ACTIVE_LOW>;
+ };
+ };
+
soc {
pinctrl@800000 {
card_detect: card_detect {
@@ -66,6 +79,12 @@
qcom,switch-mode-frequency = <3200000>;
};
+ pm8921_l2: l2 {
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ bias-pull-down;
+ };
+
pm8921_l3: l3 {
regulator-min-microvolt = <3050000>;
regulator-max-microvolt = <3300000>;
@@ -99,6 +118,14 @@
pm8921_lvs1: lvs1 {
bias-pull-down;
};
+
+ pm8921_lvs6: lvs6 {
+ bias-pull-down;
+ };
+
+ pm8921_lvs7: lvs7 {
+ bias-pull-down;
+ };
};
};
@@ -114,6 +141,61 @@
regulator-boot-on;
};
+ hdmi: qcom,hdmi-tx@4a00000 {
+ status = "okay";
+ core-vdda-supply = <&pm8921_hdmi_switch>;
+ hdmi-mux-supply = <&ext_3p3v>;
+ };
+
+ mdp: qcom,mdp@5100000 {
+ status = "okay";
+ lvds-vccs-3p3v-supply = <&ext_3p3v>;
+ lvds-pll-vdda-supply = <&pm8921_l2>;
+ lvds-vdda-supply = <&pm8921_lvs7>;
+
+ port {
+ lvds_out: endpoint {
+ remote-endpoint = <&auo_in>;
+ };
+ };
+ };
+
+ panel_3p3v: panel_3p3v {
+ compatible = "regulator-fixed";
+ pinctrl-0 = <&disp_en_gpios>;
+ pinctrl-names = "default";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-name = "panel_en_3p3v";
+ regulator-type = "voltage";
+ startup-delay-us = <0>;
+ gpio = <&pm8921_gpio 36 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ regulator-boot-on;
+ };
+
+ backlight: backlight{
+ pinctrl-0 = <&pwm_bl_gpios>;
+ pinctrl-names = "default";
+ compatible = "gpio-backlight";
+ gpios = <&pm8921_gpio 26 GPIO_ACTIVE_HIGH>;
+ default-on;
+ };
+
+ panel: auo,b101xtn01 {
+ status = "okay";
+ compatible = "auo,b101xtn01";
+
+ ddc-i2c-bus = <&i2c3>;
+ backlight = <&backlight>;
+ power-supply = <&panel_3p3v>;
+ port {
+ auo_in: endpoint {
+ remote-endpoint = <&lvds_out>;
+ };
+ };
+ };
+
gsbi3: gsbi@16200000 {
status = "okay";
qcom,mode = <GSBI_PROT_I2C>;
@@ -144,7 +226,7 @@
gsbi@16500000 {
status = "ok";
- qcom,mode = <GSBI_PROT_I2C_UART>;
+ qcom,mode = <GSBI_PROT_UART_W_FC>;
serial@16540000 {
status = "ok";
@@ -162,6 +244,10 @@
};
};
+ adm: dma@18320000 {
+ status = "okay";
+ };
+
sata_phy0: phy@1b400000 {
status = "okay";
};
@@ -209,6 +295,93 @@
usb4: usb@12530000 {
status = "okay";
};
+
+ /* on board fixed 3.3v supply */
+ v3p3_pcieclk: v3p3-pcieclk {
+ compatible = "regulator-fixed";
+ regulator-name = "PCIE V3P3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ pci@1b500000 {
+ status = "ok";
+ pcie-clk-supply = <&v3p3_pcieclk>;
+ avdd-supply = <&pm8921_s3>;
+ vdd-supply = <&pm8921_lvs6>;
+ ext-3p3v-supply = <&ext_3p3v>;
+ qcom,external-phy-refclk;
+ reset-gpio = <&tlmm_pinmux 27 GPIO_ACTIVE_LOW>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+ pinctrl-names = "default";
+ pinctrl-0 = <&notify_led>;
+
+ led@1 {
+ label = "apq8064:green:user1";
+ gpios = <&pm8921_gpio 18 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+ };
+
+ qcom,ssbi@500000 {
+ pmic@0 {
+ gpio@150 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&wlan_default_gpios>;
+
+ pwm_bl_gpios: pwm-bl-gpios {
+ pios {
+ pins = "gpio26";
+ bias-disable;
+ function = "normal";
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_HIGH>;
+ power-source = <PM8921_GPIO_S4>;
+ };
+ };
+
+ disp_en_gpios: disp-en-gpios {
+ pios {
+ pins = "gpio36";
+ bias-disable;
+ function = "normal";
+ qcom,drive-strength = <PMIC_GPIO_STRENGTH_HIGH>;
+ power-source = <PM8921_GPIO_S4>;
+ };
+ };
+
+ wlan_default_gpios: wlan-gpios {
+ pios {
+ pins = "gpio43";
+ function = "normal";
+ bias-disable;
+ power-source = <PM8921_GPIO_S4>;
+ };
+ };
+
+ bt_gpios: bt-gpio {
+ pios {
+ pins = "gpio44";
+ function = "normal";
+ bias-disable;
+ power-source = <PM8921_GPIO_S4>;
+ };
+ };
+
+ notify_led: nled {
+ pios {
+ pins = "gpio18";
+ function = "normal";
+ bias-disable;
+ power-source = <PM8921_GPIO_S4>;
+ };
+ };
+ };
+ };
+ };
amba {
/* eMMC */
@@ -231,7 +404,24 @@
status = "okay";
vmmc-supply = <&ext_3p3v>;
vqmmc-supply = <&pm8921_lvs1>;
+ mmc-pwrseq = <&sdcc4_pwrseq>;
};
};
};
};
+
+&CPU0 {
+ cpu-supply = <&saw0>;
+};
+
+&CPU1 {
+ cpu-supply = <&saw1>;
+};
+
+&CPU2 {
+ cpu-supply = <&saw2>;
+};
+
+&CPU3 {
+ cpu-supply = <&saw3>;
+};
diff --git a/arch/arm/boot/dts/qcom-apq8064.dtsi b/arch/arm/boot/dts/qcom-apq8064.dtsi
index d2e94d647c27..e8f8e468c972 100644
--- a/arch/arm/boot/dts/qcom-apq8064.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8064.dtsi
@@ -1,11 +1,16 @@
/dts-v1/;
#include "skeleton.dtsi"
+#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/clock/qcom,gcc-msm8960.h>
#include <dt-bindings/reset/qcom,gcc-msm8960.h>
#include <dt-bindings/clock/qcom,mmcc-msm8960.h>
#include <dt-bindings/soc/qcom,gsbi.h>
+#include <dt-bindings/mfd/qcom-rpm.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/thermal/thermal.h>
+
/ {
model = "Qualcomm APQ8064";
compatible = "qcom,apq8064";
@@ -15,7 +20,7 @@
#address-cells = <1>;
#size-cells = <0>;
- cpu@0 {
+ CPU0: cpu@0 {
compatible = "qcom,krait";
enable-method = "qcom,kpss-acc-v1";
device_type = "cpu";
@@ -24,9 +29,15 @@
qcom,acc = <&acc0>;
qcom,saw = <&saw0>;
cpu-idle-states = <&CPU_SPC>;
+ clocks = <&kraitcc 0>, <&kraitcc 4>;
+ clock-names = "cpu", "l2";
+ clock-latency = <100000>;
+ cooling-min-level = <0>;
+ cooling-max-level = <7>;
+ #cooling-cells = <2>;
};
- cpu@1 {
+ CPU1: cpu@1 {
compatible = "qcom,krait";
enable-method = "qcom,kpss-acc-v1";
device_type = "cpu";
@@ -35,9 +46,15 @@
qcom,acc = <&acc1>;
qcom,saw = <&saw1>;
cpu-idle-states = <&CPU_SPC>;
+ clocks = <&kraitcc 1>, <&kraitcc 4>;
+ clock-names = "cpu", "l2";
+ clock-latency = <100000>;
+ cooling-min-level = <0>;
+ cooling-max-level = <7>;
+ #cooling-cells = <2>;
};
- cpu@2 {
+ CPU2: cpu@2 {
compatible = "qcom,krait";
enable-method = "qcom,kpss-acc-v1";
device_type = "cpu";
@@ -46,9 +63,15 @@
qcom,acc = <&acc2>;
qcom,saw = <&saw2>;
cpu-idle-states = <&CPU_SPC>;
+ clocks = <&kraitcc 2>, <&kraitcc 4>;
+ clock-names = "cpu", "l2";
+ clock-latency = <100000>;
+ cooling-min-level = <0>;
+ cooling-max-level = <7>;
+ #cooling-cells = <2>;
};
- cpu@3 {
+ CPU3: cpu@3 {
compatible = "qcom,krait";
enable-method = "qcom,kpss-acc-v1";
device_type = "cpu";
@@ -57,6 +80,12 @@
qcom,acc = <&acc3>;
qcom,saw = <&saw3>;
cpu-idle-states = <&CPU_SPC>;
+ clocks = <&kraitcc 3>, <&kraitcc 4>;
+ clock-names = "cpu", "l2";
+ clock-latency = <100000>;
+ cooling-min-level = <0>;
+ cooling-max-level = <7>;
+ #cooling-cells = <2>;
};
L2: l2-cache {
@@ -64,6 +93,10 @@
cache-level = <2>;
};
+ qcom,l2 {
+ qcom,l2-rates = <384000000 972000000 1188000000>;
+ };
+
idle-states {
CPU_SPC: spc {
compatible = "qcom,idle-state-spc",
@@ -75,11 +108,620 @@
};
};
+ thermal-zones {
+ cpu-thermal0 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 7>;
+
+ trips {
+ cpu_alert0: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit0: trip@1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert0>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu-thermal1 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 8>;
+
+ trips {
+ cpu_alert1: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit1: trip@1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert1>;
+ cooling-device = <&CPU1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu-thermal2 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 9>;
+
+ trips {
+ cpu_alert2: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit2: trip@1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert2>;
+ cooling-device = <&CPU2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu-thermal3 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 10>;
+
+ trips {
+ cpu_alert3: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit3: trip@1 {
+ temperature = <110000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert3>;
+ cooling-device = <&CPU3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+ };
+
+ thermal-zones {
+ cpu-thermal0 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&gcc 7>;
+
+ trips {
+ cpu_alert0: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit0: trip@1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal1 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&gcc 8>;
+
+ trips {
+ cpu_alert1: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit1: trip@1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal2 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&gcc 9>;
+
+ trips {
+ cpu_alert2: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit2: trip@1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal3 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&gcc 10>;
+
+ trips {
+ cpu_alert3: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit3: trip@1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
cpu-pmu {
compatible = "qcom,krait-pmu";
interrupts = <1 10 0x304>;
};
+ qcom,pvs {
+ qcom,pvs-format-a;
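+ /* Hz uV */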
+ qcom,speed0-pvs0-bin-v0 =
+ < 384000000 950000 >,
+ < 486000000 975000 >,
+ < 594000000 1000000 >,
+ < 702000000 1025000 >,
+ < 810000000 1075000 >,
+ < 918000000 1100000 >,
+ < 1026000000 1125000 >,
+ < 1080000000 1175000 >,
+ < 1134000000 1175000 >,
+ < 1188000000 1200000 >,
+ < 1242000000 1200000 >,
+ < 1296000000 1225000 >,
+ < 1350000000 1225000 >,
+ < 1404000000 1237500 >,
+ < 1458000000 1237500 >,
+ < 1512000000 1250000 >;
+
+ qcom,speed0-pvs1-bin-v0 =
+ < 384000000 900000 >,
+ < 486000000 925000 >,
+ < 594000000 950000 >,
+ < 702000000 975000 >,
+ < 810000000 1025000 >,
+ < 918000000 1050000 >,
+ < 1026000000 1075000 >,
+ < 1080000000 1125000 >,
+ < 1134000000 1125000 >,
+ < 1188000000 1150000 >,
+ < 1242000000 1150000 >,
+ < 1296000000 1175000 >,
+ < 1350000000 1175000 >,
+ < 1404000000 1187500 >,
+ < 1458000000 1187500 >,
+ < 1512000000 1200000 >;
+
+ qcom,speed0-pvs3-bin-v0 =
+ < 384000000 850000 >,
+ < 486000000 875000 >,
+ < 594000000 900000 >,
+ < 702000000 925000 >,
+ < 810000000 975000 >,
+ < 918000000 1000000 >,
+ < 1026000000 1025000 >,
+ < 1080000000 1075000 >,
+ < 1134000000 1075000 >,
+ < 1188000000 1100000 >,
+ < 1242000000 1100000 >,
+ < 1296000000 1125000 >,
+ < 1350000000 1125000 >,
+ < 1404000000 1137500 >,
+ < 1458000000 1137500 >,
+ < 1512000000 1150000 >;
+
+ qcom,speed0-pvs4-bin-v0 =
+ < 384000000 850000 >,
+ < 486000000 875000 >,
+ < 594000000 900000 >,
+ < 702000000 925000 >,
+ < 810000000 962500 >,
+ < 918000000 975000 >,
+ < 1026000000 1000000 >,
+ < 1080000000 1050000 >,
+ < 1134000000 1050000 >,
+ < 1188000000 1075000 >,
+ < 1242000000 1075000 >,
+ < 1296000000 1100000 >,
+ < 1350000000 1100000 >,
+ < 1404000000 1112500 >,
+ < 1458000000 1112500 >,
+ < 1512000000 1125000 >;
+
+ qcom,speed1-pvs0-bin-v0 =
+ < 384000000 950000 >,
+ < 486000000 950000 >,
+ < 594000000 950000 >,
+ < 702000000 962500 >,
+ < 810000000 1000000 >,
+ < 918000000 1025000 >,
+ < 1026000000 1037500 >,
+ < 1134000000 1075000 >,
+ < 1242000000 1087500 >,
+ < 1350000000 1125000 >,
+ < 1458000000 1150000 >,
+ < 1566000000 1175000 >,
+ < 1674000000 1225000 >,
+ < 1728000000 1250000 >;
+
+ qcom,speed1-pvs1-bin-v0 =
+ < 384000000 950000 >,
+ < 486000000 950000 >,
+ < 594000000 950000 >,
+ < 702000000 962500 >,
+ < 810000000 975000 >,
+ < 918000000 1000000 >,
+ < 1026000000 1012500 >,
+ < 1134000000 1037500 >,
+ < 1242000000 1050000 >,
+ < 1350000000 1087500 >,
+ < 1458000000 1112500 >,
+ < 1566000000 1150000 >,
+ < 1674000000 1187500 >,
+ < 1728000000 1200000 >;
+
+ qcom,speed1-pvs2-bin-v0 =
+ < 384000000 925000 >,
+ < 486000000 925000 >,
+ < 594000000 925000 >,
+ < 702000000 925000 >,
+ < 810000000 937500 >,
+ < 918000000 950000 >,
+ < 1026000000 975000 >,
+ < 1134000000 1000000 >,
+ < 1242000000 1012500 >,
+ < 1350000000 1037500 >,
+ < 1458000000 1075000 >,
+ < 1566000000 1100000 >,
+ < 1674000000 1137500 >,
+ < 1728000000 1162500 >;
+
+ qcom,speed1-pvs3-bin-v0 =
+ < 384000000 900000 >,
+ < 486000000 900000 >,
+ < 594000000 900000 >,
+ < 702000000 900000 >,
+ < 810000000 900000 >,
+ < 918000000 925000 >,
+ < 1026000000 950000 >,
+ < 1134000000 975000 >,
+ < 1242000000 987500 >,
+ < 1350000000 1000000 >,
+ < 1458000000 1037500 >,
+ < 1566000000 1062500 >,
+ < 1674000000 1100000 >,
+ < 1728000000 1125000 >;
+
+ qcom,speed1-pvs4-bin-v0 =
+ < 384000000 875000 >,
+ < 486000000 875000 >,
+ < 594000000 875000 >,
+ < 702000000 875000 >,
+ < 810000000 887500 >,
+ < 918000000 900000 >,
+ < 1026000000 925000 >,
+ < 1134000000 950000 >,
+ < 1242000000 962500 >,
+ < 1350000000 975000 >,
+ < 1458000000 1000000 >,
+ < 1566000000 1037500 >,
+ < 1674000000 1075000 >,
+ < 1728000000 1100000 >;
+
+ qcom,speed1-pvs5-bin-v0 =
+ < 384000000 875000 >,
+ < 486000000 875000 >,
+ < 594000000 875000 >,
+ < 702000000 875000 >,
+ < 810000000 887500 >,
+ < 918000000 900000 >,
+ < 1026000000 925000 >,
+ < 1134000000 937500 >,
+ < 1242000000 950000 >,
+ < 1350000000 962500 >,
+ < 1458000000 987500 >,
+ < 1566000000 1012500 >,
+ < 1674000000 1050000 >,
+ < 1728000000 1075000 >;
+
+ qcom,speed1-pvs6-bin-v0 =
+ < 384000000 875000 >,
+ < 486000000 875000 >,
+ < 594000000 875000 >,
+ < 702000000 875000 >,
+ < 810000000 887500 >,
+ < 918000000 900000 >,
+ < 1026000000 925000 >,
+ < 1134000000 937500 >,
+ < 1242000000 950000 >,
+ < 1350000000 962500 >,
+ < 1458000000 975000 >,
+ < 1566000000 1000000 >,
+ < 1674000000 1025000 >,
+ < 1728000000 1050000 >;
+
+ qcom,speed2-pvs0-bin-v0 =
+ < 384000000 950000 >,
+ < 486000000 950000 >,
+ < 594000000 950000 >,
+ < 702000000 950000 >,
+ < 810000000 962500 >,
+ < 918000000 975000 >,
+ < 1026000000 1000000 >,
+ < 1134000000 1025000 >,
+ < 1242000000 1037500 >,
+ < 1350000000 1062500 >,
+ < 1458000000 1100000 >,
+ < 1566000000 1125000 >,
+ < 1674000000 1175000 >,
+ < 1782000000 1225000 >,
+ < 1890000000 1287500 >;
+
+ qcom,speed2-pvs1-bin-v0 =
+ < 384000000 925000 >,
+ < 486000000 925000 >,
+ < 594000000 925000 >,
+ < 702000000 925000 >,
+ < 810000000 937500 >,
+ < 918000000 950000 >,
+ < 1026000000 975000 >,
+ < 1134000000 1000000 >,
+ < 1242000000 1012500 >,
+ < 1350000000 1037500 >,
+ < 1458000000 1075000 >,
+ < 1566000000 1100000 >,
+ < 1674000000 1137500 >,
+ < 1782000000 1187500 >,
+ < 1890000000 1250000 >;
+
+ qcom,speed2-pvs2-bin-v0 =
+ < 384000000 900000 >,
+ < 486000000 900000 >,
+ < 594000000 900000 >,
+ < 702000000 900000 >,
+ < 810000000 912500 >,
+ < 918000000 925000 >,
+ < 1026000000 950000 >,
+ < 1134000000 975000 >,
+ < 1242000000 987500 >,
+ < 1350000000 1012500 >,
+ < 1458000000 1050000 >,
+ < 1566000000 1075000 >,
+ < 1674000000 1112500 >,
+ < 1782000000 1162500 >,
+ < 1890000000 1212500 >;
+
+ qcom,speed2-pvs3-bin-v0 =
+ < 384000000 900000 >,
+ < 486000000 900000 >,
+ < 594000000 900000 >,
+ < 702000000 900000 >,
+ < 810000000 900000 >,
+ < 918000000 912500 >,
+ < 1026000000 937500 >,
+ < 1134000000 962500 >,
+ < 1242000000 975000 >,
+ < 1350000000 1000000 >,
+ < 1458000000 1025000 >,
+ < 1566000000 1050000 >,
+ < 1674000000 1087500 >,
+ < 1782000000 1137500 >,
+ < 1890000000 1175000 >;
+
+ qcom,speed2-pvs4-bin-v0 =
+ < 384000000 875000 >,
+ < 486000000 875000 >,
+ < 594000000 875000 >,
+ < 702000000 875000 >,
+ < 810000000 887500 >,
+ < 918000000 900000 >,
+ < 1026000000 925000 >,
+ < 1134000000 950000 >,
+ < 1242000000 962500 >,
+ < 1350000000 975000 >,
+ < 1458000000 1000000 >,
+ < 1566000000 1037500 >,
+ < 1674000000 1075000 >,
+ < 1782000000 1112500 >,
+ < 1890000000 1150000 >;
+
+ qcom,speed2-pvs5-bin-v0 =
+ < 384000000 875000 >,
+ < 486000000 875000 >,
+ < 594000000 875000 >,
+ < 702000000 875000 >,
+ < 810000000 887500 >,
+ < 918000000 900000 >,
+ < 1026000000 925000 >,
+ < 1134000000 937500 >,
+ < 1242000000 950000 >,
+ < 1350000000 962500 >,
+ < 1458000000 987500 >,
+ < 1566000000 1012500 >,
+ < 1674000000 1050000 >,
+ < 1782000000 1087500 >,
+ < 1890000000 1125000 >;
+
+ qcom,speed2-pvs6-bin-v0 =
+ < 384000000 875000 >,
+ < 486000000 875000 >,
+ < 594000000 875000 >,
+ < 702000000 875000 >,
+ < 810000000 887500 >,
+ < 918000000 900000 >,
+ < 1026000000 925000 >,
+ < 1134000000 937500 >,
+ < 1242000000 950000 >,
+ < 1350000000 962500 >,
+ < 1458000000 975000 >,
+ < 1566000000 1000000 >,
+ < 1674000000 1025000 >,
+ < 1782000000 1062500 >,
+ < 1890000000 1100000 >;
+
+ qcom,speed14-pvs0-bin-v0 =
+ < 384000000 950000 >,
+ < 486000000 950000 >,
+ < 594000000 950000 >,
+ < 702000000 962500 >,
+ < 810000000 1000000 >,
+ < 918000000 1025000 >,
+ < 1026000000 1037500 >,
+ < 1134000000 1075000 >,
+ < 1242000000 1087500 >,
+ < 1350000000 1125000 >,
+ < 1458000000 1150000 >,
+ < 1512000000 1162500 >;
+
+ qcom,speed14-pvs1-bin-v0 =
+ < 384000000 950000 >,
+ < 486000000 950000 >,
+ < 594000000 950000 >,
+ < 702000000 962500 >,
+ < 810000000 975000 >,
+ < 918000000 1000000 >,
+ < 1026000000 1012500 >,
+ < 1134000000 1037500 >,
+ < 1242000000 1050000 >,
+ < 1350000000 1087500 >,
+ < 1458000000 1112500 >,
+ < 1512000000 1125000 >;
+
+ qcom,speed14-pvs2-bin-v0 =
+ < 384000000 925000 >,
+ < 486000000 925000 >,
+ < 594000000 925000 >,
+ < 702000000 925000 >,
+ < 810000000 937500 >,
+ < 918000000 950000 >,
+ < 1026000000 975000 >,
+ < 1134000000 1000000 >,
+ < 1242000000 1012500 >,
+ < 1350000000 1037500 >,
+ < 1458000000 1075000 >,
+ < 1512000000 1087500 >;
+
+ qcom,speed14-pvs3-bin-v0 =
+ < 384000000 900000 >,
+ < 486000000 900000 >,
+ < 594000000 900000 >,
+ < 702000000 900000 >,
+ < 810000000 900000 >,
+ < 918000000 925000 >,
+ < 1026000000 950000 >,
+ < 1134000000 975000 >,
+ < 1242000000 987500 >,
+ < 1350000000 1000000 >,
+ < 1458000000 1037500 >,
+ < 1512000000 1050000 >;
+
+ qcom,speed14-pvs4-bin-v0 =
+ < 384000000 875000 >,
+ < 486000000 875000 >,
+ < 594000000 875000 >,
+ < 702000000 875000 >,
+ < 810000000 887500 >,
+ < 918000000 900000 >,
+ < 1026000000 925000 >,
+ < 1134000000 950000 >,
+ < 1242000000 962500 >,
+ < 1350000000 975000 >,
+ < 1458000000 1000000 >,
+ < 1512000000 1012500 >;
+
+ qcom,speed14-pvs5-bin-v0 =
+ < 384000000 875000 >,
+ < 486000000 875000 >,
+ < 594000000 875000 >,
+ < 702000000 875000 >,
+ < 810000000 887500 >,
+ < 918000000 900000 >,
+ < 1026000000 925000 >,
+ < 1134000000 937500 >,
+ < 1242000000 950000 >,
+ < 1350000000 962500 >,
+ < 1458000000 987500 >,
+ < 1512000000 1000000 >;
+
+ qcom,speed14-pvs6-bin-v0 =
+ < 384000000 875000 >,
+ < 486000000 875000 >,
+ < 594000000 875000 >,
+ < 702000000 875000 >,
+ < 810000000 887500 >,
+ < 918000000 900000 >,
+ < 1026000000 925000 >,
+ < 1134000000 937500 >,
+ < 1242000000 950000 >,
+ < 1350000000 962500 >,
+ < 1458000000 975000 >,
+ < 1512000000 987500 >;
+ };
+
+ kraitcc: clock-controller {
+ compatible = "qcom,krait-cc-v1";
+ #clock-cells = <1>;
+ };
+
+ clocks {
+ sleep_clk: sleep_clk {
+ compatible = "fixed-clock";
+ clock-frequency = <32768>;
+ #clock-cells = <0>;
+ };
+ };
+
soc: soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -106,6 +748,20 @@
};
};
+ hdmi_pinctrl: hdmi-pinctrl {
+ mux1 {
+ pins = "gpio69", "gpio70", "gpio71";
+ function = "hdmi";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+ mux2 {
+ pins = "gpio72";
+ function = "hdmi";
+ bias-pull-down;
+ drive-strength = <16>;
+ };
+ };
ps_hold: ps_hold {
mux {
pins = "gpio78";
@@ -154,48 +810,75 @@
cpu-offset = <0x80000>;
};
+ watchdog@208a038 {
+ compatible = "qcom,kpss-wdt-apq8064";
+ reg = <0x0208a038 0x40>;
+ clocks = <&sleep_clk>;
+ timeout-sec = <10>;
+ };
+
acc0: clock-controller@2088000 {
compatible = "qcom,kpss-acc-v1";
reg = <0x02088000 0x1000>, <0x02008000 0x1000>;
+ clock-output-names = "acpu0_aux";
};
acc1: clock-controller@2098000 {
compatible = "qcom,kpss-acc-v1";
reg = <0x02098000 0x1000>, <0x02008000 0x1000>;
+ clock-output-names = "acpu1_aux";
};
acc2: clock-controller@20a8000 {
compatible = "qcom,kpss-acc-v1";
reg = <0x020a8000 0x1000>, <0x02008000 0x1000>;
+ clock-output-names = "acpu2_aux";
};
acc3: clock-controller@20b8000 {
compatible = "qcom,kpss-acc-v1";
reg = <0x020b8000 0x1000>, <0x02008000 0x1000>;
+ clock-output-names = "acpu3_aux";
};
saw0: power-controller@2089000 {
compatible = "qcom,apq8064-saw2-v1.1-cpu", "qcom,saw2";
reg = <0x02089000 0x1000>, <0x02009000 0x1000>;
regulator;
+ regulator-name = "krait0";
+ regulator-always-on;
+ regulator-min-microvolt = <825000>;
+ regulator-max-microvolt = <1250000>;
};
saw1: power-controller@2099000 {
compatible = "qcom,apq8064-saw2-v1.1-cpu", "qcom,saw2";
reg = <0x02099000 0x1000>, <0x02009000 0x1000>;
regulator;
+ regulator-name = "krait1";
+ regulator-always-on;
+ regulator-min-microvolt = <825000>;
+ regulator-max-microvolt = <1250000>;
};
saw2: power-controller@20a9000 {
compatible = "qcom,apq8064-saw2-v1.1-cpu", "qcom,saw2";
reg = <0x020a9000 0x1000>, <0x02009000 0x1000>;
regulator;
+ regulator-name = "krait2";
+ regulator-always-on;
+ regulator-min-microvolt = <825000>;
+ regulator-max-microvolt = <1250000>;
};
saw3: power-controller@20b9000 {
compatible = "qcom,apq8064-saw2-v1.1-cpu", "qcom,saw2";
reg = <0x020b9000 0x1000>, <0x02009000 0x1000>;
regulator;
+ regulator-name = "krait3";
+ regulator-always-on;
+ regulator-min-microvolt = <825000>;
+ regulator-max-microvolt = <1250000>;
};
gsbi1: gsbi@12440000 {
@@ -276,6 +959,7 @@
#address-cells = <1>;
#size-cells = <1>;
ranges;
+ syscon-tcsr = <&tcsr>;
gsbi6_serial: serial@16540000 {
compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
@@ -284,6 +968,13 @@
interrupts = <0 156 0x0>;
clocks = <&gcc GSBI6_UART_CLK>, <&gcc GSBI6_H_CLK>;
clock-names = "core", "iface";
+
+ qcom,rx-crci = <11>;
+ qcom,tx-crci = <6>;
+
+ dmas = <&adm 6>, <&adm 7>;
+ dma-names = "rx", "tx";
+
status = "disabled";
};
};
@@ -311,6 +1002,13 @@
};
};
+ rng@1a500000 {
+ compatible = "qcom,prng";
+ reg = <0x1a500000 0x200>;
+ clocks = <&gcc PRNG_CLK>;
+ clock-names = "core";
+ };
+
qcom,ssbi@500000 {
compatible = "qcom,ssbi";
reg = <0x00500000 0x1000>;
@@ -326,7 +1024,6 @@
#size-cells = <0>;
pm8921_gpio: gpio@150 {
-
compatible = "qcom,pm8921-gpio";
reg = <0x150>;
interrupts = <192 1>, <193 1>, <194 1>,
@@ -361,14 +1058,41 @@
<136 1>, <137 1>, <138 1>, <139 1>;
};
+ rtc@11d {
+ compatible = "qcom,pm8921-rtc";
+ interrupt-parent = <&pmicintc>;
+ interrupts = <39 1>;
+ reg = <0x11d>;
+ allow-set-time;
+ };
+
+ };
+ };
+
+ qfprom: qfprom@00700000 {
+ compatible = "qcom,qfprom";
+ reg = <0x00700000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ tsens_calib: calib {
+ reg = <0x404 0x10>;
+ };
+ tsens_backup: backup_calib {
+ reg = <0x414 0x10>;
};
};
gcc: clock-controller@900000 {
compatible = "qcom,gcc-apq8064";
reg = <0x00900000 0x4000>;
+ nvmem-cells = <&tsens_calib>, <&tsens_backup>;
+ nvmem-cell-names = "calib", "calib_backup";
+ qcom,tsens-slopes = <1176 1176 1154 1176 1111
+ 1132 1132 1199 1132 1199 1132>;
#clock-cells = <1>;
#reset-cells = <1>;
+ #thermal-sensor-cells = <1>;
};
lcc: clock-controller@28000000 {
@@ -394,12 +1118,19 @@
compatible = "qcom,rpm-apq8064";
reg = <0x108000 0x1000>;
qcom,ipc = <&l2cc 0x8 2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
interrupts = <GIC_SPI 19 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 21 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 22 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "ack", "err", "wakeup";
+ rpmcc: rpm-clock-controller {
+ compatible = "qcom,apq8064-rpm-clk";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
regulators {
compatible = "qcom,rpm-pm8921-regulators";
@@ -409,7 +1140,46 @@
};
};
- usb1_phy: phy@12500000 {
+
+ /* PCIE */
+
+ pci@1b500000 {
+ compatible = "qcom,pcie-ipq8064";
+ reg = <0x1b500000 0x1000>, <0x1b502000 0x100>, <0x1b600000 0x80>;
+ reg-names = "base", "elbi", "parf";
+ status = "disabled";
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ interrupts = <0 35 0x0
+ 0 36 0x0
+ 0 37 0x0
+ 0 38 0x0
+ 0 39 0x0
+ 0 238 0x0>;
+ interrupt-names = "irq1", "irq2", "irq3", "irq4", "iqr5", "msi";
+
+ resets = <&gcc PCIE_ACLK_RESET>,
+ <&gcc PCIE_HCLK_RESET>,
+ <&gcc PCIE_POR_RESET>,
+ <&gcc PCIE_PCI_RESET>,
+ <&gcc PCIE_PHY_RESET>;
+ reset-names = "axi", "ahb", "por", "pci", "phy";
+
+ clocks = <&gcc PCIE_A_CLK>,
+ <&gcc PCIE_H_CLK>,
+ <&gcc PCIE_PHY_REF_CLK>;
+ clock-names = "core", "iface", "phy";
+
+ ranges = <0x00000000 0 0 0x0ff00000 0 0x00100000 /* configuration space */
+ 0x81000000 0 0 0x0fe00000 0 0x00100000 /* downstream I/O */
+ 0x82000000 0 0 0x08000000 0 0x07e00000>; /* non-prefetchable memory */
+
+ };
+
+ usb1_phy: phy@12500000 {
compatible = "qcom,usb-otg-ci";
reg = <0x12500000 0x400>;
interrupts = <GIC_SPI 100 IRQ_TYPE_NONE>;
@@ -614,9 +1384,174 @@
};
};
+ adm: dma@18320000 {
+ compatible = "qcom,adm";
+ reg = <0x18320000 0xE0000>;
+ interrupts = <GIC_SPI 171 IRQ_TYPE_NONE>;
+ #dma-cells = <1>;
+
+ clocks = <&gcc ADM0_CLK>, <&gcc ADM0_PBUS_CLK>;
+ clock-names = "core", "iface";
+
+ resets = <&gcc ADM0_RESET>,
+ <&gcc ADM0_PBUS_RESET>,
+ <&gcc ADM0_C0_RESET>,
+ <&gcc ADM0_C1_RESET>,
+ <&gcc ADM0_C2_RESET>;
+ reset-names = "clk", "pbus", "c0", "c1", "c2";
+ qcom,ee = <1>;
+
+ status = "disabled";
+ };
+
tcsr: syscon@1a400000 {
compatible = "qcom,tcsr-apq8064", "syscon";
reg = <0x1a400000 0x100>;
};
+
+ hdmi: qcom,hdmi-tx@4a00000 {
+ compatible = "qcom,hdmi-tx-8960";
+ reg-names = "core_physical";
+ reg = <0x04a00000 0x1000>;
+ interrupts = <GIC_SPI 79 0>;
+ clock-names =
+ "core_clk",
+ "master_iface_clk",
+ "slave_iface_clk";
+ clocks =
+ <&mmcc HDMI_APP_CLK>,
+ <&mmcc HDMI_M_AHB_CLK>,
+ <&mmcc HDMI_S_AHB_CLK>;
+ qcom,hdmi-tx-ddc-clk = <&tlmm_pinmux 70 GPIO_ACTIVE_HIGH>;
+ qcom,hdmi-tx-ddc-data = <&tlmm_pinmux 71 GPIO_ACTIVE_HIGH>;
+ qcom,hdmi-tx-hpd = <&tlmm_pinmux 72 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&hdmi_pinctrl>;
+ };
+
+ gpu: qcom,adreno-3xx@4300000 {
+ compatible = "qcom,adreno-3xx";
+ reg = <0x04300000 0x20000>;
+ reg-names = "kgsl_3d0_reg_memory";
+ interrupts = <GIC_SPI 80 0>;
+ interrupt-names = "kgsl_3d0_irq";
+ clock-names =
+ "core_clk",
+ "iface_clk",
+ "mem_clk",
+ "mem_iface_clk";
+ clocks =
+ <&mmcc GFX3D_CLK>,
+ <&mmcc GFX3D_AHB_CLK>,
+ <&mmcc GFX3D_AXI_CLK>,
+ <&mmcc MMSS_IMEM_AHB_CLK>;
+ qcom,chipid = <0x03020002>;
+
+ iommus = <&gfx3d 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ &gfx3d 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
+ &gfx3d1 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ &gfx3d1 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31>;
+
+ qcom,gpu-pwrlevels {
+ compatible = "qcom,gpu-pwrlevels";
+ qcom,gpu-pwrlevel@0 {
+ qcom,gpu-freq = <450000000>;
+ };
+ qcom,gpu-pwrlevel@1 {
+ qcom,gpu-freq = <27000000>;
+ };
+ };
+ };
+
+ mdp: qcom,mdp@5100000 {
+ compatible = "qcom,mdp";
+ reg = <0x05100000 0xf0000>;
+ interrupts = <GIC_SPI 75 0>;
+ connectors = <&hdmi>;
+ gpus = <&gpu>;
+ clock-names =
+ "core_clk",
+ "iface_clk",
+ "lut_clk",
+ "src_clk",
+ "hdmi_clk",
+ "mdp_clk",
+ "mdp_axi_clk";
+ clocks =
+ <&mmcc MDP_CLK>,
+ <&mmcc MDP_AHB_CLK>,
+ <&mmcc MDP_LUT_CLK>,
+ <&mmcc TV_SRC>,
+ <&mmcc HDMI_TV_CLK>,
+ <&mmcc MDP_TV_CLK>,
+ <&mmcc MDP_AXI_CLK>;
+
+ iommus = <&mdp_port0 0 2
+ &mdp_port1 0 2>;
+ };
+
+ mdp_port0: qcom,iommu@7500000 {
+ compatible = "qcom,iommu-v0";
+ #iommu-cells = <2>;
+ clock-names =
+ "smmu_pclk",
+ "iommu_clk";
+ clocks =
+ <&mmcc SMMU_AHB_CLK>,
+ <&mmcc MDP_AXI_CLK>;
+ reg = <0x07500000 0x100000>;
+ interrupts =
+ <GIC_SPI 63 0>,
+ <GIC_SPI 64 0>;
+ ncb = <2>;
+ };
+
+ mdp_port1: qcom,iommu@7600000 {
+ compatible = "qcom,iommu";
+ #iommu-cells = <2>;
+ clock-names =
+ "smmu_pclk",
+ "iommu_clk";
+ clocks =
+ <&mmcc SMMU_AHB_CLK>,
+ <&mmcc MDP_AXI_CLK>;
+ reg = <0x07600000 0x100000>;
+ interrupts =
+ <GIC_SPI 61 0>,
+ <GIC_SPI 62 0>;
+ ncb = <2>;
+ };
+
+ gfx3d: qcom,iommu@7c00000 {
+ compatible = "qcom,iommu-v0";
+ #iommu-cells = <16>;
+ clock-names =
+ "smmu_pclk",
+ "iommu_clk";
+ clocks =
+ <&mmcc SMMU_AHB_CLK>,
+ <&mmcc GFX3D_AXI_CLK>;
+ reg = <0x07c00000 0x100000>;
+ interrupts =
+ <GIC_SPI 69 0>,
+ <GIC_SPI 70 0>;
+ ncb = <3>;
+ };
+
+ gfx3d1: qcom,iommu@7d00000 {
+ compatible = "qcom,iommu-v0";
+ #iommu-cells = <16>;
+ clock-names =
+ "smmu_pclk",
+ "iommu_clk";
+ clocks =
+ <&mmcc SMMU_AHB_CLK>,
+ <&mmcc GFX3D_AXI_CLK>;
+ reg = <0x07d00000 0x100000>;
+ interrupts =
+ <GIC_SPI 210 0>,
+ <GIC_SPI 211 0>;
+ ncb = <3>;
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom-apq8084.dtsi b/arch/arm/boot/dts/qcom-apq8084.dtsi
index 0554fbd72c40..477173c55d0b 100644
--- a/arch/arm/boot/dts/qcom-apq8084.dtsi
+++ b/arch/arm/boot/dts/qcom-apq8084.dtsi
@@ -75,6 +75,88 @@
};
};
+ thermal-zones {
+ cpu-thermal0 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 5>;
+
+ trips {
+ cpu_alert0: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit0: trip@1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal1 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 6>;
+
+ trips {
+ cpu_alert1: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit1: trip@1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal2 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 7>;
+
+ trips {
+ cpu_alert2: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit2: trip@1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal3 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 8>;
+
+ trips {
+ cpu_alert3: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit3: trip@1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
cpu-pmu {
compatible = "qcom,krait-pmu";
interrupts = <1 7 0xf04>;
@@ -103,6 +185,29 @@
<0xf9002000 0x1000>;
};
+ qfprom: qfprom@fc4bc000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "qcom,qfprom";
+ reg = <0xfc4bc000 0x1000>;
+ tsens_calib: calib@d0 {
+ reg = <0xd0 0x18>;
+ };
+ tsens_backup: backup@440 {
+ reg = <0x440 0x10>;
+ };
+ };
+
+ tsens: thermal-sensor@fc4a8000 {
+ compatible = "qcom,msm8974-tsens";
+ reg = <0xfc4a8000 0x2000>;
+ nvmem-cells = <&tsens_calib>, <&tsens_backup>;
+ nvmem-cell-names = "calib", "calib_backup";
+ qcom,tsens-slopes = <3200 3200 3200 3200 3200 3200
+ 3200 3200 3200 3200 3200>;
+ #thermal-sensor-cells = <1>;
+ };
+
timer@f9020000 {
#address-cells = <1>;
#size-cells = <1>;
@@ -221,6 +326,7 @@
compatible = "qcom,gcc-apq8084";
#clock-cells = <1>;
#reset-cells = <1>;
+ #power-domain-cells = <1>;
reg = <0xfc400000 0x4000>;
};
diff --git a/arch/arm/boot/dts/qcom-msm8960.dtsi b/arch/arm/boot/dts/qcom-msm8960.dtsi
index 134cd91d68ec..1111debb1376 100644
--- a/arch/arm/boot/dts/qcom-msm8960.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8960.dtsi
@@ -25,6 +25,10 @@
next-level-cache = <&L2>;
qcom,acc = <&acc0>;
qcom,saw = <&saw0>;
+ clocks = <&kraitcc 0>;
+ clock-names = "cpu";
+ clock-latency = <100000>;
};
cpu@1 {
@@ -35,6 +39,10 @@
next-level-cache = <&L2>;
qcom,acc = <&acc1>;
qcom,saw = <&saw1>;
+ clocks = <&kraitcc 1>;
+ clock-names = "cpu";
+ clock-latency = <100000>;
};
L2: l2-cache {
@@ -49,6 +57,39 @@
qcom,no-pc-write;
};
+ qcom,pvs {
+ qcom,pvs-format-a;
+ /* Hz uV */
+ qcom,speed0-pvs0-bin-v0 =
+ < 384000000 950000 >,
+ < 486000000 975000 >,
+ < 594000000 1000000 >,
+ < 702000000 1025000 >,
+ < 810000000 1075000 >,
+ < 918000000 1100000 >;
+
+ qcom,speed0-pvs1-bin-v0 =
+ < 384000000 900000 >,
+ < 486000000 925000 >,
+ < 594000000 950000 >,
+ < 702000000 975000 >,
+ < 810000000 1025000 >,
+ < 918000000 1050000 >;
+
+ qcom,speed0-pvs3-bin-v0 =
+ < 384000000 850000 >,
+ < 486000000 875000 >,
+ < 594000000 900000 >,
+ < 702000000 925000 >,
+ < 810000000 975000 >,
+ < 918000000 1000000 >;
+ };
+
+ kraitcc: clock-controller {
+ compatible = "qcom,krait-cc-v1";
+ #clock-cells = <1>;
+ };
+
soc: soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -105,11 +146,6 @@
#reset-cells = <1>;
};
- l2cc: clock-controller@2011000 {
- compatible = "syscon";
- reg = <0x2011000 0x1000>;
- };
-
rpm@108000 {
compatible = "qcom,rpm-msm8960";
reg = <0x108000 0x1000>;
@@ -126,11 +162,19 @@
acc0: clock-controller@2088000 {
compatible = "qcom,kpss-acc-v1";
reg = <0x02088000 0x1000>, <0x02008000 0x1000>;
+ clock-output-names = "acpu0_aux";
};
acc1: clock-controller@2098000 {
compatible = "qcom,kpss-acc-v1";
reg = <0x02098000 0x1000>, <0x02008000 0x1000>;
+ clock-output-names = "acpu1_aux";
+ };
+
+ l2cc: clock-controller@2011000 {
+ compatible = "qcom,kpss-gcc";
+ reg = <0x2011000 0x1000>;
+ clock-output-names = "acpu_l2_aux";
};
saw0: regulator@2089000 {
diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi
index ab8e57250468..b09986185655 100644
--- a/arch/arm/boot/dts/qcom-msm8974.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8974.dtsi
@@ -25,7 +25,7 @@
#size-cells = <0>;
interrupts = <1 9 0xf04>;
- cpu@0 {
+ cpu0: cpu@0 {
compatible = "qcom,krait";
enable-method = "qcom,kpss-acc-v2";
device_type = "cpu";
@@ -34,9 +34,12 @@
qcom,acc = <&acc0>;
qcom,saw = <&saw0>;
cpu-idle-states = <&CPU_SPC>;
+ clocks = <&kraitcc 0>;
+ clock-names = "cpu";
+ clock-latency = <100000>;
};
- cpu@1 {
+ cpu1: cpu@1 {
compatible = "qcom,krait";
enable-method = "qcom,kpss-acc-v2";
device_type = "cpu";
@@ -45,9 +48,12 @@
qcom,acc = <&acc1>;
qcom,saw = <&saw1>;
cpu-idle-states = <&CPU_SPC>;
+ clocks = <&kraitcc 1>;
+ clock-names = "cpu";
+ clock-latency = <100000>;
};
- cpu@2 {
+ cpu2: cpu@2 {
compatible = "qcom,krait";
enable-method = "qcom,kpss-acc-v2";
device_type = "cpu";
@@ -56,9 +62,12 @@
qcom,acc = <&acc2>;
qcom,saw = <&saw2>;
cpu-idle-states = <&CPU_SPC>;
+ clocks = <&kraitcc 2>;
+ clock-names = "cpu";
+ clock-latency = <100000>;
};
- cpu@3 {
+ cpu3: cpu@3 {
compatible = "qcom,krait";
enable-method = "qcom,kpss-acc-v2";
device_type = "cpu";
@@ -67,6 +76,9 @@
qcom,acc = <&acc3>;
qcom,saw = <&saw3>;
cpu-idle-states = <&CPU_SPC>;
+ clocks = <&kraitcc 3>;
+ clock-names = "cpu";
+ clock-latency = <100000>;
};
L2: l2-cache {
@@ -86,6 +98,88 @@
};
};
+ thermal-zones {
+ cpu-thermal0 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 5>;
+
+ trips {
+ cpu_alert0: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit0: trip@1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal1 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 6>;
+
+ trips {
+ cpu_alert1: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit1: trip@1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal2 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 7>;
+
+ trips {
+ cpu_alert2: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit2: trip@1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+
+ cpu-thermal3 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 8>;
+
+ trips {
+ cpu_alert3: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit3: trip@1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+ };
+ };
+
cpu-pmu {
compatible = "qcom,krait-pmu";
interrupts = <1 7 0xf04>;
@@ -100,6 +194,267 @@
clock-frequency = <19200000>;
};
+ qcom,pvs {
+ qcom,pvs-format-b;
+ /* Hz uV ua */
+ qcom,speed0-pvs0-bin-v0 =
+ < 300000000 815000 73 >,
+ < 345600000 825000 85 >,
+ < 422400000 835000 104 >,
+ < 499200000 845000 124 >,
+ < 576000000 855000 144 >,
+ < 652800000 865000 165 >,
+ < 729600000 875000 186 >,
+ < 806400000 890000 208 >,
+ < 883200000 900000 229 >,
+ < 960000000 915000 252 >;
+
+ qcom,speed0-pvs1-bin-v0 =
+ < 300000000 800000 73 >,
+ < 345600000 810000 85 >,
+ < 422400000 820000 104 >,
+ < 499200000 830000 124 >,
+ < 576000000 840000 144 >,
+ < 652800000 850000 165 >,
+ < 729600000 860000 186 >,
+ < 806400000 875000 208 >,
+ < 883200000 885000 229 >,
+ < 960000000 895000 252 >;
+
+ qcom,speed0-pvs2-bin-v0 =
+ < 300000000 785000 73 >,
+ < 345600000 795000 85 >,
+ < 422400000 805000 104 >,
+ < 499200000 815000 124 >,
+ < 576000000 825000 144 >,
+ < 652800000 835000 165 >,
+ < 729600000 845000 186 >,
+ < 806400000 855000 208 >,
+ < 883200000 865000 229 >,
+ < 960000000 875000 252 >;
+
+ qcom,speed0-pvs3-bin-v0 =
+ < 300000000 775000 73 >,
+ < 345600000 780000 85 >,
+ < 422400000 790000 104 >,
+ < 499200000 800000 124 >,
+ < 576000000 810000 144 >,
+ < 652800000 820000 165 >,
+ < 729600000 830000 186 >,
+ < 806400000 840000 208 >,
+ < 883200000 850000 229 >,
+ < 960000000 860000 252 >;
+
+ qcom,speed0-pvs4-bin-v0 =
+ < 300000000 775000 73 >,
+ < 345600000 775000 85 >,
+ < 422400000 780000 104 >,
+ < 499200000 790000 124 >,
+ < 576000000 800000 144 >,
+ < 652800000 810000 165 >,
+ < 729600000 820000 186 >,
+ < 806400000 830000 208 >,
+ < 883200000 840000 229 >,
+ < 960000000 850000 252 >;
+
+ qcom,speed0-pvs5-bin-v0 =
+ < 300000000 750000 73 >,
+ < 345600000 760000 85 >,
+ < 422400000 770000 104 >,
+ < 499200000 780000 124 >,
+ < 576000000 790000 144 >,
+ < 652800000 800000 165 >,
+ < 729600000 810000 186 >,
+ < 806400000 820000 208 >,
+ < 883200000 830000 229 >,
+ < 960000000 840000 252 >;
+
+ qcom,speed0-pvs6-bin-v0 =
+ < 300000000 750000 73 >,
+ < 345600000 750000 85 >,
+ < 422400000 760000 104 >,
+ < 499200000 770000 124 >,
+ < 576000000 780000 144 >,
+ < 652800000 790000 165 >,
+ < 729600000 800000 186 >,
+ < 806400000 810000 208 >,
+ < 883200000 820000 229 >,
+ < 960000000 830000 252 >;
+
+ qcom,speed2-pvs0-bin-v0 =
+ < 300000000 800000 72 >,
+ < 345600000 800000 83 >,
+ < 422400000 805000 102 >,
+ < 499200000 815000 121 >,
+ < 576000000 825000 141 >,
+ < 652800000 835000 161 >,
+ < 729600000 845000 181 >,
+ < 806400000 855000 202 >,
+ < 883200000 865000 223 >,
+ < 960000000 875000 245 >;
+
+ qcom,speed2-pvs1-bin-v0 =
+ < 300000000 800000 72 >,
+ < 345600000 800000 83 >,
+ < 422400000 800000 102 >,
+ < 499200000 800000 121 >,
+ < 576000000 810000 141 >,
+ < 652800000 820000 161 >,
+ < 729600000 830000 181 >,
+ < 806400000 840000 202 >,
+ < 883200000 850000 223 >,
+ < 960000000 860000 245 >;
+
+ qcom,speed2-pvs2-bin-v0 =
+ < 300000000 775000 72 >,
+ < 345600000 775000 83 >,
+ < 422400000 775000 102 >,
+ < 499200000 785000 121 >,
+ < 576000000 795000 141 >,
+ < 652800000 805000 161 >,
+ < 729600000 815000 181 >,
+ < 806400000 825000 202 >,
+ < 883200000 835000 223 >,
+ < 960000000 845000 245 >;
+
+ qcom,speed2-pvs3-bin-v0 =
+ < 300000000 775000 72 >,
+ < 345600000 775000 83 >,
+ < 422400000 775000 102 >,
+ < 499200000 775000 121 >,
+ < 576000000 780000 141 >,
+ < 652800000 790000 161 >,
+ < 729600000 800000 181 >,
+ < 806400000 810000 202 >,
+ < 883200000 820000 223 >,
+ < 960000000 830000 245 >;
+
+ qcom,speed2-pvs4-bin-v0 =
+ < 300000000 775000 72 >,
+ < 345600000 775000 83 >,
+ < 422400000 775000 102 >,
+ < 499200000 775000 121 >,
+ < 576000000 775000 141 >,
+ < 652800000 780000 161 >,
+ < 729600000 790000 181 >,
+ < 806400000 800000 202 >,
+ < 883200000 810000 223 >,
+ < 960000000 820000 245 >;
+
+ qcom,speed2-pvs5-bin-v0 =
+ < 300000000 750000 72 >,
+ < 345600000 750000 83 >,
+ < 422400000 750000 102 >,
+ < 499200000 750000 121 >,
+ < 576000000 760000 141 >,
+ < 652800000 770000 161 >,
+ < 729600000 780000 181 >,
+ < 806400000 790000 202 >,
+ < 883200000 800000 223 >,
+ < 960000000 810000 245 >;
+
+ qcom,speed2-pvs6-bin-v0 =
+ < 300000000 750000 72 >,
+ < 345600000 750000 83 >,
+ < 422400000 750000 102 >,
+ < 499200000 750000 121 >,
+ < 576000000 750000 141 >,
+ < 652800000 760000 161 >,
+ < 729600000 770000 181 >,
+ < 806400000 780000 202 >,
+ < 883200000 790000 223 >,
+ < 960000000 800000 245 >;
+
+ qcom,speed1-pvs0-bin-v0 =
+ < 300000000 775000 72 >,
+ < 345600000 775000 83 >,
+ < 422400000 775000 101 >,
+ < 499200000 780000 120 >,
+ < 576000000 790000 139 >,
+ < 652800000 800000 159 >,
+ < 729600000 810000 180 >,
+ < 806400000 820000 200 >,
+ < 883200000 830000 221 >,
+ < 960000000 840000 242 >;
+
+ qcom,speed1-pvs1-bin-v0 =
+ < 300000000 775000 72 >,
+ < 345600000 775000 83 >,
+ < 422400000 775000 101 >,
+ < 499200000 775000 120 >,
+ < 576000000 775000 139 >,
+ < 652800000 785000 159 >,
+ < 729600000 795000 180 >,
+ < 806400000 805000 200 >,
+ < 883200000 815000 221 >,
+ < 960000000 825000 242 >;
+
+ qcom,speed1-pvs2-bin-v0 =
+ < 300000000 750000 72 >,
+ < 345600000 750000 83 >,
+ < 422400000 750000 101 >,
+ < 499200000 750000 120 >,
+ < 576000000 760000 139 >,
+ < 652800000 770000 159 >,
+ < 729600000 780000 180 >,
+ < 806400000 790000 200 >,
+ < 883200000 800000 221 >,
+ < 960000000 810000 242 >;
+
+ qcom,speed1-pvs3-bin-v0 =
+ < 300000000 750000 72 >,
+ < 345600000 750000 83 >,
+ < 422400000 750000 101 >,
+ < 499200000 750000 120 >,
+ < 576000000 750000 139 >,
+ < 652800000 755000 159 >,
+ < 729600000 765000 180 >,
+ < 806400000 775000 200 >,
+ < 883200000 785000 221 >,
+ < 960000000 795000 242 >;
+
+ qcom,speed1-pvs4-bin-v0 =
+ < 300000000 750000 72 >,
+ < 345600000 750000 83 >,
+ < 422400000 750000 101 >,
+ < 499200000 750000 120 >,
+ < 576000000 750000 139 >,
+ < 652800000 750000 159 >,
+ < 729600000 755000 180 >,
+ < 806400000 765000 200 >,
+ < 883200000 775000 221 >,
+ < 960000000 785000 242 >;
+
+ qcom,speed1-pvs5-bin-v0 =
+ < 300000000 725000 72 >,
+ < 345600000 725000 83 >,
+ < 422400000 725000 101 >,
+ < 499200000 725000 120 >,
+ < 576000000 725000 139 >,
+ < 652800000 735000 159 >,
+ < 729600000 745000 180 >,
+ < 806400000 755000 200 >,
+ < 883200000 765000 221 >,
+ < 960000000 775000 242 >;
+
+ qcom,speed1-pvs6-bin-v0 =
+ < 300000000 725000 72 >,
+ < 345600000 725000 83 >,
+ < 422400000 725000 101 >,
+ < 499200000 725000 120 >,
+ < 576000000 725000 139 >,
+ < 652800000 725000 159 >,
+ < 729600000 735000 180 >,
+ < 806400000 745000 200 >,
+ < 883200000 755000 221 >,
+ < 960000000 765000 242 >;
+ };
+
+ kraitcc: clock-controller {
+ compatible = "qcom,krait-cc-v2";
+ #clock-cells = <1>;
+ };
+
soc: soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -114,6 +469,29 @@
<0xf9002000 0x1000>;
};
+ qfprom: qfprom@fc4bc000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "qcom,qfprom";
+ reg = <0xfc4bc000 0x1000>;
+ tsens_calib: calib@d0 {
+ reg = <0xd0 0x18>;
+ };
+ tsens_backup: backup@440 {
+ reg = <0x440 0x10>;
+ };
+ };
+
+ tsens: thermal-sensor@fc4a8000 {
+ compatible = "qcom,msm8974-tsens";
+ reg = <0xfc4a8000 0x2000>;
+ nvmem-cells = <&tsens_calib>, <&tsens_backup>;
+ nvmem-cell-names = "calib", "calib_backup";
+ qcom,tsens-slopes = <3200 3200 3200 3200 3200 3200
+ 3200 3200 3200 3200 3200>;
+ #thermal-sensor-cells = <1>;
+ };
+
timer@f9020000 {
#address-cells = <1>;
#size-cells = <1>;
@@ -193,7 +571,37 @@
reg = <0xf90b9000 0x1000>, <0xf9009000 0x1000>;
};
- saw_l2: power-controller@f9012000 {
+ clock-controller@f9016000 {
+ compatible = "qcom,hfpll";
+ reg = <0xf9016000 0x30>;
+ clock-output-names = "hfpll_l2";
+ };
+
+ clock-controller@f908a000 {
+ compatible = "qcom,hfpll";
+ reg = <0xf908a000 0x30>, <0xf900a000 0x30>;
+ clock-output-names = "hfpll0";
+ };
+
+ clock-controller@f909a000 {
+ compatible = "qcom,hfpll";
+ reg = <0xf909a000 0x30>, <0xf900a000 0x30>;
+ clock-output-names = "hfpll1";
+ };
+
+ clock-controller@f90aa000 {
+ compatible = "qcom,hfpll";
+ reg = <0xf90aa000 0x30>, <0xf900a000 0x30>;
+ clock-output-names = "hfpll2";
+ };
+
+ clock-controller@f90ba000 {
+ compatible = "qcom,hfpll";
+ reg = <0xf90ba000 0x30>, <0xf900a000 0x30>;
+ clock-output-names = "hfpll3";
+ };
+
+ saw_l2: regulator@f9012000 {
compatible = "qcom,saw2";
reg = <0xf9012000 0x1000>;
regulator;
@@ -228,6 +636,7 @@
compatible = "qcom,gcc-msm8974";
#clock-cells = <1>;
#reset-cells = <1>;
+ #power-domain-cells = <1>;
reg = <0xfc400000 0x4000>;
};
@@ -240,6 +649,7 @@
compatible = "qcom,mmcc-msm8974";
#clock-cells = <1>;
#reset-cells = <1>;
+ #power-domain-cells = <1>;
reg = <0xfd8c0000 0x6000>;
};
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index f4a38a76db64..6c9fa6bd7eae 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -9,6 +9,9 @@ config DMABOUNCE
bool
select ZONE_DMA
+config KRAIT_L2_ACCESSORS
+ bool
+
config SHARP_LOCOMO
bool
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 11a42f8bf8a4..263022213664 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_FIQ_GLUE) += fiq_glue.o fiq_glue_setup.o
obj-$(CONFIG_ICST) += icst.o
obj-$(CONFIG_SA1111) += sa1111.o
obj-$(CONFIG_DMABOUNCE) += dmabounce.o
+obj-$(CONFIG_KRAIT_L2_ACCESSORS) += krait-l2-accessors.o
obj-$(CONFIG_SHARP_LOCOMO) += locomo.o
obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o
obj-$(CONFIG_SHARP_SCOOP) += scoop.o
diff --git a/arch/arm/common/krait-l2-accessors.c b/arch/arm/common/krait-l2-accessors.c
new file mode 100644
index 000000000000..5d514bbc88a6
--- /dev/null
+++ b/arch/arm/common/krait-l2-accessors.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/export.h>
+
+#include <asm/barrier.h>
+#include <asm/krait-l2-accessors.h>
+
+static DEFINE_RAW_SPINLOCK(krait_l2_lock);
+
+void krait_set_l2_indirect_reg(u32 addr, u32 val)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&krait_l2_lock, flags);
+ /*
+ * Select the L2 window by poking l2cpselr, then write to the window
+ * via l2cpdr.
+ */
+ asm volatile ("mcr p15, 3, %0, c15, c0, 6 @ l2cpselr" : : "r" (addr));
+ isb();
+ asm volatile ("mcr p15, 3, %0, c15, c0, 7 @ l2cpdr" : : "r" (val));
+ isb();
+
+ raw_spin_unlock_irqrestore(&krait_l2_lock, flags);
+}
+EXPORT_SYMBOL(krait_set_l2_indirect_reg);
+
+u32 krait_get_l2_indirect_reg(u32 addr)
+{
+ u32 val;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&krait_l2_lock, flags);
+ /*
+ * Select the L2 window by poking l2cpselr, then read from the window
+ * via l2cpdr.
+ */
+ asm volatile ("mcr p15, 3, %0, c15, c0, 6 @ l2cpselr" : : "r" (addr));
+ isb();
+ asm volatile ("mrc p15, 3, %0, c15, c0, 7 @ l2cpdr" : "=r" (val));
+
+ raw_spin_unlock_irqrestore(&krait_l2_lock, flags);
+
+ return val;
+}
+EXPORT_SYMBOL(krait_get_l2_indirect_reg);
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 03deb7fb35e8..46e4087c3546 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -124,8 +124,15 @@ CONFIG_KEXEC=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_STAT_DETAILS=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_ARM_QCOM_CPUFREQ=y
CONFIG_CPU_IDLE=y
CONFIG_ARM_CPUIDLE=y
+CONFIG_ARM_QCOM_CPUIDLE=y
CONFIG_NEON=y
CONFIG_KERNEL_MODE_NEON=y
CONFIG_ARM_ZYNQ_CPUIDLE=y
@@ -153,10 +160,20 @@ CONFIG_CAN_DEV=y
CONFIG_CAN_AT91=m
CONFIG_CAN_XILINXCAN=y
CONFIG_CAN_MCP251X=y
-CONFIG_BT=m
+CONFIG_BT=y
CONFIG_BT_MRVL=m
CONFIG_BT_MRVL_SDIO=m
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_ATH3K=y
CONFIG_CFG80211=m
+CONFIG_CFG80211_WEXT=y
CONFIG_MAC80211=m
CONFIG_RFKILL=y
CONFIG_RFKILL_INPUT=y
@@ -202,6 +219,7 @@ CONFIG_SATA_RCAR=y
CONFIG_NETDEVICES=y
CONFIG_HIX5HD2_GMAC=y
CONFIG_SUN4I_EMAC=y
+CONFIG_ATL1C=y
CONFIG_MACB=y
CONFIG_NET_CALXEDA_XGMAC=y
CONFIG_IGB=y
@@ -224,6 +242,10 @@ CONFIG_USB_PEGASUS=y
CONFIG_USB_USBNET=y
CONFIG_USB_NET_SMSC75XX=y
CONFIG_USB_NET_SMSC95XX=y
+CONFIG_ATH_CARDS=m
+CONFIG_ATH_DEBUG=y
+CONFIG_ATH6KL=m
+CONFIG_ATH6KL_SDIO=m
CONFIG_BRCMFMAC=m
CONFIG_RT2X00=m
CONFIG_RT2800USB=m
@@ -301,6 +323,7 @@ CONFIG_I2C_DIGICOLOR=m
CONFIG_I2C_GPIO=m
CONFIG_I2C_EXYNOS5=y
CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_QUP=y
CONFIG_I2C_RIIC=y
CONFIG_I2C_S3C2410=y
CONFIG_I2C_SH_MOBILE=y
@@ -330,9 +353,16 @@ CONFIG_SPI_TEGRA20_SFLASH=y
CONFIG_SPI_TEGRA20_SLINK=y
CONFIG_SPI_XILINX=y
CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
CONFIG_PINCTRL_AS3722=y
CONFIG_PINCTRL_PALMAS=y
+CONFIG_PINCTRL_APQ8064=y
CONFIG_PINCTRL_APQ8084=y
+CONFIG_PINCTRL_IPQ8064=y
+CONFIG_PINCTRL_MSM8960=y
+CONFIG_PINCTRL_MSM8X74=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_PINCTRL_QCOM_SSBI_PMIC=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_GENERIC_PLATFORM=y
CONFIG_GPIO_DAVINCI=y
@@ -371,6 +401,7 @@ CONFIG_DAVINCI_WATCHDOG=m
CONFIG_EXYNOS_THERMAL=m
CONFIG_ST_THERMAL_SYSCFG=y
CONFIG_ST_THERMAL_MEMMAP=y
+CONFIG_QCOM_TSENS=y
CONFIG_WATCHDOG=y
CONFIG_XILINX_WATCHDOG=y
CONFIG_ARM_SP805_WATCHDOG=y
@@ -391,6 +422,8 @@ CONFIG_MFD_MAX14577=y
CONFIG_MFD_MAX77686=y
CONFIG_MFD_MAX77693=y
CONFIG_MFD_MAX8907=y
+CONFIG_MFD_PM8921_CORE=y
+CONFIG_MFD_QCOM_RPM=y
CONFIG_MFD_SEC_CORE=y
CONFIG_MFD_STMPE=y
CONFIG_MFD_PALMAS=y
@@ -415,6 +448,7 @@ CONFIG_REGULATOR_MAX77802=m
CONFIG_REGULATOR_PALMAS=y
CONFIG_REGULATOR_PBIAS=y
CONFIG_REGULATOR_PWM=m
+CONFIG_REGULATOR_QCOM_RPM=y
CONFIG_REGULATOR_S2MPS11=y
CONFIG_REGULATOR_S5M8767=y
CONFIG_REGULATOR_TPS51632=y
@@ -464,6 +498,7 @@ CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_LCD_PLATFORM=m
CONFIG_BACKLIGHT_PWM=y
CONFIG_BACKLIGHT_AS3711=y
+CONFIG_BACKLIGHT_GPIO=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
CONFIG_SOUND=m
@@ -494,6 +529,7 @@ CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_MVEBU=y
CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_MSM=y
CONFIG_USB_EHCI_EXYNOS=y
CONFIG_USB_EHCI_TEGRA=y
CONFIG_USB_EHCI_HCD_STI=y
@@ -514,12 +550,13 @@ CONFIG_KEYSTONE_USB_PHY=y
CONFIG_OMAP_USB3=y
CONFIG_USB_GPIO_VBUS=y
CONFIG_USB_ISP1301=y
+CONFIG_USB_MSM_OTG=y
CONFIG_USB_MXS_PHY=y
CONFIG_USB_RCAR_PHY=m
CONFIG_USB_GADGET=y
CONFIG_USB_RENESAS_USBHS_UDC=m
CONFIG_MMC=y
-CONFIG_MMC_BLOCK_MINORS=16
+CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_ARMMMCI=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
@@ -536,6 +573,7 @@ CONFIG_MMC_SDHCI_ST=y
CONFIG_MMC_OMAP=y
CONFIG_MMC_OMAP_HS=y
CONFIG_MMC_ATMELMCI=y
+CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_MVSDIO=y
CONFIG_MMC_SDHI=y
CONFIG_MMC_DW=y
@@ -582,6 +620,7 @@ CONFIG_RTC_DRV_DIGICOLOR=m
CONFIG_RTC_DRV_S5M=m
CONFIG_RTC_DRV_S3C=m
CONFIG_RTC_DRV_PL031=y
+CONFIG_RTC_DRV_PM8XXX=y
CONFIG_RTC_DRV_AT91RM9200=m
CONFIG_RTC_DRV_AT91SAM9=m
CONFIG_RTC_DRV_VT8500=y
@@ -605,6 +644,8 @@ CONFIG_IMX_SDMA=y
CONFIG_IMX_DMA=y
CONFIG_MXS_DMA=y
CONFIG_DMA_OMAP=y
+CONFIG_QCOM_BAM_DMA=y
+CONFIG_QCOM_ADM=y
CONFIG_XILINX_VDMA=y
CONFIG_DMA_SUN6I=y
CONFIG_STAGING=y
@@ -627,6 +668,12 @@ CONFIG_APQ_MMCC_8084=y
CONFIG_MSM_GCC_8660=y
CONFIG_MSM_MMCC_8960=y
CONFIG_MSM_MMCC_8974=y
+CONFIG_QCOM_RPMCC=y
+CONFIG_QCOM_HFPLL=y
+CONFIG_KPSS_XCC=y
+CONFIG_KRAITCC=y
+CONFIG_MSM_RPMCC_8064=y
+CONFIG_MSM_IOMMU=y
CONFIG_TEGRA_IOMMU_GART=y
CONFIG_TEGRA_IOMMU_SMMU=y
CONFIG_PM_DEVFREQ=y
@@ -651,6 +698,7 @@ CONFIG_PHY_HIX5HD2_SATA=y
CONFIG_PWM_STI=m
CONFIG_OMAP_USB2=y
CONFIG_TI_PIPE3=y
+CONFIG_PHY_QCOM_APQ8064_SATA=y
CONFIG_PHY_MIPHY28LP=y
CONFIG_PHY_MIPHY365X=y
CONFIG_PHY_RCAR_GEN2=m
@@ -659,6 +707,8 @@ CONFIG_PHY_STIH407_USB=y
CONFIG_PHY_SUN4I_USB=y
CONFIG_PHY_SUN9I_USB=y
CONFIG_PHY_SAMSUNG_USB2=m
+CONFIG_NVMEM=y
+CONFIG_QCOM_QFPROM=y
CONFIG_EXT4_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_MSDOS_FS=y
@@ -682,7 +732,6 @@ CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_LOCKUP_DETECTOR=y
CONFIG_CRYPTO_DEV_TEGRA_AES=y
-CONFIG_CPUFREQ_DT=y
CONFIG_KEYSTONE_IRQ=y
CONFIG_CRYPTO_DEV_SUN4I_SS=m
CONFIG_ARM_CRYPTO=y
@@ -699,3 +748,7 @@ CONFIG_CRYPTO_GHASH_ARM_CE=m
CONFIG_CRYPTO_DEV_ATMEL_AES=m
CONFIG_CRYPTO_DEV_ATMEL_TDES=m
CONFIG_CRYPTO_DEV_ATMEL_SHA=m
+CONFIG_FUNCTION_TRACER=y
+CONFIG_SCHED_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_TRACEPOINT_BENCHMARK=y
diff --git a/arch/arm/configs/qcom_defconfig b/arch/arm/configs/qcom_defconfig
index ff7985ba226e..77972a6ca8ad 100644
--- a/arch/arm/configs/qcom_defconfig
+++ b/arch/arm/configs/qcom_defconfig
@@ -1,8 +1,10 @@
CONFIG_SYSVIPC=y
+CONFIG_FHANDLE=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS_ALL=y
@@ -21,19 +23,36 @@ CONFIG_ARCH_QCOM=y
CONFIG_ARCH_MSM8X60=y
CONFIG_ARCH_MSM8960=y
CONFIG_ARCH_MSM8974=y
+CONFIG_PCI=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_STUB=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCIEPORTBUS=y
CONFIG_SMP=y
CONFIG_PREEMPT=y
CONFIG_AEABI=y
CONFIG_HIGHMEM=y
CONFIG_HIGHPTE=y
CONFIG_CLEANCACHE=y
+CONFIG_CMA=y
+CONFIG_SECCOMP=y
CONFIG_ARM_APPENDED_DTB=y
CONFIG_ARM_ATAG_DTB_COMPAT=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_ARM_QCOM_CPUFREQ=y
CONFIG_CPU_IDLE=y
CONFIG_ARM_CPUIDLE=y
+CONFIG_ARM_QCOM_CPUIDLE=y
CONFIG_VFP=y
CONFIG_NEON=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_PM_RUNTIME=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -48,25 +67,44 @@ CONFIG_IP_PNP_DHCP=y
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
-CONFIG_CFG80211=y
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_ATH3K=y
+CONFIG_CFG80211=m
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=m
CONFIG_RFKILL=y
+CONFIG_RFKILL_INPUT=y
+CONFIG_RFKILL_GPIO=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=64
CONFIG_MTD=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_M25P80=y
CONFIG_MTD_SPI_NOR=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
-CONFIG_SCSI=y
+CONFIG_EEPROM_AT24=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=y
CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y
CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI_PLATFORM=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
+CONFIG_ATL1C=y
CONFIG_KS8851=y
CONFIG_MDIO_BITBANG=y
CONFIG_MDIO_GPIO=y
@@ -76,6 +114,9 @@ CONFIG_SLIP_MODE_SLIP6=y
CONFIG_USB_USBNET=y
# CONFIG_USB_NET_AX8817X is not set
# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_ATH_CARDS=m
+CONFIG_ATH6KL=m
+CONFIG_ATH6KL_SDIO=m
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_MOUSE_PS2 is not set
@@ -88,7 +129,6 @@ CONFIG_SERIO_LIBPS2=y
CONFIG_SERIAL_MSM=y
CONFIG_SERIAL_MSM_CONSOLE=y
CONFIG_HW_RANDOM=y
-CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_QUP=y
CONFIG_SPI=y
@@ -99,18 +139,87 @@ CONFIG_PINCTRL_APQ8084=y
CONFIG_PINCTRL_IPQ8064=y
CONFIG_PINCTRL_MSM8960=y
CONFIG_PINCTRL_MSM8X74=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_PINCTRL_QCOM_SSBI_PMIC=y
CONFIG_GPIOLIB=y
-CONFIG_DEBUG_GPIO=y
CONFIG_GPIO_SYSFS=y
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_MSM=y
CONFIG_THERMAL=y
CONFIG_MFD_QCOM_RPM=y
+CONFIG_CPU_THERMAL=y
+CONFIG_QCOM_TSENS=y
+CONFIG_WATCHDOG=y
+CONFIG_QCOM_WDT=y
+CONFIG_MFD_PM8921_CORE=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_SYSCON=y
CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_QCOM_RPM=y
CONFIG_MEDIA_SUPPORT=y
-CONFIG_FB=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=y
+CONFIG_USB_GSPCA=y
+CONFIG_USB_M5602=m
+CONFIG_USB_STV06XX=m
+CONFIG_USB_GL860=m
+CONFIG_USB_GSPCA_BENQ=m
+CONFIG_USB_GSPCA_CONEX=m
+CONFIG_USB_GSPCA_CPIA1=m
+CONFIG_USB_GSPCA_DTCS033=m
+CONFIG_USB_GSPCA_ETOMS=m
+CONFIG_USB_GSPCA_FINEPIX=m
+CONFIG_USB_GSPCA_JEILINJ=m
+CONFIG_USB_GSPCA_JL2005BCD=m
+CONFIG_USB_GSPCA_KINECT=m
+CONFIG_USB_GSPCA_KONICA=m
+CONFIG_USB_GSPCA_MARS=m
+CONFIG_USB_GSPCA_MR97310A=m
+CONFIG_USB_GSPCA_NW80X=m
+CONFIG_USB_GSPCA_OV519=m
+CONFIG_USB_GSPCA_OV534=m
+CONFIG_USB_GSPCA_OV534_9=m
+CONFIG_USB_GSPCA_PAC207=m
+CONFIG_USB_GSPCA_PAC7302=m
+CONFIG_USB_GSPCA_PAC7311=m
+CONFIG_USB_GSPCA_SE401=m
+CONFIG_USB_GSPCA_SN9C2028=m
+CONFIG_USB_GSPCA_SN9C20X=m
+CONFIG_USB_GSPCA_SONIXB=m
+CONFIG_USB_GSPCA_SONIXJ=m
+CONFIG_USB_GSPCA_SPCA500=m
+CONFIG_USB_GSPCA_SPCA501=m
+CONFIG_USB_GSPCA_SPCA505=m
+CONFIG_USB_GSPCA_SPCA506=m
+CONFIG_USB_GSPCA_SPCA508=m
+CONFIG_USB_GSPCA_SPCA561=m
+CONFIG_USB_GSPCA_SPCA1528=m
+CONFIG_USB_GSPCA_SQ905=m
+CONFIG_USB_GSPCA_SQ905C=m
+CONFIG_USB_GSPCA_SQ930X=m
+CONFIG_USB_GSPCA_STK014=m
+CONFIG_USB_GSPCA_STK1135=m
+CONFIG_USB_GSPCA_STV0680=m
+CONFIG_USB_GSPCA_SUNPLUS=m
+CONFIG_USB_GSPCA_T613=m
+CONFIG_USB_GSPCA_TOPRO=m
+CONFIG_USB_GSPCA_TV8532=m
+CONFIG_USB_GSPCA_VC032X=m
+CONFIG_USB_GSPCA_VICAM=m
+CONFIG_USB_GSPCA_XIRLINK_CIT=m
+CONFIG_USB_GSPCA_ZC3XX=m
+CONFIG_USB_PWC=m
+CONFIG_USB_ZR364XX=m
+CONFIG_DRM=y
+CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GPIO=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_DYNAMIC_MINORS=y
@@ -119,15 +228,32 @@ CONFIG_SND_DYNAMIC_MINORS=y
# CONFIG_SND_USB is not set
CONFIG_SND_SOC=y
CONFIG_HID_BATTERY_STRENGTH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_LOGITECH=m
+CONFIG_HID_MAGICMOUSE=m
+CONFIG_HID_MICROSOFT=m
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_OTG_FSM=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_MSM=y
CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_CHIPIDEA_HOST=y
CONFIG_USB_SERIAL=y
+CONFIG_USB_MSM_OTG=y
CONFIG_USB_GADGET=y
CONFIG_USB_GADGET_DEBUG_FILES=y
CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_ETH=m
+CONFIG_USB_GADGETFS=m
+CONFIG_USB_FUNCTIONFS=m
+CONFIG_USB_FUNCTIONFS_RNDIS=y
+CONFIG_USB_MASS_STORAGE=m
CONFIG_MMC=y
CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_ARMMMCI=y
@@ -135,8 +261,10 @@ CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PM8XXX=y
CONFIG_DMADEVICES=y
CONFIG_QCOM_BAM_DMA=y
+CONFIG_QCOM_ADM=y
CONFIG_STAGING=y
CONFIG_COMMON_CLK_QCOM=y
CONFIG_APQ_MMCC_8084=y
@@ -145,23 +273,35 @@ CONFIG_MSM_GCC_8660=y
CONFIG_MSM_LCC_8960=y
CONFIG_MSM_MMCC_8960=y
CONFIG_MSM_MMCC_8974=y
+CONFIG_QCOM_HFPLL=y
+CONFIG_KPSS_XCC=y
+CONFIG_KRAITCC=y
+CONFIG_MSM_RPMCC_8064=y
CONFIG_MSM_IOMMU=y
CONFIG_QCOM_GSBI=y
CONFIG_QCOM_PM=y
CONFIG_PHY_QCOM_APQ8064_SATA=y
CONFIG_PHY_QCOM_IPQ806X_SATA=y
+CONFIG_NVMEM=y
+CONFIG_QCOM_QFPROM=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_EXT4_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_AUTOFS4_FS=y
CONFIG_FUSE_FS=y
CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
CONFIG_NFS_V3_ACL=y
CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V3_ACL=y
CONFIG_CIFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
@@ -173,5 +313,5 @@ CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_LOCKUP_DETECTOR=y
# CONFIG_DETECT_HUNG_TASK is not set
-# CONFIG_SCHED_DEBUG is not set
+CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y
diff --git a/arch/arm/include/asm/krait-l2-accessors.h b/arch/arm/include/asm/krait-l2-accessors.h
new file mode 100644
index 000000000000..48fe5527bc01
--- /dev/null
+++ b/arch/arm/include/asm/krait-l2-accessors.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASMARM_KRAIT_L2_ACCESSORS_H
+#define __ASMARM_KRAIT_L2_ACCESSORS_H
+
+extern void krait_set_l2_indirect_reg(u32 addr, u32 val);
+extern u32 krait_get_l2_indirect_reg(u32 addr);
+
+#endif
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index 2256cd1e25d1..afa30dd9521d 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -4,6 +4,8 @@ menuconfig ARCH_QCOM
select ARM_GIC
select ARM_AMBA
select PINCTRL
+ select MIGHT_HAVE_PCI
+ select PCI_DOMAINS if PCI
select QCOM_SCM if SMP
help
Support for Qualcomm's devicetree based systems.
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1a7815e5421b..7e2b75f8b046 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2126,6 +2126,15 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
struct dma_map_ops *dma_ops;
dev->archdata.dma_coherent = coherent;
+
+ /*
+ * Don't override the dma_ops if they have already been set. Ideally
+ * this should be the only location where dma_ops are set, remove this
+ * check when all other callers of set_dma_ops will have disappeared.
+ */
+ if (dev->archdata.dma_ops)
+ return;
+
if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
dma_ops = arm_get_iommu_dma_map_ops(coherent);
else
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7a59117a2bd9..3aa5028607ae 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -156,6 +156,32 @@ config NEED_DMA_MAP_STATE
config NEED_SG_DMA_LENGTH
def_bool y
+config ARM64_DMA_USE_IOMMU
+ bool
+ select ARM_HAS_SG_CHAIN
+ select NEED_SG_DMA_LENGTH
+
+if ARM64_DMA_USE_IOMMU
+
+config ARM64_DMA_IOMMU_ALIGNMENT
+ int "Maximum PAGE_SIZE order of alignment for DMA IOMMU buffers"
+ range 4 9
+ default 8
+ help
+ The DMA mapping framework by default aligns all buffers to the smallest
+ PAGE_SIZE order which is greater than or equal to the requested buffer
+ size. This works well for buffers up to a few hundred kilobytes, but
+ for larger buffers it is just a waste of address space. Drivers with a
+ relatively small addressing window (like 64 MiB) might run out of
+ virtual space with just a few allocations.
+
+ With this parameter you can specify the maximum PAGE_SIZE order for
+ DMA IOMMU buffers. Larger buffers will be aligned only to this
+ specified order. The order is expressed as a power of two multiplied
+ by the PAGE_SIZE.
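+
+	  For example (an illustrative calculation, assuming a 4 KiB
+	  PAGE_SIZE): with the default order of 8, a buffer larger than
+	  2^8 * 4 KiB = 1 MiB is aligned only to 1 MiB instead of to its
+	  own size rounded up to the next power of two.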
+
+endif
+
config SMP
def_bool y
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi
index e03c11d9d834..181ac9c16460 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi
@@ -10,6 +10,14 @@
};
};
+ pm8916_gpios_default: default {
+ usb_hub_reset_pm {
+ pins = "gpio1";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ output-low;
+ };
+ };
+
usb_sw_sel_pm: usb_sw_sel_pm {
pinconf {
pins = "gpio4";
@@ -34,6 +42,25 @@
pinconf {
pins = "mpp2", "mpp3";
function = PMIC_GPIO_FUNC_NORMAL;
+ power-source = <PM8916_GPIO_VPH>;
+ output-low; // USB device mode
+ };
+ };
+
+ pm8916_gpios_leds_default: pm8916_gpios_leds_default {
+ pinconf {
+ pins = "gpio1", "gpio2";
+ function = PMIC_GPIO_FUNC_NORMAL;
+ output-low;
+ };
+ };
+};
+
+&pm8916_mpps {
+ pm8916_mpps_leds_default: pm8916_mpps_leds_default {
+ pinconf {
+ pins = "mpp2", "mpp3";
+ function = "digital";
output-low;
};
};
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi
index cbeee0bcdf52..c7db0aeaebaa 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi
@@ -9,5 +9,69 @@
function = "gpio";
output-low;
};
+
+ usb_id_default: usb_id_default {
+ pins = "gpio121";
+ function = "gpio";
+ drive-strength = <8>;
+ input-enable;
+ bias-pull-up;
+ };
+ };
+
+ msm_gpios_leds_default: msm_gpios_leds_default {
+ pinconf {
+ pins = "gpio21", "gpio120";
+ function = "gpio";
+ output-low;
+ };
+ };
+
+	adv7533_int_active: adv7533_int_active {
+ pinmux {
+ function = "gpio";
+ pins = "gpio31";
+ };
+ pinconf {
+ pins = "gpio31";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ adv7533_int_suspend: adv7533_int_suspend {
+ pinmux {
+ function = "gpio";
+ pins = "gpio31";
+ };
+ pinconf {
+ pins = "gpio31";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ adv7533_switch_active: adv7533_switch_active {
+ pinmux {
+ function = "gpio";
+ pins = "gpio32";
+ };
+ pinconf {
+ pins = "gpio32";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ adv7533_switch_suspend: adv7533_switch_suspend {
+ pinmux {
+ function = "gpio";
+ pins = "gpio32";
+ };
+ pinconf {
+ pins = "gpio32";
+ drive-strength = <2>;
+ bias-disable;
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
index 825f489a2af7..b0cac5337fcc 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts
@@ -12,10 +12,11 @@
*/
/dts-v1/;
-
+#include <dt-bindings/arm/qcom-ids.h>
#include "apq8016-sbc.dtsi"
/ {
model = "Qualcomm Technologies, Inc. APQ 8016 SBC";
compatible = "qcom,apq8016-sbc", "qcom,apq8016", "qcom,sbc";
+ qcom,board-id = <QCOM_BRD_ID(SBC, 1, 0) QCOM_BRD_SUBTYPE_DEFAULT>;
};
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
index 66804ffbc6d2..bc40ec5945aa 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
@@ -11,14 +11,17 @@
* GNU General Public License for more details.
*/
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/sound/apq8016-lpass.h>
#include "msm8916.dtsi"
#include "pm8916.dtsi"
#include "apq8016-sbc-soc-pins.dtsi"
#include "apq8016-sbc-pmic-pins.dtsi"
-
+#include "msm8916-mdss.dtsi"
/ {
aliases {
serial0 = &blsp1_uart2;
+ serial1 = &blsp1_uart1;
};
chosen {
@@ -33,6 +36,31 @@
pinctrl-1 = <&blsp1_uart2_sleep>;
};
+ i2c@78b6000 {
+ /* On Low speed expansion */
+ status = "okay";
+ };
+
+ i2c@78b8000 {
+ /* On High speed expansion */
+ status = "okay";
+ };
+
+ i2c@78ba000 {
+ /* On Low speed expansion */
+ status = "okay";
+ };
+
+ spi@78b7000 {
+ /* On High speed expansion */
+ status = "okay";
+ };
+
+ spi@78b9000 {
+ /* On Low speed expansion */
+ status = "okay";
+ };
+
leds {
pinctrl-names = "default";
pinctrl-0 = <&msmgpio_leds>,
@@ -84,4 +112,228 @@
};
};
};
+
+ usb2513 {
+ compatible = "smsc,usb3503";
+ reset-gpios = <&pm8916_gpios 3 GPIO_ACTIVE_LOW>;
+ initial-mode = <1>;
+ };
+
+ usb_id: usb-id {
+ interrupt-parent = <&msmgpio>;
+ compatible = "linux,extcon-usb-gpio";
+ id-gpio = <&msmgpio 121 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_id_default>;
+ };
+
+ leds {
+ pinctrl-names = "default";
+ pinctrl-0 = <&msm_gpios_leds_default>,
+ <&pm8916_gpios_leds_default>,
+ <&pm8916_mpps_leds_default>;
+
+ compatible = "gpio-leds";
+
+ led@1 {
+ label = "apq8016-sbc:green:user0";
+ gpios = <&msmgpio 21 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ default-state = "off";
+ };
+
+ led@2 {
+ label = "apq8016-sbc:green:user1";
+ gpios = <&msmgpio 120 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "mmc0";
+ default-state = "off";
+ };
+
+ led@3 {
+ label = "apq8016-sbc:green:user2";
+ gpios = <&pm8916_gpios 1 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "mmc1";
+ default-state = "off";
+ };
+
+ led@4 {
+ label = "apq8016-sbc:green:user3";
+ gpios = <&pm8916_gpios 2 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "none";
+ default-state = "off";
+ };
+
+ led@5 {
+ label = "apq8016-sbc:yellow:wifi";
+ gpios = <&pm8916_mpps 2 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "wlan";
+ default-state = "off";
+ };
+
+ led@6 {
+ label = "apq8016-sbc:blue:bt";
+ gpios = <&pm8916_mpps 3 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "bt";
+ default-state = "off";
+ };
+ };
+
+};
+
+&blsp_dma {
+ status = "okay";
+};
+
+&blsp1_uart1 {
+ status = "okay";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&blsp1_uart1_default>;
+ pinctrl-1 = <&blsp1_uart1_sleep>;
+};
+
+&blsp_spi3 {
+ status = "okay";
+};
+
+&sdhc_1 {
+ vmmc-supply = <&pm8916_l8>;
+ vqmmc-supply = <&pm8916_l5>;
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on>;
+ pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off>;
+ status = "okay";
+};
+
+&sdhc_2 {
+ vmmc-supply = <&pm8916_l11>;
+ vqmmc-supply = <&pm8916_l12>;
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
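+
+	/*
+	 * Interrupt map: local interrupt 0 -> GIC SPI 125 (hc_irq),
+	 * 1 -> GIC SPI 221 (pwr_irq), 2 -> msmgpio 38 (status_irq,
+	 * the same line used as cd-gpios below).
+	 */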
+ #address-cells = <0>;
+ interrupt-parent = <&sdhc_2>;
+ interrupts = <0 1 2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xffffffff>;
+ interrupt-map = <0 &intc 0 125 0
+ 1 &intc 0 221 0
+ 2 &msmgpio 38 0>;
+ interrupt-names = "hc_irq", "pwr_irq", "status_irq";
+ cd-gpios = <&msmgpio 38 0x1>;
+
+ status = "okay";
+};
+
+&usb_dev {
+ extcon = <&usb_id>, <&usb_id>;
+ status = "okay";
+};
+
+&usb_host {
+ status = "okay";
+};
+
+&usb_otg {
+ extcon = <&usb_id>, <&usb_id>;
+ dr_mode = "otg";
+ status = "okay";
+ switch-gpio = <&pm8916_gpios 4 GPIO_ACTIVE_HIGH>; // D+/D- lines: 1 - Routed to HUB, 0 - Device
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb_sw_sel_pm>;
+};
+
+&blsp_i2c4 {
+ status = "ok";
+
+ adv_bridge: bridge@39 {
+ status = "ok";
+ compatible = "adi,adv7533";
+ reg = <0x39>;
+ interrupt-parent = <&msmgpio>;
+ //interrupts = <31 2>;
+ adi,dsi-lanes = <4>;
+ pd-gpios = <&msmgpio 32 0>;
+ pinctrl-names = "default","sleep";
+ pinctrl-0 = <&adv7533_int_active &adv7533_switch_active>;
+ pinctrl-1 = <&adv7533_int_suspend &adv7533_switch_suspend>;
+ #sound-dai-cells = <0>;
+
+ port {
+ adv_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ };
+ };
+
+ };
+};
+
+&mdss_dsi0 {
+ status = "ok";
+
+ port {
+ dsi_out: endpoint {
+ remote-endpoint = <&adv_in>;
+ };
+ };
+};
+
+&lpass {
+ status = "okay";
+};
+
+&wcd_codec {
+ status = "okay";
+ clocks = <&gcc GCC_CODEC_DIGCODEC_CLK>;
+ clock-names = "mclk";
+ digital = <&wcd_digital>;
+};
+
+/*
+ * Audio routing:
+ *
+ * Internal codec:
+ *	playback - Primary MI2S
+ *	capture  - Tertiary MI2S
+ *
+ * External primary:
+ *	playback - Secondary MI2S
+ *	capture  - Quaternary MI2S
+ *
+ * External secondary:
+ *	playback - Quaternary MI2S
+ *	capture  - Quaternary MI2S
+ */
+&sound {
+ status = "okay";
+ pinctrl-0 = <&cdc_pdm_lines_act &ext_sec_tlmm_lines_act &ext_mclk_tlmm_lines_act>;
+ pinctrl-1 = <&cdc_pdm_lines_sus &ext_sec_tlmm_lines_sus &ext_mclk_tlmm_lines_sus>;
+ pinctrl-names = "default", "sleep";
+ qcom,model = "DB410c";
+
+
+ internal-codec-playback-dai-link@0 { /* I2S - Internal codec */
+ link-name = "WCD";
+ cpu { /* PRIMARY */
+ sound-dai = <&lpass MI2S_PRIMARY>;
+ };
+ codec {
+ sound-dai = <&wcd_codec 0>;
+ };
+ };
+
+ /* External Primary or External Secondary -ADV7533 HDMI */
+ external-dai-link@0 {
+ link-name = "ADV7533";
+
+ cpu { /* QUAT */
+ sound-dai = <&lpass MI2S_QUATERNARY>;
+ };
+ codec {
+ sound-dai = <&adv_bridge 0>;
+ };
+ };
+};
+
+/* default regulators required for mezzanine boards */
+&pm8916_l15 {
+ regulator-always-on;
};
diff --git a/arch/arm64/boot/dts/qcom/msm-iommu-v2.dtsi b/arch/arm64/boot/dts/qcom/msm-iommu-v2.dtsi
new file mode 100644
index 000000000000..6f73eb26aaec
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm-iommu-v2.dtsi
@@ -0,0 +1,238 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ gfx_iommu: qcom,iommu@1f00000 {
+ compatible = "qcom,msm-smmu-v2", "qcom,msm-mmu-500";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ reg = <0x1f00000 0x10000>;
+ reg-names = "iommu_base";
+ interrupts = <0 43 0>, <0 42 0>;
+ interrupt-names = "global_cfg_NS_irq", "global_cfg_S_irq";
+ label = "gfx_iommu";
+ qcom,iommu-secure-id = <18>;
+ clocks = <&gcc GCC_SMMU_CFG_CLK>,
+ <&gcc GCC_GFX_TCU_CLK>;
+ clock-names = "iface_clk", "core_clk";
+ status = "disabled";
+
+ qcom,iommu-ctx@1f09000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1f09000 0x1000>;
+ interrupts = <0 241 0>;
+ qcom,iommu-ctx-sids = <0>;
+ label = "gfx3d_user";
+ };
+
+ qcom,iommu-ctx@1f0a000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1f0a000 0x1000>;
+ interrupts = <0 242 0>;
+ qcom,iommu-ctx-sids = <1>;
+ label = "gfx3d_priv";
+ };
+ };
+
+ apps_iommu: qcom,iommu@1e00000 {
+ compatible = "qcom,msm-smmu-v2", "qcom,msm-mmu-500";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ reg = <0x1e00000 0x40000
+ 0x1ef0000 0x3000>;
+ reg-names = "iommu_base", "smmu_local_base";
+ interrupts = <0 43 0>, <0 42 0>;
+ interrupt-names = "global_cfg_NS_irq", "global_cfg_S_irq";
+ label = "apps_iommu";
+ qcom,iommu-secure-id = <17>;
+ clocks = <&gcc GCC_SMMU_CFG_CLK>,
+ <&gcc GCC_APSS_TCU_CLK>;
+ clock-names = "iface_clk", "core_clk";
+ qcom,cb-base-offset = <0x20000>;
+ status = "disabled";
+
+ qcom,iommu-ctx@1e22000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e22000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0x2000>;
+ label = "jpeg_enc0";
+ };
+
+ qcom,iommu-ctx@1e23000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e23000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0x400>;
+ label = "vfe";
+ };
+
+ qcom,iommu-ctx@1e24000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e24000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0xc00>;
+ label = "mdp_0";
+ };
+
+ venus_ns: qcom,iommu-ctx@1e25000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e25000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0x800 0x801 0x802 0x803
+ 0x804 0x805 0x807>;
+ label = "venus_ns";
+ };
+
+ qcom,iommu-ctx@1e26000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e26000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0x402>;
+ label = "cpp";
+ };
+
+ qcom,iommu-ctx@1e27000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e27000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0x1000>;
+ label = "mDSP";
+ };
+
+ qcom,iommu-ctx@1e28000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e28000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0x1400>;
+ label = "gss";
+ };
+
+ qcom,iommu-ctx@1e29000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e29000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0x1800>;
+ label = "a2";
+ };
+
+ qcom,iommu-ctx@1e32000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ qcom,secure-context;
+ reg = <0x1e32000 0x1000>;
+ interrupts = <0 70 0>, <0 70 0>;
+ qcom,iommu-ctx-sids = <0xc01>;
+ label = "mdp_1";
+ };
+
+ venus_sec_pixel: qcom,iommu-ctx@1e33000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ qcom,secure-context;
+ reg = <0x1e33000 0x1000>;
+ interrupts = <0 70 0>, <0 70 0>;
+ qcom,iommu-ctx-sids = <0x885>;
+ label = "venus_sec_pixel";
+ };
+
+ venus_sec_bitstream: qcom,iommu-ctx@1e34000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ qcom,secure-context;
+ reg = <0x1e34000 0x1000>;
+ interrupts = <0 70 0>, <0 70 0>;
+ qcom,iommu-ctx-sids = <0x880 0x881 0x882 0x883 0x884>;
+ label = "venus_sec_bitstream";
+ };
+
+ venus_sec_non_pixel: qcom,iommu-ctx@1e35000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ qcom,secure-context;
+ reg = <0x1e35000 0x1000>;
+ interrupts = <0 70 0>, <0 70 0>;
+ qcom,iommu-ctx-sids = <0x887 0x8a0>;
+ label = "venus_sec_non_pixel";
+ };
+
+ venus_fw: qcom,iommu-ctx@1e36000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ qcom,secure-context;
+ reg = <0x1e36000 0x1000>;
+ interrupts = <0 70 0>, <0 70 0>;
+ qcom,iommu-ctx-sids = <0x8c0 0x8c6>;
+ label = "venus_fw";
+ };
+
+ periph_rpm: qcom,iommu-ctx@1e37000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ qcom,secure-context;
+ reg = <0x1e37000 0x1000>;
+ interrupts = <0 70 0>, <0 70 0>;
+ qcom,iommu-ctx-sids = <0x40>;
+ label = "periph_rpm";
+ };
+
+ qcom,iommu-ctx@1e38000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e38000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0xC0 0xC4 0xC8 0xCC 0xD0 0xD3
+ 0xD4 0xD7 0xD8 0xDB 0xDC 0xDF
+ 0xF0 0xF3 0xF4 0xF7 0xF8 0xFB
+ 0xFC 0xFF>;
+ label = "periph_CE";
+ };
+
+ qcom,iommu-ctx@1e39000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e39000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0x280 0x283 0x284 0x287 0x288
+ 0x28B 0x28C 0x28F 0x290 0x293
+ 0x294 0x297 0x298 0x29B 0x29C
+ 0x29F>;
+ label = "periph_BLSP";
+ };
+
+ qcom,iommu-ctx@1e3a000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e3a000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0x100>;
+ label = "periph_SDC1";
+ };
+
+ qcom,iommu-ctx@1e3b000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e3b000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0x140>;
+ label = "periph_SDC2";
+ };
+
+ qcom,iommu-ctx@1e3c000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e3c000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0x1c0>;
+ label = "periph_audio";
+ };
+
+ qcom,iommu-ctx@1e3d000 {
+ compatible = "qcom,msm-smmu-v2-ctx";
+ reg = <0x1e3d000 0x1000>;
+ interrupts = <0 70 0>;
+ qcom,iommu-ctx-sids = <0x2c0>;
+ label = "periph_USB_HS1";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-coresight.dtsi b/arch/arm64/boot/dts/qcom/msm8916-coresight.dtsi
new file mode 100644
index 000000000000..c008dc7a32bb
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916-coresight.dtsi
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2013 - 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+
+ tpiu@820000 {
+ compatible = "arm,coresight-tpiu", "arm,primecell";
+ reg = <0x820000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ port {
+ tpiu_in: endpoint {
+ slave-mode;
+ remote-endpoint = <&replicator_out1>;
+ };
+ };
+ };
+
+ funnel@821000 {
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0x821000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /*
+			 * Input ports not described here:
+			 * 0 - connected to the Resource and Power Manager CPU ETM
+			 * 1 - not connected
+			 * 2 - connected to the Modem CPU ETM
+			 * 3 - not connected
+			 * 5 - not connected
+			 * 6 - connected through a funnel to the Wireless CPU ETM
+			 * 7 - connected to the STM component
+ */
+ port@4 {
+ reg = <4>;
+ funnel0_in4: endpoint {
+ slave-mode;
+ remote-endpoint = <&funnel1_out>;
+ };
+ };
+ port@8 {
+ reg = <0>;
+ funnel0_out: endpoint {
+ remote-endpoint = <&etf_in>;
+ };
+ };
+ };
+ };
+
+ replicator@824000 {
+ compatible = "qcom,coresight-replicator1x", "arm,primecell";
+ reg = <0x824000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ replicator_out0: endpoint {
+ remote-endpoint = <&etr_in>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ replicator_out1: endpoint {
+ remote-endpoint = <&tpiu_in>;
+ };
+ };
+ port@2 {
+ reg = <0>;
+ replicator_in: endpoint {
+ slave-mode;
+ remote-endpoint = <&etf_out>;
+ };
+ };
+ };
+ };
+
+ etf@825000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0x825000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ etf_out: endpoint {
+ remote-endpoint = <&replicator_in>;
+ };
+ };
+ port@1 {
+ reg = <0>;
+ etf_in: endpoint {
+ slave-mode;
+ remote-endpoint = <&funnel0_out>;
+ };
+ };
+ };
+ };
+
+ etr@826000 {
+ compatible = "arm,coresight-tmc", "arm,primecell";
+ reg = <0x826000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ port {
+ etr_in: endpoint {
+ slave-mode;
+ remote-endpoint = <&replicator_out0>;
+ };
+ };
+ };
+
+	funnel@841000 { /* APSS funnel; only 4 inputs are used */
+ compatible = "arm,coresight-funnel", "arm,primecell";
+ reg = <0x841000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ funnel1_in0: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm0_out>;
+ };
+ };
+ port@1 {
+ reg = <1>;
+ funnel1_in1: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm1_out>;
+ };
+ };
+ port@2 {
+ reg = <2>;
+ funnel1_in2: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm2_out>;
+ };
+ };
+ port@3 {
+ reg = <3>;
+ funnel1_in3: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm3_out>;
+ };
+ };
+ port@4 {
+ reg = <0>;
+ funnel1_out: endpoint {
+ remote-endpoint = <&funnel0_in4>;
+ };
+ };
+ };
+ };
+
+ etm@85c000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0x85c000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ cpu = <&CPU0>;
+
+ port {
+ etm0_out: endpoint {
+ remote-endpoint = <&funnel1_in0>;
+ };
+ };
+ };
+
+ etm@85d000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0x85d000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ cpu = <&CPU1>;
+
+ port {
+ etm1_out: endpoint {
+ remote-endpoint = <&funnel1_in1>;
+ };
+ };
+ };
+
+ etm@85e000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0x85e000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ cpu = <&CPU2>;
+
+ port {
+ etm2_out: endpoint {
+ remote-endpoint = <&funnel1_in2>;
+ };
+ };
+ };
+
+ etm@85f000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0x85f000 0x1000>;
+
+ clocks = <&rpmcc RPM_QDSS_CLK>, <&rpmcc RPM_QDSS_A_CLK>;
+ clock-names = "apb_pclk", "atclk";
+
+ cpu = <&CPU3>;
+
+ port {
+ etm3_out: endpoint {
+ remote-endpoint = <&funnel1_in3>;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-iommu.dtsi b/arch/arm64/boot/dts/qcom/msm8916-iommu.dtsi
new file mode 100644
index 000000000000..82acb8df2a8a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916-iommu.dtsi
@@ -0,0 +1,21 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm-iommu-v2.dtsi"
+
+&gfx_iommu {
+ status = "ok";
+};
+
+&apps_iommu {
+ status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-mdss.dtsi b/arch/arm64/boot/dts/qcom/msm8916-mdss.dtsi
new file mode 100644
index 000000000000..95d3d337ae48
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916-mdss.dtsi
@@ -0,0 +1,122 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ gpu: qcom,adreno-3xx@01c00000 {
+ compatible = "qcom,adreno-3xx";
+ #stream-id-cells = <16>;
+ reg = <0x01c00000 0x20000>;
+ reg-names = "kgsl_3d0_reg_memory";
+ interrupts = <0 33 0>;
+ interrupt-names = "kgsl_3d0_irq";
+ clock-names =
+ "core_clk",
+ "iface_clk",
+ "mem_clk",
+ "mem_iface_clk",
+ "alt_mem_iface_clk",
+ "gfx3d_clk_src";
+ clocks =
+ <&gcc GCC_OXILI_GFX3D_CLK>,
+ <&gcc GCC_OXILI_AHB_CLK>,
+ <&gcc GCC_OXILI_GMEM_CLK>,
+ <&gcc GCC_BIMC_GFX_CLK>,
+ <&gcc GCC_BIMC_GPU_CLK>,
+ <&gcc GFX3D_CLK_SRC>;
+ power-domains = <&gcc OXILI_GDSC>;
+ qcom,chipid = <0x03000600>;
+ qcom,gpu-pwrlevels {
+ compatible = "qcom,gpu-pwrlevels";
+ qcom,gpu-pwrlevel@0 {
+ qcom,gpu-freq = <400000000>;
+ };
+ qcom,gpu-pwrlevel@1 {
+ qcom,gpu-freq = <19200000>;
+ };
+ };
+ };
+
+ mdss_mdp: qcom,mdss_mdp@1a00000 {
+ compatible = "qcom,mdss_mdp";
+ reg = <0x1a00000 0x90000>,
+ <0x1ac8000 0x3000>;
+ reg-names = "mdp_phys", "vbif_phys";
+ interrupts = <0 72 0>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ power-domains = <&gcc MDSS_GDSC>;
+
+ connectors = <&mdss_dsi0>;
+ gpus = <&gpu>;
+
+ clocks = <&gcc GCC_MDSS_AHB_CLK>,
+ <&gcc GCC_MDSS_AXI_CLK>,
+ <&gcc MDP_CLK_SRC>,
+ <&gcc GCC_MDSS_MDP_CLK>,
+ <&gcc GCC_MDSS_MDP_CLK>,
+ <&gcc GCC_MDSS_VSYNC_CLK>;
+ clock-names = "iface_clk", "bus_clk", "core_clk_src",
+ "core_clk", "lut_clk", "vsync_clk";
+ };
+
+ mdss_dsi0: qcom,mdss_dsi@1a98000 {
+ compatible = "qcom,mdss-dsi-ctrl";
+ label = "MDSS DSI CTRL->0";
+ qcom,dsi-host-index = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x1a98000 0x25c>;
+ reg-names = "dsi_ctrl";
+
+ interrupt-parent = <&mdss_mdp>;
+ interrupts = <4 0>;
+
+ vdda-supply = <&pm8916_l2>;
+ vdd-supply = <&pm8916_l17>;
+ vddio-supply = <&pm8916_l6>;
+
+ clocks = <&gcc GCC_MDSS_MDP_CLK>,
+ <&gcc GCC_MDSS_AHB_CLK>,
+ <&gcc GCC_MDSS_AXI_CLK>,
+ <&gcc GCC_MDSS_AHB_CLK>,
+ <&gcc GCC_MDSS_BYTE0_CLK>,
+ <&gcc GCC_MDSS_PCLK0_CLK>,
+ <&gcc GCC_MDSS_ESC0_CLK>,
+ <&gcc BYTE0_CLK_SRC>,
+ <&gcc PCLK0_CLK_SRC>;
+ clock-names = "mdp_core_clk", "iface_clk", "bus_clk",
+ "core_mmss_clk", "byte_clk", "pixel_clk",
+ "core_clk", "byte_clk_src", "pixel_clk_src";
+ qcom,dsi-phy = <&mdss_dsi_phy0>;
+ };
+
+ mdss_dsi_phy0: qcom,mdss_dsi_phy@1a98300 {
+ compatible = "qcom,dsi-phy-28nm-lp";
+ qcom,dsi-phy-index = <0>;
+
+ power-domains = <&gcc MDSS_GDSC>;
+
+ reg-names = "dsi_pll", "dsi_phy", "dsi_phy_regulator";
+ reg = <0x1a98300 0xd4>,
+ <0x1a98500 0x280>,
+ <0x1a98780 0x30>;
+
+ clocks = <&gcc GCC_MDSS_AHB_CLK>;
+ clock-names = "iface_clk";
+
+ vddio-supply = <&pm8916_l6>;
+ };
+};
+
+
diff --git a/arch/arm64/boot/dts/qcom/msm8916-mtp.dts b/arch/arm64/boot/dts/qcom/msm8916-mtp.dts
index fced77f0fd3a..6c68b4ed4de2 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-mtp.dts
+++ b/arch/arm64/boot/dts/qcom/msm8916-mtp.dts
@@ -13,10 +13,13 @@
/dts-v1/;
+#include <dt-bindings/arm/qcom-ids.h>
#include "msm8916-mtp.dtsi"
/ {
model = "Qualcomm Technologies, Inc. MSM 8916 MTP";
compatible = "qcom,msm8916-mtp", "qcom,msm8916-mtp-smb1360",
"qcom,msm8916", "qcom,mtp";
+ qcom,board-id = <QCOM_BRD_ID(MTP, 1, 0) QCOM_BRD_SUBTYPE_DEFAULT>,
+ <QCOM_BRD_ID(MTP, 1, 0) QCOM_BRD_SUBTYPE_MTP8916_SMB1360>;
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8916-mtp.dtsi
index a1aa0b201e92..a222af32debe 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-mtp.dtsi
@@ -32,3 +32,42 @@
};
};
};
+
+&blsp_dma {
+ status = "okay";
+};
+
+&blsp_spi3 {
+ status = "okay";
+};
+
+&sdhc_1 {
+ vmmc-supply = <&pm8916_l8>;
+ vqmmc-supply = <&pm8916_l5>;
+
+ pinctrl-names = "active", "sleep";
+ pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on>;
+ pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off>;
+ status = "okay";
+};
+
+&sdhc_2 {
+ //vmmc-supply = <&pm8916_l11>;
+ vqmmc-supply = <&pm8916_l12>;
+
+ pinctrl-names = "active", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on &sdc2_cd_on>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off &sdc2_cd_off>;
+ #address-cells = <0>;
+ interrupt-parent = <&sdhc_2>;
+ interrupts = <0 1 2>;
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0xffffffff>;
+ interrupt-map = <0 &intc 0 125 0
+ 1 &intc 0 221 0
+ 2 &msmgpio 38 0>;
+ interrupt-names = "hc_irq", "pwr_irq", "status_irq";
+ cd-gpios = <&msmgpio 38 0x1>;
+
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
index 568956859088..5d2e4a5e91ef 100644
--- a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
@@ -12,6 +12,34 @@
*/
&msmgpio {
+ blsp1_uart1_default: blsp1_uart1_default {
+ pinmux {
+ function = "blsp_uart1";
+ // TX, RX, CTS_N, RTS_N
+ pins = "gpio0", "gpio1",
+ "gpio2", "gpio3";
+ };
+ pinconf {
+ pins = "gpio0", "gpio1",
+ "gpio2", "gpio3";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ blsp1_uart1_sleep: blsp1_uart1_sleep {
+ pinmux {
+ function = "blsp_uart1";
+ pins = "gpio0", "gpio1",
+ "gpio2", "gpio3";
+ };
+ pinconf {
+ pins = "gpio0", "gpio1",
+ "gpio2", "gpio3";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
blsp1_uart2_default: blsp1_uart2_default {
pinmux {
@@ -241,6 +269,30 @@
};
};
+ i2c2_default: i2c2_default {
+ pinmux {
+ function = "blsp_i2c2";
+ pins = "gpio6", "gpio7";
+ };
+ pinconf {
+ pins = "gpio6", "gpio7";
+ drive-strength = <2>;
+ bias-disable = <0>;
+ };
+ };
+
+ i2c2_sleep: i2c2_sleep {
+ pinmux {
+ function = "gpio";
+ pins = "gpio6", "gpio7";
+ };
+ pinconf {
+ pins = "gpio6", "gpio7";
+ drive-strength = <2>;
+ bias-disable = <0>;
+ };
+ };
+
i2c4_default: i2c4_default {
pinmux {
function = "blsp_i2c4";
@@ -255,7 +307,7 @@
i2c4_sleep: i2c4_sleep {
pinmux {
- function = "blsp_i2c4";
+ function = "gpio";
pins = "gpio14", "gpio15";
};
pinconf {
@@ -265,6 +317,30 @@
};
};
+ i2c6_default: i2c6_default {
+ pinmux {
+ function = "blsp_i2c6";
+ pins = "gpio22", "gpio23";
+ };
+ pinconf {
+ pins = "gpio22", "gpio23";
+ drive-strength = <2>;
+ bias-disable = <0>;
+ };
+ };
+
+ i2c6_sleep: i2c6_sleep {
+ pinmux {
+ function = "gpio";
+ pins = "gpio22", "gpio23";
+ };
+ pinconf {
+ pins = "gpio22", "gpio23";
+ drive-strength = <2>;
+ bias-disable = <0>;
+ };
+ };
+
sdhc2_cd_pin {
sdc2_cd_on: cd_on {
pinmux {
@@ -427,4 +503,245 @@
};
};
};
+
+ ext-codec-lines {
+ ext_codec_lines_act: lines_on {
+ pinmux {
+ function = "gpio";
+ pins = "gpio67";
+ };
+ pinconf {
+ pins = "gpio67";
+ drive-strength = <8>;
+ bias-disable;
+ output-high;
+ };
+ };
+ ext_codec_lines_sus: lines_off {
+ pinmux {
+ function = "gpio";
+ pins = "gpio67";
+ };
+ pinconf {
+ pins = "gpio67";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
+
+ cdc-pdm-lines {
+ cdc_pdm_lines_act: pdm_lines_on {
+ pinmux {
+ function = "cdc_pdm0";
+ pins = "gpio63", "gpio64", "gpio65", "gpio66",
+ "gpio67", "gpio68";
+ };
+ pinconf {
+ pins = "gpio63", "gpio64", "gpio65", "gpio66",
+ "gpio67", "gpio68";
+ drive-strength = <8>;
+ bias-pull-none;
+ };
+ };
+ cdc_pdm_lines_sus: pdm_lines_off {
+ pinmux {
+ function = "cdc_pdm0";
+ pins = "gpio63", "gpio64", "gpio65", "gpio66",
+ "gpio67", "gpio68";
+ };
+ pinconf {
+ pins = "gpio63", "gpio64", "gpio65", "gpio66",
+ "gpio67", "gpio68";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
+
+ ext-pri-tlmm-lines {
+ ext_pri_tlmm_lines_act: ext_pa_on {
+ pinmux {
+ function = "pri_mi2s";
+ pins = "gpio113", "gpio114", "gpio115",
+ "gpio116";
+ };
+ pinconf {
+ pins = "gpio113", "gpio114", "gpio115",
+ "gpio116";
+ drive-strength = <8>;
+ bias-pull-none;
+ };
+ };
+
+ ext_pri_tlmm_lines_sus: ext_pa_off {
+ pinmux {
+ function = "pri_mi2s";
+ pins = "gpio113", "gpio114", "gpio115",
+ "gpio116";
+ };
+ pinconf {
+ pins = "gpio113", "gpio114", "gpio115",
+ "gpio116";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
+
+ ext-pri-ws-line {
+ ext_pri_ws_act: ext_pa_on {
+ pinmux {
+ function = "pri_mi2s_ws";
+ pins = "gpio110";
+ };
+ pinconf {
+ pins = "gpio110";
+ drive-strength = <8>;
+ bias-pull-none;
+ };
+ };
+
+ ext_pri_ws_sus: ext_pa_off {
+ pinmux {
+ function = "pri_mi2s_ws";
+ pins = "gpio110";
+ };
+ pinconf {
+ pins = "gpio110";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
+
+ ext-mclk-tlmm-lines {
+ ext_mclk_tlmm_lines_act: mclk_lines_on {
+ pinmux {
+ function = "pri_mi2s";
+ pins = "gpio116";
+ };
+ pinconf {
+ pins = "gpio116";
+ drive-strength = <8>;
+ bias-pull-none;
+ };
+ };
+ ext_mclk_tlmm_lines_sus: mclk_lines_off {
+ pinmux {
+ function = "pri_mi2s";
+ pins = "gpio116";
+ };
+ pinconf {
+ pins = "gpio116";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
+
+ /* secondary Mi2S */
+ ext-sec-tlmm-lines {
+ ext_sec_tlmm_lines_act: tlmm_lines_on {
+ pinmux {
+ function = "sec_mi2s";
+ pins = "gpio112", "gpio117", "gpio118",
+ "gpio119";
+ };
+ pinconf {
+ pins = "gpio112", "gpio117", "gpio118",
+ "gpio119";
+ drive-strength = <8>;
+ bias-pull-none;
+ };
+ };
+ ext_sec_tlmm_lines_sus: tlmm_lines_off {
+ pinmux {
+ function = "sec_mi2s";
+ pins = "gpio112", "gpio117", "gpio118",
+ "gpio119";
+ };
+ pinconf {
+ pins = "gpio112", "gpio117", "gpio118",
+ "gpio119";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
+
+ cdc-dmic-lines {
+ cdc_dmic_lines_act: dmic_lines_on {
+ pinmux_dmic0_clk {
+ function = "dmic0_clk";
+ pins = "gpio0";
+ };
+ pinmux_dmic0_data {
+ function = "dmic0_data";
+ pins = "gpio1";
+ };
+ pinconf {
+ pins = "gpio0", "gpio1";
+ drive-strength = <8>;
+ };
+ };
+ cdc_dmic_lines_sus: dmic_lines_off {
+ pinconf {
+ pins = "gpio0", "gpio1";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
+
+ cross-conn-det {
+ cross_conn_det_act: lines_on {
+ pinmux {
+ function = "gpio";
+ pins = "gpio120";
+ };
+ pinconf {
+ pins = "gpio120";
+ drive-strength = <8>;
+ output-low;
+ bias-pull-down;
+ };
+ };
+ cross_conn_det_sus: lines_off {
+ pinmux {
+ function = "gpio";
+ pins = "gpio120";
+ };
+ pinconf {
+ pins = "gpio120";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
+
+ wcnss_default: wcnss_default {
+ pinmux2 {
+ function = "wcss_wlan";
+ pins = "gpio40";
+ };
+ pinmux1 {
+ function = "wcss_wlan";
+ pins = "gpio41";
+ };
+ pinmux0 {
+ function = "wcss_wlan";
+ pins = "gpio42";
+ };
+ pinmux {
+ function = "wcss_wlan";
+ pins = "gpio43", "gpio44";
+ };
+ pinconf {
+ pins = "gpio40", "gpio41", "gpio42", "gpio43",
+ "gpio44";
+ drive-strength = <6>;
+ bias-pull-up;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index 5911de008dd5..3416cccf26c3 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -14,10 +14,19 @@
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/qcom,gcc-msm8916.h>
#include <dt-bindings/reset/qcom,gcc-msm8916.h>
+#include <dt-bindings/clock/qcom,rpmcc-msm8916.h>
+#include <dt-bindings/thermal/thermal.h>
+#include <dt-bindings/arm/qcom-ids.h>
/ {
model = "Qualcomm Technologies, Inc. MSM8916";
compatible = "qcom,msm8916";
+ qcom,msm-id = <QCOM_ID_MSM8916 0>,
+ <QCOM_ID_MSM8216 0>,
+ <QCOM_ID_MSM8116 0>,
+ <QCOM_ID_MSM8616 0>,
+ <QCOM_ID_APQ8016 0>;
+
interrupt-parent = <&intc>;
@@ -37,6 +46,32 @@
reg = <0 0 0 0>;
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ reserve_aligned@86000000 {
+ reg = <0x0 0x86000000 0x0 0x0300000>;
+ no-map;
+ };
+
+ smem_mem: smem_region@86300000 {
+ reg = <0x0 0x86300000 0x0 0x0100000>;
+ no-map;
+ };
+
+ hypervisor_mem: hypervisor_region@86400000 {
+ no-map;
+ reg = <0x0 0x86400000 0x0 0x0400000>;
+ };
+
+ peripheral_mem: peripheral_region@8b600000 {
+ no-map;
+ reg = <0x0 0x8b600000 0x0 0x0600000>;
+ };
+ };
+
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -45,24 +80,130 @@
device_type = "cpu";
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0>;
+ enable-method = "qcom,arm-cortex-acc";
+ qcom,acc = <&acc0>;
+ next-level-cache = <&L2_0>;
+ clocks = <&a53cc 1>;
+ clock-latency = <200000>;
+ cpu-supply = <&pm8916_s2>;
+ /* cooling options */
+ cooling-min-level = <0>;
+ cooling-max-level = <7>;
+ #cooling-cells = <2>;
+ L2_0: l2-cache {
+ compatible = "arm,arch-cache";
+ cache-level = <2>;
+ power-domain = <&l2ccc_0>;
+ };
};
CPU1: cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x1>;
+ enable-method = "qcom,arm-cortex-acc";
+ qcom,acc = <&acc1>;
+ next-level-cache = <&L2_0>;
+ clocks = <&a53cc 1>;
+ clock-latency = <200000>;
+ cpu-supply = <&pm8916_s2>;
+ /* cooling options */
+ cooling-min-level = <0>;
+ cooling-max-level = <7>;
+ #cooling-cells = <2>;
};
CPU2: cpu@2 {
device_type = "cpu";
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x2>;
+ enable-method = "qcom,arm-cortex-acc";
+ qcom,acc = <&acc2>;
+ next-level-cache = <&L2_0>;
+ clocks = <&a53cc 1>;
+ clock-latency = <200000>;
+ cpu-supply = <&pm8916_s2>;
+ /* cooling options */
+ cooling-min-level = <0>;
+ cooling-max-level = <7>;
+ #cooling-cells = <2>;
};
CPU3: cpu@3 {
device_type = "cpu";
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x3>;
+ enable-method = "qcom,arm-cortex-acc";
+ qcom,acc = <&acc3>;
+ next-level-cache = <&L2_0>;
+ clocks = <&a53cc 1>;
+ clock-latency = <200000>;
+ cpu-supply = <&pm8916_s2>;
+ /* cooling options */
+ cooling-min-level = <0>;
+ cooling-max-level = <7>;
+ #cooling-cells = <2>;
+ };
+ };
+
+ cpu-pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <GIC_PPI 7 GIC_CPU_MASK_SIMPLE(4)>;
+ };
+
+ thermal-zones {
+ cpu-thermal0 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 4>;
+
+ trips {
+ cpu_alert0: trip@0 {
+ temperature = <75000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit0: trip@1 {
+ temperature = <95000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert0>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
+ };
+
+ cpu-thermal1 {
+ polling-delay-passive = <250>;
+ polling-delay = <1000>;
+
+ thermal-sensors = <&tsens 3>;
+
+ trips {
+ cpu_alert1: trip@0 {
+ temperature = <100000>;
+ hysteresis = <2000>;
+ type = "passive";
+ };
+ cpu_crit1: trip@1 {
+ temperature = <125000>;
+ hysteresis = <2000>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ map0 {
+ trip = <&cpu_alert1>;
+ cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+ };
+ };
};
};
@@ -74,6 +215,15 @@
<GIC_PPI 1 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
};
+ clocks {
+ xo_board: xo_board {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <19200000>;
+ clock-output-names = "xo_board";
+ };
+ };
+
soc: soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -95,19 +245,229 @@
#interrupt-cells = <2>;
};
+ tcsr_mutex_regs: syscon@1905000 {
+ compatible = "syscon";
+ reg = <0x1905000 0x20000>;
+ };
+
+ tcsr_mutex: hwlock {
+ compatible = "qcom,tcsr-mutex";
+ syscon = <&tcsr_mutex_regs 0 0x1000>;
+ #hwlock-cells = <1>;
+ };
+
+ smem {
+ compatible = "qcom,smem";
+ reg = <0x60000 0x8000>;
+ reg-names = "aux-mem1";
+
+ memory-region = <&smem_mem>;
+
+ hwlocks = <&tcsr_mutex 3>;
+ };
+
+
+ apcs: syscon@b011000 {
+ compatible = "syscon";
+ reg = <0x0b011000 0x1000>;
+ };
+
+ smd {
+ compatible = "qcom,smd";
+
+ rpm {
+ interrupts = <0 168 1>;
+ qcom,ipc = <&apcs 8 0>;
+ qcom,smd-edge = <15>;
+ qcom,remote-pid = <0xffffffff>;
+
+ rpm_requests {
+ compatible = "qcom,rpm-msm8916";
+ qcom,smd-channels = "rpm_requests";
+
+ rpmcc: qcom,rpmcc {
+ compatible = "qcom,rpmcc-msm8916";
+ #clock-cells = <1>;
+ };
+
+ pm8916-regulators {
+ compatible = "qcom,rpm-pm8916-regulators";
+
+ vdd_l1_l2_l3-supply = <&pm8916_s3>;
+ vdd_l5-supply = <&pm8916_s3>;
+ vdd_l4_l5_l6-supply = <&pm8916_s4>;
+ vdd_l7-supply = <&pm8916_s4>;
+
+ pm8916_s1: s1 {
+ regulator-min-microvolt = <375000>;
+ regulator-max-microvolt = <1562000>;
+ };
+ s2 {
+ regulator-min-microvolt = <375000>;
+ regulator-max-microvolt = <1562000>;
+ };
+ pm8916_s3: s3 {
+ regulator-min-microvolt = <375000>;
+ regulator-max-microvolt = <1562000>;
+ };
+ pm8916_s4: s4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ pm8916_l1: l1 {
+ regulator-min-microvolt = <375000>;
+ regulator-max-microvolt = <1525000>;
+ };
+ pm8916_l2: l2 {
+ regulator-min-microvolt = <375000>;
+ regulator-max-microvolt = <1525000>;
+ };
+ pm8916_l3: l3 {
+ regulator-min-microvolt = <375000>;
+ regulator-max-microvolt = <1525000>;
+ };
+ pm8916_l4: l4 {
+ regulator-min-microvolt = <1750000>;
+ regulator-max-microvolt = <3337000>;
+ };
+ pm8916_l5: l5 {
+ regulator-min-microvolt = <1750000>;
+ regulator-max-microvolt = <3337000>;
+ };
+ pm8916_l6: l6 {
+ regulator-min-microvolt = <1750000>;
+ regulator-max-microvolt = <3337000>;
+ };
+ pm8916_l7: l7 {
+ regulator-min-microvolt = <1750000>;
+ regulator-max-microvolt = <3337000>;
+ };
+ pm8916_l8: l8 {
+ regulator-min-microvolt = <1750000>;
+ regulator-max-microvolt = <3337000>;
+ };
+ pm8916_l9: l9 {
+ regulator-min-microvolt = <1750000>;
+ regulator-max-microvolt = <3337000>;
+ };
+ pm8916_l10: l10 {
+ regulator-min-microvolt = <1750000>;
+ regulator-max-microvolt = <3337000>;
+ };
+ pm8916_l11: l11 {
+ regulator-min-microvolt = <1750000>;
+ regulator-max-microvolt = <3337000>;
+ };
+ pm8916_l12: l12 {
+ regulator-min-microvolt = <1750000>;
+ regulator-max-microvolt = <3337000>;
+ };
+ pm8916_l13: l13 {
+ regulator-min-microvolt = <1750000>;
+ regulator-max-microvolt = <3337000>;
+ };
+ pm8916_l14: l14 {
+ regulator-min-microvolt = <1750000>;
+ regulator-max-microvolt = <3337000>;
+ };
+ pm8916_l15: l15 {
+ regulator-min-microvolt = <1750000>;
+ regulator-max-microvolt = <3337000>;
+ };
+ pm8916_l16: l16 {
+ regulator-min-microvolt = <1750000>;
+ regulator-max-microvolt = <3337000>;
+ };
+ pm8916_l17: l17 {
+ regulator-min-microvolt = <1750000>;
+ regulator-max-microvolt = <3337000>;
+ };
+ pm8916_l18: l18 {
+ regulator-min-microvolt = <1750000>;
+ regulator-max-microvolt = <3337000>;
+ };
+ };
+ };
+ };
+
+ pronto_smd_edge: pronto {
+ interrupts = <0 142 1>;
+
+ qcom,ipc = <&apcs 8 17>;
+ qcom,smd-edge = <6>;
+ qcom,remote-pid = <4>;
+
+ bt {
+ compatible = "qcom,hci-smd";
+ qcom,smd-channels = "APPS_RIVA_BT_CMD", "APPS_RIVA_BT_ACL";
+ qcom,smd-channel-names = "event", "data";
+ };
+
+ ipcrtr {
+ compatible = "qcom,ipcrtr";
+ qcom,smd-channels = "IPCRTR";
+ };
+
+ wifi {
+ compatible = "qcom,wlan-ctrl";
+ qcom,smd-channels = "WLAN_CTRL";
+
+ interrupts = <0 145 0>, <0 146 0>;
+ interrupt-names = "wcnss_wlantx_irq", "wcnss_wlanrx_irq";
+
+ qcom,wcnss_mmio = <0xfb000000 0x21b000>;
+
+ // qcom,tx-enable-gpios = <&apps_smsm 10 0>;
+ // qcom,tx-rings-empty-gpios = <&apps_smsm 9 0>;
+ };
+
+ wcnss_ctrl {
+ compatible = "qcom,wcnss-ctrl";
+ qcom,smd-channels = "WCNSS_CTRL";
+
+ qcom,wcnss_mmio = <0xfb21b000 0x3000>;
+ };
+ };
+ };
+
gcc: qcom,gcc@1800000 {
compatible = "qcom,gcc-msm8916";
#clock-cells = <1>;
#reset-cells = <1>;
+ #power-domain-cells = <1>;
reg = <0x1800000 0x80000>;
};
+ a53cc: qcom,a53cc@0b016000 {
+ compatible = "qcom,clock-a53-msm8916";
+ reg = <0x0b016000 0x40>;
+ #clock-cells = <1>;
+ qcom,apcs = <&apcs>;
+ };
+
+ blsp1_uart1: serial@78af000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0x78af000 0x200>;
+ interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_UART1_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ dmas = <&blsp_dma 1>, <&blsp_dma 0>;
+ dma-names = "rx", "tx";
+ status = "disabled";
+ };
+
blsp1_uart2: serial@78b0000 {
compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
reg = <0x78b0000 0x200>;
interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>, <&gcc GCC_BLSP1_AHB_CLK>;
clock-names = "core", "iface";
+ dmas = <&blsp_dma 3>, <&blsp_dma 2>;
+ dma-names = "rx", "tx";
status = "disabled";
};
@@ -224,6 +584,21 @@
status = "disabled";
};
+ blsp_i2c2: i2c@78b6000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x78b6000 0x1000>;
+ interrupts = <GIC_SPI 96 0>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP2_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c2_default>;
+ pinctrl-1 = <&i2c2_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
blsp_i2c4: i2c@78b8000 {
compatible = "qcom,i2c-qup-v2.2.1";
reg = <0x78b8000 0x1000>;
@@ -239,6 +614,21 @@
status = "disabled";
};
+ blsp_i2c6: i2c@78ba000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x78ba000 0x1000>;
+ interrupts = <GIC_SPI 100 0>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP6_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c6_default>;
+ pinctrl-1 = <&i2c6_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
sdhc_1: sdhci@07824000 {
compatible = "qcom,sdhci-msm-v4";
reg = <0x07824900 0x11c>, <0x07824000 0x800>;
@@ -291,10 +681,13 @@
interrupts = <GIC_SPI 134 IRQ_TYPE_EDGE_BOTH>,
<GIC_SPI 140 IRQ_TYPE_EDGE_RISING>;
+ v1p8-supply = <&pm8916_l7>;
+ v3p3-supply = <&pm8916_l13>;
qcom,vdd-levels = <1 5 7>;
qcom,phy-init-sequence = <0x44 0x6B 0x24 0x13>;
dr_mode = "peripheral";
qcom,otg-control = <2>; // PMIC
+ qcom,manual-pullup;
clocks = <&gcc GCC_USB_HS_AHB_CLK>,
<&gcc GCC_USB_HS_SYSTEM_CLK>,
@@ -314,6 +707,11 @@
reg = <0x0b000000 0x1000>, <0x0b002000 0x1000>;
};
+ l2ccc_0: clock-controller@b011000 {
+ compatible = "qcom,8916-l2ccc";
+ reg = <0x0b011000 0x1000>;
+ };
+
timer@b020000 {
#address-cells = <1>;
#size-cells = <1>;
@@ -390,7 +788,244 @@
interrupt-controller;
#interrupt-cells = <4>;
};
+
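+	/*
+	 * Per-CPU ACC blocks referenced by the qcom,acc properties of the
+	 * CPU nodes (enable-method "qcom,arm-cortex-acc") above.
+	 */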
+ acc0: clock-controller@b088000 {
+ compatible = "qcom,arm-cortex-acc";
+ reg = <0x0b088000 0x1000>,
+ <0x0b008000 0x1000>;
+ };
+
+ acc1: clock-controller@b098000 {
+ compatible = "qcom,arm-cortex-acc";
+ reg = <0x0b098000 0x1000>,
+ <0x0b008000 0x1000>;
+ };
+
+ acc2: clock-controller@b0a8000 {
+ compatible = "qcom,arm-cortex-acc";
+ reg = <0x0b0a8000 0x1000>,
+ <0x0b008000 0x1000>;
+ };
+
+ acc3: clock-controller@b0b8000 {
+ compatible = "qcom,arm-cortex-acc";
+ reg = <0x0b0b8000 0x1000>,
+ <0x0b008000 0x1000>;
+ };
+
+ /* Audio */
+
+	wcd_digital: codec-digital {
+ compatible = "syscon", "qcom,apq8016-wcd-digital-codec";
+ reg = <0x0771c000 0x400>;
+ };
+
+ lpass: lpass-cpu@07700000 {
+ status = "disabled";
+ compatible = "qcom,lpass-cpu-apq8016";
+ clocks = <&gcc GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK>,
+ <&gcc GCC_ULTAUDIO_PCNOC_MPORT_CLK>,
+ <&gcc GCC_ULTAUDIO_PCNOC_SWAY_CLK>,
+ <&gcc GCC_ULTAUDIO_LPAIF_PRI_I2S_CLK>,
+ <&gcc GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK>,
+ <&gcc GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK>,
+ <&gcc GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK>;
+
+ clock-names = "ahbix-clk",
+ "pcnoc-mport-clk",
+ "pcnoc-sway-clk",
+ "mi2s-bit-clk0",
+ "mi2s-bit-clk1",
+ "mi2s-bit-clk2",
+ "mi2s-bit-clk3";
+ #sound-dai-cells = <1>;
+
+ interrupts = <0 160 0>;
+ interrupt-names = "lpass-irq-lpaif";
+ reg = <0x07708000 0x10000>, <0x07702000 0x4>, <0x07702004 0x4>;
+ reg-names = "lpass-lpaif", "mic-iomux", "spkr-iomux";
+ };
+
+ sound: sound {
+ status = "disabled";
+ compatible = "qcom,apq8016-sbc-sndcard";
+ reg = <0x07702000 0x4>, <0x07702004 0x4>;
+ reg-names = "mic-iomux", "spkr-iomux";
+ };
+
+ tcsr: syscon@1937000 {
+ compatible = "qcom,tcsr-msm8916", "syscon";
+ reg = <0x1937000 0x30000>;
+ };
+
+ uqfprom: eeprom@58000 {
+ compatible = "qcom,qfprom-msm8916";
+ reg = <0x58000 0x7000>;
+ };
+
+ cpr@b018000 {
+ compatible = "qcom,cpr";
+ reg = <0xb018000 0x1000>;
+ interrupts = <0 15 1>, <0 16 1>, <0 17 1>;
+ vdd-mx-supply = <&pm8916_l3>;
+ acc-syscon = <&tcsr>;
+ eeprom = <&uqfprom>;
+
+ qcom,cpr-ref-clk = <19200>;
+ qcom,cpr-timer-delay-us = <5000>;
+ qcom,cpr-timer-cons-up = <0>;
+ qcom,cpr-timer-cons-down = <2>;
+ qcom,cpr-up-threshold = <0>;
+ qcom,cpr-down-threshold = <2>;
+ qcom,cpr-idle-clocks = <15>;
+ qcom,cpr-gcnt-us = <1>;
+ qcom,vdd-apc-step-up-limit = <1>;
+ qcom,vdd-apc-step-down-limit = <1>;
+ qcom,cpr-cpus = <&CPU0 &CPU1 &CPU2 &CPU3>;
+ };
+
+ qfprom: qfprom@5c000 {
+ compatible = "qcom,qfprom";
+ reg = <0x5c000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ tsens_caldata: caldata@d0 {
+ reg = <0xd0 0x8>;
+ };
+ tsens_calsel: calsel@ec {
+ reg = <0xec 0x4>;
+ };
+ };
+
+ tsens: thermal-sensor@4a8000 {
+ compatible = "qcom,msm8916-tsens";
+ reg = <0x4a8000 0x2000>;
+ nvmem-cells = <&tsens_caldata>, <&tsens_calsel>;
+ nvmem-cell-names = "calib", "calib_sel";
+ qcom,tsens-slopes = <3200 3200 3200 3200 3200>;
+ qcom,sensor-id = <0 1 2 4 5>;
+ #thermal-sensor-cells = <1>;
+ };
+
+ wcnss-smp2p {
+ compatible = "qcom,smp2p";
+ qcom,smem = <451>, <431>;
+
+ interrupts = <0 143 1>;
+
+ qcom,ipc = <&apcs 8 18>;
+
+ qcom,local-pid = <0>;
+ qcom,remote-pid = <4>;
+
+ wcnss_smp2p_out: master-kernel {
+ qcom,entry-name = "master-kernel";
+ qcom,outbound;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ wcnss_smp2p_in: slave-kernel {
+ qcom,entry-name = "slave-kernel";
+ qcom,inbound;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+ };
+
+	pronto_rproc: pronto_rproc {
+ compatible = "qcom,tz-pil";
+
+ interrupts-extended = <&intc 0 149 1>,
+ <&wcnss_smp2p_in 0 0>,
+ <&wcnss_smp2p_in 1 0>,
+ <&wcnss_smp2p_in 2 0>,
+ <&wcnss_smp2p_in 3 0>;
+ interrupt-names = "wdog", "fatal", "ready", "handover", "stop-ack";
+
+ clocks = <&gcc GCC_CRYPTO_CLK>,
+ <&gcc GCC_CRYPTO_AHB_CLK>,
+ <&gcc GCC_CRYPTO_AXI_CLK>,
+ <&gcc CRYPTO_CLK_SRC>;
+ clock-names = "scm_core_clk", "scm_iface_clk", "scm_bus_clk", "scm_src_clk";
+
+ qcom,firmware-name = "wcnss";
+ qcom,pas-id = <6>;
+
+ qcom,crash-reason = <422>;
+ qcom,smd-edges = <&pronto_smd_edge>;
+
+ qcom,pll-supply = <&pm8916_l7>;
+ qcom,pll-uV = <1800000>;
+ qcom,pll-uA = <18000>;
+
+ qcom,stop-gpio = <&wcnss_smp2p_out 0 0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&wcnss_default>;
+
+ memory-region = <&peripheral_mem>;
+ };
+
+ qcom,wcn36xx@0a000000 {
+ compatible = "qcom,wcn3620";
+ reg = <0x0a000000 0x280000>,
+ <0xb011008 0x04>,
+ <0x0a21b000 0x3000>,
+ <0x03204000 0x00000100>,
+ <0x03200800 0x00000200>,
+ <0x0A100400 0x00000200>,
+ <0x0A205050 0x00000200>,
+ <0x0A219000 0x00000020>,
+ <0x0A080488 0x00000008>,
+ <0x0A080fb0 0x00000008>,
+ <0x0A08040c 0x00000008>,
+ <0x0A0120a8 0x00000008>,
+ <0x0A012448 0x00000008>,
+ <0x0A080c00 0x00000001>;
+
+ reg-names = "wcnss_mmio", "wcnss_fiq",
+ "pronto_phy_base", "riva_phy_base",
+ "riva_ccu_base", "pronto_a2xb_base",
+ "pronto_ccpu_base", "pronto_saw2_base",
+ "wlan_tx_phy_aborts","wlan_brdg_err_source",
+ "wlan_tx_status", "alarms_txctl",
+ "alarms_tactl", "pronto_mcu_base";
+
+ interrupts = <0 145 0 0 146 0>;
+ interrupt-names = "wcnss_wlantx_irq", "wcnss_wlanrx_irq";
+
+ // qcom,pronto-vddmx-supply = <&pm8916_l3>;
+ // qcom,pronto-vddcx-supply = <&pm8916_s1_corner>;
+ // qcom,pronto-vddpx-supply = <&pm8916_l7>;
+ // qcom,iris-vddxo-supply = <&pm8916_l7>;
+ // qcom,iris-vddrfa-supply = <&pm8916_s3>;
+ // qcom,iris-vddpa-supply = <&pm8916_l9>;
+ // qcom,iris-vdddig-supply = <&pm8916_l5>;
+
+ pinctrl-names = "wcnss_default";
+ // pinctrl-names = "wcnss_default", "wcnss_sleep",
+ // "wcnss_gpio_default";
+ pinctrl-0 = <&wcnss_default>;
+ // pinctrl-1 = <&wcnss_sleep>;
+ // pinctrl-2 = <&wcnss_gpio_default>;
+
+ // clocks = <&rpmcc RPM_XO_CLK_SRC>,
+ // <&rpmcc RPM_RF_CLK2>;
+ //clock-names = "xo", "rf_clk";
+
+ rproc = <&pronto_rproc>;
+ qcom,has-autodetect-xo;
+ qcom,wlan-rx-buff-count = <512>;
+ qcom,is-pronto-vt;
+ qcom,has-pronto-hw;
+ // qcom,wcnss-adc_tm = <&pm8916_adc_tm>;
+ };
};
};
#include "msm8916-pins.dtsi"
+#include "msm8916-iommu.dtsi"
+#include "msm8916-coresight.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/pm8916.dtsi b/arch/arm64/boot/dts/qcom/pm8916.dtsi
index b222ece7e3d2..5f6a92516eab 100644
--- a/arch/arm64/boot/dts/qcom/pm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8916.dtsi
@@ -95,5 +95,131 @@
reg = <0x1 SPMI_USID>;
#address-cells = <1>;
#size-cells = <0>;
+
+ regulators {
+ compatible = "qcom,pm8916-regulators";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ s1@1400 {
+ reg = <0x1400 0x300>;
+ status = "disabled";
+ };
+
+ pm8916_s2: s2@1700 {
+ reg = <0x1700 0x300>;
+ status = "ok";
+ regulator-min-microvolt = <1050000>;
+ regulator-max-microvolt = <1350000>;
+ };
+
+ s3@1a00 {
+ reg = <0x1a00 0x300>;
+ status = "disabled";
+ };
+
+ s4@1d00 {
+ reg = <0x1d00 0x300>;
+ status = "disabled";
+ };
+
+ l1@4000 {
+ reg = <0x4000 0x100>;
+ status = "disabled";
+ };
+
+ l2@4100 {
+ reg = <0x4100 0x100>;
+ status = "disabled";
+ };
+
+ l3@4200 {
+ reg = <0x4200 0x100>;
+ status = "disabled";
+ };
+
+ l4@4300 {
+ reg = <0x4300 0x100>;
+ status = "disabled";
+ };
+
+ l5@4400 {
+ reg = <0x4400 0x100>;
+ status = "disabled";
+ };
+
+ l6@4500 {
+ reg = <0x4500 0x100>;
+ status = "disabled";
+ };
+
+ l7@4600 {
+ reg = <0x4600 0x100>;
+ status = "disabled";
+ };
+
+ l8@4700 {
+ reg = <0x4700 0x100>;
+ status = "disabled";
+ };
+
+ l9@4800 {
+ reg = <0x4800 0x100>;
+ status = "disabled";
+ };
+
+ l10@4900 {
+ reg = <0x4900 0x100>;
+ status = "disabled";
+ };
+
+ l11@4a00 {
+ reg = <0x4a00 0x100>;
+ status = "disabled";
+ };
+
+ l12@4b00 {
+ reg = <0x4b00 0x100>;
+ status = "disabled";
+ };
+
+ l13@4c00 {
+ reg = <0x4c00 0x100>;
+ status = "disabled";
+ };
+
+ l14@4d00 {
+ reg = <0x4d00 0x100>;
+ status = "disabled";
+ };
+
+ l15@4e00 {
+ reg = <0x4e00 0x100>;
+ status = "disabled";
+ };
+
+ l16@4f00 {
+ reg = <0x4f00 0x100>;
+ status = "disabled";
+ };
+
+ l17@5000 {
+ reg = <0x5000 0x100>;
+ status = "disabled";
+ };
+
+ l18@5100 {
+ reg = <0x5100 0x100>;
+ status = "disabled";
+ };
+ };
+
+ wcd_codec: codec@f000 {
+ compatible = "qcom,apq8016-wcd-codec";
+ reg = <0xf000 0x200>;
+ #sound-dai-cells = <0>;
+ vddio-supply = <&pm8916_l5>;
+ vdd-pa-supply = <&pm8916_s4>;
+ };
};
};
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 34d71dd86781..8f084e81cf14 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -23,7 +23,6 @@ CONFIG_CGROUP_HUGETLB=y
# CONFIG_NET_NS is not set
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_ALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_JUMP_LABEL=y
@@ -38,10 +37,10 @@ CONFIG_ARCH_FSL_LS2085A=y
CONFIG_ARCH_HISI=y
CONFIG_ARCH_MEDIATEK=y
CONFIG_ARCH_ROCKCHIP=y
+CONFIG_ARCH_QCOM=y
CONFIG_ARCH_SEATTLE=y
CONFIG_ARCH_TEGRA=y
CONFIG_ARCH_TEGRA_132_SOC=y
-CONFIG_ARCH_QCOM=y
CONFIG_ARCH_SPRD=y
CONFIG_ARCH_THUNDER=y
CONFIG_ARCH_VEXPRESS=y
@@ -60,6 +59,14 @@ CONFIG_CMDLINE="console=ttyAMA0"
CONFIG_COMPAT=y
CONFIG_CPU_IDLE=y
CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPUFREQ_DT=y
+# CONFIG_ARM_TEGRA_CPUFREQ is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -70,7 +77,19 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
CONFIG_BPF_JIT=y
-# CONFIG_WIRELESS is not set
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCIBTUSB=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_3WIRE=y
+CONFIG_BT_QCOMSMD=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=y
CONFIG_NET_9P=y
CONFIG_NET_9P_VIRTIO=y
# CONFIG_TEGRA_AHB is not set
@@ -78,6 +97,7 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=64
CONFIG_BLK_DEV_LOOP=y
CONFIG_VIRTIO_BLK=y
# CONFIG_SCSI_PROC_FS is not set
@@ -97,7 +117,12 @@ CONFIG_NET_XGENE=y
CONFIG_SKY2=y
CONFIG_SMC91X=y
CONFIG_SMSC911X=y
-# CONFIG_WLAN is not set
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_DM9601=y
+CONFIG_USB_NET_PLUSB=y
+CONFIG_ATH_CARDS=y
+CONFIG_WCN36XX=m
+CONFIG_WCN36XX_DEBUGFS=y
CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
# CONFIG_SERIO_SERPORT is not set
@@ -116,51 +141,125 @@ CONFIG_SERIAL_XILINX_PS_UART=y
CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
CONFIG_VIRTIO_CONSOLE=y
# CONFIG_HW_RANDOM is not set
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_QUP=y
CONFIG_SPI=y
CONFIG_SPI_PL022=y
+CONFIG_SPI_QUP=m
+CONFIG_SPMI=y
CONFIG_PINCTRL_MSM8916=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
CONFIG_GPIO_PL061=y
+CONFIG_GPIO_QCOM_SMSM=y
+CONFIG_GPIO_QCOM_SMP2P=y
CONFIG_GPIO_XGENE=y
+CONFIG_POWER_RESET_MSM=y
CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_AVS=y
+CONFIG_QCOM_CPR=y
# CONFIG_HWMON is not set
-CONFIG_REGULATOR=y
+CONFIG_THERMAL=y
+CONFIG_CPU_THERMAL=y
+CONFIG_QCOM_TSENS=y
+CONFIG_MFD_QCOM_RPM=y
+CONFIG_MFD_SPMI_PMIC=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
-CONFIG_FB=y
+CONFIG_REGULATOR_QCOM_RPM=y
+CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_QCOM_SPMI=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_DRM=y
+CONFIG_DRM_I2C_ADV7511=y
+# CONFIG_DRM_I2C_ADV7511_SLAVE_ENCODER is not set
CONFIG_FB_ARMCLCD=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_QCOM=y
+CONFIG_SND_SOC_APQ8016_SBC=y
+CONFIG_SND_SOC_MSM8x16_WCD=y
CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_DYNAMIC_MINORS=y
+CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_MSM=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_ACM=y
+CONFIG_USB_PRINTER=y
+CONFIG_USB_WDM=y
CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_USBIP_CORE=y
+CONFIG_USBIP_HOST=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
CONFIG_USB_ISP1760=y
-CONFIG_USB_ULPI=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_TEST=m
+CONFIG_USB_HSIC_USB3503=y
+CONFIG_USB_MSM_OTG=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_DEBUG=y
+CONFIG_USB_GADGET_VERBOSE=y
+CONFIG_USB_GADGET_DEBUG_FILES=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=m
+CONFIG_USB_ZERO=m
+CONFIG_USB_ETH=m
+CONFIG_USB_MASS_STORAGE=m
+CONFIG_USB_G_SERIAL=m
CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_ARMMMCI=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SPI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_GPIO=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_CPU=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_EFI=y
CONFIG_RTC_DRV_XGENE=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_BAM_DMA=y
+CONFIG_QCOM_ADM=y
CONFIG_VIRTIO_PCI=y
CONFIG_VIRTIO_BALLOON=y
CONFIG_VIRTIO_MMIO=y
CONFIG_COMMON_CLK_QCOM=y
+CONFIG_QCOM_CLK_SMD_RPM=y
+CONFIG_QCOM_RPMCC=y
CONFIG_MSM_GCC_8916=y
-# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_MSM_RPMCC_8064=y
+CONFIG_QCOM_A53=y
+CONFIG_QCOM_IOMMU_V1=y
+CONFIG_QCOM_GSBI=y
+CONFIG_QCOM_SMD=y
+CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_SMEM=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_QCOM_Q6V5_PIL=y
+CONFIG_QCOM_TZ_PIL=y
CONFIG_PHY_XGENE=y
+CONFIG_NVMEM=y
+CONFIG_QCOM_QFPROM=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
@@ -173,7 +272,6 @@ CONFIG_AUTOFS4_FS=y
CONFIG_FUSE_FS=y
CONFIG_CUSE=y
CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
CONFIG_EFIVAR_FS=y
# CONFIG_MISC_FILESYSTEMS is not set
@@ -183,17 +281,24 @@ CONFIG_ROOT_NFS=y
CONFIG_9P_FS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_VIRTUALIZATION=y
CONFIG_KVM=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_LOCKUP_DETECTOR=y
+# CONFIG_DETECT_HUNG_TASK is not set
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_PROVE_LOCKING=y
# CONFIG_FTRACE is not set
CONFIG_MEMTEST=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SINK_TPIU=y
+CONFIG_CORESIGHT_SINK_ETBV10=y
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
CONFIG_SECURITY=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_ARM64_CRYPTO=y
diff --git a/arch/arm64/configs/qcom_defconfig b/arch/arm64/configs/qcom_defconfig
new file mode 100644
index 000000000000..6ece7d6477f5
--- /dev/null
+++ b/arch/arm64/configs/qcom_defconfig
@@ -0,0 +1,274 @@
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+CONFIG_PROFILING=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARCH_QCOM=y
+CONFIG_SMP=y
+CONFIG_SCHED_MC=y
+CONFIG_PREEMPT=y
+CONFIG_CMA=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_INET=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_SIP=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_NAT_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+CONFIG_RFKILL=y
+CONFIG_DMA_CMA=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_VERITY=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_PHYLIB=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPPOE=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_EVBUG=m
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_SPI=y
+CONFIG_SPI_SPIDEV=m
+CONFIG_PINCTRL_MSM8916=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_THERMAL=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_FAN53555=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_RADIO_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_FB=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+# CONFIG_USB_SUPPORT is not set
+CONFIG_MMC=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_TEST=m
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_RTC_CLASS=y
+CONFIG_UIO=y
+CONFIG_STAGING=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ION=y
+CONFIG_COMMON_CLK_QCOM=y
+CONFIG_MSM_GCC_8916=y
+CONFIG_PWM=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=y
+CONFIG_FUSE_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+# CONFIG_DETECT_HUNG_TASK is not set
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LIST=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_KEYS=y
+CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_TWOFISH=y
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index c75b8d027eb1..12137e615cf9 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -85,6 +85,12 @@ static inline void flush_cache_page(struct vm_area_struct *vma,
extern void __dma_map_area(const void *, size_t, int);
extern void __dma_unmap_area(const void *, size_t, int);
extern void __dma_flush_range(const void *, const void *);
+extern void __dma_inv_range(const void *, const void *);
+extern void __dma_clean_range(const void *, const void *);
+
+#define dmac_flush_range __dma_flush_range
+#define dmac_inv_range __dma_inv_range
+#define dmac_clean_range __dma_clean_range
/*
* Copy user data from/to a page which is mapped into a different
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
index 8f03446cf89f..41bf609206ed 100644
--- a/arch/arm64/include/asm/cpu_ops.h
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -71,4 +71,9 @@ static inline void __init cpu_read_bootcpu_ops(void)
cpu_read_ops(0);
}
+#define CPU_METHOD_OF_DECLARE(name, __ops) \
+ static const struct cpu_operations *__cpu_method_table_##name \
+ __used __section(__cpu_method_of_table) \
+ = __ops;
+
#endif /* ifndef __ASM_CPU_OPS_H */
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
index 243ef256b8c9..908a6a53ed49 100644
--- a/arch/arm64/include/asm/device.h
+++ b/arch/arm64/include/asm/device.h
@@ -22,9 +22,18 @@ struct dev_archdata {
void *iommu; /* private IOMMU data */
#endif
bool dma_coherent;
+#ifdef CONFIG_ARM64_DMA_USE_IOMMU
+ struct dma_iommu_mapping *mapping;
+#endif
};
struct pdev_archdata {
};
+#ifdef CONFIG_ARM64_DMA_USE_IOMMU
+#define to_dma_iommu_mapping(dev) ((dev)->archdata.mapping)
+#else
+#define to_dma_iommu_mapping(dev) NULL
+#endif
+
#endif
diff --git a/arch/arm64/include/asm/dma-iommu.h b/arch/arm64/include/asm/dma-iommu.h
new file mode 100644
index 000000000000..a8c56acc8c98
--- /dev/null
+++ b/arch/arm64/include/asm/dma-iommu.h
@@ -0,0 +1,36 @@
+#ifndef ASMARM_DMA_IOMMU_H
+#define ASMARM_DMA_IOMMU_H
+
+#ifdef __KERNEL__
+
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/kmemcheck.h>
+#include <linux/kref.h>
+
+struct dma_iommu_mapping {
+ /* iommu specific data */
+ struct iommu_domain *domain;
+
+ void *bitmap;
+ size_t bits;
+ unsigned int order;
+ dma_addr_t base;
+
+ spinlock_t lock;
+ struct kref kref;
+};
+
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
+ int order);
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
+
+int arm_iommu_attach_device(struct device *dev,
+ struct dma_iommu_mapping *mapping);
+void arm_iommu_detach_device(struct device *dev);
+
+#endif /* __KERNEL__ */
+#endif
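
The header above only declares the mapping API; as a rough orientation sketch (not part of the patch: the function name and device pointer are hypothetical, and the base address and window size are made-up values), a client would pair the calls roughly like this:

    #include <linux/err.h>
    #include <linux/platform_device.h>
    #include <linux/sizes.h>
    #include <asm/dma-iommu.h>

    static int hypothetical_attach_iommu_dma(struct device *dev)
    {
    	struct dma_iommu_mapping *mapping;
    	int ret;

    	/* 128 MB of IOVA space at 0x80000000, page-sized (order 0) slots */
    	mapping = arm_iommu_create_mapping(&platform_bus_type,
    					   0x80000000, SZ_128M, 0);
    	if (IS_ERR(mapping))
    		return PTR_ERR(mapping);

    	/* takes a reference on the mapping and switches dev to the IOMMU dma_map_ops */
    	ret = arm_iommu_attach_device(dev, mapping);
    	if (ret) {
    		arm_iommu_release_mapping(mapping);
    		return ret;
    	}
    	return 0;
    }

The matching teardown is arm_iommu_detach_device(dev) followed by arm_iommu_release_mapping(mapping) to drop the creator's reference.
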
diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
index af58dcdefb21..63bea4f70a74 100644
--- a/arch/arm64/include/asm/smp_plat.h
+++ b/arch/arm64/include/asm/smp_plat.h
@@ -36,6 +36,7 @@ static inline u32 mpidr_hash_size(void)
return 1 << mpidr_hash.bits;
}
+extern void secondary_holding_pen(void);
/*
* Logical CPU mapping.
*/
@@ -55,5 +56,6 @@ static inline int get_logical_index(u64 mpidr)
return cpu;
return -EINVAL;
}
+extern volatile unsigned long secondary_holding_pen_release;
#endif /* __ASM_SMP_PLAT_H */
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index b6bd7d447768..3c63506b6437 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -35,16 +35,18 @@ static const struct cpu_operations *supported_cpu_ops[] __initconst = {
NULL,
};
+extern struct cpu_operations __cpu_method_of_table[];
+static const struct cpu_operations *__cpu_method_of_table_sentinel
+ __used __section(__cpu_method_of_table_end);
+
static const struct cpu_operations * __init cpu_get_ops(const char *name)
{
- const struct cpu_operations **ops = supported_cpu_ops;
-
- while (*ops) {
- if (!strcmp(name, (*ops)->name))
- return *ops;
+ const struct cpu_operations **start = (void *)__cpu_method_of_table;
- ops++;
- }
+ for (; *start; start++) {
+ if (!strcmp((*start)->name, name))
+ return *start;
+	}
return NULL;
}
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index aa94a88f6279..d8f6991fc9c8 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -235,3 +235,4 @@ const struct cpu_operations cpu_psci_ops = {
#endif
};
+CPU_METHOD_OF_DECLARE(psci, &cpu_psci_ops);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index dbdaacddd9a5..38d3b6bf27ac 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -63,6 +63,7 @@
* where to place its SVC stack
*/
struct secondary_data secondary_data;
+volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
enum ipi_msg_type {
IPI_RESCHEDULE,
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index aef3605a8c47..3d5325d99e8e 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -28,9 +28,6 @@
#include <asm/io.h>
#include <asm/smp_plat.h>
-extern void secondary_holding_pen(void);
-volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
-
static phys_addr_t cpu_release_addr[NR_CPUS];
/*
@@ -125,9 +122,10 @@ static int smp_spin_table_cpu_boot(unsigned int cpu)
return 0;
}
-const struct cpu_operations smp_spin_table_ops = {
+static const struct cpu_operations smp_spin_table_ops = {
.name = "spin-table",
.cpu_init = smp_spin_table_cpu_init,
.cpu_prepare = smp_spin_table_cpu_prepare,
.cpu_boot = smp_spin_table_cpu_boot,
};
+CPU_METHOD_OF_DECLARE(spin_table, &smp_spin_table_ops);
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index eb48d5df4a0f..a17430174087 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -113,7 +113,7 @@ ENTRY(__inval_cache_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-__dma_inv_range:
+ENTRY(__dma_inv_range)
dcache_line_size x2, x3
sub x3, x2, #1
tst x1, x3 // end cache line aligned?
@@ -139,7 +139,7 @@ ENDPROC(__dma_inv_range)
* - start - virtual start address of region
* - end - virtual end address of region
*/
-__dma_clean_range:
+ENTRY(__dma_clean_range)
dcache_line_size x2, x3
sub x3, x2, #1
bic x0, x0, x3
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index d895529268a6..2ae5dbbe0d86 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -23,10 +23,15 @@
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
+#include <linux/mm.h>
+#include <linux/iommu.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <asm/cacheflush.h>
+#include <asm/dma-iommu.h>
+
+#include "mm.h"
struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
@@ -34,7 +39,11 @@ EXPORT_SYMBOL(dma_ops);
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
bool coherent)
{
- if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+ if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+ return pgprot_writecombine(prot);
+ else if (dma_get_attr(DMA_ATTR_STRONGLY_ORDERED, attrs))
+ prot = pgprot_noncached(prot);
+ else if (!coherent)
return pgprot_writecombine(prot);
return prot;
}
@@ -533,3 +542,1031 @@ static int __init dma_debug_do_init(void)
return 0;
}
fs_initcall(dma_debug_do_init);
+
+#ifdef CONFIG_ARM64_DMA_USE_IOMMU
+
+/*
+ * Make an area consistent for devices.
+ * Note: Drivers should NOT use this function directly, as it will break
+ * platforms with CONFIG_DMABOUNCE.
+ * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ */
+static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
+ size_t size, enum dma_data_direction dir)
+{
+ __dma_map_area(page_address(page) + off, size, dir);
+}
+
+static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+ size_t size, enum dma_data_direction dir)
+{
+ __dma_unmap_area(page_address(page) + off, size, dir);
+
+ /*
+ * Mark the D-cache clean for this page to avoid extra flushing.
+ */
+ if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
+ set_bit(PG_dcache_clean, &page->flags);
+}
+
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ return -EIO;
+
+ *dev->dma_mask = dma_mask;
+
+ return 0;
+}
+
+/* IOMMU */
+
+static void __dma_clear_buffer(struct page *page, size_t size,
+ struct dma_attrs *attrs)
+{
+ /*
+ * Ensure that the allocated pages are zeroed, and that any data
+ * lurking in the kernel direct-mapped region is invalidated.
+ */
+ void *ptr = page_address(page);
+ if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs))
+ memset(ptr, 0, size);
+ dmac_flush_range(ptr, ptr + size);
+}
+
+static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
+ size_t size)
+{
+ unsigned int order = get_order(size);
+ unsigned int align = 0;
+ unsigned int count, start;
+ unsigned long flags;
+
+ if (order > CONFIG_ARM64_DMA_IOMMU_ALIGNMENT)
+ order = CONFIG_ARM64_DMA_IOMMU_ALIGNMENT;
+
+ count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
+ (1 << mapping->order) - 1) >> mapping->order;
+
+ if (order > mapping->order)
+ align = (1 << (order - mapping->order)) - 1;
+
+ spin_lock_irqsave(&mapping->lock, flags);
+ start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
+ count, align);
+ if (start > mapping->bits) {
+ spin_unlock_irqrestore(&mapping->lock, flags);
+ return DMA_ERROR_CODE;
+ }
+
+ bitmap_set(mapping->bitmap, start, count);
+ spin_unlock_irqrestore(&mapping->lock, flags);
+
+ return mapping->base + (start << (mapping->order + PAGE_SHIFT));
+}
+
+static inline void __free_iova(struct dma_iommu_mapping *mapping,
+ dma_addr_t addr, size_t size)
+{
+ unsigned int start = (addr - mapping->base) >>
+ (mapping->order + PAGE_SHIFT);
+ unsigned int count = ((size >> PAGE_SHIFT) +
+ (1 << mapping->order) - 1) >> mapping->order;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mapping->lock, flags);
+ bitmap_clear(mapping->bitmap, start, count);
+ spin_unlock_irqrestore(&mapping->lock, flags);
+}
+
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
+ gfp_t gfp, struct dma_attrs *attrs)
+{
+ struct page **pages;
+ int count = size >> PAGE_SHIFT;
+ int array_size = count * sizeof(struct page *);
+ int i = 0;
+
+ if (array_size <= PAGE_SIZE)
+ pages = kzalloc(array_size, gfp);
+ else
+ pages = vzalloc(array_size);
+ if (!pages)
+ return NULL;
+
+ if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+ unsigned long order = get_order(size);
+ struct page *page;
+
+ page = dma_alloc_from_contiguous(dev, count, order);
+ if (!page)
+ goto error;
+
+ __dma_clear_buffer(page, size, attrs);
+
+ for (i = 0; i < count; i++)
+ pages[i] = page + i;
+
+ return pages;
+ }
+
+ /*
+	 * IOMMU can map any pages, so highmem can also be used here
+ */
+ gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
+
+ while (count) {
+ int j, order = __fls(count);
+
+ pages[i] = alloc_pages(gfp, order);
+ while (!pages[i] && order)
+ pages[i] = alloc_pages(gfp, --order);
+ if (!pages[i])
+ goto error;
+
+ if (order) {
+ split_page(pages[i], order);
+ j = 1 << order;
+ while (--j)
+ pages[i + j] = pages[i] + j;
+ }
+
+ __dma_clear_buffer(pages[i], PAGE_SIZE << order, attrs);
+ i += 1 << order;
+ count -= 1 << order;
+ }
+
+ return pages;
+error:
+ while (i--)
+ if (pages[i])
+ __free_pages(pages[i], 0);
+ if (array_size <= PAGE_SIZE)
+ kfree(pages);
+ else
+ vfree(pages);
+ return NULL;
+}
+
+static int __iommu_free_buffer(struct device *dev, struct page **pages,
+ size_t size, struct dma_attrs *attrs)
+{
+ int count = size >> PAGE_SHIFT;
+ int array_size = count * sizeof(struct page *);
+ int i;
+
+ if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+ dma_release_from_contiguous(dev, pages[0], count);
+ } else {
+ for (i = 0; i < count; i++)
+ if (pages[i])
+ __free_pages(pages[i], 0);
+ }
+
+ if (array_size <= PAGE_SIZE)
+ kfree(pages);
+ else
+ vfree(pages);
+ return 0;
+}
+
+/*
+ * Create a CPU mapping for a specified pages
+ */
+static void *
+__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
+ const void *caller)
+{
+ return dma_common_pages_remap(pages, size, VM_USERMAP, prot, caller);
+}
+
+/*
+ * Create a mapping in device IO address space for specified pages
+ */
+static dma_addr_t
+__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ dma_addr_t dma_addr, iova;
+ int i, ret;
+
+ dma_addr = __alloc_iova(mapping, size);
+ if (dma_addr == DMA_ERROR_CODE)
+ return dma_addr;
+
+ iova = dma_addr;
+ for (i = 0; i < count; ) {
+ unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
+ phys_addr_t phys = page_to_phys(pages[i]);
+ unsigned int len, j;
+
+ for (j = i + 1; j < count; j++, next_pfn++)
+ if (page_to_pfn(pages[j]) != next_pfn)
+ break;
+
+ len = (j - i) << PAGE_SHIFT;
+ ret = iommu_map(mapping->domain, iova, phys, len,
+ IOMMU_READ|IOMMU_WRITE);
+ if (ret < 0)
+ goto fail;
+ iova += len;
+ i = j;
+ }
+ return dma_addr;
+fail:
+ iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
+ __free_iova(mapping, dma_addr, size);
+ return DMA_ERROR_CODE;
+}
+
+static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova,
+ size_t size)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+ /*
+ * add optional in-page offset from iova to size and align
+ * result to page size
+ */
+ size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
+ iova &= PAGE_MASK;
+
+ iommu_unmap(mapping->domain, iova, size);
+ __free_iova(mapping, iova, size);
+ return 0;
+}
+
+static struct page **__atomic_get_pages(void *addr)
+{
+ struct page *page;
+ phys_addr_t phys;
+
+ phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
+ page = phys_to_page(phys);
+
+ return (struct page **)page;
+}
+
+static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
+{
+ struct vm_struct *area;
+
+ if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
+ return __atomic_get_pages(cpu_addr);
+
+ if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+ return cpu_addr;
+
+ area = find_vm_area(cpu_addr);
+ if (area)
+ return area->pages;
+ return NULL;
+}
+
+static void *__iommu_alloc_atomic(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t flags)
+{
+ struct page *page;
+ void *addr;
+
+ addr = __alloc_from_pool(size, &page, flags);
+ if (!addr)
+ return NULL;
+
+ *handle = __iommu_create_mapping(dev, &page, size);
+ if (*handle == DMA_ERROR_CODE)
+ goto err_mapping;
+
+ return addr;
+
+err_mapping:
+ __free_from_pool(addr, size);
+ return NULL;
+}
+
+static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
+ dma_addr_t handle, size_t size)
+{
+ __iommu_remove_mapping(dev, handle, size);
+ __free_from_pool(cpu_addr, size);
+}
+
+static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+ pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL,
+ is_device_dma_coherent(dev));
+ struct page **pages;
+ void *addr = NULL;
+
+ *handle = DMA_ERROR_CODE;
+ size = PAGE_ALIGN(size);
+
+ if (!(gfp & __GFP_WAIT))
+ return __iommu_alloc_atomic(dev, size, handle, gfp);
+
+ /*
+ * Following is a work-around (a.k.a. hack) to prevent pages
+ * with __GFP_COMP being passed to split_page() which cannot
+ * handle them. The real problem is that this flag probably
+ * should be 0 on ARM as it is not supported on this
+ * platform; see CONFIG_HUGETLBFS.
+ */
+ gfp &= ~(__GFP_COMP);
+
+ pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
+ if (!pages)
+ return NULL;
+
+ *handle = __iommu_create_mapping(dev, pages, size);
+ if (*handle == DMA_ERROR_CODE)
+ goto err_buffer;
+
+ if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+ return pages;
+
+ addr = __iommu_alloc_remap(pages, size, gfp, prot,
+ __builtin_return_address(0));
+ if (!addr)
+ goto err_mapping;
+
+ return addr;
+
+err_mapping:
+ __iommu_remove_mapping(dev, *handle, size);
+err_buffer:
+ __iommu_free_buffer(dev, pages, size, attrs);
+ return NULL;
+}
+
+static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
+{
+ unsigned long uaddr = vma->vm_start;
+ unsigned long usize = vma->vm_end - vma->vm_start;
+ struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+ is_device_dma_coherent(dev));
+
+ if (!pages)
+ return -ENXIO;
+
+ do {
+ int ret = vm_insert_page(vma, uaddr, *pages++);
+ if (ret) {
+ pr_err("Remapping memory failed: %d\n", ret);
+ return ret;
+ }
+ uaddr += PAGE_SIZE;
+ usize -= PAGE_SIZE;
+ } while (usize > 0);
+
+ return 0;
+}
+
+/*
+ * free a page as defined by the above mapping.
+ * Must not be called with IRQs disabled.
+ */
+void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, struct dma_attrs *attrs)
+{
+ struct page **pages;
+ size = PAGE_ALIGN(size);
+
+ if (__in_atomic_pool(cpu_addr, size)) {
+ __iommu_free_atomic(dev, cpu_addr, handle, size);
+ return;
+ }
+
+ pages = __iommu_get_pages(cpu_addr, attrs);
+ if (!pages) {
+ WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+ return;
+ }
+
+ if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+ dma_common_free_remap(cpu_addr, size, VM_USERMAP);
+
+ __iommu_remove_mapping(dev, handle, size);
+ __iommu_free_buffer(dev, pages, size, attrs);
+}
+
+int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size, struct dma_attrs *attrs)
+{
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+
+ if (!pages)
+ return -ENXIO;
+
+ return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
+ GFP_KERNEL);
+}
+
+static int __dma_direction_to_prot(enum dma_data_direction dir)
+{
+ int prot;
+
+ switch (dir) {
+ case DMA_BIDIRECTIONAL:
+ prot = IOMMU_READ | IOMMU_WRITE;
+ break;
+ case DMA_TO_DEVICE:
+ prot = IOMMU_READ;
+ break;
+ case DMA_FROM_DEVICE:
+ prot = IOMMU_WRITE;
+ break;
+ default:
+ prot = 0;
+ }
+
+ return prot;
+}
+
+/*
+ * Map a part of the scatter-gather list into contiguous io address space
+ */
+static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
+ size_t size, dma_addr_t *handle,
+ enum dma_data_direction dir, struct dma_attrs *attrs,
+ bool is_coherent)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova, iova_base;
+ int ret = 0;
+ unsigned int count;
+ struct scatterlist *s;
+ int prot;
+
+ size = PAGE_ALIGN(size);
+ *handle = DMA_ERROR_CODE;
+
+ iova_base = iova = __alloc_iova(mapping, size);
+ if (iova == DMA_ERROR_CODE)
+ return -ENOMEM;
+
+ for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
+ phys_addr_t phys = page_to_phys(sg_page(s));
+ unsigned int len = PAGE_ALIGN(s->offset + s->length);
+
+ if (!is_coherent &&
+ !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length,
+ dir);
+
+ prot = __dma_direction_to_prot(dir);
+
+ ret = iommu_map(mapping->domain, iova, phys, len, prot);
+ if (ret < 0)
+ goto fail;
+ count += len >> PAGE_SHIFT;
+ iova += len;
+ }
+ *handle = iova_base;
+
+ return 0;
+fail:
+ iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
+ __free_iova(mapping, iova_base, size);
+ return ret;
+}
+
+static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, struct dma_attrs *attrs,
+ bool is_coherent)
+{
+ struct scatterlist *s = sg, *dma = sg, *start = sg;
+ int i, count = 0;
+ unsigned int offset = s->offset;
+ unsigned int size = s->offset + s->length;
+ unsigned int max = dma_get_max_seg_size(dev);
+
+ for (i = 1; i < nents; i++) {
+ s = sg_next(s);
+
+ s->dma_address = DMA_ERROR_CODE;
+ s->dma_length = 0;
+
+ if (s->offset || (size & ~PAGE_MASK)
+ || size + s->length > max) {
+ if (__map_sg_chunk(dev, start, size, &dma->dma_address,
+ dir, attrs, is_coherent) < 0)
+ goto bad_mapping;
+
+ dma->dma_address += offset;
+ dma->dma_length = size - offset;
+
+ size = offset = s->offset;
+ start = s;
+ dma = sg_next(dma);
+ count += 1;
+ }
+ size += s->length;
+ }
+ if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
+ is_coherent) < 0)
+ goto bad_mapping;
+
+ dma->dma_address += offset;
+ dma->dma_length = size - offset;
+
+ return count+1;
+
+bad_mapping:
+ for_each_sg(sg, s, count, i)
+ __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
+ return 0;
+}
+
+/**
+ * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of i/o coherent buffers described by scatterlist in streaming
+ * mode for DMA. The scatter gather list elements are merged together (if
+ * possible) and tagged with the appropriate dma address and length. They are
+ * obtained via sg_dma_{address,length}.
+ */
+int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
+}
+
+static int iommu_map_range(struct iommu_domain *domain, unsigned int iova,
+ struct scatterlist *sg, unsigned int len, int opt)
+{
+ s32 ret = 0;
+ u32 offset = 0;
+ u32 start_iova = iova;
+
+ BUG_ON(iova & (~PAGE_MASK));
+
+ while (offset < len) {
+ phys_addr_t phys = page_to_phys(sg_page(sg));
+ u32 page_len = PAGE_ALIGN(sg->offset + sg->length);
+
+ ret = iommu_map(domain, iova, phys, page_len, opt);
+ if (ret)
+ goto fail;
+
+ iova += page_len;
+ offset += page_len;
+ if (offset < len)
+ sg = sg_next(sg);
+ }
+
+ goto out;
+
+fail:
+ /* undo mappings already done in case of error */
+ iommu_unmap(domain, start_iova, offset);
+out:
+
+ return ret;
+}
+
+/**
+ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * The scatter gather list elements are merged together (if possible) and
+ * tagged with the appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ */
+int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ struct scatterlist *s;
+ int ret, i;
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ unsigned int iova, total_length = 0, current_offset = 0;
+ int prot = __dma_direction_to_prot(dir);
+
+ for_each_sg(sg, s, nents, i)
+ total_length += s->length;
+
+ iova = __alloc_iova(mapping, total_length);
+ ret = iommu_map_range(mapping->domain, iova, sg, total_length, prot);
+ if (ret) {
+ __free_iova(mapping, iova, total_length);
+ return 0;
+ }
+
+ for_each_sg(sg, s, nents, i) {
+ s->dma_address = iova + current_offset;
+ s->dma_length = total_length - current_offset;
+ current_offset += s->length;
+ }
+
+ return nents;
+}
+
+static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
+ bool is_coherent)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i) {
+ if (sg_dma_len(s))
+ __iommu_remove_mapping(dev, sg_dma_address(s),
+ sg_dma_len(s));
+ if (!is_coherent &&
+ !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ __dma_page_dev_to_cpu(sg_page(s), s->offset,
+ s->length, dir);
+ }
+}
+
+/**
+ * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations. Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ __iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
+}
+
+/**
+ * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations. Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ unsigned int total_length = sg_dma_len(sg);
+ unsigned int iova = sg_dma_address(sg);
+
+ total_length = PAGE_ALIGN((iova & ~PAGE_MASK) + total_length);
+ iova &= PAGE_MASK;
+
+ iommu_unmap(mapping->domain, iova, total_length);
+ __free_iova(mapping, iova, total_length);
+}
+
+/**
+ * arm_iommu_sync_sg_for_cpu
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
+}
+
+/**
+ * arm_iommu_sync_sg_for_device
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+}
+
+/**
+ * arm_coherent_iommu_map_page
+ * @dev: valid struct device pointer
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Coherent IOMMU aware version of arm_dma_map_page()
+ */
+static dma_addr_t arm_coherent_iommu_map_page(struct device *dev,
+ struct page *page, unsigned long offset, size_t size,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t dma_addr;
+ int ret, prot, len = PAGE_ALIGN(size + offset);
+
+ dma_addr = __alloc_iova(mapping, len);
+ if (dma_addr == DMA_ERROR_CODE)
+ return dma_addr;
+
+ prot = __dma_direction_to_prot(dir);
+
+ ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
+ prot);
+ if (ret < 0)
+ goto fail;
+
+ return dma_addr + offset;
+fail:
+ __free_iova(mapping, dma_addr, len);
+ return DMA_ERROR_CODE;
+}
+
+/**
+ * arm_iommu_map_page
+ * @dev: valid struct device pointer
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * IOMMU aware version of arm_dma_map_page()
+ */
+static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+
+ return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
+}
+
+/**
+ * arm_coherent_iommu_unmap_page
+ * @dev: valid struct device pointer
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * Coherent IOMMU aware version of arm_dma_unmap_page()
+ */
+static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova = handle & PAGE_MASK;
+ int offset = handle & ~PAGE_MASK;
+ int len = PAGE_ALIGN(size + offset);
+
+ if (!iova)
+ return;
+
+ iommu_unmap(mapping->domain, iova, len);
+ __free_iova(mapping, iova, len);
+}
+
+/**
+ * arm_iommu_unmap_page
+ * @dev: valid struct device pointer
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * IOMMU aware version of arm_dma_unmap_page()
+ */
+static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova = handle & PAGE_MASK;
+ struct page *page = phys_to_page(iommu_iova_to_phys(
+ mapping->domain, iova));
+ int offset = handle & ~PAGE_MASK;
+ int len = PAGE_ALIGN(size + offset);
+
+ if (!iova)
+ return;
+
+ if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ __dma_page_dev_to_cpu(page, offset, size, dir);
+
+ iommu_unmap(mapping->domain, iova, len);
+ __free_iova(mapping, iova, len);
+}
+
+static void arm_iommu_sync_single_for_cpu(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova = handle & PAGE_MASK;
+ struct page *page = phys_to_page(iommu_iova_to_phys(
+ mapping->domain, iova));
+ unsigned int offset = handle & ~PAGE_MASK;
+
+ if (!iova)
+ return;
+
+ __dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void arm_iommu_sync_single_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+ dma_addr_t iova = handle & PAGE_MASK;
+ struct page *page = phys_to_page(iommu_iova_to_phys(
+ mapping->domain, iova));
+ unsigned int offset = handle & ~PAGE_MASK;
+
+ if (!iova)
+ return;
+
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+const struct dma_map_ops iommu_ops = {
+ .alloc = arm_iommu_alloc_attrs,
+ .free = arm_iommu_free_attrs,
+ .mmap = arm_iommu_mmap_attrs,
+ .get_sgtable = arm_iommu_get_sgtable,
+
+ .map_page = arm_iommu_map_page,
+ .unmap_page = arm_iommu_unmap_page,
+ .sync_single_for_cpu = arm_iommu_sync_single_for_cpu,
+ .sync_single_for_device = arm_iommu_sync_single_for_device,
+
+ .map_sg = arm_iommu_map_sg,
+ .unmap_sg = arm_iommu_unmap_sg,
+ .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
+ .sync_sg_for_device = arm_iommu_sync_sg_for_device,
+
+ .set_dma_mask = arm_dma_set_mask,
+};
+
+const struct dma_map_ops iommu_coherent_ops = {
+ .alloc = arm_iommu_alloc_attrs,
+ .free = arm_iommu_free_attrs,
+ .mmap = arm_iommu_mmap_attrs,
+ .get_sgtable = arm_iommu_get_sgtable,
+
+ .map_page = arm_coherent_iommu_map_page,
+ .unmap_page = arm_coherent_iommu_unmap_page,
+
+ .map_sg = arm_coherent_iommu_map_sg,
+ .unmap_sg = arm_coherent_iommu_unmap_sg,
+
+ .set_dma_mask = arm_dma_set_mask,
+};
+
+/**
+ * arm_iommu_create_mapping
+ * @bus: pointer to the bus holding the client device (for IOMMU calls)
+ * @base: start address of the valid IO address space
+ * @size: size of the valid IO address space
+ * @order: granularity of the IO address allocations
+ *
+ * Creates a mapping structure which holds information about used/unused
+ * IO address ranges, which is required to perform memory allocation and
+ * mapping with IOMMU aware functions.
+ *
+ * The client device needs to be attached to the mapping with the
+ * arm_iommu_attach_device() function.
+ */
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
+ int order)
+{
+ unsigned int count = size >> (PAGE_SHIFT + order);
+ unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+ struct dma_iommu_mapping *mapping;
+ int err = -ENOMEM;
+
+ if (!count)
+ return ERR_PTR(-EINVAL);
+
+ mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
+ if (!mapping)
+ goto err;
+
+ mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!mapping->bitmap)
+ goto err2;
+
+ mapping->base = base;
+ mapping->bits = BITS_PER_BYTE * bitmap_size;
+ mapping->order = order;
+ spin_lock_init(&mapping->lock);
+
+ mapping->domain = iommu_domain_alloc(bus);
+ if (!mapping->domain)
+ goto err3;
+
+ kref_init(&mapping->kref);
+ return mapping;
+err3:
+ kfree(mapping->bitmap);
+err2:
+ kfree(mapping);
+err:
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(arm_iommu_create_mapping);
+
+static void release_iommu_mapping(struct kref *kref)
+{
+ struct dma_iommu_mapping *mapping =
+ container_of(kref, struct dma_iommu_mapping, kref);
+
+ iommu_domain_free(mapping->domain);
+ kfree(mapping->bitmap);
+ kfree(mapping);
+}
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
+{
+ if (mapping)
+ kref_put(&mapping->kref, release_iommu_mapping);
+}
+EXPORT_SYMBOL(arm_iommu_release_mapping);
+
+/**
+ * arm_iommu_attach_device
+ * @dev: valid struct device pointer
+ * @mapping: io address space mapping structure (returned from
+ * arm_iommu_create_mapping)
+ *
+ * Attaches the specified io address space mapping to the provided device;
+ * this replaces the dma operations (dma_map_ops pointer) with the
+ * IOMMU aware version. More than one client might be attached to
+ * the same io address space mapping.
+ */
+int arm_iommu_attach_device(struct device *dev,
+ struct dma_iommu_mapping *mapping)
+{
+ int err;
+
+ err = iommu_attach_device(mapping->domain, dev);
+ if (err)
+ return err;
+
+ kref_get(&mapping->kref);
+ dev->archdata.mapping = mapping;
+ dev->archdata.dma_ops = &iommu_ops;
+ pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
+ return 0;
+}
+EXPORT_SYMBOL(arm_iommu_attach_device);
+
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached map.
+ * This voids the dma operations (dma_map_ops pointer)
+ */
+void arm_iommu_detach_device(struct device *dev)
+{
+ struct dma_iommu_mapping *mapping;
+
+ mapping = to_dma_iommu_mapping(dev);
+ if (!mapping) {
+ dev_warn(dev, "Not attached\n");
+ return;
+ }
+
+ iommu_detach_device(mapping->domain, dev);
+ kref_put(&mapping->kref, release_iommu_mapping);
+ dev->archdata.mapping = NULL;
+ dev->archdata.dma_ops = NULL;
+
+ pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
+}
+EXPORT_SYMBOL(arm_iommu_detach_device);
+
+#endif
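
Once a device has been attached to a mapping as described in the arm_iommu_attach_device() kernel-doc above, ordinary streaming DMA calls are serviced by the iommu_ops table added here. A minimal sketch (illustrative only; dev is assumed to be an attached device and page a valid struct page):

    	dma_addr_t dma;

    	dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
    	if (dma == DMA_ERROR_CODE)
    		return -ENOMEM;	/* __alloc_iova() or iommu_map() failed */

    	/* ... the device performs the transfer using the IOVA in dma ... */

    	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);

dma_map_page() lands in arm_iommu_map_page(), which performs the CPU cache maintenance and maps the page into the IOVA window; dma_unmap_page() reverses the mapping and does the CPU-side maintenance as needed.
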
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index f0099360039e..65438123f087 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -24,11 +24,80 @@
#define to_amba_driver(d) container_of(d, struct amba_driver, drv)
+static int amba_get_enable_pclk(struct amba_device *pcdev)
+{
+ int ret;
+
+ pcdev->pclk = clk_get(&pcdev->dev, "apb_pclk");
+ if (IS_ERR(pcdev->pclk))
+ return PTR_ERR(pcdev->pclk);
+
+ ret = clk_prepare_enable(pcdev->pclk);
+ if (ret)
+ clk_put(pcdev->pclk);
+
+ return ret;
+}
+
+static void amba_put_disable_pclk(struct amba_device *pcdev)
+{
+ clk_disable_unprepare(pcdev->pclk);
+ clk_put(pcdev->pclk);
+}
+
+static int amba_read_periphid(struct amba_device *adev)
+{
+ resource_size_t size;
+ void __iomem *tmp;
+ int r, i;
+
+ if (adev->periphid)
+ return 0;
+
+ /*
+ * Dynamically calculate the size of the resource
+ * and use this for iomap
+ */
+ size = resource_size(&adev->res);
+ tmp = ioremap(adev->res.start, size);
+ if (!tmp)
+ return -ENODEV;
+
+ r = amba_get_enable_pclk(adev);
+ if (r == 0) {
+ u32 pid, cid;
+
+ /*
+		 * Read the pid and cid based on the size of the resource;
+		 * they are located at the end of the region
+ */
+ for (pid = 0, i = 0; i < 4; i++)
+ pid |= (readl(tmp + size - 0x20 + 4 * i) & 255) <<
+ (i * 8);
+ for (cid = 0, i = 0; i < 4; i++)
+ cid |= (readl(tmp + size - 0x10 + 4 * i) & 255) <<
+ (i * 8);
+
+ amba_put_disable_pclk(adev);
+
+ if (cid == AMBA_CID || cid == CORESIGHT_CID)
+ adev->periphid = pid;
+
+ }
+
+ iounmap(tmp);
+
+ return adev->periphid ? 0 : -ENODEV;
+}
+
static const struct amba_id *
amba_lookup(const struct amba_id *table, struct amba_device *dev)
{
int ret = 0;
+ if (amba_read_periphid(dev))
+ return NULL;
+
while (table->mask) {
ret = (dev->periphid & table->mask) == table->id;
if (ret)
@@ -204,27 +273,6 @@ static int __init amba_init(void)
postcore_initcall(amba_init);
-static int amba_get_enable_pclk(struct amba_device *pcdev)
-{
- int ret;
-
- pcdev->pclk = clk_get(&pcdev->dev, "apb_pclk");
- if (IS_ERR(pcdev->pclk))
- return PTR_ERR(pcdev->pclk);
-
- ret = clk_prepare_enable(pcdev->pclk);
- if (ret)
- clk_put(pcdev->pclk);
-
- return ret;
-}
-
-static void amba_put_disable_pclk(struct amba_device *pcdev)
-{
- clk_disable_unprepare(pcdev->pclk);
- clk_put(pcdev->pclk);
-}
-
/*
* These are the device model conversion veneers; they convert the
* device model structures to our more specific structures.
@@ -347,9 +395,7 @@ static void amba_device_release(struct device *dev)
*/
int amba_device_add(struct amba_device *dev, struct resource *parent)
{
- u32 size;
- void __iomem *tmp;
- int i, ret;
+ int ret;
WARN_ON(dev->irq[0] == (unsigned int)-1);
WARN_ON(dev->irq[1] == (unsigned int)-1);
@@ -358,51 +404,6 @@ int amba_device_add(struct amba_device *dev, struct resource *parent)
if (ret)
goto err_out;
- /* Hard-coded primecell ID instead of plug-n-play */
- if (dev->periphid != 0)
- goto skip_probe;
-
- /*
- * Dynamically calculate the size of the resource
- * and use this for iomap
- */
- size = resource_size(&dev->res);
- tmp = ioremap(dev->res.start, size);
- if (!tmp) {
- ret = -ENOMEM;
- goto err_release;
- }
-
- ret = amba_get_enable_pclk(dev);
- if (ret == 0) {
- u32 pid, cid;
-
- /*
- * Read pid and cid based on size of resource
- * they are located at end of region
- */
- for (pid = 0, i = 0; i < 4; i++)
- pid |= (readl(tmp + size - 0x20 + 4 * i) & 255) <<
- (i * 8);
- for (cid = 0, i = 0; i < 4; i++)
- cid |= (readl(tmp + size - 0x10 + 4 * i) & 255) <<
- (i * 8);
-
- amba_put_disable_pclk(dev);
-
- if (cid == AMBA_CID || cid == CORESIGHT_CID)
- dev->periphid = pid;
-
- if (!dev->periphid)
- ret = -ENODEV;
- }
-
- iounmap(tmp);
-
- if (ret)
- goto err_release;
-
- skip_probe:
ret = device_add(&dev->dev);
if (ret)
goto err_release;
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index f80aaaf9f610..af0dd0284675 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -512,6 +512,10 @@ static int platform_drv_probe(struct device *_dev)
if (ret < 0)
return ret;
+ ret = of_dma_configure_ops(_dev, _dev->of_node);
+ if (ret < 0)
+ goto done;
+
ret = dev_pm_domain_attach(_dev, true);
if (ret != -EPROBE_DEFER) {
ret = drv->probe(dev);
@@ -519,6 +523,10 @@ static int platform_drv_probe(struct device *_dev)
dev_pm_domain_detach(_dev, true);
}
+ if (ret)
+ of_dma_deconfigure(_dev);
+
+done:
if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
dev_warn(_dev, "probe deferral not supported\n");
ret = -ENXIO;
@@ -540,6 +548,7 @@ static int platform_drv_remove(struct device *_dev)
ret = drv->remove(dev);
dev_pm_domain_detach(_dev, true);
+ of_dma_deconfigure(_dev);
return ret;
}
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 7ae7cd990fbf..c2afba212539 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -146,9 +146,10 @@ static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
static DEFINE_MUTEX(dev_opp_list_lock);
-#define opp_rcu_lockdep_assert() \
+#define opp_rcu_lockdep_assert(s) \
do { \
RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \
+ !(s && srcu_read_lock_held(s)) && \
!lockdep_is_held(&dev_opp_list_lock), \
"Missing rcu_read_lock() or " \
"dev_opp_list_lock protection"); \
@@ -236,9 +237,10 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
struct dev_pm_opp *tmp_opp;
unsigned long v = 0;
- opp_rcu_lockdep_assert();
+ opp_rcu_lockdep_assert(&opp->dev_opp->srcu_head.srcu);
- tmp_opp = rcu_dereference(opp);
+ tmp_opp = srcu_dereference_check(opp, &opp->dev_opp->srcu_head.srcu,
+ rcu_read_lock_held());
if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
pr_err("%s: Invalid parameters\n", __func__);
else
@@ -268,9 +270,10 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
struct dev_pm_opp *tmp_opp;
unsigned long f = 0;
- opp_rcu_lockdep_assert();
+ opp_rcu_lockdep_assert(&opp->dev_opp->srcu_head.srcu);
- tmp_opp = rcu_dereference(opp);
+ tmp_opp = srcu_dereference_check(opp, &opp->dev_opp->srcu_head.srcu,
+ rcu_read_lock_held());
if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
pr_err("%s: Invalid parameters\n", __func__);
else
@@ -302,7 +305,7 @@ bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
{
struct dev_pm_opp *tmp_opp;
- opp_rcu_lockdep_assert();
+ opp_rcu_lockdep_assert(&opp->dev_opp->srcu_head.srcu);
tmp_opp = rcu_dereference(opp);
if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
@@ -357,7 +360,7 @@ struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
{
struct device_opp *dev_opp;
- opp_rcu_lockdep_assert();
+ opp_rcu_lockdep_assert(NULL);
dev_opp = _find_device_opp(dev);
if (IS_ERR(dev_opp) || !dev_opp->suspend_opp ||
@@ -437,7 +440,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
struct device_opp *dev_opp;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
- opp_rcu_lockdep_assert();
+ opp_rcu_lockdep_assert(NULL);
dev_opp = _find_device_opp(dev);
if (IS_ERR(dev_opp)) {
@@ -485,7 +488,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
struct device_opp *dev_opp;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
- opp_rcu_lockdep_assert();
+ opp_rcu_lockdep_assert(NULL);
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -535,7 +538,7 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
struct device_opp *dev_opp;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
- opp_rcu_lockdep_assert();
+ opp_rcu_lockdep_assert(NULL);
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -1143,6 +1146,83 @@ unlock:
}
/**
+ * dev_pm_opp_adjust_voltage() - helper to change the voltage of an opp
+ * @dev: device for which we do this operation
+ * @freq: OPP frequency to adjust voltage of
+ * @u_volt: new OPP voltage
+ *
+ * Change the voltage of an OPP with an RCU operation.
+ *
+ * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
+ * copy operation, and 0 if no modification was needed or the modification
+ * was successful.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks to
+ * keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex locking or synchronize_rcu() blocking calls cannot be used.
+ */
+int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
+ unsigned long u_volt)
+{
+ struct device_opp *dev_opp;
+ struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
+ int r = 0;
+
+ /* keep the node allocated */
+ new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
+ if (!new_opp)
+ return -ENOMEM;
+
+ mutex_lock(&dev_opp_list_lock);
+
+ /* Find the device_opp */
+ dev_opp = _find_device_opp(dev);
+ if (IS_ERR(dev_opp)) {
+ r = PTR_ERR(dev_opp);
+ dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
+ goto unlock;
+ }
+
+ /* Do we have the frequency? */
+ list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
+ if (tmp_opp->rate == freq) {
+ opp = tmp_opp;
+ break;
+ }
+ }
+ if (IS_ERR(opp)) {
+ r = PTR_ERR(opp);
+ goto unlock;
+ }
+
+ /* Is update really needed? */
+ if (opp->u_volt == u_volt)
+ goto unlock;
+ /* copy the old data over */
+ *new_opp = *opp;
+
+ /* plug in new node */
+ new_opp->u_volt = u_volt;
+
+ list_replace_rcu(&opp->node, &new_opp->node);
+ mutex_unlock(&dev_opp_list_lock);
+ call_srcu(&dev_opp->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
+
+ /* Notify the change of the OPP */
+ srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADJUST_VOLTAGE,
+ new_opp);
+
+ return 0;
+
+unlock:
+ mutex_unlock(&dev_opp_list_lock);
+ kfree(new_opp);
+ return r;
+}
+
+/**
* dev_pm_opp_enable() - Enable a specific OPP
* @dev: device for which we do this operation
* @freq: OPP frequency to enable
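
As a usage sketch for the new helper (illustrative only; cpu_dev, the 998.4 MHz frequency and the 1.05 V target are made-up values), an AVS/CPR style driver would call it from a context where blocking and synchronize_rcu() are permitted:

    	int ret;

    	/* retune an existing OPP to a measured voltage */
    	ret = dev_pm_opp_adjust_voltage(cpu_dev, 998400000, 1050000);
    	if (ret)
    		dev_warn(cpu_dev, "failed to adjust OPP voltage: %d\n", ret);

Listeners on the device's OPP notifier head then receive OPP_EVENT_ADJUST_VOLTAGE with the replacement OPP.
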
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 0bd88c942a52..da65af6fe1f5 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -168,6 +168,17 @@ config BT_HCIUART_QCA
Say Y here to compile support for QCA protocol.
+config BT_QCOMSMD
+ tristate "Qualcomm SMD based HCI support"
+ depends on QCOM_SMD
+ help
+ Qualcomm SMD based HCI driver.
+ This driver is used to bridge HCI data onto the shared memory
+ channels to the WCNSS core.
+
+ Say Y here to compile support for HCI over Qualcomm SMD into the
+	  kernel, or say M to compile it as a module.
+
config BT_HCIBCM203X
tristate "HCI BCM203x USB driver"
depends on USB
diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile
index 07c9cf381e5a..19e313bf8c39 100644
--- a/drivers/bluetooth/Makefile
+++ b/drivers/bluetooth/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_BT_WILINK) += btwilink.o
obj-$(CONFIG_BT_BCM) += btbcm.o
obj-$(CONFIG_BT_RTL) += btrtl.o
obj-$(CONFIG_BT_QCA) += btqca.o
+obj-$(CONFIG_BT_QCOMSMD) += btqcomsmd.o
btmrvl-y := btmrvl_main.o
btmrvl-$(CONFIG_DEBUG_FS) += btmrvl_debugfs.o
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
new file mode 100644
index 000000000000..4b91c830531e
--- /dev/null
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/smd.h>
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+#include <net/bluetooth/hci.h>
+
+#define EDL_NVM_ACCESS_SET_REQ_CMD 0x01
+#define EDL_NVM_ACCESS_OPCODE 0xfc0b
+
+struct btqcomsmd {
+ struct qcom_smd_channel *acl_channel;
+ struct qcom_smd_channel *cmd_channel;
+};
+
+static int btqcomsmd_recv(struct hci_dev *hdev,
+ unsigned type,
+ const void *data,
+ size_t count)
+{
+ struct sk_buff *skb;
+ void *buf;
+
+ /* Use GFP_ATOMIC as we're in IRQ context */
+ skb = bt_skb_alloc(count, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ bt_cb(skb)->pkt_type = type;
+
+ /* Use io accessor as data might be ioremapped */
+ buf = skb_put(skb, count);
+ memcpy_fromio(buf, data, count);
+
+ return hci_recv_frame(hdev, skb);
+}
+
+static int btqcomsmd_acl_callback(struct qcom_smd_device *qsdev,
+ const void *data,
+ size_t count)
+{
+ struct hci_dev *hdev = dev_get_drvdata(&qsdev->dev);
+
+ return btqcomsmd_recv(hdev, HCI_ACLDATA_PKT, data, count);
+}
+
+static int btqcomsmd_cmd_callback(struct qcom_smd_device *qsdev,
+ const void *data,
+ size_t count)
+{
+ struct hci_dev *hdev = dev_get_drvdata(&qsdev->dev);
+
+ return btqcomsmd_recv(hdev, HCI_EVENT_PKT, data, count);
+}
+
+static int btqcomsmd_send(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct btqcomsmd *btq = hci_get_drvdata(hdev);
+ int ret;
+
+ switch (bt_cb(skb)->pkt_type) {
+ case HCI_ACLDATA_PKT:
+ case HCI_SCODATA_PKT:
+ ret = qcom_smd_send(btq->acl_channel, skb->data, skb->len);
+ break;
+ case HCI_COMMAND_PKT:
+ ret = qcom_smd_send(btq->cmd_channel, skb->data, skb->len);
+ break;
+ default:
+ ret = -ENODEV;
+ break;
+ }
+
+ return ret;
+}
+
+static int btqcomsmd_open(struct hci_dev *hdev)
+{
+ set_bit(HCI_RUNNING, &hdev->flags);
+ return 0;
+}
+
+static int btqcomsmd_close(struct hci_dev *hdev)
+{
+ clear_bit(HCI_RUNNING, &hdev->flags);
+ return 0;
+}
+
+static int btqcomsmd_set_bdaddr(struct hci_dev *hdev,
+ const bdaddr_t *bdaddr)
+{
+ struct sk_buff *skb;
+ u8 cmd[9];
+ int err;
+
+ cmd[0] = EDL_NVM_ACCESS_SET_REQ_CMD;
+ cmd[1] = 0x02; /* TAG ID */
+ cmd[2] = sizeof(bdaddr_t); /* size */
+ memcpy(cmd + 3, bdaddr, sizeof(bdaddr_t));
+ skb = __hci_cmd_sync_ev(hdev,
+ EDL_NVM_ACCESS_OPCODE,
+ sizeof(cmd), cmd,
+ HCI_VENDOR_PKT, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ BT_ERR("%s: Change address command failed (%d)",
+ hdev->name, err);
+ return err;
+ }
+
+ kfree_skb(skb);
+
+ return 0;
+}
+
+static int btqcomsmd_probe(struct qcom_smd_device *sdev)
+{
+ struct qcom_smd_channel *acl;
+ struct btqcomsmd *btq;
+ struct hci_dev *hdev;
+ int ret;
+
+ acl = qcom_smd_open_channel(sdev,
+ "APPS_RIVA_BT_ACL",
+ btqcomsmd_acl_callback);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+
+ btq = devm_kzalloc(&sdev->dev, sizeof(*btq), GFP_KERNEL);
+ if (!btq)
+ return -ENOMEM;
+
+ btq->acl_channel = acl;
+ btq->cmd_channel = sdev->channel;
+
+ hdev = hci_alloc_dev();
+ if (!hdev)
+ return -ENOMEM;
+
+ hdev->bus = HCI_SMD;
+ hdev->open = btqcomsmd_open;
+ hdev->close = btqcomsmd_close;
+ hdev->send = btqcomsmd_send;
+ hdev->set_bdaddr = btqcomsmd_set_bdaddr;
+
+ ret = hci_register_dev(hdev);
+ if (ret < 0) {
+ hci_free_dev(hdev);
+ return ret;
+ }
+
+ hci_set_drvdata(hdev, btq);
+ dev_set_drvdata(&sdev->dev, hdev);
+
+ return 0;
+}
+
+static void btqcomsmd_remove(struct qcom_smd_device *sdev)
+{
+ struct hci_dev *hdev = dev_get_drvdata(&sdev->dev);
+
+ hci_unregister_dev(hdev);
+ hci_free_dev(hdev);
+}
+
+static const struct qcom_smd_id btqcomsmd_match[] = {
+ { .name = "APPS_RIVA_BT_CMD" },
+ {}
+};
+
+static struct qcom_smd_driver btqcomsmd_cmd_driver = {
+ .probe = btqcomsmd_probe,
+ .remove = btqcomsmd_remove,
+ .callback = btqcomsmd_cmd_callback,
+ .smd_match_table = btqcomsmd_match,
+ .driver = {
+ .name = "btqcomsmd",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_qcom_smd_driver(btqcomsmd_cmd_driver);
+
+MODULE_DESCRIPTION("Qualcomm SMD HCI driver");
+MODULE_LICENSE("GPL v2");
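For reference, btqcomsmd_set_bdaddr() above frames the EDL_NVM_ACCESS_SET_REQ vendor command as a 9-byte payload: request opcode, tag ID, length, then the 6-byte address. The concrete address below is purely illustrative and not part of the patch:

	/* Setting 00:11:22:33:44:55 (illustrative): bdaddr_t stores the
	 * least-significant byte first, so the payload sent with opcode
	 * 0xfc0b is:
	 *   cmd[0]    = 0x01              EDL_NVM_ACCESS_SET_REQ_CMD
	 *   cmd[1]    = 0x02              TAG ID
	 *   cmd[2]    = 0x06              sizeof(bdaddr_t)
	 *   cmd[3..8] = 55 44 33 22 11 00 address, byte-reversed
	 */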
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 7129c86a79db..2c97af0f978c 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -28,35 +28,25 @@
#define to_clk_mux(_hw) container_of(_hw, struct clk_mux, hw)
-static u8 clk_mux_get_parent(struct clk_hw *hw)
+unsigned int clk_mux_get_parent(struct clk_hw *hw, unsigned int val,
+ unsigned int *table, unsigned long flags)
{
struct clk_mux *mux = to_clk_mux(hw);
int num_parents = clk_hw_get_num_parents(hw);
- u32 val;
- /*
- * FIXME need a mux-specific flag to determine if val is bitwise or numeric
- * e.g. sys_clkin_ck's clksel field is 3 bits wide, but ranges from 0x1
- * to 0x7 (index starts at one)
- * OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so
- * val = 0x4 really means "bit 2, index starts at bit 0"
- */
- val = clk_readl(mux->reg) >> mux->shift;
- val &= mux->mask;
-
- if (mux->table) {
+ if (table) {
int i;
for (i = 0; i < num_parents; i++)
- if (mux->table[i] == val)
+ if (table[i] == val)
return i;
return -EINVAL;
}
- if (val && (mux->flags & CLK_MUX_INDEX_BIT))
+ if (val && (flags & CLK_MUX_INDEX_BIT))
val = ffs(val) - 1;
- if (val && (mux->flags & CLK_MUX_INDEX_ONE))
+ if (val && (flags & CLK_MUX_INDEX_ONE))
val--;
if (val >= num_parents)
@@ -64,24 +54,53 @@ static u8 clk_mux_get_parent(struct clk_hw *hw)
return val;
}
+EXPORT_SYMBOL_GPL(clk_mux_get_parent);
-static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
+static u8 _clk_mux_get_parent(struct clk_hw *hw)
{
struct clk_mux *mux = to_clk_mux(hw);
u32 val;
- unsigned long flags = 0;
- if (mux->table)
- index = mux->table[index];
+ /*
+ * FIXME need a mux-specific flag to determine if val is bitwise or numeric
+ * e.g. sys_clkin_ck's clksel field is 3 bits wide, but ranges from 0x1
+ * to 0x7 (index starts at one)
+ * OTOH, pmd_trace_clk_mux_ck uses a separate bit for each clock, so
+ * val = 0x4 really means "bit 2, index starts at bit 0"
+ */
+ val = clk_readl(mux->reg) >> mux->shift;
+ val &= mux->mask;
- else {
- if (mux->flags & CLK_MUX_INDEX_BIT)
- index = 1 << index;
+ return clk_mux_get_parent(hw, val, mux->table, mux->flags);
+}
- if (mux->flags & CLK_MUX_INDEX_ONE)
- index++;
+unsigned int clk_mux_reindex(u8 index, unsigned int *table,
+ unsigned long flags)
+{
+ unsigned int val = index;
+
+ if (table) {
+ val = table[val];
+ } else {
+ if (flags & CLK_MUX_INDEX_BIT)
+ val = 1 << index;
+
+ if (flags & CLK_MUX_INDEX_ONE)
+ val++;
}
+ return val;
+}
+EXPORT_SYMBOL_GPL(clk_mux_reindex);
+
+static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_mux *mux = to_clk_mux(hw);
+ u32 val;
+ unsigned long flags = 0;
+
+ index = clk_mux_reindex(index, mux->table, mux->flags);
+
if (mux->lock)
spin_lock_irqsave(mux->lock, flags);
else
@@ -105,14 +124,14 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
}
const struct clk_ops clk_mux_ops = {
- .get_parent = clk_mux_get_parent,
+ .get_parent = _clk_mux_get_parent,
.set_parent = clk_mux_set_parent,
.determine_rate = __clk_mux_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_mux_ops);
const struct clk_ops clk_mux_ro_ops = {
- .get_parent = clk_mux_get_parent,
+ .get_parent = _clk_mux_get_parent,
};
EXPORT_SYMBOL_GPL(clk_mux_ro_ops);
@@ -120,7 +139,7 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
const char * const *parent_names, u8 num_parents,
unsigned long flags,
void __iomem *reg, u8 shift, u32 mask,
- u8 clk_mux_flags, u32 *table, spinlock_t *lock)
+ u8 clk_mux_flags, unsigned int *table, spinlock_t *lock)
{
struct clk_mux *mux;
struct clk *clk;
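The reindexing helpers factored out of clk-mux.c above behave as in the following examples (values illustrative):

	/* clk_mux_reindex(2, NULL, CLK_MUX_INDEX_BIT) returns 1 << 2 = 4,
	 * clk_mux_reindex(2, NULL, CLK_MUX_INDEX_ONE) returns 2 + 1 = 3, and
	 * clk_mux_get_parent() performs the inverse mapping from a register
	 * value back to a parent index, using the table when one is given. */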
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 0ebcf449778a..1fa4aaaa6d65 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -51,9 +51,13 @@ struct clk_core {
struct clk_core **parents;
u8 num_parents;
u8 new_parent_index;
+ u8 safe_parent_index;
unsigned long rate;
unsigned long req_rate;
+ unsigned long old_rate;
unsigned long new_rate;
+ unsigned long safe_freq;
+ struct clk_core *safe_parent;
struct clk_core *new_parent;
struct clk_core *new_child;
unsigned long flags;
@@ -1266,7 +1270,9 @@ out:
static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
struct clk_core *new_parent, u8 p_index)
{
- struct clk_core *child;
+ struct clk_core *child, *parent;
+ struct clk_hw *parent_hw;
+ unsigned long safe_freq = 0;
core->new_rate = new_rate;
core->new_parent = new_parent;
@@ -1276,6 +1282,23 @@ static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
if (new_parent && new_parent != core->parent)
new_parent->new_child = core;
+ if (core->ops->get_safe_parent) {
+ parent_hw = core->ops->get_safe_parent(core->hw, &safe_freq);
+ if (parent_hw) {
+ parent = parent_hw->core;
+ p_index = clk_fetch_parent_index(core, parent);
+ core->safe_parent_index = p_index;
+ core->safe_parent = parent;
+ if (safe_freq)
+ core->safe_freq = safe_freq;
+ else
+ core->safe_freq = 0;
+ }
+ } else {
+ core->safe_parent = NULL;
+ core->safe_freq = 0;
+ }
+
hlist_for_each_entry(child, &core->children, child_node) {
child->new_rate = clk_recalc(child, new_rate);
clk_calc_subtree(child, child->new_rate, NULL, 0);
@@ -1388,14 +1411,51 @@ static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
unsigned long event)
{
struct clk_core *child, *tmp_clk, *fail_clk = NULL;
+ struct clk_core *old_parent;
int ret = NOTIFY_DONE;
- if (core->rate == core->new_rate)
+ if (core->rate == core->new_rate && event != POST_RATE_CHANGE)
return NULL;
+ switch (event) {
+ case PRE_RATE_CHANGE:
+ if (core->safe_parent) {
+ if (core->safe_freq)
+ core->ops->set_rate_and_parent(core->hw,
+ core->safe_freq,
+ core->safe_parent->rate,
+ core->safe_parent_index);
+ else
+ core->ops->set_parent(core->hw,
+ core->safe_parent_index);
+ }
+ core->old_rate = core->rate;
+ break;
+ case POST_RATE_CHANGE:
+ if (core->safe_parent) {
+ old_parent = __clk_set_parent_before(core,
+ core->new_parent);
+ if (core->ops->set_rate_and_parent) {
+ core->ops->set_rate_and_parent(core->hw,
+ core->new_rate,
+ core->new_parent ?
+ core->new_parent->rate : 0,
+ core->new_parent_index);
+ } else if (core->ops->set_parent) {
+ core->ops->set_parent(core->hw,
+ core->new_parent_index);
+ }
+ __clk_set_parent_after(core, core->new_parent,
+ old_parent);
+ }
+ break;
+ }
+
if (core->notifier_count) {
- ret = __clk_notify(core, event, core->rate, core->new_rate);
- if (ret & NOTIFY_STOP_MASK)
+ if (event != POST_RATE_CHANGE || core->old_rate != core->rate)
+ ret = __clk_notify(core, event, core->old_rate,
+ core->new_rate);
+ if (ret & NOTIFY_STOP_MASK && event != POST_RATE_CHANGE)
fail_clk = core;
}
@@ -1422,23 +1482,19 @@ static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
* walk down a subtree and set the new rates notifying the rate
* change on the way
*/
-static void clk_change_rate(struct clk_core *core)
+static void
+clk_change_rate(struct clk_core *core, unsigned long best_parent_rate)
{
struct clk_core *child;
struct hlist_node *tmp;
unsigned long old_rate;
- unsigned long best_parent_rate = 0;
bool skip_set_rate = false;
struct clk_core *old_parent;
old_rate = core->rate;
- if (core->new_parent)
- best_parent_rate = core->new_parent->rate;
- else if (core->parent)
- best_parent_rate = core->parent->rate;
-
- if (core->new_parent && core->new_parent != core->parent) {
+ if (core->new_parent && core->new_parent != core->parent &&
+ !core->safe_parent) {
old_parent = __clk_set_parent_before(core, core->new_parent);
trace_clk_set_parent(core, core->new_parent);
@@ -1463,6 +1519,7 @@ static void clk_change_rate(struct clk_core *core)
trace_clk_set_rate_complete(core, core->new_rate);
core->rate = clk_recalc(core, best_parent_rate);
+ core->rate = core->new_rate;
if (core->notifier_count && old_rate != core->rate)
__clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
@@ -1478,12 +1535,13 @@ static void clk_change_rate(struct clk_core *core)
/* Skip children who will be reparented to another clock */
if (child->new_parent && child->new_parent != core)
continue;
- clk_change_rate(child);
+ if (child->new_rate != child->rate)
+ clk_change_rate(child, core->new_rate);
}
- /* handle the new child who might not be in core->children yet */
- if (core->new_child)
- clk_change_rate(core->new_child);
+ /* handle the new child who might not be in core->children yet */
+ if (core->new_child && core->new_child->new_rate != core->new_child->rate)
+ clk_change_rate(core->new_child, core->new_rate);
}
static int clk_core_set_rate_nolock(struct clk_core *core,
@@ -1492,6 +1550,7 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
struct clk_core *top, *fail_clk;
unsigned long rate = req_rate;
int ret = 0;
+ unsigned long parent_rate;
if (!core)
return 0;
@@ -1517,11 +1576,18 @@ static int clk_core_set_rate_nolock(struct clk_core *core,
return -EBUSY;
}
+ if (top->parent)
+ parent_rate = top->parent->rate;
+ else
+ parent_rate = 0;
+
/* change the rates */
- clk_change_rate(top);
+ clk_change_rate(top, parent_rate);
core->req_rate = req_rate;
+ clk_propagate_rate_change(top, POST_RATE_CHANGE);
+
return ret;
}
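Taken together, the clk.c changes make clk_core_set_rate_nolock() perform a rate change in three phases; a minimal sketch of the resulting call flow (simplified, error handling omitted):

	/* 1. Park clocks that declare one on their safe parent (and
	 *    safe_freq, if provided) via the PRE notifier walk. */
	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
	/* 2. Reprogram rates down the subtree while it runs from the
	 *    safe source. */
	clk_change_rate(top, parent_rate);
	/* 3. Move back to the (possibly new) parent and send the POST
	 *    notifications. */
	clk_propagate_rate_change(top, POST_RATE_CHANGE);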
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 59d16668bdf5..5b85d27d8622 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -5,8 +5,23 @@ config COMMON_CLK_QCOM
select REGMAP_MMIO
select RESET_CONTROLLER
+config QCOM_CLK_SMD_RPM
+ tristate "Qualcomm SMD based RPM clock driver"
+ depends on QCOM_SMD_RPM && COMMON_CLK_QCOM
+ help
+ Support for the clocks exposed by the Resource Power Manager
+ processor found on Qualcomm-based devices such as apq8016.
+
+config QCOM_RPMCC
+ tristate "Qualcomm RPM Clock Controller"
+ depends on QCOM_CLK_SMD_RPM
+ help
+ Support for the clocks exposed by the Resource Power Manager
+ processor on devices like apq8016, apq8084 and msm8974.
+
config APQ_GCC_8084
tristate "APQ8084 Global Clock Controller"
+ select QCOM_GDSC
depends on COMMON_CLK_QCOM
help
Support for the global clock controller on apq8084 devices.
@@ -16,6 +31,7 @@ config APQ_GCC_8084
config APQ_MMCC_8084
tristate "APQ8084 Multimedia Clock Controller"
select APQ_GCC_8084
+ select QCOM_GDSC
depends on COMMON_CLK_QCOM
help
Support for the multimedia clock controller on apq8084 devices.
@@ -39,6 +55,11 @@ config IPQ_LCC_806X
Say Y if you want to use audio devices such as i2s, pcm,
S/PDIF, etc.
+config QCOM_GDSC
+ bool
+ select PM_GENERIC_DOMAINS if PM
+ depends on COMMON_CLK_QCOM
+
config MSM_GCC_8660
tristate "MSM8660 Global Clock Controller"
depends on COMMON_CLK_QCOM
@@ -49,6 +70,7 @@ config MSM_GCC_8660
config MSM_GCC_8916
tristate "MSM8916 Global Clock Controller"
+ select QCOM_GDSC
depends on COMMON_CLK_QCOM
help
Support for the global clock controller on msm8916 devices.
@@ -83,6 +105,7 @@ config MSM_MMCC_8960
config MSM_GCC_8974
tristate "MSM8974 Global Clock Controller"
+ select QCOM_GDSC
depends on COMMON_CLK_QCOM
help
Support for the global clock controller on msm8974 devices.
@@ -92,8 +115,53 @@ config MSM_GCC_8974
config MSM_MMCC_8974
tristate "MSM8974 Multimedia Clock Controller"
select MSM_GCC_8974
+ select QCOM_GDSC
depends on COMMON_CLK_QCOM
help
Support for the multimedia clock controller on msm8974 devices.
Say Y if you want to support multimedia devices such as display,
graphics, video encode/decode, camera, etc.
+
+config QCOM_HFPLL
+ tristate "High-Frequency PLL (HFPLL) Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the high-frequency PLLs present on Qualcomm devices.
+ Say Y if you want to support CPU frequency scaling on devices
+ such as MSM8974, APQ8084, etc.
+
+config KPSS_XCC
+ tristate "KPSS Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the Krait ACC and GCC clock controllers. Say Y
+ if you want to support CPU frequency scaling on devices such
+ as MSM8960, APQ8064, etc.
+
+config KRAITCC
+ tristate "Krait Clock Controller"
+ depends on COMMON_CLK_QCOM && ARM
+ select KRAIT_CLOCKS
+ help
+ Support for the Krait CPU clocks on Qualcomm devices.
+ Say Y if you want to support CPU frequency scaling.
+
+config KRAIT_CLOCKS
+ bool
+ select KRAIT_L2_ACCESSORS
+
+config MSM_RPMCC_8064
+ tristate "MSM8064 RPM Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the RPM clock controller which controls fabric clocks
+ on SoCs like MSM8064/APQ8064.
+ Say Y if you want to use fabric clocks such as AFAB, DAYTONA, etc.
+
+config QCOM_A53
+ tristate "A53 Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the A53 clock controller on Qualcomm devices.
+ Say Y if you want to support CPU frequency scaling on devices
+ such as MSM8916.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 50b337a24a87..dbc98a97c0ca 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -8,7 +8,13 @@ clk-qcom-y += clk-rcg2.o
clk-qcom-y += clk-branch.o
clk-qcom-y += clk-regmap-divider.o
clk-qcom-y += clk-regmap-mux.o
+clk-qcom-y += clk-regmap-mux-div.o
+clk-qcom-$(CONFIG_KRAIT_CLOCKS) += clk-krait.o
+clk-qcom-y += clk-hfpll.o
clk-qcom-y += reset.o
+clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
+clk-qcom-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
+clk-qcom-$(CONFIG_QCOM_RPMCC) += rpmcc.o
obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
@@ -18,6 +24,11 @@ obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
obj-$(CONFIG_MSM_GCC_8916) += gcc-msm8916.o
obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
+obj-$(CONFIG_MSM_RPMCC_8064) += rpmcc-apq8064.o
obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
+obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o
+obj-$(CONFIG_QCOM_HFPLL) += hfpll.o
+obj-$(CONFIG_KRAITCC) += krait-cc.o
+obj-$(CONFIG_QCOM_A53) += clk-a53.o
diff --git a/drivers/clk/qcom/clk-a53.c b/drivers/clk/qcom/clk-a53.c
new file mode 100644
index 000000000000..7320c7b4b3ff
--- /dev/null
+++ b/drivers/clk/qcom/clk-a53.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "clk-pll.h"
+#include "clk-regmap.h"
+#include "clk-regmap-mux-div.h"
+
+#define F_APCS_PLL(f, l, m, n) { (f), (l), (m), (n), 0 }
+
+static struct pll_freq_tbl apcs_pll_freq[] = {
+ F_APCS_PLL( 998400000, 52, 0x0, 0x1),
+ F_APCS_PLL(1094400000, 57, 0x0, 0x1),
+ F_APCS_PLL(1152000000, 62, 0x0, 0x1),
+ F_APCS_PLL(1209600000, 65, 0x0, 0x1),
+ F_APCS_PLL(1401600000, 73, 0x0, 0x1),
+};
+
+static struct clk_pll a53sspll = {
+ .l_reg = 0x04,
+ .m_reg = 0x08,
+ .n_reg = 0x0c,
+ .config_reg = 0x14,
+ .mode_reg = 0x00,
+ .status_reg = 0x1c,
+ .status_bit = 16,
+ .freq_tbl = apcs_pll_freq,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "a53sspll",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_pll_sr2_ops,
+ },
+};
+
+static const struct regmap_config a53sspll_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x40,
+ .fast_io = true,
+};
+
+static struct clk *a53ss_add_pll(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ void __iomem *base;
+ struct regmap *regmap;
+ struct clk_pll *pll;
+
+ pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return base;
+
+ pll = &a53sspll;
+
+ regmap = devm_regmap_init_mmio(dev, base, &a53sspll_regmap_config);
+ if (IS_ERR(regmap))
+ return ERR_CAST(regmap);
+
+ return devm_clk_register_regmap(dev, &pll->clkr);
+}
+
+enum {
+ P_GPLL0,
+ P_A53SSPLL,
+};
+
+static const struct parent_map gpll0_a53sspll_map[] = {
+ { P_GPLL0, 4 },
+ { P_A53SSPLL, 5 },
+};
+
+static const char *gpll0_a53sspll[] = {
+ "gpll0_vote",
+ "a53sspll",
+};
+
+static struct clk_regmap_mux_div a53ssmux = {
+ .reg_offset = 0x50,
+ .hid_width = 5,
+ .hid_shift = 0,
+ .src_width = 3,
+ .src_shift = 8,
+ .safe_src = 4,
+ .safe_freq = 400000000,
+ .parent_map = gpll0_a53sspll_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "a53ssmux",
+ .parent_names = gpll0_a53sspll,
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_div_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk *a53ss_add_mux(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct regmap *regmap;
+ struct clk_regmap_mux_div *mux;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ mux = &a53ssmux;
+
+ regmap = syscon_regmap_lookup_by_phandle(np, "qcom,apcs");
+ if (IS_ERR(regmap))
+ return ERR_CAST(regmap);
+
+ mux->clkr.regmap = regmap;
+ return devm_clk_register(dev, &mux->clkr.hw);
+}
+
+static const struct of_device_id qcom_a53_match_table[] = {
+ { .compatible = "qcom,clock-a53-msm8916" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_a53_match_table);
+
+static int qcom_a53_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk *clk_pll, *clk_mux;
+ struct clk_onecell_data *data;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->clks = devm_kcalloc(dev, 2, sizeof(struct clk *), GFP_KERNEL);
+ if (!data->clks)
+ return -ENOMEM;
+
+ clk_pll = a53ss_add_pll(pdev);
+ if (IS_ERR(clk_pll))
+ return PTR_ERR(clk_pll);
+
+ clk_mux = a53ss_add_mux(pdev);
+ if (IS_ERR(clk_mux))
+ return PTR_ERR(clk_mux);
+
+ data->clks[0] = clk_pll;
+ data->clks[1] = clk_mux;
+ data->clk_num = 2;
+
+ clk_prepare_enable(clk_pll);
+
+ return of_clk_add_provider(dev->of_node, of_clk_src_onecell_get, data);
+}
+
+static struct platform_driver qcom_a53_driver = {
+ .probe = qcom_a53_probe,
+ .driver = {
+ .name = "qcom-a53",
+ .of_match_table = qcom_a53_match_table,
+ },
+};
+
+module_platform_driver(qcom_a53_driver);
+
+MODULE_DESCRIPTION("Qualcomm A53 Clock Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-a53");
diff --git a/drivers/clk/qcom/clk-hfpll.c b/drivers/clk/qcom/clk-hfpll.c
new file mode 100644
index 000000000000..eacf853c132e
--- /dev/null
+++ b/drivers/clk/qcom/clk-hfpll.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/regmap.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+
+#include "clk-regmap.h"
+#include "clk-hfpll.h"
+
+#define PLL_OUTCTRL BIT(0)
+#define PLL_BYPASSNL BIT(1)
+#define PLL_RESET_N BIT(2)
+
+/* Initialize a HFPLL at a given rate and enable it. */
+static void __clk_hfpll_init_once(struct clk_hw *hw)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+
+ if (likely(h->init_done))
+ return;
+
+ /* Configure PLL parameters for integer mode. */
+ if (hd->config_val)
+ regmap_write(regmap, hd->config_reg, hd->config_val);
+ regmap_write(regmap, hd->m_reg, 0);
+ regmap_write(regmap, hd->n_reg, 1);
+
+ if (hd->user_reg) {
+ u32 regval = hd->user_val;
+ unsigned long rate;
+
+ rate = clk_hw_get_rate(hw);
+
+ /* Pick the right VCO. */
+ if (hd->user_vco_mask && rate > hd->low_vco_max_rate)
+ regval |= hd->user_vco_mask;
+ regmap_write(regmap, hd->user_reg, regval);
+ }
+
+ if (hd->droop_reg)
+ regmap_write(regmap, hd->droop_reg, hd->droop_val);
+
+ h->init_done = true;
+}
+
+static void __clk_hfpll_enable(struct clk_hw *hw)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ u32 val;
+
+ __clk_hfpll_init_once(hw);
+
+ /* Disable PLL bypass mode. */
+ regmap_update_bits(regmap, hd->mode_reg, PLL_BYPASSNL, PLL_BYPASSNL);
+
+ /*
+ * H/W requires a 5us delay between disabling the bypass and
+ * de-asserting the reset. Delay 10us just to be safe.
+ */
+ udelay(10);
+
+ /* De-assert active-low PLL reset. */
+ regmap_update_bits(regmap, hd->mode_reg, PLL_RESET_N, PLL_RESET_N);
+
+ /* Wait for PLL to lock. */
+ if (hd->status_reg) {
+ do {
+ regmap_read(regmap, hd->status_reg, &val);
+ } while (!(val & BIT(hd->lock_bit)));
+ } else {
+ udelay(60);
+ }
+
+ /* Enable PLL output. */
+ regmap_update_bits(regmap, hd->mode_reg, PLL_OUTCTRL, PLL_OUTCTRL);
+}
+
+/* Enable an already-configured HFPLL. */
+static int clk_hfpll_enable(struct clk_hw *hw)
+{
+ unsigned long flags;
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ u32 mode;
+
+ spin_lock_irqsave(&h->lock, flags);
+ regmap_read(regmap, hd->mode_reg, &mode);
+ if (!(mode & (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL)))
+ __clk_hfpll_enable(hw);
+ spin_unlock_irqrestore(&h->lock, flags);
+
+ return 0;
+}
+
+static void __clk_hfpll_disable(struct clk_hfpll *h)
+{
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+
+ /*
+ * Disable the PLL output, disable test mode, enable the bypass mode,
+ * and assert the reset.
+ */
+ regmap_update_bits(regmap, hd->mode_reg,
+ PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL, 0);
+}
+
+static void clk_hfpll_disable(struct clk_hw *hw)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(&h->lock, flags);
+ __clk_hfpll_disable(h);
+ spin_unlock_irqrestore(&h->lock, flags);
+}
+
+static long clk_hfpll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ unsigned long rrate;
+
+ rate = clamp(rate, hd->min_rate, hd->max_rate);
+
+ rrate = DIV_ROUND_UP(rate, *parent_rate) * *parent_rate;
+ if (rrate > hd->max_rate)
+ rrate -= *parent_rate;
+
+ return rrate;
+}
+
+/*
+ * For optimization reasons, assumes no downstream clocks are actively using
+ * it.
+ */
+static int clk_hfpll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ unsigned long flags;
+ u32 l_val, val;
+ bool enabled;
+
+ l_val = rate / parent_rate;
+
+ spin_lock_irqsave(&h->lock, flags);
+
+ enabled = __clk_is_enabled(hw->clk);
+ if (enabled)
+ __clk_hfpll_disable(h);
+
+ /* Pick the right VCO. */
+ if (hd->user_reg && hd->user_vco_mask) {
+ regmap_read(regmap, hd->user_reg, &val);
+ if (rate <= hd->low_vco_max_rate)
+ val &= ~hd->user_vco_mask;
+ else
+ val |= hd->user_vco_mask;
+ regmap_write(regmap, hd->user_reg, val);
+ }
+
+ regmap_write(regmap, hd->l_reg, l_val);
+
+ if (enabled)
+ __clk_hfpll_enable(hw);
+
+ spin_unlock_irqrestore(&h->lock, flags);
+
+ return 0;
+}
+
+static unsigned long clk_hfpll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ u32 l_val;
+
+ regmap_read(regmap, hd->l_reg, &l_val);
+
+ return l_val * parent_rate;
+}
+
+static void clk_hfpll_init(struct clk_hw *hw)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ u32 mode, status;
+
+ regmap_read(regmap, hd->mode_reg, &mode);
+ if (mode != (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL)) {
+ __clk_hfpll_init_once(hw);
+ return;
+ }
+
+ if (hd->status_reg) {
+ regmap_read(regmap, hd->status_reg, &status);
+ if (!(status & BIT(hd->lock_bit))) {
+ WARN(1, "HFPLL %s is ON, but not locked!\n",
+ __clk_get_name(hw->clk));
+ clk_hfpll_disable(hw);
+ __clk_hfpll_init_once(hw);
+ }
+ }
+}
+
+static int hfpll_is_enabled(struct clk_hw *hw)
+{
+ struct clk_hfpll *h = to_clk_hfpll(hw);
+ struct hfpll_data const *hd = h->d;
+ struct regmap *regmap = h->clkr.regmap;
+ u32 mode;
+
+ regmap_read(regmap, hd->mode_reg, &mode);
+ mode &= 0x7;
+ return mode == (PLL_BYPASSNL | PLL_RESET_N | PLL_OUTCTRL);
+}
+
+const struct clk_ops clk_ops_hfpll = {
+ .enable = clk_hfpll_enable,
+ .disable = clk_hfpll_disable,
+ .is_enabled = hfpll_is_enabled,
+ .round_rate = clk_hfpll_round_rate,
+ .set_rate = clk_hfpll_set_rate,
+ .recalc_rate = clk_hfpll_recalc_rate,
+ .init = clk_hfpll_init,
+};
+EXPORT_SYMBOL_GPL(clk_ops_hfpll);
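A platform clock driver consumes these ops by pairing a clk_hfpll with an hfpll_data description of its registers and then registering the clkr against a regmap. The sketch below uses hypothetical register offsets and limits purely for illustration; they are not taken from this patch:

	static const struct hfpll_data hfpll_example = {	/* hypothetical values */
		.mode_reg	= 0x00,
		.l_reg		= 0x04,
		.m_reg		= 0x08,
		.n_reg		= 0x0c,
		.config_reg	= 0x14,
		.status_reg	= 0x1c,
		.lock_bit	= 16,
		.config_val	= 0x7845c665,	/* integer-mode setup word */
		.min_rate	= 600000000UL,
		.max_rate	= 2900000000UL,
	};

	static struct clk_hfpll hfpll_example_clk = {
		.d = &hfpll_example,
		.lock = __SPIN_LOCK_UNLOCKED(hfpll_example_clk.lock),
		.clkr.hw.init = &(struct clk_init_data){
			.name = "hfpll_example",
			.parent_names = (const char *[]){ "xo" },
			.num_parents = 1,
			.ops = &clk_ops_hfpll,
		},
	};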
diff --git a/drivers/clk/qcom/clk-hfpll.h b/drivers/clk/qcom/clk-hfpll.h
new file mode 100644
index 000000000000..48c18d664f4e
--- /dev/null
+++ b/drivers/clk/qcom/clk-hfpll.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __QCOM_CLK_HFPLL_H__
+#define __QCOM_CLK_HFPLL_H__
+
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+#include "clk-regmap.h"
+
+struct hfpll_data {
+ u32 mode_reg;
+ u32 l_reg;
+ u32 m_reg;
+ u32 n_reg;
+ u32 user_reg;
+ u32 droop_reg;
+ u32 config_reg;
+ u32 status_reg;
+ u8 lock_bit;
+
+ u32 droop_val;
+ u32 config_val;
+ u32 user_val;
+ u32 user_vco_mask;
+ unsigned long low_vco_max_rate;
+
+ unsigned long min_rate;
+ unsigned long max_rate;
+};
+
+struct clk_hfpll {
+ struct hfpll_data const *d;
+ int init_done;
+
+ struct clk_regmap clkr;
+ spinlock_t lock;
+};
+
+#define to_clk_hfpll(_hw) \
+ container_of(to_clk_regmap(_hw), struct clk_hfpll, clkr)
+
+extern const struct clk_ops clk_ops_hfpll;
+
+#endif
diff --git a/drivers/clk/qcom/clk-krait.c b/drivers/clk/qcom/clk-krait.c
new file mode 100644
index 000000000000..dd69146a22f5
--- /dev/null
+++ b/drivers/clk/qcom/clk-krait.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+
+#include <asm/krait-l2-accessors.h>
+
+#include "clk-krait.h"
+
+/* Secondary and primary muxes share the same cp15 register */
+static DEFINE_SPINLOCK(krait_clock_reg_lock);
+
+#define LPL_SHIFT 8
+static void __krait_mux_set_sel(struct krait_mux_clk *mux, int sel)
+{
+ unsigned long flags;
+ u32 regval;
+
+ spin_lock_irqsave(&krait_clock_reg_lock, flags);
+ regval = krait_get_l2_indirect_reg(mux->offset);
+ regval &= ~(mux->mask << mux->shift);
+ regval |= (sel & mux->mask) << mux->shift;
+ if (mux->lpl) {
+ regval &= ~(mux->mask << (mux->shift + LPL_SHIFT));
+ regval |= (sel & mux->mask) << (mux->shift + LPL_SHIFT);
+ }
+ krait_set_l2_indirect_reg(mux->offset, regval);
+ spin_unlock_irqrestore(&krait_clock_reg_lock, flags);
+
+ /* Wait for switch to complete. */
+ mb();
+ udelay(1);
+}
+
+static int krait_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct krait_mux_clk *mux = to_krait_mux_clk(hw);
+ u32 sel;
+
+ sel = clk_mux_reindex(index, mux->parent_map, 0);
+ mux->en_mask = sel;
+ /* Don't touch mux if CPU is off as it won't work */
+ if (__clk_is_enabled(hw->clk))
+ __krait_mux_set_sel(mux, sel);
+ return 0;
+}
+
+static u8 krait_mux_get_parent(struct clk_hw *hw)
+{
+ struct krait_mux_clk *mux = to_krait_mux_clk(hw);
+ u32 sel;
+
+ sel = krait_get_l2_indirect_reg(mux->offset);
+ sel >>= mux->shift;
+ sel &= mux->mask;
+ mux->en_mask = sel;
+
+ return clk_mux_get_parent(hw, sel, mux->parent_map, 0);
+}
+
+static struct clk_hw *krait_mux_get_safe_parent(struct clk_hw *hw,
+ unsigned long *safe_freq)
+{
+ int i;
+ struct krait_mux_clk *mux = to_krait_mux_clk(hw);
+ int num_parents = clk_hw_get_num_parents(hw);
+
+ for (i = 0; i < num_parents; i++)
+ if (mux->safe_sel == mux->parent_map[i])
+ break;
+
+ if (safe_freq)
+ *safe_freq = 0;
+
+ return clk_hw_get_parent_by_index(hw, i);
+}
+
+static int krait_mux_enable(struct clk_hw *hw)
+{
+ struct krait_mux_clk *mux = to_krait_mux_clk(hw);
+
+ __krait_mux_set_sel(mux, mux->en_mask);
+
+ return 0;
+}
+
+static void krait_mux_disable(struct clk_hw *hw)
+{
+ struct krait_mux_clk *mux = to_krait_mux_clk(hw);
+
+ __krait_mux_set_sel(mux, mux->safe_sel);
+}
+
+const struct clk_ops krait_mux_clk_ops = {
+ .enable = krait_mux_enable,
+ .disable = krait_mux_disable,
+ .set_parent = krait_mux_set_parent,
+ .get_parent = krait_mux_get_parent,
+ .determine_rate = __clk_mux_determine_rate_closest,
+ .get_safe_parent = krait_mux_get_safe_parent,
+};
+EXPORT_SYMBOL_GPL(krait_mux_clk_ops);
+
+/* The divider can divide by 2, 4, 6 and 8. But we only really need div-2. */
+static long krait_div2_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ *parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), rate * 2);
+ return DIV_ROUND_UP(*parent_rate, 2);
+}
+
+static int krait_div2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct krait_div2_clk *d = to_krait_div2_clk(hw);
+ unsigned long flags;
+ u32 val;
+ u32 mask = BIT(d->width) - 1;
+
+ if (d->lpl)
+ mask = mask << (d->shift + LPL_SHIFT) | mask << d->shift;
+
+ spin_lock_irqsave(&krait_clock_reg_lock, flags);
+ val = krait_get_l2_indirect_reg(d->offset);
+ val &= ~mask;
+ krait_set_l2_indirect_reg(d->offset, val);
+ spin_unlock_irqrestore(&krait_clock_reg_lock, flags);
+
+ return 0;
+}
+
+static unsigned long
+krait_div2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct krait_div2_clk *d = to_krait_div2_clk(hw);
+ u32 mask = BIT(d->width) - 1;
+ u32 div;
+
+ div = krait_get_l2_indirect_reg(d->offset);
+ div >>= d->shift;
+ div &= mask;
+ div = (div + 1) * 2;
+
+ return DIV_ROUND_UP(parent_rate, div);
+}
+
+const struct clk_ops krait_div2_clk_ops = {
+ .round_rate = krait_div2_round_rate,
+ .set_rate = krait_div2_set_rate,
+ .recalc_rate = krait_div2_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(krait_div2_clk_ops);
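A worked example of the low-power-leader (LPL) mirroring done in __krait_mux_set_sel() (register values illustrative):

	/* With mask = 0x3, shift = 0 and lpl = true, selecting source 2
	 * clears bits [1:0] and [9:8] of the L2 indirect register and
	 * writes the value 2 into both fields, i.e. regval |= 0x202, so
	 * the low-power and normal mux selections always stay in step. */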
diff --git a/drivers/clk/qcom/clk-krait.h b/drivers/clk/qcom/clk-krait.h
new file mode 100644
index 000000000000..5d0063538e5d
--- /dev/null
+++ b/drivers/clk/qcom/clk-krait.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_KRAIT_H
+#define __QCOM_CLK_KRAIT_H
+
+#include <linux/clk-provider.h>
+
+struct krait_mux_clk {
+ unsigned int *parent_map;
+ bool has_safe_parent;
+ u8 safe_sel;
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 en_mask;
+ bool lpl;
+
+ struct clk_hw hw;
+};
+
+#define to_krait_mux_clk(_hw) container_of(_hw, struct krait_mux_clk, hw)
+
+extern const struct clk_ops krait_mux_clk_ops;
+
+struct krait_div2_clk {
+ u32 offset;
+ u8 width;
+ u32 shift;
+ bool lpl;
+
+ struct clk_hw hw;
+};
+
+#define to_krait_div2_clk(_hw) container_of(_hw, struct krait_div2_clk, hw)
+
+extern const struct clk_ops krait_div2_clk_ops;
+
+#endif
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 56028bb31d87..9b3ef5d67895 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -153,6 +153,7 @@ extern const struct clk_ops clk_dyn_rcg_ops;
* @hid_width: number of bits in half integer divider
* @parent_map: map from software's parent index to hardware's src_sel field
* @freq_tbl: frequency table
+ * @current_freq: cached frequency, used for shared branches
* @clkr: regmap clock handle
* @lock: register lock
*
@@ -163,14 +164,17 @@ struct clk_rcg2 {
u8 hid_width;
const struct parent_map *parent_map;
const struct freq_tbl *freq_tbl;
+ unsigned long current_freq;
struct clk_regmap clkr;
};
#define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr)
extern const struct clk_ops clk_rcg2_ops;
+extern const struct clk_ops clk_rcg2_shared_ops;
extern const struct clk_ops clk_edp_pixel_ops;
extern const struct clk_ops clk_byte_ops;
+extern const struct clk_ops clk_byte2_ops;
extern const struct clk_ops clk_pixel_ops;
#endif
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 9aec1761fd29..3e8d0bde4704 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -300,6 +300,74 @@ const struct clk_ops clk_rcg2_ops = {
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);
+static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ const char *name = __clk_get_name(hw->clk);
+ int ret, count;
+
+ /* cache the frequency */
+ rcg->current_freq = rate;
+
+ /* do not set any rate while the clock is off */
+ if (!__clk_is_enabled(hw->clk))
+ return 0;
+
+ /* force enable RCG */
+ ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+ CMD_ROOT_EN, CMD_ROOT_EN);
+ if (ret)
+ return ret;
+
+ /* wait for RCG to turn ON */
+ for (count = 500; count > 0; count--) {
+ ret = clk_rcg2_is_enabled(hw);
+ if (ret)
+ break;
+ udelay(1);
+ }
+ if (!count)
+ pr_err("%s: RCG did not turn on\n", name);
+
+ /* set clock rate */
+ ret = __clk_rcg2_set_rate(hw, rate);
+ if (ret)
+ return ret;
+
+ /* clear force enable RCG */
+ return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+ CMD_ROOT_EN, 0);
+}
+
+static int clk_rcg2_shared_enable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ if (!rcg->current_freq)
+ return 0;
+
+ return clk_rcg2_shared_set_rate(hw, rcg->current_freq, 0);
+}
+
+static void clk_rcg2_shared_disable(struct clk_hw *hw)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+ /* switch to XO, which is the lowest entry in the freq table */
+ clk_rcg2_shared_set_rate(hw, rcg->freq_tbl[0].freq, 0);
+}
+
+const struct clk_ops clk_rcg2_shared_ops = {
+ .enable = clk_rcg2_shared_enable,
+ .disable = clk_rcg2_shared_disable,
+ .get_parent = clk_rcg2_get_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .determine_rate = clk_rcg2_determine_rate,
+ .set_rate = clk_rcg2_shared_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
+
struct frac_entry {
int num;
int den;
@@ -485,6 +553,76 @@ const struct clk_ops clk_byte_ops = {
};
EXPORT_SYMBOL_GPL(clk_byte_ops);
+static int clk_byte2_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ unsigned long parent_rate, div;
+ u32 mask = BIT(rcg->hid_width) - 1;
+ struct clk_hw *p;
+ unsigned long rate = req->rate;
+
+ if (rate == 0)
+ return -EINVAL;
+
+ p = req->best_parent_hw;
+ req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);
+
+ div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
+ div = min_t(u32, div, mask);
+
+ req->rate = calc_rate(parent_rate, 0, 0, 0, div);
+
+ return 0;
+}
+
+static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+ struct freq_tbl f = { 0 };
+ unsigned long div;
+ int i, num_parents = clk_hw_get_num_parents(hw);
+ u32 mask = BIT(rcg->hid_width) - 1;
+ u32 cfg;
+
+ div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
+ div = min_t(u32, div, mask);
+
+ f.pre_div = div;
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ cfg &= CFG_SRC_SEL_MASK;
+ cfg >>= CFG_SRC_SEL_SHIFT;
+
+ for (i = 0; i < num_parents; i++) {
+ if (cfg == rcg->parent_map[i].cfg) {
+ f.src = rcg->parent_map[i].src;
+ return clk_rcg2_configure(rcg, &f);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate, u8 index)
+{
+ /* Read the hardware to determine parent during set_rate */
+ return clk_byte2_set_rate(hw, rate, parent_rate);
+}
+
+const struct clk_ops clk_byte2_ops = {
+ .is_enabled = clk_rcg2_is_enabled,
+ .get_parent = clk_rcg2_get_parent,
+ .set_parent = clk_rcg2_set_parent,
+ .recalc_rate = clk_rcg2_recalc_rate,
+ .set_rate = clk_byte2_set_rate,
+ .set_rate_and_parent = clk_byte2_set_rate_and_parent,
+ .determine_rate = clk_byte2_determine_rate,
+};
+EXPORT_SYMBOL_GPL(clk_byte2_ops);
+
static const struct frac_entry frac_table_pixel[] = {
{ 3, 8 },
{ 2, 9 },
@@ -496,14 +634,9 @@ static const struct frac_entry frac_table_pixel[] = {
static int clk_pixel_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
- struct clk_rcg2 *rcg = to_clk_rcg2(hw);
unsigned long request, src_rate;
int delta = 100000;
- const struct freq_tbl *f = rcg->freq_tbl;
const struct frac_entry *frac = frac_table_pixel;
- int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
-
- req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
for (; frac->num; frac++) {
request = (req->rate * frac->den) / frac->num;
@@ -525,12 +658,23 @@ static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
- struct freq_tbl f = *rcg->freq_tbl;
+ struct freq_tbl f = { 0 };
const struct frac_entry *frac = frac_table_pixel;
unsigned long request;
int delta = 100000;
u32 mask = BIT(rcg->hid_width) - 1;
- u32 hid_div;
+ u32 hid_div, cfg;
+ int i, num_parents = clk_hw_get_num_parents(hw);
+
+ regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
+ cfg &= CFG_SRC_SEL_MASK;
+ cfg >>= CFG_SRC_SEL_SHIFT;
+
+ for (i = 0; i < num_parents; i++)
+ if (cfg == rcg->parent_map[i].cfg) {
+ f.src = rcg->parent_map[i].src;
+ break;
+ }
for (; frac->num; frac++) {
request = (rate * frac->den) / frac->num;
@@ -555,7 +699,6 @@ static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate, u8 index)
{
- /* Parent index is set statically in frequency table */
return clk_pixel_set_rate(hw, rate, parent_rate);
}
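A worked example of the half-integer divider arithmetic used by clk_byte2_determine_rate() and clk_byte2_set_rate() (rates chosen purely for illustration):

	/* Requesting 270 MHz from an 810 MHz parent:
	 *   div = DIV_ROUND_UP(2 * 810000000, 270000000) - 1 = 5
	 * and the resulting rate is
	 *   calc_rate(810 MHz, 0, 0, 0, 5) = 810 MHz * 2 / (5 + 1) = 270 MHz,
	 * i.e. a programmed value of 5 encodes a divide-by-3. */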
diff --git a/drivers/clk/qcom/clk-regmap-mux-div.c b/drivers/clk/qcom/clk-regmap-mux-div.c
new file mode 100644
index 000000000000..f43da2acfab3
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap-mux-div.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+
+#include "clk-regmap-mux-div.h"
+
+#define CMD_RCGR 0x0
+#define CMD_RCGR_UPDATE BIT(0)
+#define CMD_RCGR_DIRTY_CFG BIT(4)
+#define CMD_RCGR_ROOT_OFF BIT(31)
+#define CFG_RCGR 0x4
+
+static int __mux_div_update_config(struct clk_regmap_mux_div *md)
+{
+ int ret;
+ u32 val, count;
+ const char *name = __clk_get_name(md->clkr.hw.clk);
+
+ ret = regmap_update_bits(md->clkr.regmap, CMD_RCGR + md->reg_offset,
+ CMD_RCGR_UPDATE, CMD_RCGR_UPDATE);
+ if (ret)
+ return ret;
+
+ /* Wait for update to take effect */
+ for (count = 500; count > 0; count--) {
+ ret = regmap_read(md->clkr.regmap, CMD_RCGR + md->reg_offset,
+ &val);
+ if (ret)
+ return ret;
+ if (!(val & CMD_RCGR_UPDATE))
+ return 0;
+ udelay(1);
+ }
+
+ pr_err("%s: rcg did not update its configuration.\n", name);
+ return -EBUSY;
+}
+
+static int __mux_div_set_src_div(struct clk_regmap_mux_div *md, u32 src_sel,
+ u32 src_div)
+{
+ int ret;
+ u32 val, mask;
+
+ val = (src_div << md->hid_shift) | (src_sel << md->src_shift);
+ mask = ((BIT(md->hid_width) - 1) << md->hid_shift) |
+ ((BIT(md->src_width) - 1) << md->src_shift);
+
+ ret = regmap_update_bits(md->clkr.regmap, CFG_RCGR + md->reg_offset,
+ mask, val);
+ if (ret)
+ return ret;
+
+ ret = __mux_div_update_config(md);
+ return ret;
+}
+
+static void __mux_div_get_src_div(struct clk_regmap_mux_div *md, u32 *src_sel,
+ u32 *src_div)
+{
+ u32 val, div, src;
+ const char *name = __clk_get_name(md->clkr.hw.clk);
+
+ regmap_read(md->clkr.regmap, CMD_RCGR + md->reg_offset, &val);
+
+ if (val & CMD_RCGR_DIRTY_CFG) {
+ pr_err("%s: rcg configuration is pending.\n", name);
+ return;
+ }
+
+ regmap_read(md->clkr.regmap, CFG_RCGR + md->reg_offset, &val);
+ src = (val >> md->src_shift);
+ src &= BIT(md->src_width) - 1;
+ *src_sel = src;
+
+ div = (val >> md->hid_shift);
+ div &= BIT(md->hid_width) - 1;
+ *src_div = div;
+}
+
+static int mux_div_enable(struct clk_hw *hw)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+
+ return __mux_div_set_src_div(md, md->src_sel, md->div);
+}
+
+static inline bool is_better_rate(unsigned long req, unsigned long best,
+ unsigned long new)
+{
+ return (req <= new && new < best) || (best < req && best < new);
+}
+
+static int mux_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+ unsigned int i, div, max_div;
+ unsigned long actual_rate, rrate = 0;
+ unsigned long rate = req->rate;
+
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i);
+ unsigned long parent_rate = clk_hw_get_rate(parent);
+
+ max_div = BIT(md->hid_width) - 1;
+ for (div = 1; div < max_div; div++) {
+ parent_rate = mult_frac(rate, div, 2);
+ parent_rate = clk_hw_round_rate(parent, parent_rate);
+ actual_rate = mult_frac(parent_rate, 2, div);
+
+ if (is_better_rate(rate, rrate, actual_rate)) {
+ rrate = actual_rate;
+ req->best_parent_rate = parent_rate;
+ req->best_parent_hw = parent;
+ }
+
+ if (actual_rate < rate || rrate <= rate)
+ break;
+ }
+ }
+
+ if (!rrate)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __mux_div_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate, u32 src_sel)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+ int ret, i;
+ u32 div, max_div, best_src = 0, best_div = 0;
+ unsigned long actual_rate = 0, rrate = 0;
+
+ for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
+ struct clk_hw *parent = clk_hw_get_parent_by_index(hw, i);
+ unsigned long parent_rate = clk_hw_get_rate(parent);
+
+ max_div = BIT(md->hid_width) - 1;
+ for (div = 1; div < max_div; div++) {
+ parent_rate = mult_frac(rate, div, 2);
+ parent_rate = clk_hw_round_rate(parent, parent_rate);
+ actual_rate = mult_frac(parent_rate, 2, div);
+
+ if (is_better_rate(rate, rrate, actual_rate)) {
+ rrate = actual_rate;
+ best_src = md->parent_map[i].cfg;
+ best_div = div - 1;
+ }
+
+ if (actual_rate < rate || rrate <= rate)
+ break;
+ }
+ }
+
+ ret = __mux_div_set_src_div(md, best_src, best_div);
+ if (!ret) {
+ md->div = best_div;
+ md->src_sel = best_src;
+ }
+
+ return ret;
+}
+
+static u8 mux_div_get_parent(struct clk_hw *hw)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+ int num_parents = clk_hw_get_num_parents(hw);
+ const char *name = __clk_get_name(hw->clk);
+ u32 i, div, src;
+
+ __mux_div_get_src_div(md, &src, &div);
+
+ for (i = 0; i < num_parents; i++)
+ if (src == md->parent_map[i].cfg)
+ return i;
+
+ pr_err("%s: Can't find parent %d\n", name, src);
+ return 0;
+}
+
+static int mux_div_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+
+ return __mux_div_set_src_div(md, md->parent_map[index].cfg, md->div);
+}
+
+static int mux_div_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long prate)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+ u8 pindex = mux_div_get_parent(hw);
+ struct clk_hw *parent = clk_hw_get_parent_by_index(hw, pindex);
+ unsigned long current_prate = clk_hw_get_rate(parent);
+
+ if (rate > current_prate)
+ return -EINVAL;
+
+ return __mux_div_set_rate_and_parent(hw, rate, prate, md->src_sel);
+}
+
+static int mux_div_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate, u8 index)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+
+ return __mux_div_set_rate_and_parent(hw, rate, prate,
+ md->parent_map[index].cfg);
+}
+
+static unsigned long mux_div_recalc_rate(struct clk_hw *hw, unsigned long prate)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+ u32 div, src;
+ int i, num_parents = clk_hw_get_num_parents(hw);
+ const char *name = __clk_get_name(hw->clk);
+
+ __mux_div_get_src_div(md, &src, &div);
+ for (i = 0; i < num_parents; i++)
+ if (src == md->parent_map[i].cfg) {
+ struct clk_hw *p = clk_hw_get_parent_by_index(hw, i);
+ unsigned long parent_rate = clk_hw_get_rate(p);
+
+ return mult_frac(parent_rate, 2, div + 1);
+ }
+
+ pr_err("%s: Can't find parent %d\n", name, src);
+ return 0;
+}
+
+static struct clk_hw *mux_div_get_safe_parent(struct clk_hw *hw,
+ unsigned long *safe_freq)
+{
+ int i;
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+ int num_parents = clk_hw_get_num_parents(hw);
+
+ if (md->safe_freq)
+ *safe_freq = md->safe_freq;
+
+ for (i = 0; i < num_parents; i++)
+ if (md->safe_src == md->parent_map[i].cfg)
+ break;
+
+ return clk_hw_get_parent_by_index(hw, i);
+}
+
+static void mux_div_disable(struct clk_hw *hw)
+{
+ struct clk_regmap_mux_div *md = to_clk_regmap_mux_div(hw);
+ struct clk_hw *parent;
+ u32 div;
+
+ if (!md->safe_freq || !md->safe_src)
+ return;
+
+ parent = mux_div_get_safe_parent(hw, &md->safe_freq);
+ div = divider_get_val(md->safe_freq, clk_hw_get_rate(parent), NULL,
+ md->hid_width, CLK_DIVIDER_ROUND_CLOSEST);
+ div = 2 * div + 1;
+
+ __mux_div_set_src_div(md, md->safe_src, div);
+}
+
+const struct clk_ops clk_regmap_mux_div_ops = {
+ .enable = mux_div_enable,
+ .disable = mux_div_disable,
+ .get_parent = mux_div_get_parent,
+ .set_parent = mux_div_set_parent,
+ .set_rate = mux_div_set_rate,
+ .set_rate_and_parent = mux_div_set_rate_and_parent,
+ .determine_rate = mux_div_determine_rate,
+ .recalc_rate = mux_div_recalc_rate,
+ .get_safe_parent = mux_div_get_safe_parent,
+};
+EXPORT_SYMBOL_GPL(clk_regmap_mux_div_ops);
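A worked example of how __mux_div_get_src_div() and mux_div_recalc_rate() decode the divider field (values illustrative):

	/* A CFG divider field reading 3 with the selected parent running at
	 * 800 MHz gives
	 *   rate = 800 MHz * 2 / (3 + 1) = 400 MHz,
	 * i.e. divide-by-2, matching the reg = 2 * div - 1 encoding used
	 * when the divider is programmed in mux_div_disable(). */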
diff --git a/drivers/clk/qcom/clk-regmap-mux-div.h b/drivers/clk/qcom/clk-regmap-mux-div.h
new file mode 100644
index 000000000000..b887a900ad89
--- /dev/null
+++ b/drivers/clk/qcom/clk-regmap-mux-div.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_REGMAP_MUX_DIV_H__
+#define __QCOM_CLK_REGMAP_MUX_DIV_H__
+
+#include <linux/clk-provider.h>
+#include "clk-regmap.h"
+#include "clk-rcg.h"
+
+/**
+ * struct clk_regmap_mux_div - combined mux/divider clock
+ * @reg_offset: offset of the mux/divider register
+ * @hid_width: number of bits in half integer divider
+ * @hid_shift: lowest bit of hid value field
+ * @src_width: number of bits in source select
+ * @src_shift: lowest bit of source select field
+ * @div: the divider configuration value
+ * @src_sel: the mux index which will be used if the clock is enabled
+ * @safe_src: value for safe source
+ * @safe_freq: When switching rates from A to B, the mux div clock will
+ * instead switch from A -> safe_freq -> B. This allows the
+ * mux_div clock to change rates while enabled, even if this
+ * behavior is not supported by the parent clocks.
+ * If changing the rate of parent A also causes the rate of
+ * parent B to change, then safe_freq must be defined.
+ * safe_freq is expected to have a source clock which is always
+ * on and runs at only one rate.
+ * @parent_map: pointer to parent_map struct
+ * @clkr: handle between common and hardware-specific interfaces
+ */
+struct clk_regmap_mux_div {
+ u32 reg_offset;
+ u32 hid_width;
+ u32 hid_shift;
+ u32 src_width;
+ u32 src_shift;
+ u32 div;
+ u32 src_sel;
+ u32 safe_src;
+ unsigned long safe_freq;
+ const struct parent_map *parent_map;
+ struct clk_regmap clkr;
+};
+
+#define to_clk_regmap_mux_div(_hw) \
+ container_of(to_clk_regmap(_hw), struct clk_regmap_mux_div, clkr)
+
+extern const struct clk_ops clk_regmap_mux_div_ops;
+
+#endif
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
new file mode 100644
index 000000000000..859b709385f6
--- /dev/null
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+#include "clk-smd-rpm.h"
+
+#define to_clk_smd_rpm(_hw) container_of(_hw, struct clk_smd_rpm, hw)
+
+static DEFINE_MUTEX(rpm_clk_lock);
+
+static int clk_smd_rpm_set_rate_active(struct clk_smd_rpm *r,
+ unsigned long value)
+{
+ struct clk_smd_rpm_req req = {
+ .key = r->rpm_key,
+ .nbytes = sizeof(u32),
+ .value = DIV_ROUND_UP(value, 1000), /* RPM expects kHz */
+ };
+
+ return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ r->rpm_res_type, r->rpm_clk_id, &req,
+ sizeof(req));
+}
+
+static int clk_smd_rpm_set_rate_sleep(struct clk_smd_rpm *r,
+ unsigned long value)
+{
+ struct clk_smd_rpm_req req = {
+ .key = r->rpm_key,
+ .nbytes = sizeof(u32),
+ .value = DIV_ROUND_UP(value, 1000), /* RPM expects kHz */
+ };
+
+ return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE,
+ r->rpm_res_type, r->rpm_clk_id, &req,
+ sizeof(req));
+}
+
+static void to_active_sleep(struct clk_smd_rpm *r, unsigned long rate,
+ unsigned long *active, unsigned long *sleep)
+{
+ *active = rate;
+
+ /*
+ * Active-only clocks don't care what the rate is during sleep. So,
+ * they vote for zero.
+ */
+ if (r->active_only)
+ *sleep = 0;
+ else
+ *sleep = *active;
+}
+
+static int clk_smd_rpm_prepare(struct clk_hw *hw)
+{
+ struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+ struct clk_smd_rpm *peer = r->peer;
+ unsigned long this_rate = 0, this_sleep_rate = 0;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ unsigned long active_rate, sleep_rate;
+ int ret = 0;
+
+ mutex_lock(&rpm_clk_lock);
+
+ /* Don't send requests to the RPM if the rate has not been set. */
+ if (!r->rate)
+ goto out;
+
+ to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate);
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate,
+ &peer_rate, &peer_sleep_rate);
+
+ active_rate = max(this_rate, peer_rate);
+
+ if (r->branch)
+ active_rate = !!active_rate;
+
+ ret = clk_smd_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = max(this_sleep_rate, peer_sleep_rate);
+ if (r->branch)
+ sleep_rate = !!sleep_rate;
+
+ ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ /* Undo the active set vote and restore it */
+ ret = clk_smd_rpm_set_rate_active(r, peer_rate);
+
+out:
+ if (!ret)
+ r->enabled = true;
+
+ mutex_unlock(&rpm_clk_lock);
+
+ return ret;
+}
+
+static void clk_smd_rpm_unprepare(struct clk_hw *hw)
+{
+ struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+
+ mutex_lock(&rpm_clk_lock);
+
+ if (r->rate) {
+ struct clk_smd_rpm *peer = r->peer;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+ unsigned long active_rate, sleep_rate;
+ int ret;
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate, &peer_rate,
+ &peer_sleep_rate);
+
+ active_rate = r->branch ? !!peer_rate : peer_rate;
+ ret = clk_smd_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate;
+ ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ goto out;
+ }
+ r->enabled = false;
+
+out:
+ mutex_unlock(&rpm_clk_lock);
+}
+
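+/*
+ * Rate requests are only forwarded to the RPM while the clock is enabled;
+ * otherwise the rate is cached here and sent on the next prepare.
+ */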
+static int clk_smd_rpm_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+ int ret = 0;
+
+ mutex_lock(&rpm_clk_lock);
+
+ if (r->enabled) {
+ struct clk_smd_rpm *peer = r->peer;
+ unsigned long active_rate, sleep_rate;
+ unsigned long this_rate = 0, this_sleep_rate = 0;
+ unsigned long peer_rate = 0, peer_sleep_rate = 0;
+
+ to_active_sleep(r, rate, &this_rate, &this_sleep_rate);
+
+ /* Take peer clock's rate into account only if it's enabled. */
+ if (peer->enabled)
+ to_active_sleep(peer, peer->rate,
+ &peer_rate, &peer_sleep_rate);
+
+ active_rate = max(this_rate, peer_rate);
+ ret = clk_smd_rpm_set_rate_active(r, active_rate);
+ if (ret)
+ goto out;
+
+ sleep_rate = max(this_sleep_rate, peer_sleep_rate);
+ ret = clk_smd_rpm_set_rate_sleep(r, sleep_rate);
+ if (ret)
+ goto out;
+ }
+ r->rate = rate;
+out:
+ mutex_unlock(&rpm_clk_lock);
+
+ return ret;
+}
+
+static long clk_smd_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ /*
+ * RPM handles rate rounding and we don't have a way to
+ * know what the rate will be, so just return whatever
+ * rate is requested.
+ */
+ return rate;
+}
+
+static unsigned long clk_smd_rpm_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_smd_rpm *r = to_clk_smd_rpm(hw);
+
+ /*
+ * RPM handles rate rounding and we don't have a way to
+ * know what the rate will be, so just return whatever
+ * rate was set.
+ */
+ return r->rate;
+}
+
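+/*
+ * Tell the RPM to start honouring clock rate requests in both the sleep
+ * and active sets.
+ */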
+int clk_smd_rpm_enable_scaling(struct qcom_smd_rpm *rpm)
+{
+ int ret;
+ struct clk_smd_rpm_req req = {
+ .key = QCOM_RPM_SMD_KEY_ENABLE,
+ .nbytes = sizeof(u32),
+ .value = 1,
+ };
+
+ ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_SLEEP_STATE,
+ QCOM_SMD_RPM_MISC_CLK,
+ QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req));
+ if (ret) {
+ pr_err("RPM clock scaling (sleep set) not enabled!\n");
+ return ret;
+ }
+
+ ret = qcom_rpm_smd_write(rpm, QCOM_SMD_RPM_ACTIVE_STATE,
+ QCOM_SMD_RPM_MISC_CLK,
+ QCOM_RPM_SCALING_ENABLE_ID, &req, sizeof(req));
+ if (ret) {
+ pr_err("RPM clock scaling (active set) not enabled!\n");
+ return ret;
+ }
+
+ pr_debug("%s: RPM clock scaling is enabled\n", __func__);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(clk_smd_rpm_enable_scaling);
+
+const struct clk_ops clk_smd_rpm_ops = {
+ .prepare = clk_smd_rpm_prepare,
+ .unprepare = clk_smd_rpm_unprepare,
+ .set_rate = clk_smd_rpm_set_rate,
+ .round_rate = clk_smd_rpm_round_rate,
+ .recalc_rate = clk_smd_rpm_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_smd_rpm_ops);
+
+const struct clk_ops clk_smd_rpm_branch_ops = {
+ .prepare = clk_smd_rpm_prepare,
+ .unprepare = clk_smd_rpm_unprepare,
+ .round_rate = clk_smd_rpm_round_rate,
+ .recalc_rate = clk_smd_rpm_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_smd_rpm_branch_ops);
diff --git a/drivers/clk/qcom/clk-smd-rpm.h b/drivers/clk/qcom/clk-smd-rpm.h
new file mode 100644
index 000000000000..d5dafad6b0fc
--- /dev/null
+++ b/drivers/clk/qcom/clk-smd-rpm.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_SMD_RPM_H__
+#define __QCOM_CLK_SMD_RPM_H__
+
+#include <linux/clk-provider.h>
+
+#define QCOM_RPM_KEY_SOFTWARE_ENABLE 0x6e657773
+#define QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY 0x62636370
+#define QCOM_RPM_SMD_KEY_RATE 0x007a484b
+#define QCOM_RPM_SMD_KEY_ENABLE 0x62616e45
+#define QCOM_RPM_SMD_KEY_STATE 0x54415453
+#define QCOM_RPM_SCALING_ENABLE_ID 0x2
+
+struct qcom_smd_rpm;
+
+struct clk_smd_rpm {
+ const int rpm_res_type;
+ const int rpm_key;
+ const int rpm_clk_id;
+ const int rpm_status_id;
+ const bool active_only;
+ bool enabled;
+ bool branch;
+ struct clk_smd_rpm *peer;
+ struct clk_hw hw;
+ unsigned long rate;
+ struct qcom_smd_rpm *rpm;
+};
+
+struct clk_smd_rpm_req {
+ __le32 key;
+ __le32 nbytes;
+ __le32 value;
+};
+
+extern const struct clk_ops clk_smd_rpm_ops;
+extern const struct clk_ops clk_smd_rpm_branch_ops;
+int clk_smd_rpm_enable_scaling(struct qcom_smd_rpm *rpm);
+
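+/*
+ * Each DEFINE_CLK_SMD_RPM* macro below expands to a pair of clocks: a
+ * normal clock and an "active-only" peer. The two reference each other
+ * through ->peer so the active-set and sleep-set votes sent to the RPM
+ * always reflect the maximum of the two requested rates.
+ */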
+#define __DEFINE_CLK_SMD_RPM(_name, active, type, r_id, stat_id, dep, key) \
+ static struct clk_smd_rpm active; \
+ static struct clk_smd_rpm _name = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .rpm_key = (key), \
+ .peer = &active, \
+ .rate = INT_MAX, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_smd_rpm_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_smd_rpm active = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .rpm_key = (key), \
+ .peer = &_name, \
+ .active_only = true, \
+ .rate = INT_MAX, \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_smd_rpm_ops, \
+ .name = #active, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ };
+
+#define __DEFINE_CLK_SMD_RPM_BRANCH(_name, active, type, r_id, stat_id, r, \
+ key) \
+ static struct clk_smd_rpm active; \
+ static struct clk_smd_rpm _name = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .rpm_key = (key), \
+ .peer = &active, \
+ .branch = true, \
+ .rate = (r), \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_smd_rpm_branch_ops, \
+ .name = #_name, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ }; \
+ static struct clk_smd_rpm active = { \
+ .rpm_res_type = (type), \
+ .rpm_clk_id = (r_id), \
+ .rpm_status_id = (stat_id), \
+ .rpm_key = (key), \
+ .peer = &_name, \
+ .active_only = true, \
+ .branch = true, \
+ .rate = (r), \
+ .hw.init = &(struct clk_init_data){ \
+ .ops = &clk_smd_rpm_branch_ops, \
+ .name = #active, \
+ .parent_names = (const char *[]){ "xo_board" }, \
+ .num_parents = 1, \
+ }, \
+ };
+
+#define DEFINE_CLK_SMD_RPM(_name, active, type, r_id, dep) \
+ __DEFINE_CLK_SMD_RPM(_name, active, type, r_id, 0, dep, \
+ QCOM_RPM_SMD_KEY_RATE)
+
+#define DEFINE_CLK_SMD_RPM_BRANCH(_name, active, type, r_id, r) \
+ __DEFINE_CLK_SMD_RPM_BRANCH(_name, active, type, r_id, 0, r, \
+ QCOM_RPM_SMD_KEY_ENABLE)
+
+#define DEFINE_CLK_SMD_RPM_QDSS(_name, active, type, r_id) \
+ __DEFINE_CLK_SMD_RPM(_name, active, type, r_id, \
+ 0, 0, QCOM_RPM_SMD_KEY_STATE)
+
+#define DEFINE_CLK_SMD_RPM_XO_BUFFER(_name, active, r_id) \
+ __DEFINE_CLK_SMD_RPM_BRANCH(_name, active, \
+ QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, 1000, \
+ QCOM_RPM_KEY_SOFTWARE_ENABLE)
+
+#define DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(_name, active, r_id) \
+ __DEFINE_CLK_SMD_RPM_BRANCH(_name, active, \
+ QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, 1000, \
+ QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY)
+
+#endif
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 2dedceefd21d..62490df44e4e 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -22,6 +22,7 @@
#include "clk-rcg.h"
#include "clk-regmap.h"
#include "reset.h"
+#include "gdsc.h"
struct qcom_cc {
struct qcom_reset_controller reset;
@@ -121,8 +122,20 @@ int qcom_cc_really_probe(struct platform_device *pdev,
ret = reset_controller_register(&reset->rcdev);
if (ret)
- of_clk_del_provider(dev->of_node);
+ goto err_reset;
+ if (desc->gdscs && desc->num_gdscs) {
+ ret = gdsc_register(dev, desc->gdscs, desc->num_gdscs, regmap);
+ if (ret)
+ goto err_pd;
+ }
+
+ return 0;
+err_pd:
+ dev_err(dev, "Failed to register power domains\n");
+ reset_controller_unregister(&reset->rcdev);
+err_reset:
+ of_clk_del_provider(dev->of_node);
return ret;
}
EXPORT_SYMBOL_GPL(qcom_cc_really_probe);
@@ -141,6 +154,7 @@ EXPORT_SYMBOL_GPL(qcom_cc_probe);
void qcom_cc_remove(struct platform_device *pdev)
{
+ gdsc_unregister(&pdev->dev);
of_clk_del_provider(pdev->dev.of_node);
reset_controller_unregister(platform_get_drvdata(pdev));
}
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index 7a0e73713063..2892b71fbd71 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -28,6 +28,8 @@ struct qcom_cc_desc {
size_t num_clks;
const struct qcom_reset_map *resets;
size_t num_resets;
+ struct gdsc **gdscs;
+ size_t num_gdscs;
};
extern const struct freq_tbl *qcom_find_freq(const struct freq_tbl *f,
diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c
index 3563019b8e3c..0c93bf8f2da5 100644
--- a/drivers/clk/qcom/gcc-apq8084.c
+++ b/drivers/clk/qcom/gcc-apq8084.c
@@ -31,6 +31,7 @@
#include "clk-rcg.h"
#include "clk-branch.h"
#include "reset.h"
+#include "gdsc.h"
enum {
P_XO,
@@ -3254,6 +3255,34 @@ static struct clk_branch gcc_usb_hsic_system_clk = {
},
};
+static struct gdsc usb_hs_hsic_gdsc = {
+ .gdscr = 0x404,
+ .pd = {
+ .name = "usb_hs_hsic",
+ },
+};
+
+static struct gdsc pcie0_gdsc = {
+ .gdscr = 0x1ac4,
+ .pd = {
+ .name = "pcie0",
+ },
+};
+
+static struct gdsc pcie1_gdsc = {
+ .gdscr = 0x1b44,
+ .pd = {
+ .name = "pcie1",
+ },
+};
+
+static struct gdsc usb30_gdsc = {
+ .gdscr = 0x1e84,
+ .pd = {
+ .name = "usb30",
+ },
+};
+
static struct clk_regmap *gcc_apq8084_clocks[] = {
[GPLL0] = &gpll0.clkr,
[GPLL0_VOTE] = &gpll0_vote,
@@ -3447,6 +3476,13 @@ static struct clk_regmap *gcc_apq8084_clocks[] = {
[GCC_USB_HSIC_SYSTEM_CLK] = &gcc_usb_hsic_system_clk.clkr,
};
+static struct gdsc *gcc_apq8084_gdscs[] = {
+ [USB_HS_HSIC_GDSC] = &usb_hs_hsic_gdsc,
+ [PCIE0_GDSC] = &pcie0_gdsc,
+ [PCIE1_GDSC] = &pcie1_gdsc,
+ [USB30_GDSC] = &usb30_gdsc,
+};
+
static const struct qcom_reset_map gcc_apq8084_resets[] = {
[GCC_SYSTEM_NOC_BCR] = { 0x0100 },
[GCC_CONFIG_NOC_BCR] = { 0x0140 },
@@ -3555,6 +3591,8 @@ static const struct qcom_cc_desc gcc_apq8084_desc = {
.num_clks = ARRAY_SIZE(gcc_apq8084_clocks),
.resets = gcc_apq8084_resets,
.num_resets = ARRAY_SIZE(gcc_apq8084_resets),
+ .gdscs = gcc_apq8084_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_apq8084_gdscs),
};
static const struct of_device_id gcc_apq8084_match_table[] = {
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index 40e480220cd3..c6b59f199e97 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -30,6 +30,7 @@
#include "clk-pll.h"
#include "clk-rcg.h"
#include "clk-branch.h"
+#include "clk-hfpll.h"
#include "reset.h"
static struct clk_pll pll0 = {
@@ -113,6 +114,85 @@ static struct clk_regmap pll8_vote = {
},
};
+static struct hfpll_data hfpll0_data = {
+ .mode_reg = 0x3200,
+ .l_reg = 0x3208,
+ .m_reg = 0x320c,
+ .n_reg = 0x3210,
+ .config_reg = 0x3204,
+ .status_reg = 0x321c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3214,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll0 = {
+ .d = &hfpll0_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll0",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll0.lock),
+};
+
+static struct hfpll_data hfpll1_data = {
+ .mode_reg = 0x3240,
+ .l_reg = 0x3248,
+ .m_reg = 0x324c,
+ .n_reg = 0x3250,
+ .config_reg = 0x3244,
+ .status_reg = 0x325c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3314,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll1 = {
+ .d = &hfpll1_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll1",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll1.lock),
+};
+
+static struct hfpll_data hfpll_l2_data = {
+ .mode_reg = 0x3300,
+ .l_reg = 0x3308,
+ .m_reg = 0x330c,
+ .n_reg = 0x3310,
+ .config_reg = 0x3304,
+ .status_reg = 0x331c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3314,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll_l2 = {
+ .d = &hfpll_l2_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll_l2",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll_l2.lock),
+};
+

static struct clk_pll pll14 = {
.l_reg = 0x31c4,
.m_reg = 0x31c8,
@@ -2837,6 +2917,9 @@ static struct clk_regmap *gcc_ipq806x_clks[] = {
[UBI32_CORE2_CLK_SRC] = &ubi32_core2_src_clk.clkr,
[NSSTCM_CLK_SRC] = &nss_tcm_src.clkr,
[NSSTCM_CLK] = &nss_tcm_clk.clkr,
+ [PLL9] = &hfpll0.clkr,
+ [PLL10] = &hfpll1.clkr,
+ [PLL12] = &hfpll_l2.clkr,
};
static const struct qcom_reset_map gcc_ipq806x_resets[] = {
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index 22a4e1e732c0..ea2345e1abe2 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -31,6 +31,7 @@
#include "clk-rcg.h"
#include "clk-branch.h"
#include "reset.h"
+#include "gdsc.h"
enum {
P_XO,
@@ -44,6 +45,9 @@ enum {
P_SLEEP_CLK,
P_DSI0_PHYPLL_BYTE,
P_DSI0_PHYPLL_DSI,
+ P_EXT_PRI_I2S,
+ P_EXT_SEC_I2S,
+ P_EXT_MCLK,
};
static const struct parent_map gcc_xo_gpll0_map[] = {
@@ -190,6 +194,76 @@ static const char * const gcc_xo_gpll0a_gpll1_gpll2[] = {
"gpll2_vote",
};
+static const struct parent_map gcc_xo_gpll0_gpll1_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL1, 2 },
+ { P_SLEEP_CLK, 6 }
+};
+
+static const char *gcc_xo_gpll0_gpll1_sleep[] = {
+ "xo",
+ "gpll0_vote",
+ "gpll1_vote",
+ "sleep_clk",
+};
+
+static const struct parent_map gcc_xo_gpll1_epi2s_emclk_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL1, 1 },
+ { P_EXT_PRI_I2S, 2 },
+ { P_EXT_MCLK, 3 },
+ { P_SLEEP_CLK, 6 }
+};
+
+static const char *gcc_xo_gpll1_epi2s_emclk_sleep[] = {
+ "xo",
+ "gpll1_vote",
+ "ext_pri_i2s",
+ "ext_mclk",
+ "sleep_clk",
+};
+
+static const struct parent_map gcc_xo_gpll1_esi2s_emclk_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL1, 1 },
+ { P_EXT_SEC_I2S, 2 },
+ { P_EXT_MCLK, 3 },
+ { P_SLEEP_CLK, 6 }
+};
+
+static const char *gcc_xo_gpll1_esi2s_emclk_sleep[] = {
+ "xo",
+ "gpll1_vote",
+ "ext_sec_i2s",
+ "ext_mclk",
+ "sleep_clk",
+};
+
+static const struct parent_map gcc_xo_sleep_map[] = {
+ { P_XO, 0 },
+ { P_SLEEP_CLK, 6 }
+};
+
+static const char *gcc_xo_sleep[] = {
+ "xo",
+ "sleep_clk",
+};
+
+static const struct parent_map gcc_xo_gpll1_emclk_sleep_map[] = {
+ { P_XO, 0 },
+ { P_GPLL1, 1 },
+ { P_EXT_MCLK, 2 },
+ { P_SLEEP_CLK, 6 }
+};
+
+static const char *gcc_xo_gpll1_emclk_sleep[] = {
+ "xo",
+ "gpll1_vote",
+ "ext_mclk",
+ "sleep_clk",
+};
+
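+/* Frequency table entry: rate, source, pre-divider (encoded as 2*h - 1), M, N */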
#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
static struct clk_pll gpll0 = {
@@ -906,21 +980,15 @@ static struct clk_rcg2 gp3_clk_src = {
},
};
-static struct freq_tbl ftbl_gcc_mdss_byte0_clk[] = {
- { .src = P_DSI0_PHYPLL_BYTE },
- { }
-};
-
static struct clk_rcg2 byte0_clk_src = {
.cmd_rcgr = 0x4d044,
.hid_width = 5,
.parent_map = gcc_xo_gpll0a_dsibyte_map,
- .freq_tbl = ftbl_gcc_mdss_byte0_clk,
.clkr.hw.init = &(struct clk_init_data){
.name = "byte0_clk_src",
.parent_names = gcc_xo_gpll0a_dsibyte,
.num_parents = 3,
- .ops = &clk_byte_ops,
+ .ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -968,17 +1036,11 @@ static struct clk_rcg2 mdp_clk_src = {
},
};
-static struct freq_tbl ftbl_gcc_mdss_pclk[] = {
- { .src = P_DSI0_PHYPLL_DSI },
- { }
-};
-
static struct clk_rcg2 pclk0_clk_src = {
.cmd_rcgr = 0x4d000,
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0a_dsiphy_map,
- .freq_tbl = ftbl_gcc_mdss_pclk,
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk0_clk_src",
.parent_names = gcc_xo_gpll0a_dsiphy,
@@ -1094,6 +1156,29 @@ static struct clk_rcg2 apss_tcu_clk_src = {
},
};
+static const struct freq_tbl ftbl_gcc_bimc_gpu_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ F(266500000, P_BIMC, 4, 0, 0),
+ F(533000000, P_BIMC, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 bimc_gpu_clk_src = {
+ .cmd_rcgr = 0x31028,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_bimc_map,
+ .freq_tbl = ftbl_gcc_bimc_gpu_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "bimc_gpu_clk_src",
+ .parent_names = gcc_xo_gpll0_bimc,
+ .num_parents = 3,
+ .flags = CLK_GET_RATE_NOCACHE,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
static const struct freq_tbl ftbl_gcc_usb_hs_system_clk[] = {
F(80000000, P_GPLL0, 10, 0, 0),
{ }
@@ -1112,6 +1197,305 @@ static struct clk_rcg2 usb_hs_system_clk_src = {
},
};
+static const struct freq_tbl ftbl_gcc_ultaudio_ahb_clk[] = {
+ F(3200000, P_XO, 6, 0, 0),
+ F(6400000, P_XO, 3, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(19200000, P_XO, 1, 0, 0),
+ F(40000000, P_GPLL0, 10, 1, 2),
+ F(66670000, P_GPLL0, 12, 0, 0),
+ F(80000000, P_GPLL0, 10, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ultaudio_ahbfabric_clk_src = {
+ .cmd_rcgr = 0x1c010,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_xo_gpll0_gpll1_sleep_map,
+ .freq_tbl = ftbl_gcc_ultaudio_ahb_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ultaudio_ahbfabric_clk_src",
+ .parent_names = gcc_xo_gpll0_gpll1_sleep,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ultaudio_ahbfabric_ixfabric_clk = {
+ .halt_reg = 0x1c028,
+ .clkr = {
+ .enable_reg = 0x1c028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_ahbfabric_ixfabric_clk",
+ .parent_names = (const char *[]){
+ "ultaudio_ahbfabric_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ultaudio_ahbfabric_ixfabric_lpm_clk = {
+ .halt_reg = 0x1c024,
+ .clkr = {
+ .enable_reg = 0x1c024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_ahbfabric_ixfabric_lpm_clk",
+ .parent_names = (const char *[]){
+ "ultaudio_ahbfabric_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ultaudio_lpaif_i2s_clk[] = {
+ F(256000, P_XO, 5, 1, 15),
+ F(512000, P_XO, 5, 2, 15),
+ F(705600, P_GPLL1, 16, 1, 80),
+ F(768000, P_XO, 5, 1, 5),
+ F(800000, P_XO, 5, 5, 24),
+ F(1024000, P_GPLL1, 14, 1, 63),
+ F(1152000, P_XO, 1, 3, 50),
+ F(1411200, P_GPLL1, 16, 1, 40),
+ F(1536000, P_XO, 1, 2, 25),
+ F(1600000, P_XO, 12, 0, 0),
+ F(2048000, P_GPLL1, 9, 1, 49),
+ F(2400000, P_XO, 8, 0, 0),
+ F(2822400, P_GPLL1, 16, 1, 20),
+ F(3072000, P_GPLL1, 14, 1, 21),
+ F(4096000, P_GPLL1, 9, 2, 49),
+ F(4800000, P_XO, 4, 0, 0),
+ F(5644800, P_GPLL1, 16, 1, 10),
+ F(6144000, P_GPLL1, 7, 1, 21),
+ F(8192000, P_GPLL1, 9, 4, 49),
+ F(9600000, P_XO, 2, 0, 0),
+ F(11289600, P_GPLL1, 16, 1, 5),
+ F(12288000, P_GPLL1, 7, 2, 21),
+ { }
+};
+
+static struct clk_rcg2 ultaudio_lpaif_pri_i2s_clk_src = {
+ .cmd_rcgr = 0x1c054,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_xo_gpll1_epi2s_emclk_sleep_map,
+ .freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ultaudio_lpaif_pri_i2s_clk_src",
+ .parent_names = gcc_xo_gpll1_epi2s_emclk_sleep,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ultaudio_lpaif_pri_i2s_clk = {
+ .halt_reg = 0x1c068,
+ .clkr = {
+ .enable_reg = 0x1c068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_lpaif_pri_i2s_clk",
+ .parent_names = (const char *[]){
+ "ultaudio_lpaif_pri_i2s_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_rcg2 ultaudio_lpaif_sec_i2s_clk_src = {
+ .cmd_rcgr = 0x1c06c,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_xo_gpll1_esi2s_emclk_sleep_map,
+ .freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ultaudio_lpaif_sec_i2s_clk_src",
+ .parent_names = gcc_xo_gpll1_esi2s_emclk_sleep,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ultaudio_lpaif_sec_i2s_clk = {
+ .halt_reg = 0x1c080,
+ .clkr = {
+ .enable_reg = 0x1c080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_lpaif_sec_i2s_clk",
+ .parent_names = (const char *[]){
+ "ultaudio_lpaif_sec_i2s_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_rcg2 ultaudio_lpaif_aux_i2s_clk_src = {
+ .cmd_rcgr = 0x1c084,
+ .hid_width = 5,
+ .mnd_width = 8,
+ .parent_map = gcc_xo_gpll1_emclk_sleep_map,
+ .freq_tbl = ftbl_gcc_ultaudio_lpaif_i2s_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ultaudio_lpaif_aux_i2s_clk_src",
+ .parent_names = gcc_xo_gpll1_emclk_sleep,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ultaudio_lpaif_aux_i2s_clk = {
+ .halt_reg = 0x1c098,
+ .clkr = {
+ .enable_reg = 0x1c098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_lpaif_aux_i2s_clk",
+ .parent_names = (const char *[]){
+ "ultaudio_lpaif_aux_i2s_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ultaudio_xo_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 ultaudio_xo_clk_src = {
+ .cmd_rcgr = 0x1c034,
+ .hid_width = 5,
+ .parent_map = gcc_xo_sleep_map,
+ .freq_tbl = ftbl_gcc_ultaudio_xo_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "ultaudio_xo_clk_src",
+ .parent_names = gcc_xo_sleep,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ultaudio_avsync_xo_clk = {
+ .halt_reg = 0x1c04c,
+ .clkr = {
+ .enable_reg = 0x1c04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_avsync_xo_clk",
+ .parent_names = (const char *[]){
+ "ultaudio_xo_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ultaudio_stc_xo_clk = {
+ .halt_reg = 0x1c050,
+ .clkr = {
+ .enable_reg = 0x1c050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_stc_xo_clk",
+ .parent_names = (const char *[]){
+ "ultaudio_xo_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_codec_clk[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(11289600, P_EXT_MCLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 codec_digcodec_clk_src = {
+ .cmd_rcgr = 0x1c09c,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll1_emclk_sleep_map,
+ .freq_tbl = ftbl_codec_clk,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "codec_digcodec_clk_src",
+ .parent_names = gcc_xo_gpll1_emclk_sleep,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_codec_digcodec_clk = {
+ .halt_reg = 0x1c0b0,
+ .clkr = {
+ .enable_reg = 0x1c0b0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_codec_digcodec_clk",
+ .parent_names = (const char *[]){
+ "codec_digcodec_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ultaudio_pcnoc_mport_clk = {
+ .halt_reg = 0x1c000,
+ .clkr = {
+ .enable_reg = 0x1c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_pcnoc_mport_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ultaudio_pcnoc_sway_clk = {
+ .halt_reg = 0x1c004,
+ .clkr = {
+ .enable_reg = 0x1c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ultaudio_pcnoc_sway_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_bfdcd_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static const struct freq_tbl ftbl_gcc_venus0_vcodec0_clk[] = {
F(100000000, P_GPLL0, 8, 0, 0),
F(160000000, P_GPLL0, 5, 0, 0),
@@ -2358,6 +2742,51 @@ static struct clk_branch gcc_sdcc2_apps_clk = {
},
};
+static struct clk_rcg2 bimc_ddr_clk_src = {
+ .cmd_rcgr = 0x32004,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_bimc_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "bimc_ddr_clk_src",
+ .parent_names = gcc_xo_gpll0_bimc,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_branch gcc_apss_tcu_clk = {
+ .halt_reg = 0x12018,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apss_tcu_clk",
+ .parent_names = (const char *[]){
+ "bimc_ddr_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gfx_tcu_clk = {
+ .halt_reg = 0x12020,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gfx_tcu_clk",
+ .parent_names = (const char *[]){
+ "bimc_ddr_clk_src",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_gtcu_ahb_clk = {
.halt_reg = 0x12044,
.clkr = {
@@ -2375,6 +2804,40 @@ static struct clk_branch gcc_gtcu_ahb_clk = {
},
};
+static struct clk_branch gcc_bimc_gfx_clk = {
+ .halt_reg = 0x31024,
+ .clkr = {
+ .enable_reg = 0x31024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_gfx_clk",
+ .parent_names = (const char *[]){
+ "bimc_gpu_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_bimc_gpu_clk = {
+ .halt_reg = 0x31040,
+ .clkr = {
+ .enable_reg = 0x31040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_gpu_clk",
+ .parent_names = (const char *[]){
+ "bimc_gpu_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_jpeg_tbu_clk = {
.halt_reg = 0x12034,
.clkr = {
@@ -2562,6 +3025,42 @@ static struct clk_branch gcc_venus0_vcodec0_clk = {
},
};
+static struct gdsc venus_gdsc = {
+ .gdscr = 0x4c018,
+ .pd = {
+ .name = "venus",
+ },
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x4d078,
+ .pd = {
+ .name = "mdss",
+ },
+};
+
+static struct gdsc jpeg_gdsc = {
+ .gdscr = 0x5701c,
+ .pd = {
+ .name = "jpeg",
+ },
+};
+
+static struct gdsc vfe_gdsc = {
+ .gdscr = 0x58034,
+ .pd = {
+ .name = "vfe",
+ },
+};
+
+static struct gdsc oxili_gdsc = {
+ .gdscr = 0x5901c,
+ .pd = {
+ .name = "oxili",
+ },
+ .root_con_id = "gfx3d_clk_src",
+};
+
static struct clk_regmap *gcc_msm8916_clocks[] = {
[GPLL0] = &gpll0.clkr,
[GPLL0_VOTE] = &gpll0_vote,
@@ -2701,6 +3200,36 @@ static struct clk_regmap *gcc_msm8916_clocks[] = {
[GCC_VENUS0_AHB_CLK] = &gcc_venus0_ahb_clk.clkr,
[GCC_VENUS0_AXI_CLK] = &gcc_venus0_axi_clk.clkr,
[GCC_VENUS0_VCODEC0_CLK] = &gcc_venus0_vcodec0_clk.clkr,
+ [BIMC_DDR_CLK_SRC] = &bimc_ddr_clk_src.clkr,
+ [GCC_APSS_TCU_CLK] = &gcc_apss_tcu_clk.clkr,
+ [GCC_GFX_TCU_CLK] = &gcc_gfx_tcu_clk.clkr,
+ [BIMC_GPU_CLK_SRC] = &bimc_gpu_clk_src.clkr,
+ [GCC_BIMC_GFX_CLK] = &gcc_bimc_gfx_clk.clkr,
+ [GCC_BIMC_GPU_CLK] = &gcc_bimc_gpu_clk.clkr,
+ [ULTAUDIO_AHBFABRIC_CLK_SRC] = &ultaudio_ahbfabric_clk_src.clkr,
+ [ULTAUDIO_LPAIF_PRI_I2S_CLK_SRC] = &ultaudio_lpaif_pri_i2s_clk_src.clkr,
+ [ULTAUDIO_LPAIF_SEC_I2S_CLK_SRC] = &ultaudio_lpaif_sec_i2s_clk_src.clkr,
+ [ULTAUDIO_LPAIF_AUX_I2S_CLK_SRC] = &ultaudio_lpaif_aux_i2s_clk_src.clkr,
+ [ULTAUDIO_XO_CLK_SRC] = &ultaudio_xo_clk_src.clkr,
+ [CODEC_DIGCODEC_CLK_SRC] = &codec_digcodec_clk_src.clkr,
+ [GCC_ULTAUDIO_PCNOC_MPORT_CLK] = &gcc_ultaudio_pcnoc_mport_clk.clkr,
+ [GCC_ULTAUDIO_PCNOC_SWAY_CLK] = &gcc_ultaudio_pcnoc_sway_clk.clkr,
+ [GCC_ULTAUDIO_AVSYNC_XO_CLK] = &gcc_ultaudio_avsync_xo_clk.clkr,
+ [GCC_ULTAUDIO_STC_XO_CLK] = &gcc_ultaudio_stc_xo_clk.clkr,
+ [GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK] = &gcc_ultaudio_ahbfabric_ixfabric_clk.clkr,
+ [GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_LPM_CLK] = &gcc_ultaudio_ahbfabric_ixfabric_lpm_clk.clkr,
+ [GCC_ULTAUDIO_LPAIF_PRI_I2S_CLK] = &gcc_ultaudio_lpaif_pri_i2s_clk.clkr,
+ [GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK] = &gcc_ultaudio_lpaif_sec_i2s_clk.clkr,
+ [GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK] = &gcc_ultaudio_lpaif_aux_i2s_clk.clkr,
+ [GCC_CODEC_DIGCODEC_CLK] = &gcc_codec_digcodec_clk.clkr,
+};
+
+static struct gdsc *gcc_msm8916_gdscs[] = {
+ [VENUS_GDSC] = &venus_gdsc,
+ [MDSS_GDSC] = &mdss_gdsc,
+ [JPEG_GDSC] = &jpeg_gdsc,
+ [VFE_GDSC] = &vfe_gdsc,
+ [OXILI_GDSC] = &oxili_gdsc,
};
static const struct qcom_reset_map gcc_msm8916_resets[] = {
@@ -2810,6 +3339,8 @@ static const struct qcom_cc_desc gcc_msm8916_desc = {
.num_clks = ARRAY_SIZE(gcc_msm8916_clocks),
.resets = gcc_msm8916_resets,
.num_resets = ARRAY_SIZE(gcc_msm8916_resets),
+ .gdscs = gcc_msm8916_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_msm8916_gdscs),
};
static const struct of_device_id gcc_msm8916_match_table[] = {
@@ -2823,15 +3354,15 @@ static int gcc_msm8916_probe(struct platform_device *pdev)
struct clk *clk;
struct device *dev = &pdev->dev;
- /* Temporary until RPM clocks supported */
- clk = clk_register_fixed_rate(dev, "xo", NULL, CLK_IS_ROOT, 19200000);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ if (!IS_ENABLED(CONFIG_QCOM_RPMCC)) {
+ /* RPM clocks are not enabled */
+ clk = clk_register_fixed_factor(dev, "xo", "xo_board", 0, 1, 1);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
- clk = clk_register_fixed_rate(dev, "sleep_clk_src", NULL,
- CLK_IS_ROOT, 32768);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ clk_register_fixed_rate(dev, "sleep_clk_src", NULL,
+ CLK_IS_ROOT, 32768);
+ }
return qcom_cc_probe(pdev, &gcc_msm8916_desc);
}
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index aa294b1bad34..e7cdf482d427 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -30,6 +30,7 @@
#include "clk-pll.h"
#include "clk-rcg.h"
#include "clk-branch.h"
+#include "clk-hfpll.h"
#include "reset.h"
static struct clk_pll pll3 = {
@@ -86,6 +87,164 @@ static struct clk_regmap pll8_vote = {
},
};
+static struct hfpll_data hfpll0_data = {
+ .mode_reg = 0x3200,
+ .l_reg = 0x3208,
+ .m_reg = 0x320c,
+ .n_reg = 0x3210,
+ .config_reg = 0x3204,
+ .status_reg = 0x321c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3214,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll0 = {
+ .d = &hfpll0_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll0",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll0.lock),
+};
+
+static struct hfpll_data hfpll1_8064_data = {
+ .mode_reg = 0x3240,
+ .l_reg = 0x3248,
+ .m_reg = 0x324c,
+ .n_reg = 0x3250,
+ .config_reg = 0x3244,
+ .status_reg = 0x325c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3254,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct hfpll_data hfpll1_data = {
+ .mode_reg = 0x3300,
+ .l_reg = 0x3308,
+ .m_reg = 0x330c,
+ .n_reg = 0x3310,
+ .config_reg = 0x3304,
+ .status_reg = 0x331c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3314,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll1 = {
+ .d = &hfpll1_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll1",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll1.lock),
+};
+
+static struct hfpll_data hfpll2_data = {
+ .mode_reg = 0x3280,
+ .l_reg = 0x3288,
+ .m_reg = 0x328c,
+ .n_reg = 0x3290,
+ .config_reg = 0x3284,
+ .status_reg = 0x329c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3294,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll2 = {
+ .d = &hfpll2_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll2",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll2.lock),
+};
+
+static struct hfpll_data hfpll3_data = {
+ .mode_reg = 0x32c0,
+ .l_reg = 0x32c8,
+ .m_reg = 0x32cc,
+ .n_reg = 0x32d0,
+ .config_reg = 0x32c4,
+ .status_reg = 0x32dc,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x32d4,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll3 = {
+ .d = &hfpll3_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll3",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll3.lock),
+};
+
+static struct hfpll_data hfpll_l2_8064_data = {
+ .mode_reg = 0x3300,
+ .l_reg = 0x3308,
+ .m_reg = 0x330c,
+ .n_reg = 0x3310,
+ .config_reg = 0x3304,
+ .status_reg = 0x331c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3314,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct hfpll_data hfpll_l2_data = {
+ .mode_reg = 0x3400,
+ .l_reg = 0x3408,
+ .m_reg = 0x340c,
+ .n_reg = 0x3410,
+ .config_reg = 0x3404,
+ .status_reg = 0x341c,
+ .config_val = 0x7845c665,
+ .droop_reg = 0x3414,
+ .droop_val = 0x0108c000,
+ .min_rate = 600000000UL,
+ .max_rate = 1800000000UL,
+};
+
+static struct clk_hfpll hfpll_l2 = {
+ .d = &hfpll_l2_data,
+ .clkr.hw.init = &(struct clk_init_data){
+ .parent_names = (const char *[]){ "pxo" },
+ .num_parents = 1,
+ .name = "hfpll_l2",
+ .ops = &clk_ops_hfpll,
+ .flags = CLK_IGNORE_UNUSED,
+ },
+ .lock = __SPIN_LOCK_UNLOCKED(hfpll_l2.lock),
+};
+
static struct clk_pll pll14 = {
.l_reg = 0x31c4,
.m_reg = 0x31c8,
@@ -3154,6 +3313,9 @@ static struct clk_regmap *gcc_msm8960_clks[] = {
[PMIC_ARB1_H_CLK] = &pmic_arb1_h_clk.clkr,
[PMIC_SSBI2_CLK] = &pmic_ssbi2_clk.clkr,
[RPM_MSG_RAM_H_CLK] = &rpm_msg_ram_h_clk.clkr,
+ [PLL9] = &hfpll0.clkr,
+ [PLL10] = &hfpll1.clkr,
+ [PLL12] = &hfpll_l2.clkr,
};
static const struct qcom_reset_map gcc_msm8960_resets[] = {
@@ -3365,6 +3527,11 @@ static struct clk_regmap *gcc_apq8064_clks[] = {
[PMIC_ARB1_H_CLK] = &pmic_arb1_h_clk.clkr,
[PMIC_SSBI2_CLK] = &pmic_ssbi2_clk.clkr,
[RPM_MSG_RAM_H_CLK] = &rpm_msg_ram_h_clk.clkr,
+ [PLL9] = &hfpll0.clkr,
+ [PLL10] = &hfpll1.clkr,
+ [PLL12] = &hfpll_l2.clkr,
+ [PLL16] = &hfpll2.clkr,
+ [PLL17] = &hfpll3.clkr,
};
static const struct qcom_reset_map gcc_apq8064_resets[] = {
@@ -3503,28 +3670,39 @@ MODULE_DEVICE_TABLE(of, gcc_msm8960_match_table);
static int gcc_msm8960_probe(struct platform_device *pdev)
{
- struct clk *clk;
- struct device *dev = &pdev->dev;
const struct of_device_id *match;
+ struct platform_device *tsens;
+ int ret;
match = of_match_device(gcc_msm8960_match_table, &pdev->dev);
if (!match)
return -EINVAL;
- /* Temporary until RPM clocks supported */
- clk = clk_register_fixed_rate(dev, "cxo", NULL, CLK_IS_ROOT, 19200000);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ if (match->data == &gcc_apq8064_desc) {
+ hfpll1.d = &hfpll1_8064_data;
+ hfpll_l2.d = &hfpll_l2_8064_data;
+ }
+
+ ret = qcom_cc_probe(pdev, match->data);
+ if (ret)
+ return ret;
- clk = clk_register_fixed_rate(dev, "pxo", NULL, CLK_IS_ROOT, 27000000);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ tsens = platform_device_register_data(&pdev->dev, "qcom-tsens", -1,
+ NULL, 0);
+ if (IS_ERR(tsens)) {
+ qcom_cc_remove(pdev);
+ return PTR_ERR(tsens);
+ }
+ platform_set_drvdata(pdev, tsens);
- return qcom_cc_probe(pdev, match->data);
+ return 0;
}
static int gcc_msm8960_remove(struct platform_device *pdev)
{
+ struct platform_device *tsens = platform_get_drvdata(pdev);
+
+ platform_device_unregister(tsens);
qcom_cc_remove(pdev);
return 0;
}
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
index 2bcf87538f9d..c9c010fcf650 100644
--- a/drivers/clk/qcom/gcc-msm8974.c
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -31,6 +31,7 @@
#include "clk-rcg.h"
#include "clk-branch.h"
#include "reset.h"
+#include "gdsc.h"
enum {
P_XO,
@@ -2432,6 +2433,13 @@ static struct clk_branch gcc_usb_hsic_system_clk = {
},
};
+static struct gdsc usb_hs_hsic_gdsc = {
+ .gdscr = 0x404,
+ .pd = {
+ .name = "usb_hs_hsic",
+ },
+};
+
static struct clk_regmap *gcc_msm8974_clocks[] = {
[GPLL0] = &gpll0.clkr,
[GPLL0_VOTE] = &gpll0_vote,
@@ -2661,6 +2669,10 @@ static const struct qcom_reset_map gcc_msm8974_resets[] = {
[GCC_VENUS_RESTART] = { 0x1740 },
};
+static struct gdsc *gcc_msm8974_gdscs[] = {
+ [USB_HS_HSIC_GDSC] = &usb_hs_hsic_gdsc,
+};
+
static const struct regmap_config gcc_msm8974_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -2675,6 +2687,8 @@ static const struct qcom_cc_desc gcc_msm8974_desc = {
.num_clks = ARRAY_SIZE(gcc_msm8974_clocks),
.resets = gcc_msm8974_resets,
.num_resets = ARRAY_SIZE(gcc_msm8974_resets),
+ .gdscs = gcc_msm8974_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_msm8974_gdscs),
};
static const struct of_device_id gcc_msm8974_match_table[] = {
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
new file mode 100644
index 000000000000..480ebf6e0657
--- /dev/null
+++ b/drivers/clk/qcom/gdsc.c
@@ -0,0 +1,228 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/jiffies.h>
+#include <linux/pm_clock.h>
+#include <linux/slab.h>
+#include "gdsc.h"
+
+#define PWR_ON_MASK BIT(31)
+#define EN_REST_WAIT_MASK GENMASK(23, 20)
+#define EN_FEW_WAIT_MASK GENMASK(19, 16)
+#define CLK_DIS_WAIT_MASK GENMASK(15, 12)
+#define SW_OVERRIDE_MASK BIT(2)
+#define HW_CONTROL_MASK BIT(1)
+#define SW_COLLAPSE_MASK BIT(0)
+
+/* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
+#define EN_REST_WAIT_VAL (0x2 << 20)
+#define EN_FEW_WAIT_VAL (0x8 << 16)
+#define CLK_DIS_WAIT_VAL (0x2 << 12)
+
+#define TIMEOUT_US 100
+
+#define domain_to_gdsc(domain) container_of(domain, struct gdsc, pd)
+
+static int gdsc_is_enabled(struct gdsc *sc)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(sc->regmap, sc->gdscr, &val);
+ if (ret)
+ return ret;
+
+ return !!(val & PWR_ON_MASK);
+}
+
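+/*
+ * Flip SW_COLLAPSE and poll the PWR_ON status bit until the GDSC reaches
+ * the requested state or the timeout expires.
+ */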
+static int gdsc_toggle_logic(struct gdsc *sc, bool en)
+{
+ int ret;
+ u32 val = en ? 0 : SW_COLLAPSE_MASK;
+ u32 check = en ? PWR_ON_MASK : 0;
+ unsigned long timeout;
+
+ ret = regmap_update_bits(sc->regmap, sc->gdscr, SW_COLLAPSE_MASK, val);
+ if (ret)
+ return ret;
+
+ timeout = jiffies + usecs_to_jiffies(TIMEOUT_US);
+ do {
+ ret = regmap_read(sc->regmap, sc->gdscr, &val);
+ if (ret)
+ return ret;
+
+ if ((val & PWR_ON_MASK) == check)
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+ ret = regmap_read(sc->regmap, sc->gdscr, &val);
+ if (ret)
+ return ret;
+
+ if ((val & PWR_ON_MASK) == check)
+ return 0;
+
+ return -ETIMEDOUT;
+}
+
+static int gdsc_enable(struct generic_pm_domain *domain)
+{
+ struct gdsc *sc = domain_to_gdsc(domain);
+ int ret;
+
+ if (sc->root_clk)
+ clk_prepare_enable(sc->root_clk);
+
+ ret = gdsc_toggle_logic(sc, true);
+ if (ret)
+ return ret;
+ /*
+ * If clocks to this power domain were already on, they will take an
+ * additional 4 clock cycles to re-enable after the power domain is
+ * enabled. Delay to account for this. A delay is also needed to ensure
+ * clocks are not enabled within 400ns of enabling power to the
+ * memories.
+ */
+ udelay(1);
+
+ return 0;
+}
+
+static int gdsc_disable(struct generic_pm_domain *domain)
+{
+ int ret;
+ struct gdsc *sc = domain_to_gdsc(domain);
+
+ ret = gdsc_toggle_logic(sc, false);
+
+ if (sc->root_clk)
+ clk_disable_unprepare(sc->root_clk);
+
+ return ret;
+}
+
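+/*
+ * When a device attaches to the domain, register its con_id clocks with
+ * pm_clk so runtime PM gates them together with the GDSC, and look up the
+ * optional root clock that must be running while the GDSC is switched.
+ */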
+static int gdsc_attach(struct generic_pm_domain *domain, struct device *dev)
+{
+ int ret;
+ struct gdsc *sc = domain_to_gdsc(domain);
+ char **con_id;
+
+ if (!sc->con_ids[0])
+ return 0;
+
+ ret = pm_clk_create(dev);
+ if (ret) {
+ dev_err(dev, "pm_clk_create failed %d\n", ret);
+ return ret;
+ }
+
+ for (con_id = sc->con_ids; *con_id; con_id++) {
+ ret = pm_clk_add(dev, *con_id);
+ if (ret) {
+ dev_err(dev, "pm_clk_add failed %d\n", ret);
+ goto fail;
+ }
+ }
+
+ if (sc->root_con_id) {
+ sc->root_clk = clk_get(dev, sc->root_con_id);
+ if (IS_ERR(sc->root_clk)) {
+ dev_err(dev, "failed to get root clock\n");
+ return PTR_ERR(sc->root_clk);
+ }
+ }
+
+ return 0;
+fail:
+ pm_clk_destroy(dev);
+ return ret;
+}
+
+static void gdsc_detach(struct generic_pm_domain *domain, struct device *dev)
+{
+ struct gdsc *sc = domain_to_gdsc(domain);
+
+ if (!sc->con_ids[0])
+ return;
+
+ pm_clk_destroy(dev);
+}
+
+static int gdsc_init(struct gdsc *sc)
+{
+ u32 mask, val;
+ int on, ret;
+
+ /*
+ * Disable HW trigger: collapse/restore occur based on register writes.
+ * Disable SW override: Use hardware state-machine for sequencing.
+ * Configure wait time between states.
+ */
+ mask = HW_CONTROL_MASK | SW_OVERRIDE_MASK |
+ EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK;
+ val = EN_REST_WAIT_VAL | EN_FEW_WAIT_VAL | CLK_DIS_WAIT_VAL;
+ ret = regmap_update_bits(sc->regmap, sc->gdscr, mask, val);
+ if (ret)
+ return ret;
+
+ on = gdsc_is_enabled(sc);
+ if (on < 0)
+ return on;
+
+ sc->pd.power_off = gdsc_disable;
+ sc->pd.power_on = gdsc_enable;
+ sc->pd.attach_dev = gdsc_attach;
+ sc->pd.detach_dev = gdsc_detach;
+ sc->pd.flags = GENPD_FLAG_PM_CLK;
+ pm_genpd_init(&sc->pd, NULL, !on);
+
+ return 0;
+}
+
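+/*
+ * Initialize each GDSC against the controller's regmap and expose them as a
+ * one-cell genpd provider so consumers can reference power domains from DT.
+ */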
+int gdsc_register(struct device *dev, struct gdsc **scs, size_t num,
+ struct regmap *regmap)
+{
+ int i, ret;
+ struct genpd_onecell_data *data;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->domains = devm_kcalloc(dev, num, sizeof(*data->domains),
+ GFP_KERNEL);
+ if (!data->domains)
+ return -ENOMEM;
+
+ data->num_domains = num;
+ for (i = 0; i < num; i++) {
+ if (!scs[i])
+ continue;
+ scs[i]->regmap = regmap;
+ ret = gdsc_init(scs[i]);
+ if (ret)
+ return ret;
+ data->domains[i] = &scs[i]->pd;
+ }
+
+ return of_genpd_add_provider_onecell(dev->of_node, data);
+}
+
+void gdsc_unregister(struct device *dev)
+{
+ of_genpd_del_provider(dev->of_node);
+}
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
new file mode 100644
index 000000000000..1ad9d53fd776
--- /dev/null
+++ b/drivers/clk/qcom/gdsc.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_GDSC_H__
+#define __QCOM_GDSC_H__
+
+#include <linux/clk.h>
+#include <linux/pm_domain.h>
+#include <linux/regmap.h>
+
+/**
+ * struct gdsc - Globally Distributed Switch Controller
+ * @pd: generic power domain
+ * @regmap: regmap for MMIO accesses
+ * @gdscr: gdsc control register
+ * @root_con_id: con_id of the root clock kept enabled while the gdsc is on
+ * @root_clk: root clock looked up from @root_con_id when a device attaches
+ * @con_ids: list of clocks to be controlled for the gdsc
+ */
+struct gdsc {
+ struct generic_pm_domain pd;
+ struct regmap *regmap;
+ unsigned int gdscr;
+ char *root_con_id;
+ struct clk *root_clk;
+ char *con_ids[];
+};
+
+#ifdef CONFIG_QCOM_GDSC
+int gdsc_register(struct device *, struct gdsc **, size_t n, struct regmap *);
+void gdsc_unregister(struct device *);
+#else
+static inline int gdsc_register(struct device *d, struct gdsc **g, size_t n,
+ struct regmap *r)
+{
+ return -ENOSYS;
+}
+
+static inline void gdsc_unregister(struct device *d)
+{};
+#endif /* CONFIG_QCOM_GDSC */
+#endif /* __QCOM_GDSC_H__ */
diff --git a/drivers/clk/qcom/hfpll.c b/drivers/clk/qcom/hfpll.c
new file mode 100644
index 000000000000..1492f4c79c35
--- /dev/null
+++ b/drivers/clk/qcom/hfpll.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include "clk-regmap.h"
+#include "clk-hfpll.h"
+
+static const struct hfpll_data hdata = {
+ .mode_reg = 0x00,
+ .l_reg = 0x04,
+ .m_reg = 0x08,
+ .n_reg = 0x0c,
+ .user_reg = 0x10,
+ .config_reg = 0x14,
+ .config_val = 0x430405d,
+ .status_reg = 0x1c,
+ .lock_bit = 16,
+
+ .user_val = 0x8,
+ .user_vco_mask = 0x100000,
+ .low_vco_max_rate = 1248000000,
+ .min_rate = 537600000UL,
+ .max_rate = 2900000000UL,
+};
+
+static const struct of_device_id qcom_hfpll_match_table[] = {
+ { .compatible = "qcom,hfpll" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_hfpll_match_table);
+
+static const struct regmap_config hfpll_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x30,
+ .fast_io = true,
+};
+
+static int qcom_hfpll_probe(struct platform_device *pdev)
+{
+ struct clk *clk;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ void __iomem *base;
+ struct regmap *regmap;
+ struct clk_hfpll *h;
+ struct clk_init_data init = {
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .ops = &clk_ops_hfpll,
+ };
+
+ h = devm_kzalloc(dev, sizeof(*h), GFP_KERNEL);
+ if (!h)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap = devm_regmap_init_mmio(&pdev->dev, base, &hfpll_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ if (of_property_read_string_index(dev->of_node, "clock-output-names",
+ 0, &init.name))
+ return -ENODEV;
+
+ h->d = &hdata;
+ h->clkr.hw.init = &init;
+ spin_lock_init(&h->lock);
+
+ clk = devm_clk_register_regmap(&pdev->dev, &h->clkr);
+
+ return PTR_ERR_OR_ZERO(clk);
+}
+
+static struct platform_driver qcom_hfpll_driver = {
+ .probe = qcom_hfpll_probe,
+ .driver = {
+ .name = "qcom-hfpll",
+ .of_match_table = qcom_hfpll_match_table,
+ },
+};
+module_platform_driver(qcom_hfpll_driver);
+
+MODULE_DESCRIPTION("QCOM HFPLL Clock Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-hfpll");
diff --git a/drivers/clk/qcom/kpss-xcc.c b/drivers/clk/qcom/kpss-xcc.c
new file mode 100644
index 000000000000..abf6bfd053c1
--- /dev/null
+++ b/drivers/clk/qcom/kpss-xcc.c
@@ -0,0 +1,95 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
+static const char *aux_parents[] = {
+ "pll8_vote",
+ "pxo",
+};
+
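+/* Hardware mux select values for the aux_parents above: pll8_vote = 3, pxo = 0 */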
+static unsigned int aux_parent_map[] = {
+ 3,
+ 0,
+};
+
+static const struct of_device_id kpss_xcc_match_table[] = {
+ { .compatible = "qcom,kpss-acc-v1", .data = (void *)1UL },
+ { .compatible = "qcom,kpss-gcc" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, kpss_xcc_match_table);
+
+static int kpss_xcc_driver_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *id;
+ struct clk *clk;
+ struct resource *res;
+ void __iomem *base;
+ const char *name;
+
+ id = of_match_device(kpss_xcc_match_table, &pdev->dev);
+ if (!id)
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ if (id->data) {
+ if (of_property_read_string_index(pdev->dev.of_node,
+ "clock-output-names", 0, &name))
+ return -ENODEV;
+ base += 0x14;
+ } else {
+ name = "acpu_l2_aux";
+ base += 0x28;
+ }
+
+ clk = clk_register_mux_table(&pdev->dev, name, aux_parents,
+ ARRAY_SIZE(aux_parents), 0, base, 0, 0x3,
+ 0, aux_parent_map, NULL);
+
+ platform_set_drvdata(pdev, clk);
+
+ return PTR_ERR_OR_ZERO(clk);
+}
+
+static int kpss_xcc_driver_remove(struct platform_device *pdev)
+{
+ clk_unregister_mux(platform_get_drvdata(pdev));
+ return 0;
+}
+
+static struct platform_driver kpss_xcc_driver = {
+ .probe = kpss_xcc_driver_probe,
+ .remove = kpss_xcc_driver_remove,
+ .driver = {
+ .name = "kpss-xcc",
+ .of_match_table = kpss_xcc_match_table,
+ },
+};
+module_platform_driver(kpss_xcc_driver);
+
+MODULE_DESCRIPTION("Krait Processor Sub System (KPSS) Clock Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:kpss-xcc");
diff --git a/drivers/clk/qcom/krait-cc.c b/drivers/clk/qcom/krait-cc.c
new file mode 100644
index 000000000000..f55b5ecd0df8
--- /dev/null
+++ b/drivers/clk/qcom/krait-cc.c
@@ -0,0 +1,352 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+
+#include "clk-krait.h"
+
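+/* Hardware select values for the Krait secondary and primary mux parents */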
+static unsigned int sec_mux_map[] = {
+ 2,
+ 0,
+};
+
+static unsigned int pri_mux_map[] = {
+ 1,
+ 2,
+ 0,
+};
+
+static int
+krait_add_div(struct device *dev, int id, const char *s, unsigned offset)
+{
+ struct krait_div2_clk *div;
+ struct clk_init_data init = {
+ .num_parents = 1,
+ .ops = &krait_div2_clk_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ };
+ const char *p_names[1];
+ struct clk *clk;
+
+ div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
+ if (!div)
+ return -ENOMEM;
+
+ div->width = 2;
+ div->shift = 6;
+ div->lpl = id >= 0;
+ div->offset = offset;
+ div->hw.init = &init;
+
+ init.name = kasprintf(GFP_KERNEL, "hfpll%s_div", s);
+ if (!init.name)
+ return -ENOMEM;
+
+ init.parent_names = p_names;
+ p_names[0] = kasprintf(GFP_KERNEL, "hfpll%s", s);
+ if (!p_names[0]) {
+ kfree(init.name);
+ return -ENOMEM;
+ }
+
+ clk = devm_clk_register(dev, &div->hw);
+ kfree(p_names[0]);
+ kfree(init.name);
+
+ return PTR_ERR_OR_ZERO(clk);
+}
+
+static int
+krait_add_sec_mux(struct device *dev, int id, const char *s, unsigned offset,
+ bool unique_aux)
+{
+ struct krait_mux_clk *mux;
+ static const char *sec_mux_list[] = {
+ "acpu_aux",
+ "qsb",
+ };
+ struct clk_init_data init = {
+ .parent_names = sec_mux_list,
+ .num_parents = ARRAY_SIZE(sec_mux_list),
+ .ops = &krait_mux_clk_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ };
+ struct clk *clk;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return -ENOMEM;
+
+ mux->offset = offset;
+ mux->lpl = id >= 0;
+ mux->has_safe_parent = true;
+ mux->safe_sel = 2;
+ mux->mask = 0x3;
+ mux->shift = 2;
+ mux->parent_map = sec_mux_map;
+ mux->hw.init = &init;
+
+ init.name = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s);
+ if (!init.name)
+ return -ENOMEM;
+
+ if (unique_aux) {
+ sec_mux_list[0] = kasprintf(GFP_KERNEL, "acpu%s_aux", s);
+ if (!sec_mux_list[0]) {
+ clk = ERR_PTR(-ENOMEM);
+ goto err_aux;
+ }
+ }
+
+ clk = devm_clk_register(dev, &mux->hw);
+
+ if (unique_aux)
+ kfree(sec_mux_list[0]);
+err_aux:
+ kfree(init.name);
+ return PTR_ERR_OR_ZERO(clk);
+}
+
+static struct clk *
+krait_add_pri_mux(struct device *dev, int id, const char *s, unsigned offset)
+{
+ struct krait_mux_clk *mux;
+ const char *p_names[3];
+ struct clk_init_data init = {
+ .parent_names = p_names,
+ .num_parents = ARRAY_SIZE(p_names),
+ .ops = &krait_mux_clk_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ };
+ struct clk *clk;
+
+ mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
+ if (!mux)
+ return ERR_PTR(-ENOMEM);
+
+ mux->has_safe_parent = true;
+ mux->safe_sel = 0;
+ mux->mask = 0x3;
+ mux->shift = 0;
+ mux->offset = offset;
+ mux->lpl = id >= 0;
+ mux->parent_map = pri_mux_map;
+ mux->hw.init = &init;
+
+ init.name = kasprintf(GFP_KERNEL, "krait%s_pri_mux", s);
+ if (!init.name)
+ return ERR_PTR(-ENOMEM);
+
+ p_names[0] = kasprintf(GFP_KERNEL, "hfpll%s", s);
+ if (!p_names[0]) {
+ clk = ERR_PTR(-ENOMEM);
+ goto err_p0;
+ }
+
+ p_names[1] = kasprintf(GFP_KERNEL, "hfpll%s_div", s);
+ if (!p_names[1]) {
+ clk = ERR_PTR(-ENOMEM);
+ goto err_p1;
+ }
+
+ p_names[2] = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s);
+ if (!p_names[2]) {
+ clk = ERR_PTR(-ENOMEM);
+ goto err_p2;
+ }
+
+ clk = devm_clk_register(dev, &mux->hw);
+
+ kfree(p_names[2]);
+err_p2:
+ kfree(p_names[1]);
+err_p1:
+ kfree(p_names[0]);
+err_p0:
+ kfree(init.name);
+ return clk;
+}
+
+/* id < 0 for L2, otherwise id == physical CPU number */
+static struct clk *krait_add_clks(struct device *dev, int id, bool unique_aux)
+{
+ int ret;
+ unsigned offset;
+ void *p = NULL;
+ const char *s;
+ struct clk *clk;
+
+ if (id >= 0) {
+ offset = 0x4501 + (0x1000 * id);
+ s = p = kasprintf(GFP_KERNEL, "%d", id);
+ if (!s)
+ return ERR_PTR(-ENOMEM);
+ } else {
+ offset = 0x500;
+ s = "_l2";
+ }
+
+ ret = krait_add_div(dev, id, s, offset);
+ if (ret) {
+ clk = ERR_PTR(ret);
+ goto err;
+ }
+
+ ret = krait_add_sec_mux(dev, id, s, offset, unique_aux);
+ if (ret) {
+ clk = ERR_PTR(ret);
+ goto err;
+ }
+
+ clk = krait_add_pri_mux(dev, id, s, offset);
+err:
+ kfree(p);
+ return clk;
+}
+
+static struct clk *krait_of_get(struct of_phandle_args *clkspec, void *data)
+{
+ unsigned int idx = clkspec->args[0];
+ struct clk **clks = data;
+
+ if (idx >= 5) {
+ pr_err("%s: invalid clock index %d\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return clks[idx] ? : ERR_PTR(-ENODEV);
+}
+
+static const struct of_device_id krait_cc_match_table[] = {
+ { .compatible = "qcom,krait-cc-v1", (void *)1UL },
+ { .compatible = "qcom,krait-cc-v2" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, krait_cc_match_table);
+
+static int krait_cc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *id;
+ unsigned long cur_rate, aux_rate;
+ int cpu;
+ struct clk *clk;
+ struct clk **clks;
+ struct clk *l2_pri_mux_clk;
+
+ id = of_match_device(krait_cc_match_table, dev);
+ if (!id)
+ return -ENODEV;
+
+ /* Rate is 1 because 0 causes problems for __clk_mux_determine_rate */
+ clk = clk_register_fixed_rate(dev, "qsb", NULL, CLK_IS_ROOT, 1);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ if (!id->data) {
+ clk = clk_register_fixed_factor(dev, "acpu_aux",
+ "gpll0_vote", 0, 1, 2);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ }
+
+ /* Krait configurations have at most 4 CPUs and one L2 */
+ clks = devm_kcalloc(dev, 5, sizeof(*clks), GFP_KERNEL);
+ if (!clks)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu) {
+ clk = krait_add_clks(dev, cpu, id->data);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ clks[cpu] = clk;
+ }
+
+ l2_pri_mux_clk = krait_add_clks(dev, -1, id->data);
+ if (IS_ERR(l2_pri_mux_clk))
+ return PTR_ERR(l2_pri_mux_clk);
+ clks[4] = l2_pri_mux_clk;
+
+ /*
+ * We don't want the CPU or L2 clocks to be turned off at late init
+ * if CPUFREQ or HOTPLUG configs are disabled. So, bump up the
+ * refcount of these clocks. Any cpufreq/hotplug manager can assume
+ * that the clocks have already been prepared and enabled by the time
+ * they take over.
+ */
+ for_each_online_cpu(cpu) {
+ clk_prepare_enable(l2_pri_mux_clk);
+ WARN(clk_prepare_enable(clks[cpu]),
+ "Unable to turn on CPU%d clock", cpu);
+ }
+
+ /*
+ * Force reinit of HFPLLs and muxes to overwrite any potential
+ * incorrect configuration of HFPLLs and muxes by the bootloader.
+ * While at it, also make sure the cores are running at known rates
+ * and print the current rate.
+ *
+ * The clocks are set to aux clock rate first to make sure the
+ * secondary mux is not sourcing off of QSB. The rate is then set to
+ * two different rates to force a HFPLL reinit under all
+ * circumstances.
+ */
+ cur_rate = clk_get_rate(l2_pri_mux_clk);
+ aux_rate = 384000000;
+ if (cur_rate == 1) {
+ pr_info("L2 @ QSB rate. Forcing new rate.\n");
+ cur_rate = aux_rate;
+ }
+ clk_set_rate(l2_pri_mux_clk, aux_rate);
+ clk_set_rate(l2_pri_mux_clk, 2);
+ clk_set_rate(l2_pri_mux_clk, cur_rate);
+ pr_info("L2 @ %lu KHz\n", clk_get_rate(l2_pri_mux_clk) / 1000);
+ for_each_possible_cpu(cpu) {
+ clk = clks[cpu];
+ cur_rate = clk_get_rate(clk);
+ if (cur_rate == 1) {
+ pr_info("CPU%d @ QSB rate. Forcing new rate.\n", cpu);
+ cur_rate = aux_rate;
+ }
+ clk_set_rate(clk, aux_rate);
+ clk_set_rate(clk, 2);
+ clk_set_rate(clk, cur_rate);
+ pr_info("CPU%d @ %lu KHz\n", cpu, clk_get_rate(clk) / 1000);
+ }
+
+ of_clk_add_provider(dev->of_node, krait_of_get, clks);
+
+ return 0;
+}
+
+static struct platform_driver krait_cc_driver = {
+ .probe = krait_cc_probe,
+ .driver = {
+ .name = "krait-cc",
+ .of_match_table = krait_cc_match_table,
+ },
+};
+module_platform_driver(krait_cc_driver);
+
+MODULE_DESCRIPTION("Krait CPU Clock Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:krait-cc");
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index f0ee6bde11af..2a817d2b8d76 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -26,6 +26,7 @@
#include "clk-rcg.h"
#include "clk-branch.h"
#include "reset.h"
+#include "gdsc.h"
enum {
P_XO,
@@ -571,17 +572,11 @@ static struct clk_rcg2 jpeg2_clk_src = {
},
};
-static struct freq_tbl pixel_freq_tbl[] = {
- { .src = P_DSI0PLL },
- { }
-};
-
static struct clk_rcg2 pclk0_clk_src = {
.cmd_rcgr = 0x2000,
.mnd_width = 8,
.hid_width = 5,
.parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
- .freq_tbl = pixel_freq_tbl,
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk0_clk_src",
.parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
@@ -596,7 +591,6 @@ static struct clk_rcg2 pclk1_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
- .freq_tbl = pixel_freq_tbl,
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk1_clk_src",
.parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
@@ -844,21 +838,15 @@ static struct clk_rcg2 cpp_clk_src = {
},
};
-static struct freq_tbl byte_freq_tbl[] = {
- { .src = P_DSI0PLL_BYTE },
- { }
-};
-
static struct clk_rcg2 byte0_clk_src = {
.cmd_rcgr = 0x2120,
.hid_width = 5,
.parent_map = mmcc_xo_dsibyte_hdmi_edp_gpll0_map,
- .freq_tbl = byte_freq_tbl,
.clkr.hw.init = &(struct clk_init_data){
.name = "byte0_clk_src",
.parent_names = mmcc_xo_dsibyte_hdmi_edp_gpll0,
.num_parents = 6,
- .ops = &clk_byte_ops,
+ .ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -867,12 +855,11 @@ static struct clk_rcg2 byte1_clk_src = {
.cmd_rcgr = 0x2140,
.hid_width = 5,
.parent_map = mmcc_xo_dsibyte_hdmi_edp_gpll0_map,
- .freq_tbl = byte_freq_tbl,
.clkr.hw.init = &(struct clk_init_data){
.name = "byte1_clk_src",
.parent_names = mmcc_xo_dsibyte_hdmi_edp_gpll0,
.num_parents = 6,
- .ops = &clk_byte_ops,
+ .ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -3077,6 +3064,48 @@ static const struct pll_config mmpll3_config = {
.aux_output_mask = BIT(1),
};
+static struct gdsc venus0_gdsc = {
+ .gdscr = 0x1024,
+ .pd = {
+ .name = "venus0",
+ },
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x2304,
+ .pd = {
+ .name = "mdss",
+ },
+};
+
+static struct gdsc camss_jpeg_gdsc = {
+ .gdscr = 0x35a4,
+ .pd = {
+ .name = "camss_jpeg",
+ },
+};
+
+static struct gdsc camss_vfe_gdsc = {
+ .gdscr = 0x36a4,
+ .pd = {
+ .name = "camss_vfe",
+ },
+};
+
+static struct gdsc oxili_gdsc = {
+ .gdscr = 0x4024,
+ .pd = {
+ .name = "oxili",
+ },
+};
+
+static struct gdsc oxilicx_gdsc = {
+ .gdscr = 0x4034,
+ .pd = {
+ .name = "oxilicx",
+ },
+};
+
static struct clk_regmap *mmcc_apq8084_clocks[] = {
[MMSS_AHB_CLK_SRC] = &mmss_ahb_clk_src.clkr,
[MMSS_AXI_CLK_SRC] = &mmss_axi_clk_src.clkr,
@@ -3294,6 +3323,15 @@ static const struct qcom_reset_map mmcc_apq8084_resets[] = {
[MMSSNOCAXI_RESET] = { 0x5060 },
};
+static struct gdsc *mmcc_apq8084_gdscs[] = {
+ [VENUS0_GDSC] = &venus0_gdsc,
+ [MDSS_GDSC] = &mdss_gdsc,
+ [CAMSS_JPEG_GDSC] = &camss_jpeg_gdsc,
+ [CAMSS_VFE_GDSC] = &camss_vfe_gdsc,
+ [OXILI_GDSC] = &oxili_gdsc,
+ [OXILICX_GDSC] = &oxilicx_gdsc,
+};
+
static const struct regmap_config mmcc_apq8084_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -3308,6 +3346,8 @@ static const struct qcom_cc_desc mmcc_apq8084_desc = {
.num_clks = ARRAY_SIZE(mmcc_apq8084_clocks),
.resets = mmcc_apq8084_resets,
.num_resets = ARRAY_SIZE(mmcc_apq8084_resets),
+ .gdscs = mmcc_apq8084_gdscs,
+ .num_gdscs = ARRAY_SIZE(mmcc_apq8084_gdscs),
};
static const struct of_device_id mmcc_apq8084_match_table[] = {
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index 0987bf443e1f..8606f88ae926 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -31,6 +31,7 @@
#include "clk-rcg.h"
#include "clk-branch.h"
#include "reset.h"
+#include "gdsc.h"
enum {
P_XO,
@@ -522,17 +523,11 @@ static struct clk_rcg2 jpeg2_clk_src = {
},
};
-static struct freq_tbl pixel_freq_tbl[] = {
- { .src = P_DSI0PLL },
- { }
-};
-
static struct clk_rcg2 pclk0_clk_src = {
.cmd_rcgr = 0x2000,
.mnd_width = 8,
.hid_width = 5,
.parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
- .freq_tbl = pixel_freq_tbl,
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk0_clk_src",
.parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
@@ -547,7 +542,6 @@ static struct clk_rcg2 pclk1_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
- .freq_tbl = pixel_freq_tbl,
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk1_clk_src",
.parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
@@ -785,7 +779,7 @@ static struct clk_rcg2 byte0_clk_src = {
.name = "byte0_clk_src",
.parent_names = mmcc_xo_dsibyte_hdmi_edp_gpll0,
.num_parents = 6,
- .ops = &clk_byte_ops,
+ .ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -799,7 +793,7 @@ static struct clk_rcg2 byte1_clk_src = {
.name = "byte1_clk_src",
.parent_names = mmcc_xo_dsibyte_hdmi_edp_gpll0,
.num_parents = 6,
- .ops = &clk_byte_ops,
+ .ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -2349,6 +2343,48 @@ static struct pll_config mmpll3_config = {
.aux_output_mask = BIT(1),
};
+static struct gdsc venus0_gdsc = {
+ .gdscr = 0x1024,
+ .pd = {
+ .name = "venus0",
+ },
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x2304,
+ .pd = {
+ .name = "mdss",
+ },
+};
+
+static struct gdsc camss_jpeg_gdsc = {
+ .gdscr = 0x35a4,
+ .pd = {
+ .name = "camss_jpeg",
+ },
+};
+
+static struct gdsc camss_vfe_gdsc = {
+ .gdscr = 0x36a4,
+ .pd = {
+ .name = "camss_vfe",
+ },
+};
+
+static struct gdsc oxili_gdsc = {
+ .gdscr = 0x4024,
+ .pd = {
+ .name = "oxili",
+ },
+};
+
+static struct gdsc oxilicx_gdsc = {
+ .gdscr = 0x4034,
+ .pd = {
+ .name = "oxilicx",
+ },
+};
+
static struct clk_regmap *mmcc_msm8974_clocks[] = {
[MMSS_AHB_CLK_SRC] = &mmss_ahb_clk_src.clkr,
[MMSS_AXI_CLK_SRC] = &mmss_axi_clk_src.clkr,
@@ -2525,6 +2561,15 @@ static const struct qcom_reset_map mmcc_msm8974_resets[] = {
[OCMEMNOC_RESET] = { 0x50b0 },
};
+static struct gdsc *mmcc_msm8974_gdscs[] = {
+ [VENUS0_GDSC] = &venus0_gdsc,
+ [MDSS_GDSC] = &mdss_gdsc,
+ [CAMSS_JPEG_GDSC] = &camss_jpeg_gdsc,
+ [CAMSS_VFE_GDSC] = &camss_vfe_gdsc,
+ [OXILI_GDSC] = &oxili_gdsc,
+ [OXILICX_GDSC] = &oxilicx_gdsc,
+};
+
static const struct regmap_config mmcc_msm8974_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
@@ -2539,6 +2584,8 @@ static const struct qcom_cc_desc mmcc_msm8974_desc = {
.num_clks = ARRAY_SIZE(mmcc_msm8974_clocks),
.resets = mmcc_msm8974_resets,
.num_resets = ARRAY_SIZE(mmcc_msm8974_resets),
+ .gdscs = mmcc_msm8974_gdscs,
+ .num_gdscs = ARRAY_SIZE(mmcc_msm8974_gdscs),
};
static const struct of_device_id mmcc_msm8974_match_table[] = {
diff --git a/drivers/clk/qcom/rpmcc-apq8064.c b/drivers/clk/qcom/rpmcc-apq8064.c
new file mode 100644
index 000000000000..392cca7d69c1
--- /dev/null
+++ b/drivers/clk/qcom/rpmcc-apq8064.c
@@ -0,0 +1,347 @@
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/mfd/qcom_rpm.h>
+#include <dt-bindings/mfd/qcom-rpm.h>
+
+struct rpm_cc {
+ struct clk_onecell_data data;
+ struct clk *clks[];
+};
+struct rpm_clk {
+ const int rpm_clk_id;
+ struct qcom_rpm *rpm;
+ unsigned last_set_khz;
+ bool enabled;
+ bool branch; /* true: RPM only accepts 1 for ON and 0 for OFF */
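+	/* factor converts the kHz value exchanged with the RPM into Hz */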
+ unsigned factor;
+ struct clk_hw hw;
+};
+
+#define to_rpm_clk(_hw) container_of(_hw, struct rpm_clk, hw)
+
+static int rpm_clk_prepare(struct clk_hw *hw)
+{
+ struct rpm_clk *r = to_rpm_clk(hw);
+ uint32_t value;
+ int rc = 0;
+ unsigned long this_khz;
+
+ this_khz = r->last_set_khz;
+ /* Don't send requests to the RPM if the rate has not been set. */
+ if (r->last_set_khz == 0)
+ goto out;
+
+ value = this_khz;
+ if (r->branch)
+ value = !!value;
+
+ rc = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
+ r->rpm_clk_id, &value, 1);
+ if (rc)
+ goto out;
+
+out:
+ if (!rc)
+ r->enabled = true;
+ return rc;
+}
+
+static void rpm_clk_unprepare(struct clk_hw *hw)
+{
+ struct rpm_clk *r = to_rpm_clk(hw);
+
+ if (r->last_set_khz) {
+ uint32_t value = 0;
+ int rc;
+
+ rc = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
+ r->rpm_clk_id, &value, 1);
+ if (rc)
+ return;
+
+ }
+ r->enabled = false;
+}
+
+static int rpm_clk_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long prate)
+{
+ struct rpm_clk *r = to_rpm_clk(hw);
+ unsigned long this_khz;
+ int rc = 0;
+
+ this_khz = DIV_ROUND_UP(rate, r->factor);
+
+ if (r->enabled) {
+ uint32_t value = this_khz;
+
+ rc = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
+ r->rpm_clk_id, &value, 1);
+ if (rc)
+ goto out;
+ }
+
+ if (!rc)
+ r->last_set_khz = this_khz;
+
+out:
+ return rc;
+}
+
+static long rpm_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ return rate;
+}
+
+static unsigned long rpm_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rpm_clk *r = to_rpm_clk(hw);
+ u32 val;
+ int rc;
+
+ rc = qcom_rpm_read(r->rpm, r->rpm_clk_id, &val, 1);
+ if (rc < 0)
+ return 0;
+
+ return val * r->factor;
+}
+
+static unsigned long rpm_branch_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct rpm_clk *r = to_rpm_clk(hw);
+
+ return r->last_set_khz * r->factor;
+}
+
+static const struct clk_ops branch_clk_ops_rpm = {
+ .prepare = rpm_clk_prepare,
+ .unprepare = rpm_clk_unprepare,
+ .recalc_rate = rpm_branch_clk_recalc_rate,
+ .round_rate = rpm_clk_round_rate,
+};
+
+static const struct clk_ops clk_ops_rpm = {
+ .prepare = rpm_clk_prepare,
+ .unprepare = rpm_clk_unprepare,
+ .set_rate = rpm_clk_set_rate,
+ .recalc_rate = rpm_clk_recalc_rate,
+ .round_rate = rpm_clk_round_rate,
+};
+
+static struct rpm_clk pxo_clk = {
+ .rpm_clk_id = QCOM_RPM_PXO_CLK,
+ .branch = true,
+ .factor = 1000,
+ .last_set_khz = 27000,
+ .hw.init = &(struct clk_init_data){
+ .name = "pxo",
+ .ops = &branch_clk_ops_rpm,
+ },
+};
+
+static struct rpm_clk cxo_clk = {
+ .rpm_clk_id = QCOM_RPM_CXO_CLK,
+ .branch = true,
+ .factor = 1000,
+ .last_set_khz = 19200,
+ .hw.init = &(struct clk_init_data){
+ .name = "cxo",
+ .ops = &branch_clk_ops_rpm,
+ },
+};
+
+static struct rpm_clk afab_clk = {
+ .rpm_clk_id = QCOM_RPM_APPS_FABRIC_CLK,
+ .factor = 1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "afab_clk",
+ .ops = &clk_ops_rpm,
+ },
+};
+
+static struct rpm_clk cfpb_clk = {
+ .rpm_clk_id = QCOM_RPM_CFPB_CLK,
+ .factor = 1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "cfpb_clk",
+ .ops = &clk_ops_rpm,
+ },
+};
+
+static struct rpm_clk daytona_clk = {
+ .rpm_clk_id = QCOM_RPM_DAYTONA_FABRIC_CLK,
+ .factor = 1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "daytona_clk",
+ .ops = &clk_ops_rpm,
+ },
+};
+
+static struct rpm_clk ebi1_clk = {
+ .rpm_clk_id = QCOM_RPM_EBI1_CLK,
+ .factor = 1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "ebi1_clk",
+ .ops = &clk_ops_rpm,
+ },
+};
+
+static struct rpm_clk mmfab_clk = {
+ .rpm_clk_id = QCOM_RPM_MM_FABRIC_CLK,
+ .factor = 1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "mmfab_clk",
+ .ops = &clk_ops_rpm,
+ },
+};
+
+static struct rpm_clk mmfpb_clk = {
+ .rpm_clk_id = QCOM_RPM_MMFPB_CLK,
+ .factor = 1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "mmfpb_clk",
+ .ops = &clk_ops_rpm,
+ },
+};
+
+static struct rpm_clk sfab_clk = {
+ .rpm_clk_id = QCOM_RPM_SYS_FABRIC_CLK,
+ .factor = 1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "sfab_clk",
+ .ops = &clk_ops_rpm,
+ },
+};
+
+static struct rpm_clk sfpb_clk = {
+ .rpm_clk_id = QCOM_RPM_SFPB_CLK,
+ .factor = 1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "sfpb_clk",
+ .ops = &clk_ops_rpm,
+ },
+};
+
+/*
+static struct rpm_clk qdss_clk = {
+ .rpm_clk_id = QCOM_RPM_QDSS_CLK,
+ .factor = 1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "qdss_clk",
+ .ops = &clk_ops_rpm,
+ },
+};
+*/
+
+static struct rpm_clk *rpm_clks[] = {
+ [QCOM_RPM_PXO_CLK] = &pxo_clk,
+ [QCOM_RPM_CXO_CLK] = &cxo_clk,
+ [QCOM_RPM_APPS_FABRIC_CLK] = &afab_clk,
+ [QCOM_RPM_CFPB_CLK] = &cfpb_clk,
+ [QCOM_RPM_DAYTONA_FABRIC_CLK] = &daytona_clk,
+ [QCOM_RPM_EBI1_CLK] = &ebi1_clk,
+ [QCOM_RPM_MM_FABRIC_CLK] = &mmfab_clk,
+ [QCOM_RPM_MMFPB_CLK] = &mmfpb_clk,
+ [QCOM_RPM_SYS_FABRIC_CLK] = &sfab_clk,
+ [QCOM_RPM_SFPB_CLK] = &sfpb_clk,
+	/* [QCOM_RPM_QDSS_CLK] = &qdss_clk,  needs more checking */
+};
+
+static int rpm_clk_probe(struct platform_device *pdev)
+{
+ struct clk **clks;
+ struct clk *clk;
+ struct rpm_cc *cc;
+ struct qcom_rpm *rpm;
+ int num_clks = ARRAY_SIZE(rpm_clks);
+ struct clk_onecell_data *data;
+ int i, ret;
+
+ if (!pdev->dev.of_node)
+ return -ENODEV;
+
+ cc = devm_kzalloc(&pdev->dev, sizeof(*cc) + sizeof(*clks) * num_clks,
+ GFP_KERNEL);
+ if (!cc)
+ return -ENOMEM;
+
+ clks = cc->clks;
+ data = &cc->data;
+ data->clks = clks;
+ data->clk_num = num_clks;
+
+ rpm = dev_get_drvdata(pdev->dev.parent);
+ if (!rpm) {
+ dev_err(&pdev->dev, "unable to retrieve handle to rpm\n");
+ return -ENODEV;
+ }
+
+ for (i = 0; i < num_clks; i++) {
+ if (!rpm_clks[i]) {
+ clks[i] = ERR_PTR(-ENOENT);
+ continue;
+ }
+ rpm_clks[i]->rpm = rpm;
+ clk = devm_clk_register(&pdev->dev, &rpm_clks[i]->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ clks[i] = clk;
+ }
+
+ ret = of_clk_add_provider(pdev->dev.of_node, of_clk_src_onecell_get,
+ data);
+ if (ret)
+ return ret;
+
+	/* Hold an active-set vote */
+ clk_set_rate(afab_clk.hw.clk, INT_MAX);
+ clk_prepare_enable(afab_clk.hw.clk);
+
+ return 0;
+}
+
+static int rpm_clk_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ return 0;
+}
+
+static const struct of_device_id rpm_clk_of_match[] = {
+ { .compatible = "qcom,apq8064-rpm-clk" },
+ { },
+};
+
+static struct platform_driver rpm_clk_driver = {
+ .driver = {
+ .name = "qcom-rpm-clk",
+ .of_match_table = rpm_clk_of_match,
+ },
+ .probe = rpm_clk_probe,
+ .remove = rpm_clk_remove,
+};
+
+static int __init rpm_clk_init(void)
+{
+ return platform_driver_register(&rpm_clk_driver);
+}
+subsys_initcall(rpm_clk_init);
+
+static void __exit rpm_clk_exit(void)
+{
+ platform_driver_unregister(&rpm_clk_driver);
+}
+module_exit(rpm_clk_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
+MODULE_DESCRIPTION("Driver for the RPM clocks");
diff --git a/drivers/clk/qcom/rpmcc.c b/drivers/clk/qcom/rpmcc.c
new file mode 100644
index 000000000000..d0a3f1c63f8c
--- /dev/null
+++ b/drivers/clk/qcom/rpmcc.c
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/soc/qcom/smd-rpm.h>
+
+#include "clk-smd-rpm.h"
+#include <dt-bindings/clock/qcom,rpmcc-msm8916.h>
+
+#define CXO_ID 0x0
+#define QDSS_ID 0x1
+#define BUS_SCALING 0x2
+
+#define PCNOC_ID 0x0
+#define SNOC_ID 0x1
+#define BIMC_ID 0x0
+
+#define BB_CLK1_ID 0x1
+#define BB_CLK2_ID 0x2
+#define RF_CLK1_ID 0x4
+#define RF_CLK2_ID 0x5
+
+struct rpm_cc {
+ struct qcom_rpm *rpm;
+ struct clk_onecell_data data;
+ struct clk *clks[];
+};
+
+/* SMD clocks */
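+/* each clock is defined together with its active-state-only ("_a") counterpart */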
+DEFINE_CLK_SMD_RPM(pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, PCNOC_ID, NULL);
+DEFINE_CLK_SMD_RPM(snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, SNOC_ID, NULL);
+DEFINE_CLK_SMD_RPM(bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, BIMC_ID, NULL);
+
+DEFINE_CLK_SMD_RPM_BRANCH(xo, xo_a, QCOM_SMD_RPM_MISC_CLK, CXO_ID, 19200000);
+DEFINE_CLK_SMD_RPM_QDSS(qdss_clk, qdss_a_clk, QCOM_SMD_RPM_MISC_CLK, QDSS_ID);
+
+/* SMD_XO_BUFFER */
+DEFINE_CLK_SMD_RPM_XO_BUFFER(bb_clk1, bb_clk1_a, BB_CLK1_ID);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(bb_clk2, bb_clk2_a, BB_CLK2_ID);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(rf_clk1, rf_clk1_a, RF_CLK1_ID);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(rf_clk2, rf_clk2_a, RF_CLK2_ID);
+
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(bb_clk1_pin, bb_clk1_a_pin, BB_CLK1_ID);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(bb_clk2_pin, bb_clk2_a_pin, BB_CLK2_ID);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(rf_clk1_pin, rf_clk1_a_pin, RF_CLK1_ID);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(rf_clk2_pin, rf_clk2_a_pin, RF_CLK2_ID);
+
+static struct clk_smd_rpm *rpmcc_msm8916_clks[] = {
+ [RPM_XO_CLK_SRC] = &xo,
+ [RPM_XO_A_CLK_SRC] = &xo_a,
+ [RPM_PCNOC_CLK] = &pcnoc_clk,
+ [RPM_PCNOC_A_CLK] = &pcnoc_a_clk,
+ [RPM_SNOC_CLK] = &snoc_clk,
+ [RPM_SNOC_A_CLK] = &snoc_a_clk,
+ [RPM_BIMC_CLK] = &bimc_clk,
+ [RPM_BIMC_A_CLK] = &bimc_a_clk,
+ [RPM_QDSS_CLK] = &qdss_clk,
+ [RPM_QDSS_A_CLK] = &qdss_a_clk,
+ [RPM_BB_CLK1] = &bb_clk1,
+ [RPM_BB_CLK1_A] = &bb_clk1_a,
+ [RPM_BB_CLK2] = &bb_clk2,
+ [RPM_BB_CLK2_A] = &bb_clk2_a,
+ [RPM_RF_CLK1] = &rf_clk1,
+ [RPM_RF_CLK1_A] = &rf_clk1_a,
+ [RPM_RF_CLK2] = &rf_clk2,
+ [RPM_RF_CLK2_A] = &rf_clk2_a,
+ [RPM_BB_CLK1_PIN] = &bb_clk1_pin,
+ [RPM_BB_CLK1_A_PIN] = &bb_clk1_a_pin,
+ [RPM_BB_CLK2_PIN] = &bb_clk2_pin,
+ [RPM_BB_CLK2_A_PIN] = &bb_clk2_a_pin,
+ [RPM_RF_CLK1_PIN] = &rf_clk1_pin,
+ [RPM_RF_CLK1_A_PIN] = &rf_clk1_a_pin,
+ [RPM_RF_CLK2_PIN] = &rf_clk2_pin,
+ [RPM_RF_CLK2_A_PIN] = &rf_clk2_a_pin,
+};
+
+struct rpmcc_desc {
+ struct clk_smd_rpm **clks;
+ size_t num_clks;
+};
+
+static const struct rpmcc_desc rpmcc_msm8916 = {
+ .clks = rpmcc_msm8916_clks,
+ .num_clks = ARRAY_SIZE(rpmcc_msm8916_clks),
+};
+
+static const struct of_device_id rpmcc_match_table[] = {
+ { .compatible = "qcom,rpmcc-msm8916", .data = &rpmcc_msm8916},
+ { }
+};
+MODULE_DEVICE_TABLE(of, rpmcc_match_table);
+
+static int rpmcc_probe(struct platform_device *pdev)
+{
+ struct clk **clks;
+ struct clk *clk;
+ struct clk_smd_rpm **rpm_clks;
+ struct rpm_cc *rcc;
+ const struct rpmcc_desc *desc;
+ struct qcom_smd_rpm *rpm;
+ struct clk_onecell_data *data;
+ int ret, i;
+ size_t num_clks;
+ const struct of_device_id *match;
+
+ rpm = dev_get_drvdata(pdev->dev.parent);
+ if (!rpm) {
+ dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
+ return -ENODEV;
+ }
+
+ match = of_match_device(rpmcc_match_table, &pdev->dev);
+ if (!match)
+ return -EINVAL;
+
+ desc = match->data;
+ rpm_clks = desc->clks;
+ num_clks = desc->num_clks;
+
+ rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc) + sizeof(*clks) * num_clks,
+ GFP_KERNEL);
+ if (!rcc)
+ return -ENOMEM;
+
+ clks = rcc->clks;
+ data = &rcc->data;
+ data->clks = clks;
+ data->clk_num = num_clks;
+
+ for (i = 0; i < num_clks; i++) {
+ if (!rpm_clks[i]) {
+ clks[i] = ERR_PTR(-ENOENT);
+ continue;
+ }
+
+ rpm_clks[i]->rpm = rpm;
+ clk = devm_clk_register(&pdev->dev, &rpm_clks[i]->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ clks[i] = clk;
+ }
+
+ ret = of_clk_add_provider(pdev->dev.of_node, of_clk_src_onecell_get,
+ data);
+ if (ret)
+ return ret;
+
+ ret = clk_smd_rpm_enable_scaling(rpm);
+ if (ret)
+ return ret;
+
+ /* Hold a vote for max rates */
+ clk_set_rate(bimc_a_clk.hw.clk, INT_MAX);
+ clk_prepare_enable(bimc_a_clk.hw.clk);
+ clk_set_rate(bimc_clk.hw.clk, INT_MAX);
+ clk_prepare_enable(bimc_clk.hw.clk);
+ clk_set_rate(snoc_clk.hw.clk, INT_MAX);
+ clk_prepare_enable(snoc_clk.hw.clk);
+ clk_prepare_enable(xo.hw.clk);
+
+ return 0;
+}
+
+static int rpmcc_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ return 0;
+}
+
+static struct platform_driver rpmcc_driver = {
+ .driver = {
+ .name = "qcom-rpmcc",
+ .of_match_table = rpmcc_match_table,
+ },
+ .probe = rpmcc_probe,
+ .remove = rpmcc_remove,
+};
+
+static int __init rpmcc_init(void)
+{
+ return platform_driver_register(&rpmcc_driver);
+}
+core_initcall(rpmcc_init);
+
+static void __exit rpmcc_exit(void)
+{
+ platform_driver_unregister(&rpmcc_driver);
+}
+module_exit(rpmcc_exit);
+
+MODULE_DESCRIPTION("Qualcomm RPM Clock Controller Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:rpmcc");
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index c737f7359974..852d8ef2ee8b 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -94,6 +94,15 @@ config ARM_OMAP2PLUS_CPUFREQ
depends on ARCH_OMAP2PLUS
default ARCH_OMAP2PLUS
+config ARM_QCOM_CPUFREQ
+ tristate "Qualcomm based"
+ depends on ARCH_QCOM
+ select PM_OPP
+ help
+ This adds the CPUFreq driver for Qualcomm SoC based boards.
+
+ If in doubt, say N.
+
config ARM_S3C_CPUFREQ
bool
help
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index e8bce2b44666..849e88d50aef 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_ARM_MT8173_CPUFREQ) += mt8173-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o
obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o
+obj-$(CONFIG_ARM_QCOM_CPUFREQ) += qcom-cpufreq.o
obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o
obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o
obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 7c0d70e2a861..e3152155a180 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -34,6 +34,9 @@ struct private_data {
struct regulator *cpu_reg;
struct thermal_cooling_device *cdev;
unsigned int voltage_tolerance; /* in percentage */
+ struct notifier_block opp_nb;
+ struct mutex lock;
+ unsigned long opp_freq;
};
static struct freq_attr *cpufreq_dt_attr[] = {
@@ -42,17 +45,56 @@ static struct freq_attr *cpufreq_dt_attr[] = {
NULL,
};
+static int opp_notifier(struct notifier_block *nb, unsigned long event,
+ void *data)
+{
+ struct dev_pm_opp *opp = data;
+ struct private_data *priv = container_of(nb, struct private_data,
+ opp_nb);
+ struct device *cpu_dev = priv->cpu_dev;
+ struct regulator *cpu_reg = priv->cpu_reg;
+ unsigned long volt, tol, freq;
+ int ret = 0;
+
+ switch (event) {
+ case OPP_EVENT_ADJUST_VOLTAGE:
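+		/* only adjust the regulator if the notified OPP is the one currently in use */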
+ volt = dev_pm_opp_get_voltage(opp);
+ freq = dev_pm_opp_get_freq(opp);
+ tol = volt * priv->voltage_tolerance / 100;
+
+ mutex_lock(&priv->lock);
+ if (freq == priv->opp_freq)
+ ret = regulator_set_voltage_tol(cpu_reg, volt,
+ tol);
+ mutex_unlock(&priv->lock);
+ if (ret) {
+ dev_err(cpu_dev,
+ "failed to scale voltage up: %d\n",
+ ret);
+ return ret;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
static int set_target(struct cpufreq_policy *policy, unsigned int index)
{
struct dev_pm_opp *opp;
struct cpufreq_frequency_table *freq_table = policy->freq_table;
struct clk *cpu_clk = policy->clk;
+ struct clk *l2_clk = policy->l2_clk;
struct private_data *priv = policy->driver_data;
struct device *cpu_dev = priv->cpu_dev;
struct regulator *cpu_reg = priv->cpu_reg;
unsigned long volt = 0, volt_old = 0, tol = 0;
- unsigned int old_freq, new_freq;
+ unsigned int old_freq, new_freq, l2_freq;
+ unsigned long new_l2_freq = 0;
long freq_Hz, freq_exact;
+ unsigned long opp_freq = 0;
int ret;
freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
@@ -63,8 +105,8 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
new_freq = freq_Hz / 1000;
old_freq = clk_get_rate(cpu_clk) / 1000;
+ mutex_lock(&priv->lock);
if (!IS_ERR(cpu_reg)) {
- unsigned long opp_freq;
rcu_read_lock();
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_Hz);
@@ -72,7 +114,8 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
rcu_read_unlock();
dev_err(cpu_dev, "failed to find OPP for %ld\n",
freq_Hz);
- return PTR_ERR(opp);
+ ret = PTR_ERR(opp);
+ goto out;
}
volt = dev_pm_opp_get_voltage(opp);
opp_freq = dev_pm_opp_get_freq(opp);
@@ -93,7 +136,7 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
if (ret) {
dev_err(cpu_dev, "failed to scale voltage up: %d\n",
ret);
- return ret;
+ goto out;
}
}
@@ -102,7 +145,31 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
if (!IS_ERR(cpu_reg) && volt_old > 0)
regulator_set_voltage_tol(cpu_reg, volt_old, tol);
- return ret;
+ goto out;
+ }
+
+ if (!IS_ERR(l2_clk) && policy->l2_rate[0] && policy->l2_rate[1] &&
+ policy->l2_rate[2]) {
+ static unsigned long krait_l2[CONFIG_NR_CPUS] = { };
+ int cpu, ret = 0;
+
+ if (freq_exact >= policy->l2_rate[2])
+ new_l2_freq = policy->l2_rate[2];
+ else if (freq_exact >= policy->l2_rate[1])
+ new_l2_freq = policy->l2_rate[1];
+ else
+ new_l2_freq = policy->l2_rate[0];
+
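+		/* record this CPU's L2 request and apply the max across all present CPUs */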
+ krait_l2[policy->cpu] = new_l2_freq;
+ for_each_present_cpu(cpu)
+ new_l2_freq = max(new_l2_freq, krait_l2[cpu]);
+
+ l2_freq = clk_get_rate(l2_clk);
+
+ if (l2_freq != new_l2_freq) {
+ /* scale l2 with the core */
+ ret = clk_set_rate(l2_clk, new_l2_freq);
+ }
}
/* scaling down? scale voltage after frequency */
@@ -112,18 +179,24 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
dev_err(cpu_dev, "failed to scale voltage down: %d\n",
ret);
clk_set_rate(cpu_clk, old_freq * 1000);
+ goto out;
}
}
+ priv->opp_freq = opp_freq;
+
+out:
+ mutex_unlock(&priv->lock);
return ret;
}
static int allocate_resources(int cpu, struct device **cdev,
- struct regulator **creg, struct clk **cclk)
+ struct regulator **creg, struct clk **cclk,
+ struct clk **l2)
{
struct device *cpu_dev;
struct regulator *cpu_reg;
- struct clk *cpu_clk;
+ struct clk *cpu_clk, *l2_clk = NULL;
int ret = 0;
char *reg_cpu0 = "cpu0", *reg_cpu = "cpu", *reg;
@@ -183,6 +256,10 @@ try_again:
*cdev = cpu_dev;
*creg = cpu_reg;
*cclk = cpu_clk;
+
+ l2_clk = clk_get(cpu_dev, "l2");
+ if (!IS_ERR(l2_clk))
+ *l2 = l2_clk;
}
return ret;
@@ -192,17 +269,20 @@ static int cpufreq_init(struct cpufreq_policy *policy)
{
struct cpufreq_frequency_table *freq_table;
struct device_node *np;
+ struct device_node *l2_np;
struct private_data *priv;
struct device *cpu_dev;
struct regulator *cpu_reg;
- struct clk *cpu_clk;
struct dev_pm_opp *suspend_opp;
+ struct clk *cpu_clk, *l2_clk;
unsigned long min_uV = ~0, max_uV = 0;
unsigned int transition_latency;
bool need_update = false;
int ret;
+ struct srcu_notifier_head *opp_srcu_head;
- ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
+ ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk,
+ &l2_clk);
if (ret) {
pr_err("%s: Failed to allocate resources: %d\n", __func__, ret);
return ret;
@@ -277,6 +357,19 @@ static int cpufreq_init(struct cpufreq_policy *policy)
goto out_free_opp;
}
+ mutex_init(&priv->lock);
+
+ opp_srcu_head = dev_pm_opp_get_notifier(cpu_dev);
+ if (IS_ERR(opp_srcu_head)) {
+ ret = PTR_ERR(opp_srcu_head);
+ goto out_free_priv;
+ }
+
+ priv->opp_nb.notifier_call = opp_notifier;
+ ret = srcu_notifier_chain_register(opp_srcu_head, &priv->opp_nb);
+ if (ret)
+ goto out_free_priv;
+
of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
if (!transition_latency)
@@ -326,7 +419,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
if (ret) {
pr_err("failed to init cpufreq table: %d\n", ret);
- goto out_free_priv;
+ goto out_unregister_nb;
}
priv->cpu_dev = cpu_dev;
@@ -340,6 +433,11 @@ static int cpufreq_init(struct cpufreq_policy *policy)
if (suspend_opp)
policy->suspend_freq = dev_pm_opp_get_freq(suspend_opp) / 1000;
rcu_read_unlock();
+ policy->l2_clk = l2_clk;
+
+ l2_np = of_find_node_by_name(NULL, "qcom,l2");
+ if (l2_np)
+ of_property_read_u32_array(l2_np, "qcom,l2-rates", policy->l2_rate, 3);
ret = cpufreq_table_validate_and_show(policy, freq_table);
if (ret) {
@@ -365,6 +463,8 @@ static int cpufreq_init(struct cpufreq_policy *policy)
out_free_cpufreq_table:
dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_unregister_nb:
+ srcu_notifier_chain_unregister(opp_srcu_head, &priv->opp_nb);
out_free_priv:
kfree(priv);
out_free_opp:
@@ -438,7 +538,7 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
{
struct device *cpu_dev;
struct regulator *cpu_reg;
- struct clk *cpu_clk;
+ struct clk *cpu_clk, *l2_clk;
int ret;
/*
@@ -448,7 +548,7 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
*
* FIXME: Is checking this only for CPU0 sufficient ?
*/
- ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk);
+ ret = allocate_resources(0, &cpu_dev, &cpu_reg, &cpu_clk, &l2_clk);
if (ret)
return ret;
diff --git a/drivers/cpufreq/qcom-cpufreq.c b/drivers/cpufreq/qcom-cpufreq.c
new file mode 100644
index 000000000000..c9f86a062cdd
--- /dev/null
+++ b/drivers/cpufreq/qcom-cpufreq.c
@@ -0,0 +1,204 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/cpufreq-dt.h>
+
+static void __init get_krait_bin_format_a(int *speed, int *pvs, int *pvs_ver)
+{
+ void __iomem *base;
+ u32 pte_efuse;
+
+ *speed = *pvs = *pvs_ver = 0;
+
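+	/* read the fused speed/PVS bin word directly from the efuse block */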
+ base = ioremap(0x007000c0, 4);
+ if (!base) {
+ pr_warn("Unable to read efuse data. Defaulting to 0!\n");
+ return;
+ }
+
+ pte_efuse = readl_relaxed(base);
+ iounmap(base);
+
+ *speed = pte_efuse & 0xf;
+ if (*speed == 0xf)
+ *speed = (pte_efuse >> 4) & 0xf;
+
+ if (*speed == 0xf) {
+ *speed = 0;
+ pr_warn("Speed bin: Defaulting to %d\n", *speed);
+ } else {
+ pr_info("Speed bin: %d\n", *speed);
+ }
+
+ *pvs = (pte_efuse >> 10) & 0x7;
+ if (*pvs == 0x7)
+ *pvs = (pte_efuse >> 13) & 0x7;
+
+ if (*pvs == 0x7) {
+ *pvs = 0;
+ pr_warn("PVS bin: Defaulting to %d\n", *pvs);
+ } else {
+ pr_info("PVS bin: %d\n", *pvs);
+ }
+}
+
+static void __init get_krait_bin_format_b(int *speed, int *pvs, int *pvs_ver)
+{
+ u32 pte_efuse, redundant_sel;
+ void __iomem *base;
+
+ *speed = 0;
+ *pvs = 0;
+ *pvs_ver = 0;
+
+ base = ioremap(0xfc4b80b0, 8);
+ if (!base) {
+ pr_warn("Unable to read efuse data. Defaulting to 0!\n");
+ return;
+ }
+
+ pte_efuse = readl_relaxed(base);
+ redundant_sel = (pte_efuse >> 24) & 0x7;
+ *speed = pte_efuse & 0x7;
+ /* 4 bits of PVS are in efuse register bits 31, 8-6. */
+ *pvs = ((pte_efuse >> 28) & 0x8) | ((pte_efuse >> 6) & 0x7);
+ *pvs_ver = (pte_efuse >> 4) & 0x3;
+
+ switch (redundant_sel) {
+ case 1:
+ *speed = (pte_efuse >> 27) & 0xf;
+ break;
+ case 2:
+ *pvs = (pte_efuse >> 27) & 0xf;
+ break;
+ }
+
+ /* Check SPEED_BIN_BLOW_STATUS */
+ if (pte_efuse & BIT(3)) {
+ pr_info("Speed bin: %d\n", *speed);
+ } else {
+ pr_warn("Speed bin not set. Defaulting to 0!\n");
+ *speed = 0;
+ }
+
+ /* Check PVS_BLOW_STATUS */
+ pte_efuse = readl_relaxed(base + 0x4) & BIT(21);
+ if (pte_efuse) {
+ pr_info("PVS bin: %d\n", *pvs);
+ } else {
+ pr_warn("PVS bin not set. Defaulting to 0!\n");
+ *pvs = 0;
+ }
+
+ pr_info("PVS version: %d\n", *pvs_ver);
+ iounmap(base);
+}
+
+static int __init qcom_cpufreq_populate_opps(void)
+{
+ int len, rows, cols, i, k, speed, pvs, pvs_ver;
+ char table_name[] = "qcom,speedXX-pvsXX-bin-vXX";
+ struct device_node *np;
+ struct device *dev;
+ int cpu = 0;
+
+ np = of_find_node_by_name(NULL, "qcom,pvs");
+ if (!np)
+ return -ENODEV;
+
+ if (of_property_read_bool(np, "qcom,pvs-format-a")) {
+ get_krait_bin_format_a(&speed, &pvs, &pvs_ver);
+ cols = 2;
+ } else if (of_property_read_bool(np, "qcom,pvs-format-b")) {
+ get_krait_bin_format_b(&speed, &pvs, &pvs_ver);
+ cols = 3;
+ } else {
+ return -ENODEV;
+ }
+
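+	/* each table row is <freq volt> for format A or <freq volt current> for format B */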
+ snprintf(table_name, sizeof(table_name),
+ "qcom,speed%d-pvs%d-bin-v%d", speed, pvs, pvs_ver);
+
+ if (!of_find_property(np, table_name, &len))
+ return -EINVAL;
+
+ len /= sizeof(u32);
+ if (len % cols || len == 0)
+ return -EINVAL;
+
+ rows = len / cols;
+
+ for (i = 0, k = 0; i < rows; i++) {
+ u32 freq, volt;
+
+ of_property_read_u32_index(np, table_name, k++, &freq);
+ of_property_read_u32_index(np, table_name, k++, &volt);
+ while (k % cols)
+ k++; /* Skip uA entries if present */
+ for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
+ dev = get_cpu_device(cpu);
+ if (!dev)
+ return -ENODEV;
+ if (dev_pm_opp_add(dev, freq, volt))
+ pr_warn("failed to add OPP %u\n", freq);
+ }
+ }
+
+ return 0;
+}
+
+static int __init qcom_cpufreq_driver_init(void)
+{
+ struct cpufreq_dt_platform_data pdata = { .independent_clocks = true };
+ struct platform_device_info devinfo = {
+ .name = "cpufreq-dt",
+ .data = &pdata,
+ .size_data = sizeof(pdata),
+ };
+ struct device *cpu_dev;
+ struct device_node *np;
+ int ret;
+
+ cpu_dev = get_cpu_device(0);
+ if (!cpu_dev)
+ return -ENODEV;
+
+ np = of_node_get(cpu_dev->of_node);
+ if (!np)
+ return -ENOENT;
+
+ if (!of_device_is_compatible(np, "qcom,krait")) {
+ of_node_put(np);
+ return -ENODEV;
+ }
+ of_node_put(np);
+
+ ret = qcom_cpufreq_populate_opps();
+ if (ret)
+ return ret;
+
+ return PTR_ERR_OR_ZERO(platform_device_register_full(&devinfo));
+}
+module_init(qcom_cpufreq_driver_init);
+
+MODULE_DESCRIPTION("Qualcomm CPUfreq driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 21340e0be73e..325be7fe4e8d 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -74,3 +74,10 @@ config ARM_MVEBU_V7_CPUIDLE
depends on ARCH_MVEBU && !ARM64
help
Select this to enable cpuidle on Armada 370, 38x and XP processors.
+
+config ARM_QCOM_CPUIDLE
+ bool "CPU Idle Driver for QCOM processors"
+ depends on ARCH_QCOM
+ select ARM_CPUIDLE
+ help
+ Select this to enable cpuidle on QCOM processors.
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index b4584757dae0..b7336aec3386 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -558,4 +558,23 @@ config DMATEST
config DMA_ENGINE_RAID
bool
+config QCOM_BAM_DMA
+ tristate "QCOM BAM DMA support"
+ depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ ---help---
+ Enable support for the QCOM BAM DMA controller. This controller
+ provides DMA capabilities for a variety of on-chip devices.
+
+config QCOM_ADM
+ tristate "Qualcomm ADM support"
+ depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ ---help---
+ Enable support for the Qualcomm ADM DMA controller. This controller
+ provides DMA capabilities for both general purpose and on-chip
+ peripheral devices.
+
endif
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 7711a7180726..ea264ee33eff 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
obj-$(CONFIG_PXA_DMA) += pxa_dma.o
obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
+obj-$(CONFIG_QCOM_ADM) += qcom_adm.o
obj-$(CONFIG_RENESAS_DMA) += sh/
obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
diff --git a/drivers/dma/qcom_adm.c b/drivers/dma/qcom_adm.c
new file mode 100644
index 000000000000..9c4f8962f0c6
--- /dev/null
+++ b/drivers/dma/qcom_adm.c
@@ -0,0 +1,856 @@
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+
+#include "dmaengine.h"
+#include "virt-dma.h"
+
+/* ADM registers - calculated from channel number and security domain */
+#define ADM_CHAN_MULTI 0x4
+#define ADM_CI_MULTI 0x4
+#define ADM_CRCI_MULTI 0x4
+#define ADM_EE_MULTI 0x800
+#define ADM_CHAN_OFFS(chan) (ADM_CHAN_MULTI * chan)
+#define ADM_EE_OFFS(ee) (ADM_EE_MULTI * ee)
+#define ADM_CHAN_EE_OFFS(chan, ee) (ADM_CHAN_OFFS(chan) + ADM_EE_OFFS(ee))
+#define ADM_CI_OFFS(ci) (ADM_CHAN_OFFS(ci))
+#define ADM_CH_CMD_PTR(chan, ee) (ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_CH_RSLT(chan, ee) (0x40 + ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_CH_FLUSH_STATE0(chan, ee) (0x80 + ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_CH_STATUS_SD(chan, ee) (0x200 + ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_CH_CONF(chan) (0x240 + ADM_CHAN_OFFS(chan))
+#define ADM_CH_RSLT_CONF(chan, ee) (0x300 + ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_SEC_DOMAIN_IRQ_STATUS(ee) (0x380 + ADM_EE_OFFS(ee))
+#define ADM_CI_CONF(ci) (0x390 + ci * ADM_CI_MULTI)
+#define ADM_GP_CTL 0x3d8
+#define ADM_CRCI_CTL(crci, ee) (0x400 + crci * ADM_CRCI_MULTI + \
+ ADM_EE_OFFS(ee))
+
+/* channel status */
+#define ADM_CH_STATUS_VALID BIT(1)
+
+/* channel result */
+#define ADM_CH_RSLT_VALID BIT(31)
+#define ADM_CH_RSLT_ERR BIT(3)
+#define ADM_CH_RSLT_FLUSH BIT(2)
+#define ADM_CH_RSLT_TPD BIT(1)
+
+/* channel conf */
+#define ADM_CH_CONF_SHADOW_EN BIT(12)
+#define ADM_CH_CONF_MPU_DISABLE BIT(11)
+#define ADM_CH_CONF_PERM_MPU_CONF BIT(9)
+#define ADM_CH_CONF_FORCE_RSLT_EN BIT(7)
+#define ADM_CH_CONF_SEC_DOMAIN(ee) (((ee & 0x3) << 4) | ((ee & 0x4) << 11))
+
+/* channel result conf */
+#define ADM_CH_RSLT_CONF_FLUSH_EN BIT(1)
+#define ADM_CH_RSLT_CONF_IRQ_EN BIT(0)
+
+/* CRCI CTL */
+#define ADM_CRCI_CTL_MUX_SEL BIT(18)
+#define ADM_CRCI_CTL_RST BIT(17)
+
+/* CI configuration */
+#define ADM_CI_RANGE_END(x) (x << 24)
+#define ADM_CI_RANGE_START(x) (x << 16)
+#define ADM_CI_BURST_4_WORDS BIT(2)
+#define ADM_CI_BURST_8_WORDS BIT(3)
+
+/* GP CTL */
+#define ADM_GP_CTL_LP_EN BIT(12)
+#define ADM_GP_CTL_LP_CNT(x) (x << 8)
+
+/* Command pointer list entry */
+#define ADM_CPLE_LP BIT(31)
+#define ADM_CPLE_CMD_PTR_LIST BIT(29)
+
+/* Command list entry */
+#define ADM_CMD_LC BIT(31)
+#define ADM_CMD_DST_CRCI(n) (((n) & 0xf) << 7)
+#define ADM_CMD_SRC_CRCI(n) (((n) & 0xf) << 3)
+
+#define ADM_CMD_TYPE_SINGLE 0x0
+#define ADM_CMD_TYPE_BOX 0x3
+
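+/* slave_id carries the CRCI number in bits 3:0 and the CRCI mux select in bit 4 */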
+#define ADM_CRCI_MUX_SEL BIT(4)
+#define ADM_DESC_ALIGN 8
+#define ADM_MAX_XFER (SZ_64K-1)
+#define ADM_MAX_ROWS (SZ_64K-1)
+#define ADM_MAX_CHANNELS 16
+
+struct adm_desc_hw_box {
+ u32 cmd;
+ u32 src_addr;
+ u32 dst_addr;
+ u32 row_len;
+ u32 num_rows;
+ u32 row_offset;
+};
+
+struct adm_desc_hw_single {
+ u32 cmd;
+ u32 src_addr;
+ u32 dst_addr;
+ u32 len;
+};
+
+struct adm_async_desc {
+ struct virt_dma_desc vd;
+ struct adm_device *adev;
+
+ size_t length;
+ enum dma_transfer_direction dir;
+ dma_addr_t dma_addr;
+ size_t dma_len;
+
+ void *cpl;
+ dma_addr_t cp_addr;
+ u32 crci;
+ u32 mux;
+ u32 blk_size;
+};
+
+struct adm_chan {
+ struct virt_dma_chan vc;
+ struct adm_device *adev;
+
+ /* parsed from DT */
+ u32 id; /* channel id */
+
+ struct adm_async_desc *curr_txd;
+ struct dma_slave_config slave;
+ struct list_head node;
+
+ int error;
+ int initialized;
+};
+
+static inline struct adm_chan *to_adm_chan(struct dma_chan *common)
+{
+ return container_of(common, struct adm_chan, vc.chan);
+}
+
+struct adm_device {
+ void __iomem *regs;
+ struct device *dev;
+ struct dma_device common;
+ struct device_dma_parameters dma_parms;
+ struct adm_chan *channels;
+
+ u32 ee;
+
+ struct clk *core_clk;
+ struct clk *iface_clk;
+
+ int irq;
+};
+
+/**
+ * adm_free_chan - Frees dma resources associated with the specific channel
+ * @chan: dma channel
+ *
+ * Free all allocated descriptors associated with this channel
+ *
+ */
+static void adm_free_chan(struct dma_chan *chan)
+{
+ /* free all queued descriptors */
+ vchan_free_chan_resources(to_virt_chan(chan));
+}
+
+/**
+ * adm_get_blksize - Get block size from burst value
+ * @burst: burst size of the transaction
+ *
+ */
+static int adm_get_blksize(unsigned int burst)
+{
+ int ret;
+
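+	/* map the burst length in bytes to the ADM block size encoding (16 -> 0 ... 256 -> 5) */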
+ switch (burst) {
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ ret = ffs(burst>>4) - 1;
+ break;
+ case 192:
+ ret = 4;
+ break;
+ case 256:
+ ret = 5;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * adm_process_fc_descriptors - Process descriptors for flow controlled xfers
+ *
+ * @achan: ADM channel
+ * @desc: Descriptor memory pointer
+ * @sg: Scatterlist entry
+ * @crci: CRCI value
+ * @burst: Burst size of transaction
+ * @direction: DMA transfer direction
+ */
+static void *adm_process_fc_descriptors(struct adm_chan *achan,
+ void *desc, struct scatterlist *sg, u32 crci, u32 burst,
+ enum dma_transfer_direction direction)
+{
+ struct adm_desc_hw_box *box_desc = NULL;
+ struct adm_desc_hw_single *single_desc;
+ u32 remainder = sg_dma_len(sg);
+ u32 rows, row_offset, crci_cmd;
+ u32 mem_addr = sg_dma_address(sg);
+ u32 *incr_addr = &mem_addr;
+ u32 *src, *dst;
+
+ if (direction == DMA_DEV_TO_MEM) {
+ crci_cmd = ADM_CMD_SRC_CRCI(crci);
+ row_offset = burst;
+ src = &achan->slave.src_addr;
+ dst = &mem_addr;
+ } else {
+ crci_cmd = ADM_CMD_DST_CRCI(crci);
+ row_offset = burst << 16;
+ src = &mem_addr;
+ dst = &achan->slave.dst_addr;
+ }
+
+ while (remainder >= burst) {
+ box_desc = desc;
+ box_desc->cmd = ADM_CMD_TYPE_BOX | crci_cmd;
+ box_desc->row_offset = row_offset;
+ box_desc->src_addr = *src;
+ box_desc->dst_addr = *dst;
+
+ rows = remainder / burst;
+ rows = min_t(u32, rows, ADM_MAX_ROWS);
+ box_desc->num_rows = rows << 16 | rows;
+ box_desc->row_len = burst << 16 | burst;
+
+ *incr_addr += burst * rows;
+ remainder -= burst * rows;
+ desc += sizeof(*box_desc);
+ }
+
+ /* if leftover bytes, do one single descriptor */
+ if (remainder) {
+ single_desc = desc;
+ single_desc->cmd = ADM_CMD_TYPE_SINGLE | crci_cmd;
+ single_desc->len = remainder;
+ single_desc->src_addr = *src;
+ single_desc->dst_addr = *dst;
+ desc += sizeof(*single_desc);
+
+ if (sg_is_last(sg))
+ single_desc->cmd |= ADM_CMD_LC;
+ } else {
+ if (box_desc && sg_is_last(sg))
+ box_desc->cmd |= ADM_CMD_LC;
+ }
+
+ return desc;
+}
+
+/**
+ * adm_process_non_fc_descriptors - Process descriptors for non-fc xfers
+ *
+ * @achan: ADM channel
+ * @desc: Descriptor memory pointer
+ * @sg: Scatterlist entry
+ * @direction: DMA transfer direction
+ */
+static void *adm_process_non_fc_descriptors(struct adm_chan *achan,
+ void *desc, struct scatterlist *sg,
+ enum dma_transfer_direction direction)
+{
+ struct adm_desc_hw_single *single_desc;
+ u32 remainder = sg_dma_len(sg);
+ u32 mem_addr = sg_dma_address(sg);
+ u32 *incr_addr = &mem_addr;
+ u32 *src, *dst;
+
+ if (direction == DMA_DEV_TO_MEM) {
+ src = &achan->slave.src_addr;
+ dst = &mem_addr;
+ } else {
+ src = &mem_addr;
+ dst = &achan->slave.dst_addr;
+ }
+
+ do {
+ single_desc = desc;
+ single_desc->cmd = ADM_CMD_TYPE_SINGLE;
+ single_desc->src_addr = *src;
+ single_desc->dst_addr = *dst;
+ single_desc->len = (remainder > ADM_MAX_XFER) ?
+ ADM_MAX_XFER : remainder;
+
+ remainder -= single_desc->len;
+ *incr_addr += single_desc->len;
+ desc += sizeof(*single_desc);
+ } while (remainder);
+
+ /* set last command if this is the end of the whole transaction */
+ if (sg_is_last(sg))
+ single_desc->cmd |= ADM_CMD_LC;
+
+ return desc;
+}
+
+/**
+ * adm_prep_slave_sg - Prep slave sg transaction
+ *
+ * @chan: dma channel
+ * @sgl: scatter gather list
+ * @sg_len: length of sg
+ * @direction: DMA transfer direction
+ * @flags: DMA flags
+ * @context: transfer context (unused)
+ */
+static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
+ struct scatterlist *sgl, unsigned int sg_len,
+ enum dma_transfer_direction direction, unsigned long flags,
+ void *context)
+{
+ struct adm_chan *achan = to_adm_chan(chan);
+ struct adm_device *adev = achan->adev;
+ struct adm_async_desc *async_desc;
+ struct scatterlist *sg;
+ u32 i, burst;
+ u32 single_count = 0, box_count = 0, crci = 0;
+ void *desc;
+ u32 *cple;
+ int blk_size = 0;
+
+ if (!is_slave_direction(direction)) {
+ dev_err(adev->dev, "invalid dma direction\n");
+ return NULL;
+ }
+
+ /*
+ * get burst value from slave configuration
+ */
+ burst = (direction == DMA_MEM_TO_DEV) ?
+ achan->slave.dst_maxburst :
+ achan->slave.src_maxburst;
+
+ /* if using flow control, validate burst and crci values */
+ if (achan->slave.device_fc) {
+
+ blk_size = adm_get_blksize(burst);
+ if (blk_size < 0) {
+ dev_err(adev->dev, "invalid burst value: %d\n",
+ burst);
+ return ERR_PTR(-EINVAL);
+ }
+
+ crci = achan->slave.slave_id & 0xf;
+ if (!crci || achan->slave.slave_id > 0x1f) {
+ dev_err(adev->dev, "invalid crci value\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* iterate through sgs and compute allocation size of structures */
+ for_each_sg(sgl, sg, sg_len, i) {
+ if (achan->slave.device_fc) {
+ box_count += DIV_ROUND_UP(sg_dma_len(sg) / burst,
+ ADM_MAX_ROWS);
+ if (sg_dma_len(sg) % burst)
+ single_count++;
+ } else {
+ single_count += DIV_ROUND_UP(sg_dma_len(sg),
+ ADM_MAX_XFER);
+ }
+ }
+
+ async_desc = kzalloc(sizeof(*async_desc), GFP_NOWAIT);
+ if (!async_desc)
+ return ERR_PTR(-ENOMEM);
+
+ if (crci)
+ async_desc->mux = achan->slave.slave_id & ADM_CRCI_MUX_SEL ?
+ ADM_CRCI_CTL_MUX_SEL : 0;
+ async_desc->crci = crci;
+ async_desc->blk_size = blk_size;
+ async_desc->dma_len = single_count * sizeof(struct adm_desc_hw_single) +
+ box_count * sizeof(struct adm_desc_hw_box) +
+ sizeof(*cple) + 2 * ADM_DESC_ALIGN;
+
+ async_desc->cpl = dma_alloc_writecombine(adev->dev, async_desc->dma_len,
+ &async_desc->dma_addr, GFP_NOWAIT);
+
+ if (!async_desc->cpl) {
+ kfree(async_desc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ async_desc->adev = adev;
+
+ /* both command list entry and descriptors must be 8 byte aligned */
+ cple = PTR_ALIGN(async_desc->cpl, ADM_DESC_ALIGN);
+ desc = PTR_ALIGN(cple + 1, ADM_DESC_ALIGN);
+
+	/* init cmd list entry: LP flag plus the 8-byte-aligned descriptor address >> 3 */
+ *cple = ADM_CPLE_LP;
+ *cple |= (desc - async_desc->cpl + async_desc->dma_addr) >> 3;
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ async_desc->length += sg_dma_len(sg);
+
+ if (achan->slave.device_fc)
+ desc = adm_process_fc_descriptors(achan, desc, sg, crci,
+ burst, direction);
+ else
+ desc = adm_process_non_fc_descriptors(achan, desc, sg,
+ direction);
+ }
+
+ return vchan_tx_prep(&achan->vc, &async_desc->vd, flags);
+}
+
+/**
+ * adm_terminate_all - terminate all transactions on a channel
+ * @chan: dma channel
+ *
+ * Dequeues and frees all transactions, aborts current transaction
+ * No callbacks are done
+ *
+ */
+static int adm_terminate_all(struct dma_chan *chan)
+{
+ struct adm_chan *achan = to_adm_chan(chan);
+ struct adm_device *adev = achan->adev;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&achan->vc.lock, flags);
+ vchan_get_all_descriptors(&achan->vc, &head);
+
+ /* send flush command to terminate current transaction */
+ writel_relaxed(BIT(31),
+ adev->regs + ADM_CH_FLUSH_STATE0(achan->id, adev->ee));
+
+ spin_unlock_irqrestore(&achan->vc.lock, flags);
+
+ vchan_dma_desc_free_list(&achan->vc, &head);
+
+ return 0;
+}
+
+static int adm_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
+{
+ struct adm_chan *achan = to_adm_chan(chan);
+ unsigned long flag;
+
+ spin_lock_irqsave(&achan->vc.lock, flag);
+ memcpy(&achan->slave, cfg, sizeof(struct dma_slave_config));
+ spin_unlock_irqrestore(&achan->vc.lock, flag);
+
+ return 0;
+}
+
+/**
+ * adm_start_dma - start next transaction
+ * @achan: ADM dma channel
+ */
+static void adm_start_dma(struct adm_chan *achan)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&achan->vc);
+ struct adm_device *adev = achan->adev;
+ struct adm_async_desc *async_desc;
+
+ lockdep_assert_held(&achan->vc.lock);
+
+ if (!vd)
+ return;
+
+ list_del(&vd->node);
+
+	/* pull the next descriptor off the issued list */
+ async_desc = container_of(vd, struct adm_async_desc, vd);
+ achan->curr_txd = async_desc;
+
+ /* reset channel error */
+ achan->error = 0;
+
+ if (!achan->initialized) {
+ /* enable interrupts */
+ writel(ADM_CH_CONF_SHADOW_EN |
+ ADM_CH_CONF_PERM_MPU_CONF |
+ ADM_CH_CONF_MPU_DISABLE |
+ ADM_CH_CONF_SEC_DOMAIN(adev->ee),
+ adev->regs + ADM_CH_CONF(achan->id));
+
+ writel(ADM_CH_RSLT_CONF_IRQ_EN | ADM_CH_RSLT_CONF_FLUSH_EN,
+ adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee));
+
+ achan->initialized = 1;
+ }
+
+ /* set the crci block size if this transaction requires CRCI */
+ if (async_desc->crci) {
+ writel(async_desc->mux | async_desc->blk_size,
+ adev->regs + ADM_CRCI_CTL(async_desc->crci, adev->ee));
+ }
+
+ /* make sure IRQ enable doesn't get reordered */
+ wmb();
+
+ /* write next command list out to the CMD FIFO */
+ writel(ALIGN(async_desc->dma_addr, ADM_DESC_ALIGN) >> 3,
+ adev->regs + ADM_CH_CMD_PTR(achan->id, adev->ee));
+}
+
+/**
+ * adm_dma_irq - irq handler for ADM controller
+ * @irq: IRQ of interrupt
+ * @data: callback data
+ *
+ * IRQ handler for the ADM controller
+ */
+static irqreturn_t adm_dma_irq(int irq, void *data)
+{
+ struct adm_device *adev = data;
+ u32 srcs, i;
+ struct adm_async_desc *async_desc;
+ unsigned long flags;
+
+ srcs = readl_relaxed(adev->regs +
+ ADM_SEC_DOMAIN_IRQ_STATUS(adev->ee));
+
+ for (i = 0; i < ADM_MAX_CHANNELS; i++) {
+ struct adm_chan *achan = &adev->channels[i];
+ u32 status, result;
+
+ if (srcs & BIT(i)) {
+ status = readl_relaxed(adev->regs +
+ ADM_CH_STATUS_SD(i, adev->ee));
+
+ /* if no result present, skip */
+ if (!(status & ADM_CH_STATUS_VALID))
+ continue;
+
+ result = readl_relaxed(adev->regs +
+ ADM_CH_RSLT(i, adev->ee));
+
+ /* no valid results, skip */
+ if (!(result & ADM_CH_RSLT_VALID))
+ continue;
+
+ /* flag error if transaction was flushed or failed */
+ if (result & (ADM_CH_RSLT_ERR | ADM_CH_RSLT_FLUSH))
+ achan->error = 1;
+
+ spin_lock_irqsave(&achan->vc.lock, flags);
+ async_desc = achan->curr_txd;
+
+ achan->curr_txd = NULL;
+
+ if (async_desc)
+ vchan_cookie_complete(&async_desc->vd);
+
+ /* kick off next DMA */
+ adm_start_dma(achan);
+
+ spin_unlock_irqrestore(&achan->vc.lock, flags);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * adm_tx_status - returns status of transaction
+ * @chan: dma channel
+ * @cookie: transaction cookie
+ * @txstate: DMA transaction state
+ *
+ * Return status of dma transaction
+ */
+static enum dma_status adm_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct adm_chan *achan = to_adm_chan(chan);
+ struct virt_dma_desc *vd;
+ enum dma_status ret;
+ unsigned long flags;
+ size_t residue = 0;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE || !txstate)
+ return ret;
+
+ spin_lock_irqsave(&achan->vc.lock, flags);
+
+ vd = vchan_find_desc(&achan->vc, cookie);
+ if (vd)
+ residue = container_of(vd, struct adm_async_desc, vd)->length;
+
+ spin_unlock_irqrestore(&achan->vc.lock, flags);
+
+ /*
+ * residue is either the full length if it is in the issued list, or 0
+ * if it is in progress. We have no reliable way of determining
+	 * anything in between.
+ */
+ dma_set_residue(txstate, residue);
+
+ if (achan->error)
+ return DMA_ERROR;
+
+ return ret;
+}
+
+/**
+ * adm_issue_pending - starts pending transactions
+ * @chan: dma channel
+ *
+ * Issues all pending transactions and starts DMA
+ */
+static void adm_issue_pending(struct dma_chan *chan)
+{
+ struct adm_chan *achan = to_adm_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&achan->vc.lock, flags);
+
+ if (vchan_issue_pending(&achan->vc) && !achan->curr_txd)
+ adm_start_dma(achan);
+ spin_unlock_irqrestore(&achan->vc.lock, flags);
+}
+
+/**
+ * adm_dma_free_desc - free descriptor memory
+ * @vd: virtual descriptor
+ *
+ */
+static void adm_dma_free_desc(struct virt_dma_desc *vd)
+{
+ struct adm_async_desc *async_desc = container_of(vd,
+ struct adm_async_desc, vd);
+
+ dma_free_writecombine(async_desc->adev->dev, async_desc->dma_len,
+ async_desc->cpl, async_desc->dma_addr);
+ kfree(async_desc);
+}
+
+static void adm_channel_init(struct adm_device *adev, struct adm_chan *achan,
+ u32 index)
+{
+ achan->id = index;
+ achan->adev = adev;
+
+ vchan_init(&achan->vc, &adev->common);
+ achan->vc.desc_free = adm_dma_free_desc;
+}
+
+static int adm_dma_probe(struct platform_device *pdev)
+{
+ struct adm_device *adev;
+ struct resource *iores;
+ int ret;
+ u32 i;
+
+ adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+
+ adev->dev = &pdev->dev;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ adev->regs = devm_ioremap_resource(&pdev->dev, iores);
+ if (IS_ERR(adev->regs))
+ return PTR_ERR(adev->regs);
+
+ adev->irq = platform_get_irq(pdev, 0);
+ if (adev->irq < 0)
+ return adev->irq;
+
+ ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &adev->ee);
+ if (ret) {
+ dev_err(adev->dev, "Execution environment unspecified\n");
+ return ret;
+ }
+
+ adev->core_clk = devm_clk_get(adev->dev, "core");
+ if (IS_ERR(adev->core_clk))
+ return PTR_ERR(adev->core_clk);
+
+ ret = clk_prepare_enable(adev->core_clk);
+ if (ret) {
+ dev_err(adev->dev, "failed to prepare/enable core clock\n");
+ return ret;
+ }
+
+ adev->iface_clk = devm_clk_get(adev->dev, "iface");
+ if (IS_ERR(adev->iface_clk)) {
+ ret = PTR_ERR(adev->iface_clk);
+ goto err_disable_core_clk;
+ }
+
+ ret = clk_prepare_enable(adev->iface_clk);
+ if (ret) {
+ dev_err(adev->dev, "failed to prepare/enable iface clock\n");
+ goto err_disable_core_clk;
+ }
+
+ adev->channels = devm_kcalloc(adev->dev, ADM_MAX_CHANNELS,
+ sizeof(*adev->channels), GFP_KERNEL);
+
+ if (!adev->channels) {
+ ret = -ENOMEM;
+ goto err_disable_clks;
+ }
+
+ /* allocate and initialize channels */
+ INIT_LIST_HEAD(&adev->common.channels);
+
+ for (i = 0; i < ADM_MAX_CHANNELS; i++)
+ adm_channel_init(adev, &adev->channels[i], i);
+
+ /* reset CRCIs */
+ for (i = 0; i < 16; i++)
+ writel(ADM_CRCI_CTL_RST, adev->regs +
+ ADM_CRCI_CTL(i, adev->ee));
+
+ /* configure client interfaces */
+ writel(ADM_CI_RANGE_START(0x40) | ADM_CI_RANGE_END(0xb0) |
+ ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(0));
+ writel(ADM_CI_RANGE_START(0x2a) | ADM_CI_RANGE_END(0x2c) |
+ ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(1));
+ writel(ADM_CI_RANGE_START(0x12) | ADM_CI_RANGE_END(0x28) |
+ ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(2));
+ writel(ADM_GP_CTL_LP_EN | ADM_GP_CTL_LP_CNT(0xf),
+ adev->regs + ADM_GP_CTL);
+
+ ret = devm_request_irq(adev->dev, adev->irq, adm_dma_irq,
+ 0, "adm_dma", adev);
+ if (ret)
+ goto err_disable_clks;
+
+ platform_set_drvdata(pdev, adev);
+
+ adev->common.dev = adev->dev;
+ adev->common.dev->dma_parms = &adev->dma_parms;
+
+ /* set capabilities */
+ dma_cap_zero(adev->common.cap_mask);
+ dma_cap_set(DMA_SLAVE, adev->common.cap_mask);
+ dma_cap_set(DMA_PRIVATE, adev->common.cap_mask);
+
+ /* initialize dmaengine apis */
+ adev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ adev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ adev->common.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ adev->common.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
+ adev->common.device_free_chan_resources = adm_free_chan;
+ adev->common.device_prep_slave_sg = adm_prep_slave_sg;
+ adev->common.device_issue_pending = adm_issue_pending;
+ adev->common.device_tx_status = adm_tx_status;
+ adev->common.device_terminate_all = adm_terminate_all;
+ adev->common.device_config = adm_slave_config;
+
+ ret = dma_async_device_register(&adev->common);
+ if (ret) {
+ dev_err(adev->dev, "failed to register dma async device\n");
+ goto err_disable_clks;
+ }
+
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ of_dma_xlate_by_chan_id,
+ &adev->common);
+ if (ret)
+ goto err_unregister_dma;
+
+ return 0;
+
+err_unregister_dma:
+ dma_async_device_unregister(&adev->common);
+err_disable_clks:
+ clk_disable_unprepare(adev->iface_clk);
+err_disable_core_clk:
+ clk_disable_unprepare(adev->core_clk);
+
+ return ret;
+}
+
+static int adm_dma_remove(struct platform_device *pdev)
+{
+ struct adm_device *adev = platform_get_drvdata(pdev);
+ struct adm_chan *achan;
+ u32 i;
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&adev->common);
+
+ for (i = 0; i < ADM_MAX_CHANNELS; i++) {
+ achan = &adev->channels[i];
+
+ /* mask IRQs for this channel/EE pair */
+ writel(0, adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee));
+
+ adm_terminate_all(&adev->channels[i].vc.chan);
+ }
+
+ devm_free_irq(adev->dev, adev->irq, adev);
+
+ clk_disable_unprepare(adev->core_clk);
+ clk_disable_unprepare(adev->iface_clk);
+
+ return 0;
+}
+
+static const struct of_device_id adm_of_match[] = {
+ { .compatible = "qcom,adm", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, adm_of_match);
+
+static struct platform_driver adm_dma_driver = {
+ .probe = adm_dma_probe,
+ .remove = adm_dma_remove,
+ .driver = {
+ .name = "adm-dma-engine",
+ .of_match_table = adm_of_match,
+ },
+};
+
+module_platform_driver(adm_dma_driver);
+
+MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
+MODULE_DESCRIPTION("QCOM ADM DMA engine driver");
+MODULE_LICENSE("GPL v2");
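The capabilities set up in adm_dma_probe() are consumed through the generic dmaengine slave API; a minimal client sketch follows. It is only an illustration of how the callbacks above get exercised, not something this patch adds — dev, fifo_phys, sgl, sg_len and the channel name are placeholders a real client would supply.

	/* Hypothetical dmaengine client driving an ADM channel (illustration only) */
	struct dma_chan *chan;
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_phys,			/* placeholder: peripheral FIFO address */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,	/* the only width ADM advertises */
	};
	struct dma_async_tx_descriptor *txd;

	chan = dma_request_slave_channel(dev, "rxtx");		/* channel name from the client's DT */
	if (!chan)
		return -ENODEV;

	dmaengine_slave_config(chan, &cfg);			/* lands in adm_slave_config() */
	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				      DMA_PREP_INTERRUPT);	/* lands in adm_prep_slave_sg() */
	if (txd) {
		dmaengine_submit(txd);
		dma_async_issue_pending(chan);			/* adm_issue_pending() kicks the hardware */
	}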
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index b8a521741418..3a0455496efe 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -14,8 +14,11 @@ obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
obj-$(CONFIG_QCOM_SCM) += qcom_scm.o
+ifdef CONFIG_64BIT
obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o
+else
obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o
+endif
CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1)
obj-y += broadcom/
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c
index 29e6850665eb..0a7bcfa44409 100644
--- a/drivers/firmware/qcom_scm-32.c
+++ b/drivers/firmware/qcom_scm-32.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2010,2015, The Linux Foundation. All rights reserved.
* Copyright (C) 2015 Linaro Ltd.
*
* This program is free software; you can redistribute it and/or modify
@@ -23,11 +24,19 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/qcom_scm.h>
+#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
#include "qcom_scm.h"
+#define QCOM_SCM_ENOMEM -5
+#define QCOM_SCM_EOPNOTSUPP -4
+#define QCOM_SCM_EINVAL_ADDR -3
+#define QCOM_SCM_EINVAL_ARG -2
+#define QCOM_SCM_ERROR -1
+#define QCOM_SCM_INTERRUPTED 1
+
#define QCOM_SCM_FLAG_COLDBOOT_CPU0 0x00
#define QCOM_SCM_FLAG_COLDBOOT_CPU1 0x01
#define QCOM_SCM_FLAG_COLDBOOT_CPU2 0x08
@@ -38,6 +47,15 @@
#define QCOM_SCM_FLAG_WARMBOOT_CPU2 0x10
#define QCOM_SCM_FLAG_WARMBOOT_CPU3 0x40
+#define IOMMU_SECURE_PTBL_SIZE 3
+#define IOMMU_SECURE_PTBL_INIT 4
+#define IOMMU_SET_CP_POOL_SIZE 5
+#define IOMMU_SECURE_MAP 6
+#define IOMMU_SECURE_UNMAP 7
+#define IOMMU_SECURE_MAP2 0xb
+#define IOMMU_SECURE_MAP2_FLAT 0x12
+#define IOMMU_SECURE_UNMAP2 0xc
+
struct qcom_scm_entry {
int flag;
void *entry;
@@ -168,23 +186,6 @@ static inline void *qcom_scm_get_response_buffer(const struct qcom_scm_response
return (void *)rsp + le32_to_cpu(rsp->buf_offset);
}
-static int qcom_scm_remap_error(int err)
-{
- pr_err("qcom_scm_call failed with error code %d\n", err);
- switch (err) {
- case QCOM_SCM_ERROR:
- return -EIO;
- case QCOM_SCM_EINVAL_ADDR:
- case QCOM_SCM_EINVAL_ARG:
- return -EINVAL;
- case QCOM_SCM_EOPNOTSUPP:
- return -EOPNOTSUPP;
- case QCOM_SCM_ENOMEM:
- return -ENOMEM;
- }
- return -EINVAL;
-}
-
static u32 smc(u32 cmd_addr)
{
int context_id;
@@ -499,3 +500,386 @@ int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
return qcom_scm_call(QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP,
req, req_cnt * sizeof(*req), resp, sizeof(*resp));
}
+
+bool __qcom_scm_pas_supported(u32 peripheral)
+{
+ u32 ret_val;
+ int ret;
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_IS_SUPPORTED_CMD,
+ &peripheral, sizeof(peripheral),
+ &ret_val, sizeof(ret_val));
+
+ return ret ? false : !!ret_val;
+}
+
+int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral, const void *metadata, size_t size)
+{
+ dma_addr_t mdata_phys;
+ void *mdata_buf;
+ u32 scm_ret;
+ int ret;
+ struct pas_init_image_req {
+ u32 proc;
+ u32 image_addr;
+ } request;
+
+ /*
+ * During the scm call memory protection will be enabled for the meta
+ * data blob, so make sure it's physically contiguous, 4K aligned and
+ * non-cachable to avoid XPU violations.
+ */
+ mdata_buf = dma_alloc_coherent(dev, size, &mdata_phys, GFP_KERNEL);
+ if (!mdata_buf) {
+ pr_err("Allocation of metadata buffer failed.\n");
+ return -ENOMEM;
+ }
+ memcpy(mdata_buf, metadata, size);
+
+ request.proc = peripheral;
+ request.image_addr = mdata_phys;
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_INIT_IMAGE_CMD,
+ &request, sizeof(request),
+ &scm_ret, sizeof(scm_ret));
+
+ dma_free_coherent(dev, size, mdata_buf, mdata_phys);
+
+ return ret ? : scm_ret;
+}
+
+int __qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
+{
+ u32 scm_ret;
+ int ret;
+ struct pas_init_image_req {
+ u32 proc;
+ u32 addr;
+ u32 len;
+ } request;
+
+ request.proc = peripheral;
+ request.addr = addr;
+ request.len = size;
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MEM_SETUP_CMD,
+ &request, sizeof(request),
+ &scm_ret, sizeof(scm_ret));
+
+ return ret ? : scm_ret;
+}
+
+int __qcom_scm_pas_auth_and_reset(u32 peripheral)
+{
+ u32 scm_ret;
+ int ret;
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
+ &peripheral, sizeof(peripheral),
+ &scm_ret, sizeof(scm_ret));
+
+ return ret ? : scm_ret;
+}
+
+int __qcom_scm_pas_shutdown(u32 peripheral)
+{
+ u32 scm_ret;
+ int ret;
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_SHUTDOWN_CMD,
+ &peripheral, sizeof(peripheral),
+ &scm_ret, sizeof(scm_ret));
+
+ return ret ? : scm_ret;
+}
+
+
+int __qcom_scm_pil_init_image_cmd(u32 proc, u64 image_addr)
+{
+ int ret;
+ u32 scm_ret = 0;
+ struct {
+ u32 proc;
+ u32 image_addr;
+ } req;
+
+ req.proc = proc;
+ req.image_addr = image_addr;
+
+ ret = qcom_scm_call(SCM_SVC_PIL, PAS_INIT_IMAGE_CMD, &req,
+ sizeof(req), &scm_ret, sizeof(scm_ret));
+ if (ret)
+ return ret;
+
+ return scm_ret;
+}
+
+int __qcom_scm_pil_mem_setup_cmd(u32 proc, u64 start_addr, u32 len)
+{
+ u32 scm_ret = 0;
+ int ret;
+ struct {
+ u32 proc;
+ u32 start_addr;
+ u32 len;
+ } req;
+
+ req.proc = proc;
+ req.start_addr = start_addr;
+ req.len = len;
+
+ ret = qcom_scm_call(SCM_SVC_PIL, PAS_MEM_SETUP_CMD, &req,
+ sizeof(req), &scm_ret, sizeof(scm_ret));
+ if (ret)
+ return ret;
+
+ return scm_ret;
+}
+
+int __qcom_scm_pil_auth_and_reset_cmd(u32 proc)
+{
+ u32 scm_ret = 0;
+ int ret;
+ u32 req;
+
+ req = proc;
+
+ ret = qcom_scm_call(SCM_SVC_PIL, PAS_AUTH_AND_RESET_CMD, &req,
+ sizeof(req), &scm_ret, sizeof(scm_ret));
+ if (ret)
+ return ret;
+
+ return scm_ret;
+}
+
+int __qcom_scm_pil_shutdown_cmd(u32 proc)
+{
+ u32 scm_ret = 0;
+ int ret;
+ u32 req;
+
+ req = proc;
+
+ ret = qcom_scm_call(SCM_SVC_PIL, PAS_SHUTDOWN_CMD, &req,
+ sizeof(req), &scm_ret, sizeof(scm_ret));
+ if (ret)
+ return ret;
+
+ return scm_ret;
+}
+
+#define SCM_SVC_UTIL 0x3
+#define SCM_SVC_MP 0xc
+#define IOMMU_DUMP_SMMU_FAULT_REGS 0x0c
+
+int __qcom_scm_iommu_dump_fault_regs(u32 id, u32 context, u64 addr, u32 len)
+{
+ struct {
+ u32 id;
+ u32 cb_num;
+ u32 buff;
+ u32 len;
+ } req;
+ int resp = 0;
+
+ req.id = id;
+ req.cb_num = context;
+ req.buff = addr;
+ req.len = len;
+
+ return qcom_scm_call(SCM_SVC_UTIL, IOMMU_DUMP_SMMU_FAULT_REGS,
+ &req, sizeof(req), &resp, sizeof(resp));
+}
+
+int __qcom_scm_iommu_set_cp_pool_size(u32 size, u32 spare)
+{
+ struct {
+ u32 size;
+ u32 spare;
+ } req;
+ int retval;
+
+ req.size = size;
+ req.spare = spare;
+
+ return qcom_scm_call(SCM_SVC_MP, IOMMU_SET_CP_POOL_SIZE,
+ &req, sizeof(req), &retval, sizeof(retval));
+}
+
+int __qcom_scm_iommu_secure_ptbl_size(u32 spare, int psize[2])
+{
+ struct {
+ u32 spare;
+ } req;
+
+ req.spare = spare;
+
+ return qcom_scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_SIZE, &req,
+ sizeof(req), psize, 2 * sizeof(*psize));
+}
+
+int __qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
+{
+ struct {
+ u32 addr;
+ u32 size;
+ u32 spare;
+ } req = {0};
+ int ret, ptbl_ret = 0;
+
+ req.addr = addr;
+ req.size = size;
+ req.spare = spare;
+
+ ret = qcom_scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_INIT, &req,
+ sizeof(req), &ptbl_ret, sizeof(ptbl_ret));
+
+ if (ret)
+ return ret;
+
+ if (ptbl_ret)
+ return ptbl_ret;
+
+ return 0;
+}
+
+int __qcom_scm_iommu_secure_map(u64 list, u32 list_size, u32 size,
+ u32 id, u32 ctx_id, u64 va, u32 info_size,
+ u32 flags)
+{
+ struct {
+ struct {
+ unsigned int list;
+ unsigned int list_size;
+ unsigned int size;
+ } plist;
+ struct {
+ unsigned int id;
+ unsigned int ctx_id;
+ unsigned int va;
+ unsigned int size;
+ } info;
+ unsigned int flags;
+ } req;
+ u32 resp;
+ int ret;
+
+ req.plist.list = list;
+ req.plist.list_size = list_size;
+ req.plist.size = size;
+ req.info.id = id;
+ req.info.ctx_id = ctx_id;
+ req.info.va = va;
+ req.info.size = info_size;
+ req.flags = flags;
+
+ ret = qcom_scm_call(SCM_SVC_MP, IOMMU_SECURE_MAP2, &req, sizeof(req),
+ &resp, sizeof(resp));
+
+ if (ret || resp)
+ return -EINVAL;
+
+ return 0;
+}
+
+int __qcom_scm_iommu_secure_unmap(u32 id, u32 ctx_id, u64 va,
+ u32 size, u32 flags)
+{
+ struct {
+ struct {
+ unsigned int id;
+ unsigned int ctx_id;
+ unsigned int va;
+ unsigned int size;
+ } info;
+ unsigned int flags;
+ } req;
+ int scm_ret;
+
+ req.info.id = id;
+ req.info.ctx_id = ctx_id;
+ req.info.va = va;
+ req.info.size = size;
+ req.flags = flags;
+
+ return qcom_scm_call(SCM_SVC_MP, IOMMU_SECURE_UNMAP2, &req,
+ sizeof(req), &scm_ret, sizeof(scm_ret));
+}
+
+int __qcom_scm_get_feat_version(u32 feat)
+{
+ if (__qcom_scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD)) {
+ u32 version;
+
+ if (!qcom_scm_call(SCM_SVC_INFO, GET_FEAT_VERSION_CMD, &feat,
+ sizeof(feat), &version, sizeof(version)))
+ return version;
+ }
+
+ return 0;
+}
+
+#define RESTORE_SEC_CFG 2
+int __qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
+{
+ struct {
+ u32 device_id;
+ u32 spare;
+ } req;
+ int ret, scm_ret = 0;
+
+ req.device_id = device_id;
+ req.spare = spare;
+
+ ret = qcom_scm_call(SCM_SVC_MP, RESTORE_SEC_CFG, &req, sizeof(req),
+ &scm_ret, sizeof(scm_ret));
+ if (ret || scm_ret)
+ return ret ? ret : -EINVAL;
+
+ return 0;
+}
+
+#define TZBSP_VIDEO_SET_STATE 0xa
+int __qcom_scm_set_video_state(u32 state, u32 spare)
+{
+ struct {
+ u32 state;
+ u32 spare;
+ } req;
+ int scm_ret = 0;
+ int ret;
+
+ req.state = state;
+ req.spare = spare;
+
+ ret = qcom_scm_call(SCM_SVC_BOOT, TZBSP_VIDEO_SET_STATE, &req,
+ sizeof(req), &scm_ret, sizeof(scm_ret));
+ if (ret || scm_ret)
+ return ret ? ret : -EINVAL;
+
+ return 0;
+}
+
+#define TZBSP_MEM_PROTECT_VIDEO_VAR 0x8
+
+int __qcom_scm_mem_protect_video_var(u32 start, u32 size, u32 nonpixel_start,
+ u32 nonpixel_size)
+{
+ struct {
+ u32 cp_start;
+ u32 cp_size;
+ u32 cp_nonpixel_start;
+ u32 cp_nonpixel_size;
+ } req;
+ int ret, scm_ret;
+
+ req.cp_start = start;
+ req.cp_size = size;
+ req.cp_nonpixel_start = nonpixel_start;
+ req.cp_nonpixel_size = nonpixel_size;
+
+ ret = qcom_scm_call(SCM_SVC_MP, TZBSP_MEM_PROTECT_VIDEO_VAR, &req,
+ sizeof(req), &scm_ret, sizeof(scm_ret));
+
+ if (ret || scm_ret)
+ return ret ? ret : -EINVAL;
+
+ return 0;
+}
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
index bb6555f6d63b..b3a9784c15ef 100644
--- a/drivers/firmware/qcom_scm-64.c
+++ b/drivers/firmware/qcom_scm-64.c
@@ -8,56 +8,869 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
*/
-#include <linux/io.h>
-#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/types.h>
#include <linux/qcom_scm.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/cacheflush.h>
+#include <asm/compiler.h>
+#include <asm/smp_plat.h>
+
+#include "qcom_scm.h"
+
+#define QCOM_SCM_SIP_FNID(s, c) (((((s) & 0xFF) << 8) | ((c) & 0xFF)) | 0x02000000)
+
+#define MAX_QCOM_SCM_ARGS 10
+#define MAX_QCOM_SCM_RETS 3
+
+enum qcom_scm_arg_types {
+ QCOM_SCM_VAL,
+ QCOM_SCM_RO,
+ QCOM_SCM_RW,
+ QCOM_SCM_BUFVAL,
+};
+
+#define QCOM_SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\
+ (((a) & 0xff) << 4) | \
+ (((b) & 0xff) << 6) | \
+ (((c) & 0xff) << 8) | \
+ (((d) & 0xff) << 10) | \
+ (((e) & 0xff) << 12) | \
+ (((f) & 0xff) << 14) | \
+ (((g) & 0xff) << 16) | \
+ (((h) & 0xff) << 18) | \
+ (((i) & 0xff) << 20) | \
+ (((j) & 0xff) << 22) | \
+ (num & 0xffff))
+
+#define QCOM_SCM_ARGS(...) QCOM_SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
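To make the encoding concrete, QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW) expands as follows (derived directly from the macro above):

/*
 * QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW)
 *   = (QCOM_SCM_VAL << 4) | (QCOM_SCM_RW << 6) | 2
 *   = (0 << 4) | (2 << 6) | 2
 *   = 0x82
 * The argument count sits in the low bits (recovered later as "arginfo & 0xf")
 * and each argument's two-bit type field starts at bit 4.
 */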
/**
- * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
- * @entry: Entry point function for the cpus
- * @cpus: The cpumask of cpus that will use the entry point
- *
- * Set the cold boot address of the cpus. Any cpu outside the supported
- * range would be removed from the cpu present mask.
+ * struct qcom_scm_desc
+ * @arginfo: Metadata describing the arguments in args[]
+ * @args: The array of arguments for the secure syscall
+ * @ret: The values returned by the secure syscall
+ * @extra_arg_buf: The buffer containing extra arguments
+ * (that don't fit in available registers)
+ * @x5: The 4th argument to the secure syscall or physical address of
+ * extra_arg_buf
*/
-int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
+struct qcom_scm_desc {
+ u32 arginfo;
+ u64 args[MAX_QCOM_SCM_ARGS];
+ u64 ret[MAX_QCOM_SCM_RETS];
+
+ /* private */
+ void *extra_arg_buf;
+ u64 x5;
+};
+
+
+#define QCOM_SCM_ENOMEM -5
+#define QCOM_SCM_EOPNOTSUPP -4
+#define QCOM_SCM_EINVAL_ADDR -3
+#define QCOM_SCM_EINVAL_ARG -2
+#define QCOM_SCM_ERROR -1
+#define QCOM_SCM_INTERRUPTED 1
+#define QCOM_SCM_EBUSY -55
+#define QCOM_SCM_V2_EBUSY -12
+
+static DEFINE_MUTEX(qcom_scm_lock);
+
+#define QCOM_SCM_EBUSY_WAIT_MS 30
+#define QCOM_SCM_EBUSY_MAX_RETRY 20
+
+#define N_EXT_QCOM_SCM_ARGS 7
+#define FIRST_EXT_ARG_IDX 3
+#define SMC_ATOMIC_SYSCALL 31
+#define N_REGISTER_ARGS (MAX_QCOM_SCM_ARGS - N_EXT_QCOM_SCM_ARGS + 1)
+#define SMC64_MASK 0x40000000
+#define SMC_ATOMIC_MASK 0x80000000
+#define IS_CALL_AVAIL_CMD 1
+
+#define R0_STR "x0"
+#define R1_STR "x1"
+#define R2_STR "x2"
+#define R3_STR "x3"
+#define R4_STR "x4"
+#define R5_STR "x5"
+#define R6_STR "x6"
+
+
+int __qcom_scm_call_armv8_64(u64 x0, u64 x1, u64 x2, u64 x3, u64 x4, u64 x5,
+ u64 *ret1, u64 *ret2, u64 *ret3)
+{
+ register u64 r0 asm("r0") = x0;
+ register u64 r1 asm("r1") = x1;
+ register u64 r2 asm("r2") = x2;
+ register u64 r3 asm("r3") = x3;
+ register u64 r4 asm("r4") = x4;
+ register u64 r5 asm("r5") = x5;
+ register u64 r6 asm("r6") = 0;
+
+ do {
+ asm volatile(
+ __asmeq("%0", R0_STR)
+ __asmeq("%1", R1_STR)
+ __asmeq("%2", R2_STR)
+ __asmeq("%3", R3_STR)
+ __asmeq("%4", R0_STR)
+ __asmeq("%5", R1_STR)
+ __asmeq("%6", R2_STR)
+ __asmeq("%7", R3_STR)
+ __asmeq("%8", R4_STR)
+ __asmeq("%9", R5_STR)
+ __asmeq("%10", R6_STR)
+#ifdef REQUIRES_SEC
+ ".arch_extension sec\n"
+#endif
+ "smc #0\n"
+ : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
+ : "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4),
+ "r" (r5), "r" (r6)
+ : "x7", "x8", "x9", "x10", "x11", "x12", "x13",
+ "x14", "x15", "x16", "x17");
+ } while (r0 == QCOM_SCM_INTERRUPTED);
+
+ if (ret1)
+ *ret1 = r1;
+ if (ret2)
+ *ret2 = r2;
+ if (ret3)
+ *ret3 = r3;
+
+ return r0;
+}
+
+int __qcom_scm_call_armv8_32(u32 w0, u32 w1, u32 w2, u32 w3, u32 w4, u32 w5,
+ u64 *ret1, u64 *ret2, u64 *ret3)
{
- return -ENOTSUPP;
+ register u32 r0 asm("r0") = w0;
+ register u32 r1 asm("r1") = w1;
+ register u32 r2 asm("r2") = w2;
+ register u32 r3 asm("r3") = w3;
+ register u32 r4 asm("r4") = w4;
+ register u32 r5 asm("r5") = w5;
+ register u32 r6 asm("r6") = 0;
+
+ do {
+ asm volatile(
+ __asmeq("%0", R0_STR)
+ __asmeq("%1", R1_STR)
+ __asmeq("%2", R2_STR)
+ __asmeq("%3", R3_STR)
+ __asmeq("%4", R0_STR)
+ __asmeq("%5", R1_STR)
+ __asmeq("%6", R2_STR)
+ __asmeq("%7", R3_STR)
+ __asmeq("%8", R4_STR)
+ __asmeq("%9", R5_STR)
+ __asmeq("%10", R6_STR)
+#ifdef REQUIRES_SEC
+ ".arch_extension sec\n"
+#endif
+ "smc #0\n"
+ : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
+ : "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4),
+ "r" (r5), "r" (r6)
+ : "x7", "x8", "x9", "x10", "x11", "x12", "x13",
+ "x14", "x15", "x16", "x17");
+
+ } while (r0 == QCOM_SCM_INTERRUPTED);
+
+ if (ret1)
+ *ret1 = r1;
+ if (ret2)
+ *ret2 = r2;
+ if (ret3)
+ *ret3 = r3;
+
+ return r0;
+}
+
+struct qcom_scm_extra_arg {
+ union {
+ u32 args32[N_EXT_QCOM_SCM_ARGS];
+ u64 args64[N_EXT_QCOM_SCM_ARGS];
+ };
+};
+
+static enum qcom_scm_interface_version {
+ QCOM_SCM_UNKNOWN,
+ QCOM_SCM_LEGACY,
+ QCOM_SCM_ARMV8_32,
+ QCOM_SCM_ARMV8_64,
+} qcom_scm_version = QCOM_SCM_UNKNOWN;
+
+/* This will be set to specify SMC32 or SMC64 */
+static u32 qcom_scm_version_mask;
+
+/*
+ * If there are more than N_REGISTER_ARGS, allocate a buffer and place
+ * the additional arguments in it. The extra argument buffer will be
+ * pointed to by X5.
+ */
+static int allocate_extra_arg_buffer(struct qcom_scm_desc *desc, gfp_t flags)
+{
+ int i, j;
+ struct qcom_scm_extra_arg *argbuf;
+ int arglen = desc->arginfo & 0xf;
+ size_t argbuflen = PAGE_ALIGN(sizeof(struct qcom_scm_extra_arg));
+
+ desc->x5 = desc->args[FIRST_EXT_ARG_IDX];
+
+ if (likely(arglen <= N_REGISTER_ARGS)) {
+ desc->extra_arg_buf = NULL;
+ return 0;
+ }
+
+ argbuf = kzalloc(argbuflen, flags);
+ if (!argbuf) {
+ pr_err("qcom_scm_call: failed to alloc mem for extended argument buffer\n");
+ return -ENOMEM;
+ }
+
+ desc->extra_arg_buf = argbuf;
+
+ j = FIRST_EXT_ARG_IDX;
+ if (qcom_scm_version == QCOM_SCM_ARMV8_64)
+ for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++)
+ argbuf->args64[i] = desc->args[j++];
+ else
+ for (i = 0; i < N_EXT_QCOM_SCM_ARGS; i++)
+ argbuf->args32[i] = desc->args[j++];
+ desc->x5 = virt_to_phys(argbuf);
+ __flush_dcache_area(argbuf, argbuflen);
+
+ return 0;
}
/**
- * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
- * @entry: Entry point function for the cpus
- * @cpus: The cpumask of cpus that will use the entry point
+ * qcom_scm_call() - Invoke a syscall in the secure world
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @desc: Descriptor structure containing arguments and return values
*
- * Set the Linux entry point for the SCM to transfer control to when coming
- * out of a power down. CPU power down may be executed on cpuidle or hotplug.
- */
-int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This should *only* be called in pre-emptible context.
+ *
+ * A note on cache maintenance:
+ * Note that any buffers that are expected to be accessed by the secure world
+ * must be flushed before invoking qcom_scm_call and invalidated in the cache
+ * immediately after qcom_scm_call returns. Note that on ARMv8, invalidating a
+ * dirty cache line also cleans it (writes it back and clears the dirty bit), so
+ * the buffer must be flushed before invoking qcom_scm_call even if you don't
+ * care about its contents.
+ *
+ * Note that cache maintenance on the argument buffer (desc->args) is taken care
+ * of by qcom_scm_call; however, callers are responsible for any other cached
+ * buffers passed over to the secure world.
+ */
+static int qcom_scm_call(u32 svc_id, u32 cmd_id, struct qcom_scm_desc *desc)
{
- return -ENOTSUPP;
+ int arglen = desc->arginfo & 0xf;
+ int ret, retry_count = 0;
+ u32 fn_id = QCOM_SCM_SIP_FNID(svc_id, cmd_id);
+ u64 x0;
+
+ ret = allocate_extra_arg_buffer(desc, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ x0 = fn_id | qcom_scm_version_mask;
+
+ do {
+ mutex_lock(&qcom_scm_lock);
+
+ desc->ret[0] = desc->ret[1] = desc->ret[2] = 0;
+
+ pr_debug("qcom_scm_call: func id %#llx, args: %#x, %#llx, %#llx, %#llx, %#llx\n",
+ x0, desc->arginfo, desc->args[0], desc->args[1],
+ desc->args[2], desc->x5);
+
+ if (qcom_scm_version == QCOM_SCM_ARMV8_64)
+ ret = __qcom_scm_call_armv8_64(x0, desc->arginfo,
+ desc->args[0], desc->args[1],
+ desc->args[2], desc->x5,
+ &desc->ret[0], &desc->ret[1],
+ &desc->ret[2]);
+ else
+ ret = __qcom_scm_call_armv8_32(x0, desc->arginfo,
+ desc->args[0], desc->args[1],
+ desc->args[2], desc->x5,
+ &desc->ret[0], &desc->ret[1],
+ &desc->ret[2]);
+ mutex_unlock(&qcom_scm_lock);
+
+ if (ret == QCOM_SCM_V2_EBUSY)
+ msleep(QCOM_SCM_EBUSY_WAIT_MS);
+ } while (ret == QCOM_SCM_V2_EBUSY && (retry_count++ < QCOM_SCM_EBUSY_MAX_RETRY));
+
+ if (ret < 0)
+ pr_err("qcom_scm_call failed: func id %#llx, arginfo: %#x, args: %#llx, %#llx, %#llx, %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
+ x0, desc->arginfo, desc->args[0], desc->args[1],
+ desc->args[2], desc->x5, ret, desc->ret[0],
+ desc->ret[1], desc->ret[2]);
+
+ if (arglen > N_REGISTER_ARGS)
+ kfree(desc->extra_arg_buf);
+ if (ret < 0)
+ return qcom_scm_remap_error(ret);
+ return 0;
}
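The cache-maintenance note above is easiest to see from the caller's side. The sketch below assumes a hypothetical command that takes a physical buffer address; the service/command IDs and the fill_request() helper are placeholders, not part of this patch:

	/* Sketch only: passing a cached buffer to a hypothetical secure command */
	struct qcom_scm_desc desc = {0};
	void *buf;
	int ret;

	buf = kzalloc(SZ_4K, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	fill_request(buf);			/* placeholder for building the request */
	__flush_dcache_area(buf, SZ_4K);	/* clean to PoC before the SMC */

	desc.args[0] = virt_to_phys(buf);
	desc.args[1] = SZ_4K;
	desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_RW, QCOM_SCM_VAL);

	ret = qcom_scm_call(EXAMPLE_SVC_ID, EXAMPLE_CMD_ID, &desc);
	/* any data the secure world wrote back must be invalidated before the CPU reads it */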
/**
- * qcom_scm_cpu_power_down() - Power down the cpu
- * @flags - Flags to flush cache
+ * qcom_scm_call_atomic() - Invoke a syscall in the secure world
*
- * This is an end point to power down cpu. If there was a pending interrupt,
- * the control would return from this function, otherwise, the cpu jumps to the
- * warm boot entry point set for this cpu upon reset.
+ * Similar to qcom_scm_call except that this can be invoked in atomic context.
+ * There is also no retry mechanism implemented. Please ensure that the
+ * secure world syscall can be executed in such a context and can complete
+ * in a timely manner.
*/
+static int qcom_scm_call_atomic(u32 s, u32 c, struct qcom_scm_desc *desc)
+{
+ int arglen = desc->arginfo & 0xf;
+ int ret;
+ u32 fn_id = QCOM_SCM_SIP_FNID(s, c);
+ u64 x0;
+
+ ret = allocate_extra_arg_buffer(desc, GFP_ATOMIC);
+ if (ret)
+ return ret;
+
+ x0 = fn_id | BIT(SMC_ATOMIC_SYSCALL) | qcom_scm_version_mask;
+
+ pr_debug("qcom_scm_call: func id %#llx, args: %#x, %#llx, %#llx, %#llx, %#llx\n",
+ x0, desc->arginfo, desc->args[0], desc->args[1],
+ desc->args[2], desc->x5);
+
+ if (qcom_scm_version == QCOM_SCM_ARMV8_64)
+ ret = __qcom_scm_call_armv8_64(x0, desc->arginfo, desc->args[0],
+ desc->args[1], desc->args[2],
+ desc->x5, &desc->ret[0],
+ &desc->ret[1], &desc->ret[2]);
+ else
+ ret = __qcom_scm_call_armv8_32(x0, desc->arginfo, desc->args[0],
+ desc->args[1], desc->args[2],
+ desc->x5, &desc->ret[0],
+ &desc->ret[1], &desc->ret[2]);
+ if (ret < 0)
+ pr_err("qcom_scm_call failed: func id %#llx, arginfo: %#x, args: %#llx, %#llx, %#llx, %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
+ x0, desc->arginfo, desc->args[0], desc->args[1],
+ desc->args[2], desc->x5, ret, desc->ret[0],
+ desc->ret[1], desc->ret[2]);
+
+ if (arglen > N_REGISTER_ARGS)
+ kfree(desc->extra_arg_buf);
+ if (ret < 0)
+ return qcom_scm_remap_error(ret);
+ return ret;
+}
+
+static int qcom_scm_set_boot_addr(void *entry, const cpumask_t *cpus, int flags)
+{
+ struct qcom_scm_desc desc = {0};
+ unsigned int cpu = cpumask_first(cpus);
+ u64 mpidr_el1 = cpu_logical_map(cpu);
+
+ /* For now we assume only a single cpu is set in the mask */
+ WARN_ON(cpumask_weight(cpus) != 1);
+
+ if (mpidr_el1 & ~MPIDR_HWID_BITMASK) {
+ pr_err("CPU%d:Failed to set boot address\n", cpu);
+ return -ENOSYS;
+ }
+
+ desc.args[0] = virt_to_phys(entry);
+ desc.args[1] = BIT(MPIDR_AFFINITY_LEVEL(mpidr_el1, 0));
+ desc.args[2] = BIT(MPIDR_AFFINITY_LEVEL(mpidr_el1, 1));
+ desc.args[3] = BIT(MPIDR_AFFINITY_LEVEL(mpidr_el1, 2));
+ desc.args[4] = ~0ULL;
+ desc.args[5] = QCOM_SCM_FLAG_HLOS | flags;
+ desc.arginfo = QCOM_SCM_ARGS(6);
+
+ return qcom_scm_call(QCOM_SCM_SVC_BOOT, QCOM_SCM_BOOT_ADDR_MC, &desc);
+}
+
+int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
+{
+ int flags = QCOM_SCM_FLAG_COLDBOOT_MC;
+
+ return qcom_scm_set_boot_addr(entry, cpus, flags);
+}
+
+int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
+{
+ int flags = QCOM_SCM_FLAG_WARMBOOT_MC;
+
+ return qcom_scm_set_boot_addr(entry, cpus, flags);
+}
+
void __qcom_scm_cpu_power_down(u32 flags)
{
+ struct qcom_scm_desc desc = {0};
+ desc.args[0] = QCOM_SCM_CMD_CORE_HOTPLUGGED |
+ (flags & QCOM_SCM_FLUSH_FLAG_MASK);
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ qcom_scm_call_atomic(QCOM_SCM_SVC_BOOT, QCOM_SCM_CMD_TERMINATE_PC, &desc);
}
int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id)
{
- return -ENOTSUPP;
+ int ret;
+ struct qcom_scm_desc desc = {0};
+
+ desc.arginfo = QCOM_SCM_ARGS(1);
+ desc.args[0] = QCOM_SCM_SIP_FNID(svc_id, cmd_id);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD, &desc);
+
+ if (ret)
+ return ret;
+
+ return desc.ret[0];
}
int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
{
- return -ENOTSUPP;
+ int ret;
+ struct qcom_scm_desc desc = {0};
+
+ if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
+ return -ERANGE;
+
+ desc.args[0] = req[0].addr;
+ desc.args[1] = req[0].val;
+ desc.args[2] = req[1].addr;
+ desc.args[3] = req[1].val;
+ desc.args[4] = req[2].addr;
+ desc.args[5] = req[2].val;
+ desc.args[6] = req[3].addr;
+ desc.args[7] = req[3].val;
+ desc.args[8] = req[4].addr;
+ desc.args[9] = req[4].val;
+ desc.arginfo = QCOM_SCM_ARGS(10);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP, &desc);
+ *resp = desc.ret[0];
+
+ return ret;
+}
+
+bool __qcom_scm_pas_supported(u32 peripheral)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+
+ desc.args[0] = peripheral;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_IS_SUPPORTED_CMD,
+ &desc);
+
+ return ret ? false : !!desc.ret[0];
+}
+
+int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral, const void *metadata, size_t size)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ u32 scm_ret;
+ dma_addr_t mdata_phys;
+ void *mdata_buf;
+
+ dev->coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+
+ /*
+ * During the scm call memory protection will be enabled for the meta
+ * data blob, so make sure it's physically contiguous, 4K aligned and
+ * non-cachable to avoid XPU violations.
+ */
+ mdata_buf = dma_alloc_coherent(dev, size, &mdata_phys, GFP_KERNEL);
+ if (!mdata_buf) {
+ pr_err("Allocation of metadata buffer failed.\n");
+ return -ENOMEM;
+ }
+ memcpy(mdata_buf, metadata, size);
+
+ desc.args[0] = peripheral;
+ desc.args[1] = mdata_phys;
+ desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_INIT_IMAGE_CMD,
+ &desc);
+ scm_ret = desc.ret[0];
+
+ dma_free_coherent(dev, size, mdata_buf, mdata_phys);
+ return ret ? : scm_ret;
+}
+
+int __qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ u32 scm_ret;
+
+ desc.args[0] = peripheral;
+ desc.args[1] = addr;
+ desc.args[2] = size;
+ desc.arginfo = QCOM_SCM_ARGS(3);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MEM_SETUP_CMD,
+ &desc);
+ scm_ret = desc.ret[0];
+
+ return ret ? : scm_ret;
+}
+
+int __qcom_scm_pas_auth_and_reset(u32 peripheral)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ u32 scm_ret;
+
+ desc.args[0] = peripheral;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
+ &desc);
+ scm_ret = desc.ret[0];
+
+ return ret ? : scm_ret;
+}
+
+int __qcom_scm_pas_shutdown(u32 peripheral)
+{
+ int ret;
+ struct qcom_scm_desc desc = {0};
+ u32 scm_ret;
+
+ desc.args[0] = peripheral;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_SHUTDOWN_CMD,
+ &desc);
+ scm_ret = desc.ret[0];
+
+ return ret ? : scm_ret;
+}
+
+int __qcom_scm_pil_init_image_cmd(u32 proc, u64 image_addr)
+{
+ struct qcom_scm_desc desc = {0};
+ int ret, scm_ret;
+
+ desc.args[0] = proc;
+ desc.args[1] = image_addr;
+ desc.arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, PAS_INIT_IMAGE_CMD, &desc);
+ scm_ret = desc.ret[0];
+
+ if (ret)
+ return ret;
+
+ return scm_ret;
+}
+
+int __qcom_scm_pil_mem_setup_cmd(u32 proc, u64 start_addr, u32 len)
+{
+ struct qcom_scm_desc desc = {0};
+ int ret, scm_ret;
+
+ desc.args[0] = proc;
+ desc.args[1] = start_addr;
+ desc.args[2] = len;
+ desc.arginfo = QCOM_SCM_ARGS(3);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, PAS_MEM_SETUP_CMD, &desc);
+ scm_ret = desc.ret[0];
+
+ if (ret)
+ return ret;
+
+ return scm_ret;
+}
+
+int __qcom_scm_pil_auth_and_reset_cmd(u32 proc)
+{
+ struct qcom_scm_desc desc = {0};
+ int ret, scm_ret;
+
+ desc.args[0] = proc;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, PAS_AUTH_AND_RESET_CMD, &desc);
+ scm_ret = desc.ret[0];
+
+ if (ret)
+ return ret;
+
+ return scm_ret;
+}
+
+int __qcom_scm_pil_shutdown_cmd(u32 proc)
+{
+ struct qcom_scm_desc desc = {0};
+ int ret, scm_ret;
+
+ desc.args[0] = proc;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(QCOM_SCM_SVC_PIL, PAS_SHUTDOWN_CMD, &desc);
+ scm_ret = desc.ret[0];
+
+ if (ret)
+ return ret;
+
+ return scm_ret;
+}
+
+#define SCM_SVC_UTIL 0x3
+#define SCM_SVC_MP 0xc
+#define IOMMU_DUMP_SMMU_FAULT_REGS 0xc
+
+#define IOMMU_SECURE_PTBL_SIZE 3
+#define IOMMU_SECURE_PTBL_INIT 4
+#define IOMMU_SET_CP_POOL_SIZE 5
+#define IOMMU_SECURE_MAP 6
+#define IOMMU_SECURE_UNMAP 7
+#define IOMMU_SECURE_MAP2 0xb
+#define IOMMU_SECURE_MAP2_FLAT 0x12
+#define IOMMU_SECURE_UNMAP2 0xc
+#define IOMMU_SECURE_UNMAP2_FLAT 0x13
+
+int __qcom_scm_iommu_dump_fault_regs(u32 id, u32 context, u64 addr, u32 len)
+{
+ struct qcom_scm_desc desc = {0};
+
+ desc.args[0] = id;
+ desc.args[1] = context;
+ desc.args[2] = addr;
+ desc.args[3] = len;
+ desc.arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL, QCOM_SCM_RW, QCOM_SCM_VAL);
+
+ return qcom_scm_call(SCM_SVC_UTIL, IOMMU_DUMP_SMMU_FAULT_REGS, &desc);
+}
+
+int __qcom_scm_iommu_set_cp_pool_size(u32 size, u32 spare)
+{
+ struct qcom_scm_desc desc = {0};
+
+ desc.args[0] = size;
+ desc.args[1] = spare;
+ desc.arginfo = QCOM_SCM_ARGS(2);
+
+ return qcom_scm_call(SCM_SVC_MP, IOMMU_SET_CP_POOL_SIZE, &desc);
+}
+
+int __qcom_scm_iommu_secure_ptbl_size(u32 spare, int psize[2])
+{
+ struct qcom_scm_desc desc = {0};
+ int ret;
+ desc.args[0] = spare;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_SIZE, &desc);
+
+ psize[0] = desc.ret[0];
+ psize[1] = desc.ret[1];
+
+ return ret;
+}
+
+int __qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
+{
+ struct qcom_scm_desc desc = {0};
+ int ret;
+ u64 ptbl_ret;
+
+ desc.args[0] = addr;
+ desc.args[1] = size;
+ desc.args[2] = spare;
+ desc.arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL, QCOM_SCM_VAL);
+
+ ret = qcom_scm_call(SCM_SVC_MP, IOMMU_SECURE_PTBL_INIT, &desc);
+
+ ptbl_ret = desc.ret[0];
+
+ if (ret)
+ return ret;
+
+ if (ptbl_ret)
+ return ptbl_ret;
+
+ return 0;
+}
+
+int __qcom_scm_iommu_secure_map(u64 list, u32 list_size, u32 size,
+ u32 id, u32 ctx_id, u64 va, u32 info_size,
+ u32 flags)
+{
+ struct qcom_scm_desc desc = {0};
+ u32 resp;
+ int ret;
+
+ desc.args[0] = list;
+ desc.args[1] = list_size;
+ desc.args[2] = size;
+ desc.args[3] = id;
+ desc.args[4] = ctx_id;
+ desc.args[5] = va;
+ desc.args[6] = info_size;
+ desc.args[7] = flags;
+ desc.arginfo = QCOM_SCM_ARGS(8, QCOM_SCM_RW, QCOM_SCM_VAL, QCOM_SCM_VAL, QCOM_SCM_VAL,
+ QCOM_SCM_VAL, QCOM_SCM_VAL, QCOM_SCM_VAL, QCOM_SCM_VAL);
+
+ ret = qcom_scm_call(SCM_SVC_MP, IOMMU_SECURE_MAP2_FLAT, &desc);
+
+ resp = desc.ret[0];
+
+ if (ret || resp)
+ return -EINVAL;
+
+ return 0;
+}
+
+int __qcom_scm_iommu_secure_unmap(u32 id, u32 ctx_id, u64 va,
+ u32 size, u32 flags)
+{
+ struct qcom_scm_desc desc = {0};
+
+ desc.args[0] = id;
+ desc.args[1] = ctx_id;
+ desc.args[2] = va;
+ desc.args[3] = size;
+ desc.args[4] = flags;
+ desc.arginfo = QCOM_SCM_ARGS(5);
+
+ return qcom_scm_call(SCM_SVC_MP, IOMMU_SECURE_UNMAP2_FLAT, &desc);
+}
+
+int __qcom_scm_get_feat_version(u32 feat)
+{
+ struct qcom_scm_desc desc = {0};
+ int ret;
+
+ ret = __qcom_scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD);
+ if (ret <= 0)
+ return 0;
+
+ desc.args[0] = feat;
+ desc.arginfo = QCOM_SCM_ARGS(1);
+
+ ret = qcom_scm_call(SCM_SVC_INFO, GET_FEAT_VERSION_CMD, &desc);
+ if (!ret)
+ return desc.ret[0];
+
+ return 0;
+}
+
+#define RESTORE_SEC_CFG 2
+int __qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
+{
+ struct qcom_scm_desc desc = {0};
+ int ret, scm_ret = 0;
+
+ desc.args[0] = device_id;
+ desc.args[1] = spare;
+ desc.arginfo = QCOM_SCM_ARGS(2);
+
+ ret = qcom_scm_call(SCM_SVC_MP, RESTORE_SEC_CFG, &desc);
+
+ scm_ret = desc.ret[0];
+
+ if (ret || scm_ret)
+ return ret ? ret : -EINVAL;
+
+ return 0;
+}
+
+#define TZBSP_VIDEO_SET_STATE 0xa
+int __qcom_scm_set_video_state(u32 state, u32 spare)
+{
+ struct qcom_scm_desc desc = {0};
+ int ret, scm_ret;
+
+ desc.args[0] = state;
+ desc.args[1] = spare;
+ desc.arginfo = QCOM_SCM_ARGS(2);
+
+ ret = qcom_scm_call(SCM_SVC_BOOT, TZBSP_VIDEO_SET_STATE, &desc);
+
+ scm_ret = desc.ret[0];
+
+ if (ret || scm_ret)
+ return ret ? ret : -EINVAL;
+
+ return 0;
+}
+
+#define TZBSP_MEM_PROTECT_VIDEO_VAR 0x8
+
+int __qcom_scm_mem_protect_video_var(u32 start, u32 size, u32 nonpixel_start,
+ u32 nonpixel_size)
+{
+ struct qcom_scm_desc desc = {0};
+ int ret, scm_ret;
+
+ desc.args[0] = start;
+ desc.args[1] = size;
+ desc.args[2] = nonpixel_start;
+ desc.args[3] = nonpixel_size;
+ desc.arginfo = QCOM_SCM_ARGS(4);
+
+ ret = qcom_scm_call(SCM_SVC_MP, TZBSP_MEM_PROTECT_VIDEO_VAR, &desc);
+
+ scm_ret = desc.ret[0];
+
+ if (ret || scm_ret)
+ return ret ? ret : -EINVAL;
+
+ return 0;
+}
+
+#define QCOM_SCM_SVC_INFO 0x6
+static int __init qcom_scm_init(void)
+{
+ int ret;
+ u64 ret1 = 0, x0;
+
+ /* First try a SMC64 call */
+ qcom_scm_version = QCOM_SCM_ARMV8_64;
+ x0 = QCOM_SCM_SIP_FNID(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD) | SMC_ATOMIC_MASK;
+ ret = __qcom_scm_call_armv8_64(x0 | SMC64_MASK, QCOM_SCM_ARGS(1), x0, 0, 0, 0,
+ &ret1, NULL, NULL);
+ if (ret || !ret1) {
+ /* Try SMC32 call */
+ ret1 = 0;
+ ret = __qcom_scm_call_armv8_32(x0, QCOM_SCM_ARGS(1), x0, 0, 0,
+ 0, &ret1, NULL, NULL);
+ if (ret || !ret1)
+ qcom_scm_version = QCOM_SCM_LEGACY;
+ else
+ qcom_scm_version = QCOM_SCM_ARMV8_32;
+ } else
+ qcom_scm_version_mask = SMC64_MASK;
+
+ pr_debug("qcom_scm_call: qcom_scm version is %x, mask is %x\n",
+ qcom_scm_version, qcom_scm_version_mask);
+
+ return 0;
}
+early_initcall(qcom_scm_init);
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 45c008d68891..5fff0858f7ba 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -16,6 +16,7 @@
* 02110-1301, USA.
*/
+#include <linux/platform_device.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/types.h>
@@ -94,3 +95,177 @@ int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
return __qcom_scm_hdcp_req(req, req_cnt, resp);
}
EXPORT_SYMBOL(qcom_scm_hdcp_req);
+
+/**
+ * qcom_scm_pas_supported() - Check if the peripheral authentication service is
+ * available for the given peripheral
+ * @peripheral: peripheral id
+ *
+ * Returns true if PAS is supported for this peripheral, otherwise false.
+ */
+bool qcom_scm_pas_supported(u32 peripheral)
+{
+ int ret;
+
+ ret = __qcom_scm_is_call_available(QCOM_SCM_SVC_PIL,
+ QCOM_SCM_PAS_IS_SUPPORTED_CMD);
+ if (ret <= 0)
+ return false;
+
+ return __qcom_scm_pas_supported(peripheral);
+}
+EXPORT_SYMBOL(qcom_scm_pas_supported);
+
+/**
+ * qcom_scm_pas_init_image() - Initialize peripheral authentication service
+ * state machine for a given peripheral, using the
+ * metadata
+ * @peripheral: peripheral id
+ * @metadata: pointer to memory containing ELF header, program header table
+ * and optional blob of data used for authenticating the metadata
+ * and the rest of the firmware
+ * @size: size of the metadata
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_init_image(struct device *dev, u32 peripheral, const void *metadata, size_t size)
+{
+ return __qcom_scm_pas_init_image(dev, peripheral, metadata, size);
+}
+EXPORT_SYMBOL(qcom_scm_pas_init_image);
+
+/**
+ * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
+ * for firmware loading
+ * @peripheral: peripheral id
+ * @addr: start address of memory area to prepare
+ * @size: size of the memory area to prepare
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
+{
+ return __qcom_scm_pas_mem_setup(peripheral, addr, size);
+}
+EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
+
+/**
+ * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
+ * and reset the remote processor
+ * @peripheral: peripheral id
+ *
+ * Return 0 on success.
+ */
+int qcom_scm_pas_auth_and_reset(u32 peripheral)
+{
+ return __qcom_scm_pas_auth_and_reset(peripheral);
+}
+EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
+
+/**
+ * qcom_scm_pas_shutdown() - Shut down the remote processor
+ * @peripheral: peripheral id
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_shutdown(u32 peripheral)
+{
+ return __qcom_scm_pas_shutdown(peripheral);
+}
+EXPORT_SYMBOL(qcom_scm_pas_shutdown);
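Taken together, these wrappers form the usual peripheral-authentication sequence used when booting a remote processor. A condensed sketch (error handling trimmed; peripheral, fw, mem_phys and mem_size are whatever the caller loaded and reserved, not values defined by this patch):

	/* Sketch of the PAS boot flow (illustrative only) */
	if (!qcom_scm_pas_supported(peripheral))
		return -ENOTSUPP;

	ret = qcom_scm_pas_init_image(dev, peripheral, fw->data, fw->size);	/* hand over the metadata blob */
	if (ret)
		return ret;

	ret = qcom_scm_pas_mem_setup(peripheral, mem_phys, mem_size);		/* describe the firmware region */
	if (ret)
		return ret;

	/* ... copy the firmware segments into the region ... */

	ret = qcom_scm_pas_auth_and_reset(peripheral);				/* authenticate and start it */

	/* and later, to stop the remote processor: */
	qcom_scm_pas_shutdown(peripheral);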
+
+int qcom_scm_pil_init_image_cmd(u32 proc, u64 image_addr)
+{
+ return __qcom_scm_pil_init_image_cmd(proc, image_addr);
+}
+EXPORT_SYMBOL(qcom_scm_pil_init_image_cmd);
+
+int qcom_scm_pil_mem_setup_cmd(u32 proc, u64 start_addr, u32 len)
+{
+ return __qcom_scm_pil_mem_setup_cmd(proc, start_addr, len);
+}
+EXPORT_SYMBOL(qcom_scm_pil_mem_setup_cmd);
+
+int qcom_scm_pil_auth_and_reset_cmd(u32 proc)
+{
+ return __qcom_scm_pil_auth_and_reset_cmd(proc);
+}
+EXPORT_SYMBOL(qcom_scm_pil_auth_and_reset_cmd);
+
+int qcom_scm_pil_shutdown_cmd(u32 proc)
+{
+ return __qcom_scm_pil_shutdown_cmd(proc);
+}
+EXPORT_SYMBOL(qcom_scm_pil_shutdown_cmd);
+
+int qcom_scm_iommu_dump_fault_regs(u32 id, u32 context, u64 addr, u32 len)
+{
+ return __qcom_scm_iommu_dump_fault_regs(id, context, addr, len);
+}
+EXPORT_SYMBOL(qcom_scm_iommu_dump_fault_regs);
+
+int qcom_scm_iommu_set_cp_pool_size(u32 size, u32 spare)
+{
+ return __qcom_scm_iommu_set_cp_pool_size(size, spare);
+}
+EXPORT_SYMBOL(qcom_scm_iommu_set_cp_pool_size);
+
+int qcom_scm_iommu_secure_ptbl_size(u32 spare, int psize[2])
+{
+ return __qcom_scm_iommu_secure_ptbl_size(spare, psize);
+}
+EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);
+
+int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
+{
+ return __qcom_scm_iommu_secure_ptbl_init(addr, size, spare);
+}
+EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
+
+int qcom_scm_iommu_secure_map(u64 list, u32 list_size, u32 size,
+ u32 id, u32 ctx_id, u64 va, u32 info_size,
+ u32 flags)
+{
+ return __qcom_scm_iommu_secure_map(list, list_size, size, id,
+ ctx_id, va, info_size, flags);
+}
+EXPORT_SYMBOL(qcom_scm_iommu_secure_map);
+
+int qcom_scm_iommu_secure_unmap(u32 id, u32 ctx_id, u64 va, u32 size, u32 flags)
+{
+ return __qcom_scm_iommu_secure_unmap(id, ctx_id, va, size, flags);
+}
+EXPORT_SYMBOL(qcom_scm_iommu_secure_unmap);
+
+int qcom_scm_is_call_available(u32 svc_id, u32 cmd_id)
+{
+ return __qcom_scm_is_call_available(svc_id, cmd_id);
+}
+EXPORT_SYMBOL(qcom_scm_is_call_available);
+
+int qcom_scm_get_feat_version(u32 feat)
+{
+ return __qcom_scm_get_feat_version(feat);
+}
+EXPORT_SYMBOL(qcom_scm_get_feat_version);
+
+int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
+{
+ return __qcom_scm_restore_sec_cfg(device_id, spare);
+}
+EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);
+
+int qcom_scm_set_video_state(u32 state, u32 spare)
+{
+ return __qcom_scm_set_video_state(state, spare);
+}
+EXPORT_SYMBOL(qcom_scm_set_video_state);
+
+int qcom_scm_mem_protect_video_var(u32 start, u32 size,
+ u32 nonpixel_start,
+ u32 nonpixel_size)
+{
+ return __qcom_scm_mem_protect_video_var(start, size, nonpixel_start,
+ nonpixel_size);
+}
+EXPORT_SYMBOL(qcom_scm_mem_protect_video_var);
diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
index 2cce75c08b99..5d7f0ef6fee7 100644
--- a/drivers/firmware/qcom_scm.h
+++ b/drivers/firmware/qcom_scm.h
@@ -36,6 +36,18 @@ extern int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id);
extern int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
u32 *resp);
+#define QCOM_SCM_SVC_PIL 0x2
+#define QCOM_SCM_PAS_INIT_IMAGE_CMD 0x1
+#define QCOM_SCM_PAS_MEM_SETUP_CMD 0x2
+#define QCOM_SCM_PAS_AUTH_AND_RESET_CMD 0x5
+#define QCOM_SCM_PAS_SHUTDOWN_CMD 0x6
+#define QCOM_SCM_PAS_IS_SUPPORTED_CMD 0x7
+extern bool __qcom_scm_pas_supported(u32 peripheral);
+extern int __qcom_scm_pas_init_image(struct device *dev, u32 peripheral, const void *metadata, size_t size);
+extern int __qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size);
+extern int __qcom_scm_pas_auth_and_reset(u32 peripheral);
+extern int __qcom_scm_pas_shutdown(u32 peripheral);
+
/* common error codes */
#define QCOM_SCM_ENOMEM -5
#define QCOM_SCM_EOPNOTSUPP -4
@@ -43,5 +55,65 @@ extern int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
#define QCOM_SCM_EINVAL_ARG -2
#define QCOM_SCM_ERROR -1
#define QCOM_SCM_INTERRUPTED 1
+#define QCOM_SCM_EBUSY -55
+#define QCOM_SCM_V2_EBUSY -12
+
+
+static inline int qcom_scm_remap_error(int err)
+{
+ switch (err) {
+ case QCOM_SCM_ERROR:
+ return -EIO;
+ case QCOM_SCM_EINVAL_ADDR:
+ case QCOM_SCM_EINVAL_ARG:
+ return -EINVAL;
+ case QCOM_SCM_EOPNOTSUPP:
+ return -EOPNOTSUPP;
+ case QCOM_SCM_ENOMEM:
+ return -ENOMEM;
+ case QCOM_SCM_EBUSY:
+ return QCOM_SCM_EBUSY;
+ case QCOM_SCM_V2_EBUSY:
+ return QCOM_SCM_V2_EBUSY;
+ }
+ return -EINVAL;
+}
+
+enum scm_cmd {
+ PAS_INIT_IMAGE_CMD = 1,
+ PAS_MEM_SETUP_CMD,
+ PAS_AUTH_AND_RESET_CMD = 5,
+ PAS_SHUTDOWN_CMD,
+};
+
+#define SCM_SVC_BOOT 0x1
+#define SCM_SVC_PIL 0x2
+#define SCM_SVC_INFO 0x6
+
+#define GET_FEAT_VERSION_CMD 3
+
+extern int __qcom_scm_pil_init_image_cmd(u32 proc, u64 image_addr);
+extern int __qcom_scm_pil_mem_setup_cmd(u32 proc, u64 start_addr, u32 len);
+extern int __qcom_scm_pil_auth_and_reset_cmd(u32 proc);
+extern int __qcom_scm_pil_shutdown_cmd(u32 proc);
+
+extern int __qcom_scm_iommu_dump_fault_regs(u32 id, u32 context, u64 addr,
+ u32 len);
+extern int __qcom_scm_iommu_set_cp_pool_size(u32 size, u32 spare);
+extern int __qcom_scm_iommu_secure_ptbl_size(u32 spare, int psize[2]);
+extern int __qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
+extern int __qcom_scm_iommu_secure_map(u64 list, u32 list_size, u32 size,
+ u32 id, u32 ctx_id, u64 va,
+ u32 info_size, u32 flags);
+extern int __qcom_scm_iommu_secure_unmap(u32 id, u32 ctx_id, u64 va,
+ u32 size, u32 flags);
+
+extern int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id);
+extern int __qcom_scm_get_feat_version(u32 feat);
+extern int __qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
+extern int __qcom_scm_set_video_state(u32 state, u32 spare);
+extern int __qcom_scm_mem_protect_video_var(u32 start, u32 size,
+ u32 nonpixel_start,
+ u32 nonpixel_size);
#endif
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 8949b3f6f74d..c7bb70a3894e 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -354,6 +354,21 @@ config GPIO_PXA
help
Say yes here to support the PXA GPIO device
+config GPIO_QCOM_SMSM
+ bool "Qualcomm Shared Memory State Machine"
+ depends on QCOM_SMEM
+ help
+ Say yes here to support the Qualcomm Shared Memory State Machine.
+ The state machine is represented by bits in shared memory and is
+ exposed to the system as GPIOs.
+
+config GPIO_QCOM_SMP2P
+ bool "Qualcomm Shared Memory Point to Point support"
+ depends on QCOM_SMEM
+ help
+ Say yes here to support the Qualcomm Shared Memory Point to Point
+ protocol, exposed to the system as GPIOs.
+
config GPIO_RCAR
tristate "Renesas R-Car GPIO"
depends on ARCH_SHMOBILE || COMPILE_TEST
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index f79a7c482a99..cc5c4221ccf2 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -75,6 +75,8 @@ obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o
obj-$(CONFIG_GPIO_PCH) += gpio-pch.o
obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
+obj-$(CONFIG_GPIO_QCOM_SMSM) += gpio-qcom-smsm.o
+obj-$(CONFIG_GPIO_QCOM_SMP2P) += gpio-qcom-smp2p.o
obj-$(CONFIG_GPIO_RC5T583) += gpio-rc5t583.o
obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o
obj-$(CONFIG_GPIO_RCAR) += gpio-rcar.o
diff --git a/drivers/gpio/gpio-qcom-smp2p.c b/drivers/gpio/gpio-qcom-smp2p.c
new file mode 100644
index 000000000000..6e4d09a40c4a
--- /dev/null
+++ b/drivers/gpio/gpio-qcom-smp2p.c
@@ -0,0 +1,591 @@
+/*
+ * Copyright (c) 2015, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+#include <linux/list.h>
+#include <linux/gpio.h>
+#include <linux/mfd/syscon.h>
+
+#include <linux/delay.h>
+
+#include <linux/soc/qcom/smem.h>
+
+/*
+ * The Shared Memory Point to Point (SMP2P) protocol facilitates communication
+ * of a single 32-bit value between two processors. Each value has a single
+ * writer (the local side) and a single reader (the remote side). Values are
+ * uniquely identified in the system by the directed edge (local processor ID
+ * to remote processor ID) and a string identifier.
+ *
+ * Each processor is responsible for creating the outgoing SMEM items and each
+ * item is writable by the local processor and readable by the remote
+ * processor. By using two separate SMEM items that are single-reader and
+ * single-writer, SMP2P does not require any remote locking mechanisms.
+ *
+ * The driver uses the Linux GPIO and interrupt framework to expose a virtual
+ * GPIO for each outbound entry and a virtual interrupt controller for each
+ * inbound entry.
+ */
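In consumer terms, an outbound entry is driven through the GPIO API and an inbound entry is consumed through the IRQ framework. A rough sketch follows, assuming a client device wired to one outbound and one inbound entry; the property name, IRQ name and stop_handler() are illustrative only:

	/* Outbound: flip a bit in the local entry (ends up in smp2p_gpio_set()) */
	struct gpio_desc *ready;
	int irq, ret;

	ready = devm_gpiod_get(dev, "smp2p-ready", GPIOD_OUT_LOW);	/* placeholder property name */
	if (IS_ERR(ready))
		return PTR_ERR(ready);
	gpiod_set_value(ready, 1);

	/* Inbound: the entries are nested interrupts, so use a threaded handler */
	irq = platform_get_irq_byname(pdev, "smp2p-stop");		/* placeholder irq name */
	ret = devm_request_threaded_irq(dev, irq, NULL, stop_handler,
					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
					"smp2p-stop", priv);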
+
+#define SMP2P_MAX_ENTRY 16
+#define SMP2P_MAX_ENTRY_NAME 16
+
+#define SMP2P_FEATURE_SSR_ACK 0x1
+
+#define SMP2P_MAGIC 0x504d5324
+
+/**
+ * struct smp2p_smem_item - in memory communication structure
+ * @magic: magic number
+ * @version: version - must be 1
+ * @features: features flag - currently unused
+ * @local_pid: processor id of sending end
+ * @remote_pid: processor id of receiving end
+ * @total_entries: number of entries - always SMP2P_MAX_ENTRY
+ * @valid_entries: number of allocated entries
+ * @flags: currently unused
+ * @entries: individual communication entries
+ * @name: name of the entry
+ * @value: content of the entry
+ */
+struct smp2p_smem_item {
+ u32 magic;
+ u8 version;
+ unsigned features:24;
+ u16 local_pid;
+ u16 remote_pid;
+ u16 total_entries;
+ u16 valid_entries;
+ u32 flags;
+
+ struct {
+ u8 name[SMP2P_MAX_ENTRY_NAME];
+ u32 value;
+ } entries[SMP2P_MAX_ENTRY];
+} __packed;
+
+/**
+ * struct smp2p_entry - driver context matching one entry
+ * @node: list entry to keep track of allocated entries
+ * @smp2p: reference to the device driver context
+ * @name: name of the entry, to match against smp2p_smem_item
+ * @value: pointer to smp2p_smem_item entry value
+ * @last_value: last handled value
+ * @domain: irq_domain for inbound entries
+ * @irq_enabled: bitmap to track enabled irq bits
+ * @irq_rising: bitmap to mark irq bits for rising detection
+ * @irq_falling: bitmap to mark irq bits for falling detection
+ * @chip: gpio_chip for outbound entries
+ */
+struct smp2p_entry {
+ struct list_head node;
+ struct qcom_smp2p *smp2p;
+
+ const char *name;
+ u32 *value;
+ u32 last_value;
+
+ struct irq_domain *domain;
+ DECLARE_BITMAP(irq_enabled, 32);
+ DECLARE_BITMAP(irq_rising, 32);
+ DECLARE_BITMAP(irq_falling, 32);
+
+ struct gpio_chip chip;
+};
+
+#define SMP2P_INBOUND 0
+#define SMP2P_OUTBOUND 1
+
+/**
+ * struct qcom_smp2p - device driver context
+ * @dev: device driver handle
+ * @in: pointer to the inbound smem item
+ * @smem_items: ids of the two smem items
+ * @valid_entries: already scanned inbound entries
+ * @local_pid: processor id of the inbound edge
+ * @remote_pid: processor id of the outbound edge
+ * @ipc_regmap: regmap for the outbound ipc
+ * @ipc_offset: offset within the regmap
+ * @ipc_bit: bit in regmap@offset to kick to signal remote processor
+ * @inbound: list of inbound entries
+ * @outbound: list of outbound entries
+ */
+struct qcom_smp2p {
+ struct device *dev;
+
+ struct smp2p_smem_item *in;
+ struct smp2p_smem_item *out;
+
+ unsigned smem_items[SMP2P_OUTBOUND + 1];
+
+ unsigned valid_entries;
+
+ unsigned local_pid;
+ unsigned remote_pid;
+
+ struct regmap *ipc_regmap;
+ int ipc_offset;
+ int ipc_bit;
+
+ struct list_head inbound;
+ struct list_head outbound;
+};
+
+static void qcom_smp2p_kick(struct qcom_smp2p *smp2p)
+{
+ /* Make sure any updated data is written before the kick */
+ wmb();
+ regmap_write(smp2p->ipc_regmap, smp2p->ipc_offset, BIT(smp2p->ipc_bit));
+}
+
+static irqreturn_t qcom_smp2p_intr(int irq, void *data)
+{
+ struct smp2p_smem_item *in;
+ struct smp2p_entry *entry;
+ struct qcom_smp2p *smp2p = data;
+ unsigned smem_id = smp2p->smem_items[SMP2P_INBOUND];
+ unsigned pid = smp2p->remote_pid;
+ size_t size;
+ int irq_pin;
+ u32 status;
+ u32 val;
+ int ret;
+ int i;
+ u8 tmp_name[SMP2P_MAX_ENTRY_NAME];
+
+ in = smp2p->in;
+
+ /* Acquire smem item, if not already found */
+ if (!in) {
+ ret = qcom_smem_get(pid, smem_id, (void **)&in, &size);
+ if (ret < 0) {
+ dev_err(smp2p->dev,
+ "Unable to acquire remote smp2p item\n");
+ return IRQ_HANDLED;
+ }
+
+ smp2p->in = in;
+ }
+
+ /* Match newly created entries */
+ for (i = smp2p->valid_entries; i < in->valid_entries; i++) {
+ list_for_each_entry(entry, &smp2p->inbound, node) {
+ memcpy_fromio(tmp_name, in->entries[i].name,
+ SMP2P_MAX_ENTRY_NAME);
+ if (!strcmp(tmp_name, entry->name)) {
+ entry->value = &in->entries[i].value;
+ break;
+ }
+ }
+ }
+ smp2p->valid_entries = i;
+
+ /* Fire interrupts based on any value changes */
+ list_for_each_entry(entry, &smp2p->inbound, node) {
+ /* Ignore entries not yet allocated by the remote side */
+ if (!entry->value)
+ continue;
+
+ val = *entry->value;
+
+ status = val ^ entry->last_value;
+ entry->last_value = val;
+
+ /* No changes of this entry? */
+ if (!status)
+ continue;
+
+ for_each_set_bit(i, entry->irq_enabled, 32) {
+ if (!(status & BIT(i)))
+ continue;
+
+ if (val & BIT(i)) {
+ if (test_bit(i, entry->irq_rising)) {
+ irq_pin = irq_find_mapping(entry->domain, i);
+ handle_nested_irq(irq_pin);
+ }
+ } else {
+ if (test_bit(i, entry->irq_falling)) {
+ irq_pin = irq_find_mapping(entry->domain, i);
+ handle_nested_irq(irq_pin);
+ }
+ }
+
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void smp2p_mask_irq(struct irq_data *irqd)
+{
+ struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+ clear_bit(irq, entry->irq_enabled);
+}
+
+static void smp2p_unmask_irq(struct irq_data *irqd)
+{
+ struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+ set_bit(irq, entry->irq_enabled);
+}
+
+static int smp2p_set_irq_type(struct irq_data *irqd, unsigned int type)
+{
+ struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
+ irq_hw_number_t irq = irqd_to_hwirq(irqd);
+
+ if (!(type & IRQ_TYPE_EDGE_BOTH))
+ return -EINVAL;
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ set_bit(irq, entry->irq_rising);
+ else
+ clear_bit(irq, entry->irq_rising);
+
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ set_bit(irq, entry->irq_falling);
+ else
+ clear_bit(irq, entry->irq_falling);
+
+ return 0;
+}
+
+static struct irq_chip smp2p_irq_chip = {
+ .name = "smp2p",
+ .irq_mask = smp2p_mask_irq,
+ .irq_unmask = smp2p_unmask_irq,
+ .irq_set_type = smp2p_set_irq_type,
+};
+
+static int smp2p_irq_map(struct irq_domain *d,
+ unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct smp2p_entry *entry = d->host_data;
+
+ irq_set_chip_and_handler(irq, &smp2p_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, entry);
+ irq_set_nested_thread(irq, 1);
+
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops smp2p_irq_ops = {
+ .map = smp2p_irq_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static int smp2p_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset,
+ int value)
+{
+ struct smp2p_entry *entry = container_of(chip, struct smp2p_entry, chip);
+
+ if (value)
+ *entry->value |= BIT(offset);
+ else
+ *entry->value &= ~BIT(offset);
+
+ qcom_smp2p_kick(entry->smp2p);
+
+ return 0;
+}
+
+static void smp2p_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ smp2p_gpio_direction_output(chip, offset, value);
+}
+
+static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p,
+ struct smp2p_entry *entry,
+ struct device_node *node)
+{
+ entry->domain = irq_domain_add_linear(node, 32, &smp2p_irq_ops, entry);
+ if (!entry->domain) {
+ dev_err(smp2p->dev, "failed to add irq_domain\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int qcom_smp2p_outbound_entry(struct qcom_smp2p *smp2p,
+ struct smp2p_entry *entry,
+ struct device_node *node)
+{
+ struct smp2p_smem_item *out = smp2p->out;
+ struct gpio_chip *chip;
+ int ret;
+
+ /* Allocate an entry from the smem item */
+ memcpy_toio(out->entries[out->valid_entries].name, entry->name, SMP2P_MAX_ENTRY_NAME);
+
+ /* Make the logical entry reference the physical value */
+ entry->value = &out->entries[out->valid_entries].value;
+
+ out->valid_entries++;
+
+ chip = &entry->chip;
+ chip->base = -1;
+ chip->ngpio = 32;
+ chip->label = entry->name;
+ chip->dev = smp2p->dev;
+ chip->owner = THIS_MODULE;
+ chip->of_node = node;
+
+ chip->set = smp2p_gpio_set;
+ chip->direction_output = smp2p_gpio_direction_output;
+
+ ret = gpiochip_add(chip);
+ if (ret) {
+ dev_err(smp2p->dev, "failed to register gpiochip\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_smp2p_alloc_outbound_item(struct qcom_smp2p *smp2p)
+{
+ struct smp2p_smem_item *out;
+ unsigned smem_id = smp2p->smem_items[SMP2P_OUTBOUND];
+ unsigned pid = smp2p->remote_pid;
+ int ret;
+
+ ret = qcom_smem_alloc(pid, smem_id, sizeof(*out));
+ if (ret < 0 && ret != -EEXIST) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(smp2p->dev,
+ "unable to allocate local smp2p item\n");
+ return ret;
+ }
+
+ ret = qcom_smem_get(pid, smem_id, (void **)&out, NULL);
+ if (ret < 0) {
+ dev_err(smp2p->dev, "Unable to acquire local smp2p item\n");
+ return ret;
+ }
+
+ memset_io(out, 0, sizeof(*out));
+ out->magic = SMP2P_MAGIC;
+ out->local_pid = smp2p->local_pid;
+ out->remote_pid = smp2p->remote_pid;
+ out->total_entries = SMP2P_MAX_ENTRY;
+ out->valid_entries = 0;
+
+ /*
+ * Make sure the rest of the header is written before we validate the
+ * item by writing a valid version number.
+ */
+ wmb();
+ out->version = 1;
+
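+ /* Signal the remote processor so it notices the newly published item */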
+ qcom_smp2p_kick(smp2p);
+
+ smp2p->out = out;
+
+ return 0;
+}
+
+static int smp2p_parse_ipc(struct qcom_smp2p *smp2p)
+{
+ struct device_node *syscon;
+ struct device *dev = smp2p->dev;
+ const char *key;
+ int ret;
+
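+ /* The qcom,ipc property is a <syscon phandle, register offset, bit> triplet */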
+ syscon = of_parse_phandle(dev->of_node, "qcom,ipc", 0);
+ if (!syscon) {
+ dev_err(dev, "no qcom,ipc node\n");
+ return -ENODEV;
+ }
+
+ smp2p->ipc_regmap = syscon_node_to_regmap(syscon);
+ if (IS_ERR(smp2p->ipc_regmap))
+ return PTR_ERR(smp2p->ipc_regmap);
+
+ key = "qcom,ipc";
+ ret = of_property_read_u32_index(dev->of_node, key, 1, &smp2p->ipc_offset);
+ if (ret < 0) {
+ dev_err(dev, "no offset in %s\n", key);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(dev->of_node, key, 2, &smp2p->ipc_bit);
+ if (ret < 0) {
+ dev_err(dev, "no bit in %s\n", key);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int qcom_smp2p_probe(struct platform_device *pdev)
+{
+ struct smp2p_entry *entry;
+ struct device_node *node;
+ struct qcom_smp2p *smp2p;
+ const char *key;
+ int irq;
+ int ret;
+
+ smp2p = devm_kzalloc(&pdev->dev, sizeof(*smp2p), GFP_KERNEL);
+ if (!smp2p)
+ return -ENOMEM;
+
+ smp2p->dev = &pdev->dev;
+ INIT_LIST_HEAD(&smp2p->inbound);
+ INIT_LIST_HEAD(&smp2p->outbound);
+
+ platform_set_drvdata(pdev, smp2p);
+
+ ret = smp2p_parse_ipc(smp2p);
+ if (ret)
+ return ret;
+
+ key = "qcom,smem";
+ ret = of_property_read_u32_array(pdev->dev.of_node, key,
+ smp2p->smem_items, 2);
+ if (ret)
+ return ret;
+
+ key = "qcom,local-pid";
+ ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->local_pid);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to read %s\n", key);
+ return -EINVAL;
+ }
+
+ key = "qcom,remote-pid";
+ ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->remote_pid);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to read %s\n", key);
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "unable to acquire smp2p interrupt\n");
+ return irq;
+ }
+
+ ret = qcom_smp2p_alloc_outbound_item(smp2p);
+ if (ret < 0)
+ return ret;
+
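+ /* Each available child node describes one inbound or outbound entry */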
+ for_each_available_child_of_node(pdev->dev.of_node, node) {
+ entry = devm_kzalloc(&pdev->dev, sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ ret = -ENOMEM;
+ goto unwind_interfaces;
+ }
+
+ entry->smp2p = smp2p;
+
+ ret = of_property_read_string(node, "qcom,entry-name", &entry->name);
+ if (ret < 0)
+ goto unwind_interfaces;
+
+ if (of_property_read_bool(node, "qcom,inbound")) {
+ ret = qcom_smp2p_inbound_entry(smp2p, entry, node);
+ if (ret < 0)
+ goto unwind_interfaces;
+
+ list_add(&entry->node, &smp2p->inbound);
+ } else if (of_property_read_bool(node, "qcom,outbound")) {
+ ret = qcom_smp2p_outbound_entry(smp2p, entry, node);
+ if (ret < 0)
+ goto unwind_interfaces;
+
+ list_add(&entry->node, &smp2p->outbound);
+ } else {
+ dev_err(&pdev->dev, "neither inbound nor outbound\n");
+ ret = -EINVAL;
+ goto unwind_interfaces;
+ }
+ }
+
+ /* Kick the outgoing edge after allocating entries */
+ qcom_smp2p_kick(smp2p);
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ NULL, qcom_smp2p_intr,
+ IRQF_ONESHOT,
+ "smp2p", (void *)smp2p);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request interrupt\n");
+ goto unwind_interfaces;
+ }
+
+ return 0;
+
+unwind_interfaces:
+ list_for_each_entry(entry, &smp2p->inbound, node)
+ irq_domain_remove(entry->domain);
+
+ list_for_each_entry(entry, &smp2p->outbound, node)
+ gpiochip_remove(&entry->chip);
+
+ smp2p->out->valid_entries = 0;
+
+ return ret;
+}
+
+static int qcom_smp2p_remove(struct platform_device *pdev)
+{
+ struct qcom_smp2p *smp2p = platform_get_drvdata(pdev);
+ struct smp2p_entry *entry;
+
+ list_for_each_entry(entry, &smp2p->inbound, node)
+ irq_domain_remove(entry->domain);
+
+ list_for_each_entry(entry, &smp2p->outbound, node)
+ gpiochip_remove(&entry->chip);
+
+ smp2p->out->valid_entries = 0;
+
+ return 0;
+}
+
+static const struct of_device_id qcom_smp2p_of_match[] = {
+ { .compatible = "qcom,smp2p" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smp2p_of_match);
+
+static struct platform_driver qcom_smp2p_driver = {
+ .probe = qcom_smp2p_probe,
+ .remove = qcom_smp2p_remove,
+ .driver = {
+ .name = "qcom_smp2p",
+ .of_match_table = qcom_smp2p_of_match,
+ },
+};
+module_platform_driver(qcom_smp2p_driver);
+
+MODULE_DESCRIPTION("Qualcomm Shared Memory Point to Point driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-qcom-smsm.c b/drivers/gpio/gpio-qcom-smsm.c
new file mode 100644
index 000000000000..3bbe9bb73f84
--- /dev/null
+++ b/drivers/gpio/gpio-qcom-smsm.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright (c) 2014, Sony Mobile Communications AB.
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/hwspinlock.h>
+#include <linux/regmap.h>
+#include <linux/gpio.h>
+#include <linux/mfd/syscon.h>
+
+#include <linux/delay.h>
+
+#include <linux/soc/qcom/smem.h>
+
+#define SMSM_APPS_STATE 0
+#define SMEM_SMSM_SHARED_STATE 85
+
+#define SMSM_MAX_STATES 8
+
+struct qcom_smsm_state {
+ unsigned state_id;
+ struct gpio_chip chip;
+
+ int irq;
+
+ struct regmap *ipc_regmap;
+ int ipc_bit;
+ int ipc_offset;
+};
+
+struct qcom_smsm {
+ struct device *dev;
+
+ u32 *shared_state;
+ size_t shared_state_size;
+
+ struct qcom_smsm_state states[SMSM_MAX_STATES];
+};
+
+#if 0
+int qcom_smsm_change_state(struct qcom_smsm *smsm, u32 clear_mask, u32 set_mask)
+{
+ u32 state;
+
+ dev_dbg(smsm->dev, "SMSM_APPS_STATE clear 0x%x set 0x%x\n", clear_mask, set_mask);
+ print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_OFFSET, 16, 1, smsm->shared_state, smsm->shared_state_size, true);
+
+ state = readl(&smsm->shared_state[SMSM_APPS_STATE]);
+ state &= ~clear_mask;
+ state |= set_mask;
+ writel(state, &smsm->shared_state[SMSM_APPS_STATE]);
+
+ print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_OFFSET, 16, 1, smsm->shared_state, smsm->shared_state_size, true);
+
+ // qcom_smem_signal(-1, smsm->smem, smsm->signal_offset, smsm->signal_bit);
+
+ return 0;
+}
+EXPORT_SYMBOL(qcom_smsm_change_state);
+#endif
+
+static struct qcom_smsm_state *to_smsm_state(struct gpio_chip *chip)
+{
+ return container_of(chip, struct qcom_smsm_state, chip);
+}
+
+static struct qcom_smsm *to_smsm(struct qcom_smsm_state *state)
+{
+ return container_of(state, struct qcom_smsm, states[state->state_id]);
+}
+
+static int smsm_gpio_direction_input(struct gpio_chip *chip,
+ unsigned offset)
+{
+ struct qcom_smsm_state *state = to_smsm_state(chip);
+
+ if (state->state_id == SMSM_APPS_STATE)
+ return -EINVAL;
+ return 0;
+}
+
+static int smsm_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset,
+ int value)
+{
+ struct qcom_smsm_state *ipc_state;
+ struct qcom_smsm_state *state = to_smsm_state(chip);
+ struct qcom_smsm *smsm = to_smsm(state);
+ unsigned word;
+ unsigned bit;
+ u32 val;
+ int i;
+
+ /* Only SMSM_APPS_STATE supports writing */
+ if (state->state_id != SMSM_APPS_STATE)
+ return -EINVAL;
+
+ offset += state->state_id * 32;
+
+ word = ALIGN(offset / 32, 4);
+ bit = offset % 32;
+
+ val = readl(smsm->shared_state + word);
+ if (value)
+ val |= BIT(bit);
+ else
+ val &= ~BIT(bit);
+ writel(val, smsm->shared_state + word);
+
+ /* XXX: send out interrupts */
+ for (i = 0; i < SMSM_MAX_STATES; i++) {
+ ipc_state = &smsm->states[i];
+ if (!ipc_state->ipc_regmap)
+ continue;
+
+ regmap_write(ipc_state->ipc_regmap, ipc_state->ipc_offset, BIT(ipc_state->ipc_bit));
+ }
+
+ dev_err(smsm->dev, "set %d %d\n", offset, value);
+
+ return 0;
+}
+
+static int smsm_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct qcom_smsm_state *state = to_smsm_state(chip);
+ struct qcom_smsm *smsm = to_smsm(state);
+ unsigned word;
+ unsigned bit;
+ u32 val;
+
+ offset += state->state_id * 32;
+
+ word = ALIGN(offset / 32, 4);
+ bit = offset % 32;
+
+ val = readl(smsm->shared_state + word);
+
+ return !!(val & BIT(bit));
+}
+
+static void smsm_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ smsm_gpio_direction_output(chip, offset, value);
+}
+
+static int smsm_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ return -EINVAL;
+}
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+static void smsm_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
+{
+ struct qcom_smsm_state *state = to_smsm_state(chip);
+ struct qcom_smsm *smsm = to_smsm(state);
+ unsigned i;
+ u32 val;
+
+ val = readl(smsm->shared_state + state->state_id * 4);
+
+ for (i = 0; i < 32; i++) {
+ if (val & BIT(i))
+ seq_puts(s, "1");
+ else
+ seq_puts(s, "0");
+
+ if (i == 7 || i == 15 || i == 23)
+ seq_puts(s, " ");
+ }
+ seq_puts(s, "\n");
+}
+
+#else
+#define smsm_gpio_dbg_show NULL
+#endif
+
+static struct gpio_chip smsm_gpio_template = {
+ .direction_input = smsm_gpio_direction_input,
+ .direction_output = smsm_gpio_direction_output,
+ .get = smsm_gpio_get,
+ .set = smsm_gpio_set,
+ .to_irq = smsm_gpio_to_irq,
+ .dbg_show = smsm_gpio_dbg_show,
+ .owner = THIS_MODULE,
+};
+
+static int qcom_smsm_probe(struct platform_device *pdev)
+{
+ struct qcom_smsm_state *state;
+ struct device_node *syscon_np;
+ struct device_node *node;
+ struct qcom_smsm *smsm;
+ char *key;
+ u32 sid;
+ int ret;
+
+ smsm = devm_kzalloc(&pdev->dev, sizeof(*smsm), GFP_KERNEL);
+ if (!smsm)
+ return -ENOMEM;
+ smsm->dev = &pdev->dev;
+
+ ret = qcom_smem_alloc(-1, SMEM_SMSM_SHARED_STATE, 8 * sizeof(uint32_t));
+ if (ret < 0 && ret != -EEXIST) {
+ dev_err(&pdev->dev, "unable to allocate shared state entry\n");
+ return ret;
+ }
+
+ ret = qcom_smem_get(-1, SMEM_SMSM_SHARED_STATE,
+ (void **)&smsm->shared_state,
+ &smsm->shared_state_size);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to acquire shared state entry\n");
+ return ret;
+ }
+
+ dev_err(smsm->dev, "SMEM_SMSM_SHARED_STATE: %d, %zu\n", ret, smsm->shared_state_size);
+ print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_OFFSET, 16, 1, smsm->shared_state, smsm->shared_state_size, true);
+
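+ /* Each child node describes one SMSM state, identified by its "reg" value */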
+ for_each_child_of_node(pdev->dev.of_node, node) {
+ key = "reg";
+ ret = of_property_read_u32(node, key, &sid);
+ if (ret || sid >= SMSM_MAX_STATES) {
+ dev_err(&pdev->dev, "smsm state missing %s\n", key);
+ return -EINVAL;
+ }
+ state = &smsm->states[sid];
+ state->state_id = sid;
+
+ state->chip = smsm_gpio_template;
+ state->chip.base = -1;
+ state->chip.dev = &pdev->dev;
+ state->chip.of_node = node;
+ state->chip.label = node->name;
+ state->chip.ngpio = 8 * sizeof(u32);
+ ret = gpiochip_add(&state->chip);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register gpiochip\n");
+ return ret;
+ }
+
+ /* The remaining properties are only for non-apps state */
+ if (sid == SMSM_APPS_STATE)
+ continue;
+
+ state->irq = irq_of_parse_and_map(node, 0);
+ if (state->irq < 0 && state->irq != -EINVAL) {
+ dev_err(&pdev->dev, "failed to parse smsm interrupt\n");
+ return -EINVAL;
+ }
+
+ syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
+ if (!syscon_np) {
+ dev_err(&pdev->dev, "no qcom,ipc node\n");
+ return -ENODEV;
+ }
+
+ state->ipc_regmap = syscon_node_to_regmap(syscon_np);
+ if (IS_ERR(state->ipc_regmap))
+ return PTR_ERR(state->ipc_regmap);
+
+ key = "qcom,ipc";
+ ret = of_property_read_u32_index(node, key, 1, &state->ipc_offset);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no offset in %s\n", key);
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(node, key, 2, &state->ipc_bit);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "no bit in %s\n", key);
+ return -EINVAL;
+ }
+
+ }
+
+ return 0;
+}
+
+static const struct of_device_id qcom_smsm_of_match[] = {
+ { .compatible = "qcom,smsm" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, qcom_smsm_of_match);
+
+static struct platform_driver qcom_smsm_driver = {
+ .probe = qcom_smsm_probe,
+ .driver = {
+ .name = "qcom_smsm",
+ .owner = THIS_MODULE,
+ .of_match_table = qcom_smsm_of_match,
+ },
+};
+
+static int __init qcom_smsm_init(void)
+{
+ return platform_driver_register(&qcom_smsm_driver);
+}
+arch_initcall(qcom_smsm_init);
+
+static void __exit qcom_smsm_exit(void)
+{
+ platform_driver_unregister(&qcom_smsm_driver);
+}
+module_exit(qcom_smsm_exit);
+
+MODULE_DESCRIPTION("Qualcomm Shared Memory Signaling Mechanism");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 05bb7311ac5d..f32e090b3d85 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1388,6 +1388,14 @@ struct edid *drm_get_edid(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_get_edid);
+struct edid *drm_get_edid_early(struct i2c_adapter *adapter)
+{
+ struct drm_connector dummy_connector = { };
+
+ return drm_get_edid(&dummy_connector, adapter);
+}
+EXPORT_SYMBOL(drm_get_edid_early);
+
/**
* drm_edid_duplicate - duplicate an EDID and the extensions
* @edid: EDID to duplicate
@@ -1409,7 +1417,7 @@ EXPORT_SYMBOL(drm_edid_duplicate);
*
* Returns true if @vendor is in @edid, false otherwise
*/
-static bool edid_vendor(struct edid *edid, char *vendor)
+bool edid_vendor(struct edid *edid, char *vendor)
{
char edid_vendor[3];
@@ -1420,7 +1428,7 @@ static bool edid_vendor(struct edid *edid, char *vendor)
return !strncmp(edid_vendor, vendor, 3);
}
-
+EXPORT_SYMBOL(edid_vendor);
/**
* edid_get_quirks - return quirk flags for a given EDID
* @edid: EDID to process
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 2d5ca8eec13a..e487f56ffc4c 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -45,9 +45,26 @@
* subset of the MIPI DCS command set.
*/
+static const struct device_type mipi_dsi_device_type;
+
static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
{
- return of_driver_match_device(dev, drv);
+ struct mipi_dsi_device *dsi;
+
+ dsi = dev->type == &mipi_dsi_device_type ?
+ to_mipi_dsi_device(dev) : NULL;
+
+ if (!dsi)
+ return 0;
+
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
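+ /* Also bind internally created "dummy" devices to the dummy driver */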
+ if (!strcmp(drv->name, "mipi_dsi_dummy") &&
+ !strcmp(dsi->name, "dummy"))
+ return 1;
+
+ return 0;
}
static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
@@ -102,9 +119,41 @@ static const struct device_type mipi_dsi_device_type = {
.release = mipi_dsi_dev_release,
};
-static struct mipi_dsi_device *mipi_dsi_device_alloc(struct mipi_dsi_host *host)
+struct mipi_dsi_device_info {
+ char name[DSI_DEV_NAME_SIZE];
+ u32 reg;
+ struct device_node *node;
+};
+
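+/* Returns -EBUSY when another child device already uses the requested virtual channel */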
+static int __dsi_check_chan_busy(struct device *dev, void *data)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
+ u32 reg = *(u32 *) data;
+
+ if (dsi && dsi->channel == reg)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int mipi_dsi_check_chan_busy(struct mipi_dsi_host *host, u32 reg)
+{
+ return device_for_each_child(host->dev, &reg, __dsi_check_chan_busy);
+}
+
+static struct mipi_dsi_device *
+mipi_dsi_device_new(struct mipi_dsi_host *host,
+ struct mipi_dsi_device_info *info)
{
+ struct device *dev = host->dev;
struct mipi_dsi_device *dsi;
+ int r;
+
+ if (info->reg > 3) {
+ dev_err(dev, "dsi device %s has invalid channel value: %u\n",
+ info->name, info->reg);
+ return ERR_PTR(-EINVAL);
+ }
dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
if (!dsi)
@@ -114,62 +163,90 @@ static struct mipi_dsi_device *mipi_dsi_device_alloc(struct mipi_dsi_host *host)
dsi->dev.bus = &mipi_dsi_bus_type;
dsi->dev.parent = host->dev;
dsi->dev.type = &mipi_dsi_device_type;
+ dsi->dev.of_node = info->node;
+ dsi->channel = info->reg;
+ strlcpy(dsi->name, info->name, sizeof(dsi->name));
- device_initialize(&dsi->dev);
+ dev_set_name(&dsi->dev, "%s.%d", dev_name(host->dev), info->reg);
- return dsi;
-}
-
-static int mipi_dsi_device_add(struct mipi_dsi_device *dsi)
-{
- struct mipi_dsi_host *host = dsi->host;
+ r = mipi_dsi_check_chan_busy(host, info->reg);
+ if (r)
+ goto err;
- dev_set_name(&dsi->dev, "%s.%d", dev_name(host->dev), dsi->channel);
+ r = device_register(&dsi->dev);
+ if (r)
+ goto err;
- return device_add(&dsi->dev);
+ return dsi;
+err:
+ kfree(dsi);
+ return ERR_PTR(r);
}
static struct mipi_dsi_device *
of_mipi_dsi_device_add(struct mipi_dsi_host *host, struct device_node *node)
{
- struct mipi_dsi_device *dsi;
struct device *dev = host->dev;
+ struct mipi_dsi_device_info info = { };
int ret;
- u32 reg;
- ret = of_property_read_u32(node, "reg", &reg);
+ if (of_modalias_node(node, info.name, sizeof(info.name)) < 0) {
+ dev_err(dev, "modalias failure on %s\n", node->full_name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = of_property_read_u32(node, "reg", &info.reg);
if (ret) {
dev_err(dev, "device node %s has no valid reg property: %d\n",
node->full_name, ret);
return ERR_PTR(-EINVAL);
}
- if (reg > 3) {
- dev_err(dev, "device node %s has invalid reg property: %u\n",
- node->full_name, reg);
- return ERR_PTR(-EINVAL);
- }
+ info.node = of_node_get(node);
- dsi = mipi_dsi_device_alloc(host);
- if (IS_ERR(dsi)) {
- dev_err(dev, "failed to allocate DSI device %s: %ld\n",
- node->full_name, PTR_ERR(dsi));
- return dsi;
- }
+ return mipi_dsi_device_new(host, &info);
+}
+
+static struct mipi_dsi_driver dummy_dsi_driver = {
+ .driver.name = "mipi_dsi_dummy",
+};
- dsi->dev.of_node = of_node_get(node);
- dsi->channel = reg;
+struct mipi_dsi_device *mipi_dsi_new_dummy(struct mipi_dsi_host *host, u32 reg)
+{
+ struct mipi_dsi_device_info info = { "dummy", reg, NULL, };
- ret = mipi_dsi_device_add(dsi);
- if (ret) {
- dev_err(dev, "failed to add DSI device %s: %d\n",
- node->full_name, ret);
- kfree(dsi);
- return ERR_PTR(ret);
+ return mipi_dsi_device_new(host, &info);
+}
+EXPORT_SYMBOL(mipi_dsi_new_dummy);
+
+void mipi_dsi_unregister_device(struct mipi_dsi_device *dsi)
+{
+ if (dsi)
+ device_unregister(&dsi->dev);
+}
+EXPORT_SYMBOL(mipi_dsi_unregister_device);
+
+static DEFINE_MUTEX(host_lock);
+static LIST_HEAD(host_list);
+
+struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node)
+{
+ struct mipi_dsi_host *host;
+
+ mutex_lock(&host_lock);
+
+ list_for_each_entry(host, &host_list, list) {
+ if (host->dev->of_node == node) {
+ mutex_unlock(&host_lock);
+ return host;
+ }
}
- return dsi;
+ mutex_unlock(&host_lock);
+
+ return NULL;
}
+EXPORT_SYMBOL(of_find_mipi_dsi_host_by_node);
int mipi_dsi_host_register(struct mipi_dsi_host *host)
{
@@ -182,6 +259,10 @@ int mipi_dsi_host_register(struct mipi_dsi_host *host)
of_mipi_dsi_device_add(host, node);
}
+ mutex_lock(&host_lock);
+ list_add_tail(&host->list, &host_list);
+ mutex_unlock(&host_lock);
+
return 0;
}
EXPORT_SYMBOL(mipi_dsi_host_register);
@@ -198,6 +279,10 @@ static int mipi_dsi_remove_device_fn(struct device *dev, void *priv)
void mipi_dsi_host_unregister(struct mipi_dsi_host *host)
{
device_for_each_child(host->dev, NULL, mipi_dsi_remove_device_fn);
+
+ mutex_lock(&host_lock);
+ list_del_init(&host->list);
+ mutex_unlock(&host_lock);
}
EXPORT_SYMBOL(mipi_dsi_host_unregister);
@@ -924,7 +1009,13 @@ EXPORT_SYMBOL(mipi_dsi_driver_unregister);
static int __init mipi_dsi_bus_init(void)
{
- return bus_register(&mipi_dsi_bus_type);
+ int ret;
+
+ ret = bus_register(&mipi_dsi_bus_type);
+ if (ret < 0)
+ return ret;
+
+ return mipi_dsi_driver_register(&dummy_dsi_driver);
}
postcore_initcall(mipi_dsi_bus_init);
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
index 59aea5aa1fff..a4f55974fef2 100644
--- a/drivers/gpu/drm/i2c/Makefile
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -1,6 +1,6 @@
ccflags-y := -Iinclude/drm
-obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
+obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o adv7511_audio.o
ch7006-y := ch7006_drv.o ch7006_mode.o
obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index 00416f23b5cb..7298f794ae20 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -12,41 +12,19 @@
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/of_graph.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder_slave.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_mipi_dsi.h>
#include "adv7511.h"
-struct adv7511 {
- struct i2c_client *i2c_main;
- struct i2c_client *i2c_edid;
-
- struct regmap *regmap;
- struct regmap *packet_memory_regmap;
- enum drm_connector_status status;
- bool powered;
-
- unsigned int f_tmds;
-
- unsigned int current_edid_segment;
- uint8_t edid_buf[256];
- bool edid_read;
-
- wait_queue_head_t wq;
- struct drm_encoder *encoder;
-
- bool embedded_sync;
- enum adv7511_sync_polarity vsync_polarity;
- enum adv7511_sync_polarity hsync_polarity;
- bool rgb;
-
- struct edid *edid;
-
- struct gpio_desc *gpio_pd;
-};
+#define HPD_ENABLE 0
static struct adv7511 *encoder_to_adv7511(struct drm_encoder *encoder)
{
@@ -66,6 +44,24 @@ static const struct reg_sequence adv7511_fixed_registers[] = {
{ 0x55, 0x02 },
};
+/* ADI recommended values for proper operation. */
+static const struct reg_sequence adv7533_fixed_registers[] = {
+ { 0x16, 0x20 },
+ { 0x9a, 0xe0 },
+ { 0xba, 0x70 },
+ { 0xde, 0x82 },
+ { 0xe4, 0x40 },
+ { 0xe5, 0x80 },
+};
+
+static const struct reg_sequence adv7533_cec_fixed_registers[] = {
+ { 0x15, 0xd0 },
+ { 0x17, 0xd0 },
+ { 0x24, 0x20 },
+ { 0x57, 0x11 },
+ { 0x05, 0xc8 },
+};
+
/* -----------------------------------------------------------------------------
* Register access
*/
@@ -158,6 +154,15 @@ static const struct regmap_config adv7511_regmap_config = {
.volatile_reg = adv7511_register_volatile,
};
+static const struct regmap_config adv7533_cec_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = 0xff,
+ .cache_type = REGCACHE_RBTREE,
+};
+
/* -----------------------------------------------------------------------------
* Hardware configuration
*/
@@ -193,7 +198,7 @@ static void adv7511_set_colormap(struct adv7511 *adv7511, bool enable,
ADV7511_CSC_UPDATE_MODE, 0);
}
-static int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet)
+int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet)
{
if (packet & 0xff)
regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
@@ -208,7 +213,7 @@ static int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet)
return 0;
}
-static int adv7511_packet_disable(struct adv7511 *adv7511, unsigned int packet)
+int adv7511_packet_disable(struct adv7511 *adv7511, unsigned int packet)
{
if (packet & 0xff)
regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
@@ -358,6 +363,73 @@ static void adv7511_set_link_config(struct adv7511 *adv7511,
adv7511->rgb = config->input_colorspace == HDMI_COLORSPACE_RGB;
}
+static void adv7511_dsi_config_tgen(struct adv7511 *adv7511)
+{
+ struct mipi_dsi_device *dsi = adv7511->dsi;
+ struct drm_display_mode *mode = &adv7511->curr_mode;
+ u8 clock_div_by_lanes[] = { 6, 4, 3 }; /* 2, 3, 4 lanes */
+ unsigned int hsw, hfp, hbp, vsw, vfp, vbp;
+
+ hsw = mode->hsync_end - mode->hsync_start;
+ hfp = mode->hsync_start - mode->hdisplay;
+ hbp = mode->htotal - mode->hsync_end;
+ vsw = mode->vsync_end - mode->vsync_start;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vbp = mode->vtotal - mode->vsync_end;
+
+ /* set pixel clock divider mode */
+ regmap_write(adv7511->regmap_cec, 0x16,
+ clock_div_by_lanes[dsi->lanes - 2] << 3);
+
+ /* horizontal porch params */
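+ /* each value is split across two registers: bits [11:4], then bits [3:0] in the upper nibble */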
+ regmap_write(adv7511->regmap_cec, 0x28, mode->htotal >> 4);
+ regmap_write(adv7511->regmap_cec, 0x29, (mode->htotal << 4) & 0xff);
+ regmap_write(adv7511->regmap_cec, 0x2a, hsw >> 4);
+ regmap_write(adv7511->regmap_cec, 0x2b, (hsw << 4) & 0xff);
+ regmap_write(adv7511->regmap_cec, 0x2c, hfp >> 4);
+ regmap_write(adv7511->regmap_cec, 0x2d, (hfp << 4) & 0xff);
+ regmap_write(adv7511->regmap_cec, 0x2e, hbp >> 4);
+ regmap_write(adv7511->regmap_cec, 0x2f, (hbp << 4) & 0xff);
+
+ /* vertical porch params */
+ regmap_write(adv7511->regmap_cec, 0x30, mode->vtotal >> 4);
+ regmap_write(adv7511->regmap_cec, 0x31, (mode->vtotal << 4) & 0xff);
+ regmap_write(adv7511->regmap_cec, 0x32, vsw >> 4);
+ regmap_write(adv7511->regmap_cec, 0x33, (vsw << 4) & 0xff);
+ regmap_write(adv7511->regmap_cec, 0x34, vfp >> 4);
+ regmap_write(adv7511->regmap_cec, 0x35, (vfp << 4) & 0xff);
+ regmap_write(adv7511->regmap_cec, 0x36, vbp >> 4);
+ regmap_write(adv7511->regmap_cec, 0x37, (vbp << 4) & 0xff);
+}
+
+static void adv7511_dsi_receiver_dpms(struct adv7511 *adv7511)
+{
+ if (adv7511->type != ADV7533)
+ return;
+
+ if (adv7511->powered) {
+ struct mipi_dsi_device *dsi = adv7511->dsi;
+
+ adv7511_dsi_config_tgen(adv7511);
+
+ /* set number of dsi lanes */
+ regmap_write(adv7511->regmap_cec, 0x1c, dsi->lanes << 4);
+
+ /* reset internal timing generator */
+ regmap_write(adv7511->regmap_cec, 0x27, 0xcb);
+ regmap_write(adv7511->regmap_cec, 0x27, 0x8b);
+ regmap_write(adv7511->regmap_cec, 0x27, 0xcb);
+
+ /* enable hdmi */
+ regmap_write(adv7511->regmap_cec, 0x03, 0x89);
+ /* disable test mode */
+ regmap_write(adv7511->regmap_cec, 0x55, 0x00);
+ } else {
+ regmap_write(adv7511->regmap_cec, 0x03, 0x0b);
+ regmap_write(adv7511->regmap_cec, 0x27, 0x0b);
+ }
+}
+
static void adv7511_power_on(struct adv7511 *adv7511)
{
adv7511->current_edid_segment = -1;
@@ -386,7 +458,13 @@ static void adv7511_power_on(struct adv7511 *adv7511)
*/
regcache_sync(adv7511->regmap);
+ if (adv7511->type == ADV7533)
+ regmap_register_patch(adv7511->regmap_cec,
+ adv7533_cec_fixed_registers,
+ ARRAY_SIZE(adv7533_cec_fixed_registers));
adv7511->powered = true;
+
+ adv7511_dsi_receiver_dpms(adv7511);
}
static void adv7511_power_off(struct adv7511 *adv7511)
@@ -398,12 +476,15 @@ static void adv7511_power_off(struct adv7511 *adv7511)
regcache_mark_dirty(adv7511->regmap);
adv7511->powered = false;
+
+ adv7511_dsi_receiver_dpms(adv7511);
}
/* -----------------------------------------------------------------------------
* Interrupt and hotplug detection
*/
+#if HPD_ENABLE
static bool adv7511_hpd(struct adv7511 *adv7511)
{
unsigned int irq0;
@@ -421,8 +502,9 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
return false;
}
+#endif
-static int adv7511_irq_process(struct adv7511 *adv7511)
+static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
{
unsigned int irq0, irq1;
int ret;
@@ -438,7 +520,7 @@ static int adv7511_irq_process(struct adv7511 *adv7511)
regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
- if (irq0 & ADV7511_INT0_HDP && adv7511->encoder)
+ if (process_hpd && irq0 & ADV7511_INT0_HDP && adv7511->encoder)
drm_helper_hpd_irq_event(adv7511->encoder->dev);
if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
@@ -456,7 +538,7 @@ static irqreturn_t adv7511_irq_handler(int irq, void *devid)
struct adv7511 *adv7511 = devid;
int ret;
- ret = adv7511_irq_process(adv7511);
+ ret = adv7511_irq_process(adv7511, true);
return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
}
@@ -473,7 +555,7 @@ static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
adv7511->edid_read, msecs_to_jiffies(timeout));
} else {
for (; timeout > 0; timeout -= 25) {
- ret = adv7511_irq_process(adv7511);
+ ret = adv7511_irq_process(adv7511, false);
if (ret < 0)
break;
@@ -555,18 +637,19 @@ static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
}
/* -----------------------------------------------------------------------------
- * Encoder operations
+ * ADV75xx helpers
*/
-
-static int adv7511_get_modes(struct drm_encoder *encoder,
- struct drm_connector *connector)
+static int adv7511_get_modes(struct adv7511 *adv7511,
+ struct drm_connector *connector)
{
- struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
struct edid *edid;
unsigned int count;
/* Reading the EDID only works if the device is powered */
if (!adv7511->powered) {
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
+ ADV7511_REG_POWER2_HDP_SRC_MASK,
+ ADV7511_REG_POWER2_HDP_SRC_NONE);
regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
ADV7511_INT0_EDID_READY);
regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
@@ -596,24 +679,15 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
return count;
}
-static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
- struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
-
- if (mode == DRM_MODE_DPMS_ON)
- adv7511_power_on(adv7511);
- else
- adv7511_power_off(adv7511);
-}
-
static enum drm_connector_status
-adv7511_encoder_detect(struct drm_encoder *encoder,
+adv7511_detect(struct adv7511 *adv7511,
struct drm_connector *connector)
{
- struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
enum drm_connector_status status;
unsigned int val;
+#if HPD_ENABLE
bool hpd;
+#endif
int ret;
ret = regmap_read(adv7511->regmap, ADV7511_REG_STATUS, &val);
@@ -625,6 +699,7 @@ adv7511_encoder_detect(struct drm_encoder *encoder,
else
status = connector_status_disconnected;
+#if HPD_ENABLE
hpd = adv7511_hpd(adv7511);
/* The chip resets itself when the cable is disconnected, so in case
@@ -634,7 +709,7 @@ adv7511_encoder_detect(struct drm_encoder *encoder,
if (status == connector_status_connected && hpd && adv7511->powered) {
regcache_mark_dirty(adv7511->regmap);
adv7511_power_on(adv7511);
- adv7511_get_modes(encoder, connector);
+ adv7511_get_modes(adv7511, connector);
if (adv7511->status == connector_status_connected)
status = connector_status_disconnected;
} else {
@@ -643,13 +718,14 @@ adv7511_encoder_detect(struct drm_encoder *encoder,
ADV7511_REG_POWER2_HDP_SRC_MASK,
ADV7511_REG_POWER2_HDP_SRC_BOTH);
}
+#endif
adv7511->status = status;
return status;
}
-static int adv7511_encoder_mode_valid(struct drm_encoder *encoder,
- struct drm_display_mode *mode)
+static int adv7511_mode_valid(struct adv7511 *adv7511,
+ const struct drm_display_mode *mode)
{
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
@@ -657,11 +733,10 @@ static int adv7511_encoder_mode_valid(struct drm_encoder *encoder,
return MODE_OK;
}
-static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
+static void adv7511_mode_set(struct adv7511 *adv7511,
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
- struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
unsigned int low_refresh_rate;
unsigned int hsync_polarity = 0;
unsigned int vsync_polarity = 0;
@@ -744,6 +819,28 @@ static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
regmap_update_bits(adv7511->regmap, 0x17,
0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
+ if (adv7511->type == ADV7533 && adv7511->num_dsi_lanes == 4) {
+ struct mipi_dsi_device *dsi = adv7511->dsi;
+ int lanes, ret;
+
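+ /* Use 4 DSI lanes above 80 MHz pixel clock, 3 otherwise; reattach if the lane count changes */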
+ if (adj_mode->clock > 80000)
+ lanes = 4;
+ else
+ lanes = 3;
+
+ if (lanes != dsi->lanes) {
+ mipi_dsi_detach(dsi);
+ dsi->lanes = lanes;
+ ret = mipi_dsi_attach(dsi);
+ if (ret) {
+ DRM_ERROR("Failed to change host lanes\n");
+ return;
+ }
+ }
+ }
+
+ drm_mode_copy(&adv7511->curr_mode, adj_mode);
+
/*
* TODO Test first order 4:2:2 to 4:4:4 up conversion method, which is
* supposed to give better results.
@@ -752,12 +849,247 @@ static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
adv7511->f_tmds = mode->clock;
}
+/* -----------------------------------------------------------------------------
+ * Encoder operations
+ */
+
+static int adv7511_encoder_get_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+
+ return adv7511_get_modes(adv7511, connector);
+}
+
+static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+
+ if (mode == DRM_MODE_DPMS_ON)
+ adv7511_power_on(adv7511);
+ else
+ adv7511_power_off(adv7511);
+}
+
+static enum drm_connector_status
+adv7511_encoder_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+
+ return adv7511_detect(adv7511, connector);
+}
+
+static int adv7511_encoder_mode_valid(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+
+ return adv7511_mode_valid(adv7511, mode);
+}
+
+static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
+
+ adv7511_mode_set(adv7511, mode, adj_mode);
+}
+
static struct drm_encoder_slave_funcs adv7511_encoder_funcs = {
.dpms = adv7511_encoder_dpms,
.mode_valid = adv7511_encoder_mode_valid,
.mode_set = adv7511_encoder_mode_set,
.detect = adv7511_encoder_detect,
- .get_modes = adv7511_get_modes,
+ .get_modes = adv7511_encoder_get_modes,
+};
+
+/* -----------------------------------------------------------------------------
+ * Bridge and connector functions
+ */
+
+static struct adv7511 *connector_to_adv7511(struct drm_connector *connector)
+{
+ return container_of(connector, struct adv7511, connector);
+}
+
+/* Connector helper functions */
+static int adv7533_connector_get_modes(struct drm_connector *connector)
+{
+ struct adv7511 *adv = connector_to_adv7511(connector);
+
+ return adv7511_get_modes(adv, connector);
+}
+
+static struct drm_encoder *
+adv7533_connector_best_encoder(struct drm_connector *connector)
+{
+ struct adv7511 *adv = connector_to_adv7511(connector);
+
+ return adv->bridge.encoder;
+}
+
+static enum drm_mode_status
+adv7533_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct adv7511 *adv = connector_to_adv7511(connector);
+
+ return adv7511_mode_valid(adv, mode);
+}
+
+static struct drm_connector_helper_funcs adv7533_connector_helper_funcs = {
+ .get_modes = adv7533_connector_get_modes,
+ .best_encoder = adv7533_connector_best_encoder,
+ .mode_valid = adv7533_connector_mode_valid,
+};
+
+static enum drm_connector_status
+adv7533_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct adv7511 *adv = connector_to_adv7511(connector);
+
+ return adv7511_detect(adv, connector);
+}
+
+static struct drm_connector_funcs adv7533_connector_funcs = {
+ .dpms = drm_atomic_helper_connector_dpms,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = adv7533_connector_detect,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+/* Bridge funcs */
+static struct adv7511 *bridge_to_adv7511(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct adv7511, bridge);
+}
+
+static void adv7533_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+
+ adv7511_power_on(adv);
+}
+
+static void adv7533_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+
+#if HPD_ENABLE
+ if (!adv->powered)
+ return;
+#endif
+
+ adv7511_power_off(adv);
+}
+
+static void adv7533_bridge_enable(struct drm_bridge *bridge)
+{
+}
+
+static void adv7533_bridge_disable(struct drm_bridge *bridge)
+{
+}
+
+static void adv7533_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+
+ adv7511_mode_set(adv, mode, adj_mode);
+}
+
+static int adv7533_attach_dsi(struct adv7511 *adv7511)
+{
+ struct device *dev = &adv7511->i2c_main->dev;
+ struct mipi_dsi_device *dsi;
+ struct mipi_dsi_host *host;
+ int ret;
+
+ host = of_find_mipi_dsi_host_by_node(adv7511->host_node);
+ if (!host) {
+ dev_err(dev, "failed to find dsi host\n");
+ return -EPROBE_DEFER;
+ }
+
+ /* can adv7533 virtual channel be non-zero? */
+ dsi = mipi_dsi_new_dummy(host, 0);
+ if (IS_ERR(dsi)) {
+ dev_err(dev, "failed to create dummy dsi device\n");
+ ret = PTR_ERR(dsi);
+ goto err_dsi_device;
+ }
+
+ adv7511->dsi = dsi;
+
+ dsi->lanes = adv7511->num_dsi_lanes;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE
+ | MIPI_DSI_MODE_EOT_PACKET | MIPI_DSI_MODE_VIDEO_HSE;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "failed to attach dsi to host\n");
+ goto err_dsi_attach;
+ }
+
+ return 0;
+
+err_dsi_attach:
+ mipi_dsi_unregister_device(dsi);
+err_dsi_device:
+ return ret;
+}
+
+static int adv7533_bridge_attach(struct drm_bridge *bridge)
+{
+ struct adv7511 *adv = bridge_to_adv7511(bridge);
+ int ret;
+
+ adv->encoder = bridge->encoder;
+
+ if (!bridge->encoder) {
+ DRM_ERROR("Parent encoder object not found\n");
+ return -ENODEV;
+ }
+
+#if HPD_ENABLE
+ adv->connector.polled = DRM_CONNECTOR_POLL_HPD;
+#endif
+
+ ret = drm_connector_init(bridge->dev, &adv->connector,
+ &adv7533_connector_funcs, DRM_MODE_CONNECTOR_HDMIA);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
+ return ret;
+ }
+ drm_connector_helper_add(&adv->connector,
+ &adv7533_connector_helper_funcs);
+ drm_connector_register(&adv->connector);
+ drm_mode_connector_attach_encoder(&adv->connector, adv->encoder);
+
+#if HPD_ENABLE
+ drm_helper_hpd_irq_event(adv->connector.dev);
+#endif
+
+ adv7533_attach_dsi(adv);
+
+ return ret;
+}
+
+static struct drm_bridge_funcs adv7533_bridge_funcs = {
+ .pre_enable = adv7533_bridge_pre_enable,
+ .enable = adv7533_bridge_enable,
+ .disable = adv7533_bridge_disable,
+ .post_disable = adv7533_bridge_post_disable,
+ .mode_set = adv7533_bridge_mode_set,
+ .attach = adv7533_bridge_attach,
};
/* -----------------------------------------------------------------------------
@@ -770,8 +1102,6 @@ static int adv7511_parse_dt(struct device_node *np,
const char *str;
int ret;
- memset(config, 0, sizeof(*config));
-
of_property_read_u32(np, "adi,input-depth", &config->input_color_depth);
if (config->input_color_depth != 8 && config->input_color_depth != 10 &&
config->input_color_depth != 12)
@@ -849,10 +1179,54 @@ static int adv7511_parse_dt(struct device_node *np,
return 0;
}
+static int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv7511)
+{
+ u32 num_lanes;
+ struct device_node *endpoint;
+
+ if (of_property_read_u32(np, "adi,dsi-lanes", &num_lanes))
+ return -EINVAL;
+
+ if (num_lanes < 1 || num_lanes > 4)
+ return -EINVAL;
+
+ adv7511->num_dsi_lanes = num_lanes;
+
+ endpoint = of_graph_get_next_endpoint(np, NULL);
+ if (!endpoint) {
+ DRM_ERROR("adv dsi input endpoint not found\n");
+ return -ENODEV;
+ }
+
+ adv7511->host_node = of_graph_get_remote_port_parent(endpoint);
+ if (!adv7511->host_node) {
+ DRM_ERROR("dsi host node not found\n");
+ of_node_put(endpoint);
+ return -ENODEV;
+ }
+
+ of_node_put(endpoint);
+ of_node_put(adv7511->host_node);
+
+ /* TODO: Check if these need to be parsed by DT or not */
+ adv7511->rgb = true;
+ adv7511->embedded_sync = false;
+
+ return 0;
+}
+
static const int edid_i2c_addr = 0x7e;
static const int packet_i2c_addr = 0x70;
static const int cec_i2c_addr = 0x78;
+static const struct of_device_id adv7511_of_ids[] = {
+ { .compatible = "adi,adv7511", .data = (void *) ADV7511 },
+ { .compatible = "adi,adv7511w", .data = (void *) ADV7511 },
+ { .compatible = "adi,adv7513", .data = (void *) ADV7511 },
+ { .compatible = "adi,adv7533", .data = (void *) ADV7533 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adv7511_of_ids);
+
static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
{
struct adv7511_link_config link_config;
@@ -871,7 +1245,21 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
adv7511->powered = false;
adv7511->status = connector_status_disconnected;
- ret = adv7511_parse_dt(dev->of_node, &link_config);
+ if (dev->of_node) {
+ const struct of_device_id *of_id;
+
+ of_id = of_match_node(adv7511_of_ids, dev->of_node);
+ adv7511->type = (enum adv7511_type) of_id->data;
+ } else {
+ adv7511->type = id->driver_data;
+ }
+
+ memset(&link_config, 0, sizeof(link_config));
+
+ if (adv7511->type == ADV7511)
+ ret = adv7511_parse_dt(dev->of_node, &link_config);
+ else
+ ret = adv7533_parse_dt(dev->of_node, adv7511);
if (ret)
return ret;
@@ -897,10 +1285,19 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
return ret;
dev_dbg(dev, "Rev. %d\n", val);
- ret = regmap_register_patch(adv7511->regmap, adv7511_fixed_registers,
- ARRAY_SIZE(adv7511_fixed_registers));
- if (ret)
- return ret;
+ if (adv7511->type == ADV7511) {
+ ret = regmap_register_patch(adv7511->regmap,
+ adv7511_fixed_registers,
+ ARRAY_SIZE(adv7511_fixed_registers));
+ if (ret)
+ return ret;
+ } else {
+ ret = regmap_register_patch(adv7511->regmap,
+ adv7533_fixed_registers,
+ ARRAY_SIZE(adv7533_fixed_registers));
+ if (ret)
+ return ret;
+ }
regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, edid_i2c_addr);
regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
@@ -913,6 +1310,27 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
if (!adv7511->i2c_edid)
return -ENOMEM;
+ adv7511->i2c_cec = i2c_new_dummy(i2c->adapter, cec_i2c_addr >> 1);
+ if (!adv7511->i2c_cec) {
+ ret = -ENOMEM;
+ goto err_i2c_unregister_edid;
+ }
+
+ adv7511->regmap_cec = devm_regmap_init_i2c(adv7511->i2c_cec,
+ &adv7533_cec_regmap_config);
+ if (IS_ERR(adv7511->regmap_cec)) {
+ ret = PTR_ERR(adv7511->regmap_cec);
+ goto err_i2c_unregister_cec;
+ }
+
+ if (adv7511->type == ADV7533) {
+ ret = regmap_register_patch(adv7511->regmap_cec,
+ adv7533_cec_fixed_registers,
+ ARRAY_SIZE(adv7533_cec_fixed_registers));
+ if (ret)
+ return ret;
+ }
+
if (i2c->irq) {
init_waitqueue_head(&adv7511->wq);
@@ -921,7 +1339,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
IRQF_ONESHOT, dev_name(dev),
adv7511);
if (ret)
- goto err_i2c_unregister_device;
+ goto err_i2c_unregister_cec;
}
/* CEC is unused for now */
@@ -932,11 +1350,27 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
i2c_set_clientdata(i2c, adv7511);
- adv7511_set_link_config(adv7511, &link_config);
+ if (adv7511->type == ADV7511)
+ adv7511_set_link_config(adv7511, &link_config);
+
+ if (adv7511->type == ADV7533) {
+ adv7511->bridge.funcs = &adv7533_bridge_funcs;
+ adv7511->bridge.of_node = dev->of_node;
+
+ ret = drm_bridge_add(&adv7511->bridge);
+ if (ret) {
+ dev_err(dev, "failed to add adv7533 bridge\n");
+ goto err_i2c_unregister_cec;
+ }
+ }
+
+ adv7511_audio_init(dev);
return 0;
-err_i2c_unregister_device:
+err_i2c_unregister_cec:
+ i2c_unregister_device(adv7511->i2c_cec);
+err_i2c_unregister_edid:
i2c_unregister_device(adv7511->i2c_edid);
return ret;
@@ -946,10 +1380,18 @@ static int adv7511_remove(struct i2c_client *i2c)
{
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+ adv7511_audio_exit(&i2c->dev);
+ i2c_unregister_device(adv7511->i2c_cec);
i2c_unregister_device(adv7511->i2c_edid);
kfree(adv7511->edid);
+ if (adv7511->type == ADV7533) {
+ mipi_dsi_detach(adv7511->dsi);
+ mipi_dsi_unregister_device(adv7511->dsi);
+ drm_bridge_remove(&adv7511->bridge);
+ }
+
return 0;
}
@@ -959,6 +1401,9 @@ static int adv7511_encoder_init(struct i2c_client *i2c, struct drm_device *dev,
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+ if (adv7511->type == ADV7533)
+ return -ENODEV;
+
encoder->slave_priv = adv7511;
encoder->slave_funcs = &adv7511_encoder_funcs;
@@ -968,21 +1413,14 @@ static int adv7511_encoder_init(struct i2c_client *i2c, struct drm_device *dev,
}
static const struct i2c_device_id adv7511_i2c_ids[] = {
- { "adv7511", 0 },
- { "adv7511w", 0 },
- { "adv7513", 0 },
+ { "adv7511", ADV7511 },
+ { "adv7511w", ADV7511 },
+ { "adv7513", ADV7511 },
+ { "adv7533", ADV7533 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adv7511_i2c_ids);
-static const struct of_device_id adv7511_of_ids[] = {
- { .compatible = "adi,adv7511", },
- { .compatible = "adi,adv7511w", },
- { .compatible = "adi,adv7513", },
- { }
-};
-MODULE_DEVICE_TABLE(of, adv7511_of_ids);
-
static struct drm_i2c_encoder_driver adv7511_driver = {
.i2c_driver = {
.driver = {
diff --git a/drivers/gpu/drm/i2c/adv7511.h b/drivers/gpu/drm/i2c/adv7511.h
index 6599ed538426..e9008bf69caa 100644
--- a/drivers/gpu/drm/i2c/adv7511.h
+++ b/drivers/gpu/drm/i2c/adv7511.h
@@ -10,6 +10,16 @@
#define __DRM_I2C_ADV7511_H__
#include <linux/hdmi.h>
+#include <drm/drm_crtc_helper.h>
+
+struct regmap;
+struct adv7511;
+
+int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet);
+int adv7511_packet_disable(struct adv7511 *adv7511, unsigned int packet);
+
+int adv7511_audio_init(struct device *dev);
+void adv7511_audio_exit(struct device *dev);
#define ADV7511_REG_CHIP_REVISION 0x00
#define ADV7511_REG_N0 0x01
@@ -229,6 +239,54 @@ enum adv7511_sync_polarity {
ADV7511_SYNC_POLARITY_HIGH,
};
+enum adv7511_type {
+ ADV7511,
+ ADV7533,
+};
+
+struct adv7511 {
+ struct i2c_client *i2c_main;
+ struct i2c_client *i2c_edid;
+ struct i2c_client *i2c_cec;
+
+ struct regmap *regmap;
+ struct regmap *regmap_cec;
+ enum drm_connector_status status;
+ bool powered;
+
+ struct drm_display_mode curr_mode;
+
+ unsigned int f_tmds;
+ unsigned int f_audio;
+ unsigned int audio_source;
+
+ unsigned int current_edid_segment;
+ uint8_t edid_buf[256];
+ bool edid_read;
+
+ wait_queue_head_t wq;
+ struct drm_encoder *encoder;
+
+ struct drm_connector connector;
+ struct drm_bridge bridge;
+
+ bool embedded_sync;
+ enum adv7511_sync_polarity vsync_polarity;
+ enum adv7511_sync_polarity hsync_polarity;
+ bool rgb;
+
+ struct edid *edid;
+
+ struct gpio_desc *gpio_pd;
+
+ /* ADV7533 DSI RX related params */
+ struct device_node *host_node;
+ struct mipi_dsi_device *dsi;
+ u8 num_dsi_lanes;
+
+ enum adv7511_type type;
+};
+
/**
* struct adv7511_link_config - Describes adv7511 hardware configuration
* @input_color_depth: Number of bits per color component (8, 10 or 12)
diff --git a/drivers/gpu/drm/i2c/adv7511_audio.c b/drivers/gpu/drm/i2c/adv7511_audio.c
new file mode 100644
index 000000000000..52019e95d007
--- /dev/null
+++ b/drivers/gpu/drm/i2c/adv7511_audio.c
@@ -0,0 +1,312 @@
+/*
+ * Analog Devices ADV7511 HDMI transmitter driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "adv7511.h"
+
+static const struct snd_soc_dapm_widget adv7511_dapm_widgets[] = {
+ SND_SOC_DAPM_OUTPUT("TMDS"),
+ SND_SOC_DAPM_AIF_IN("AIFIN", "Playback", 0, SND_SOC_NOPM, 0, 0),
+};
+
+static const struct snd_soc_dapm_route adv7511_routes[] = {
+ { "TMDS", NULL, "AIFIN" },
+};
+
+static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs,
+ unsigned int *cts, unsigned int *n)
+{
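+ /* N values recommended by the HDMI specification for the supported sample rates */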
+ switch (fs) {
+ case 32000:
+ *n = 4096;
+ break;
+ case 44100:
+ *n = 6272;
+ break;
+ case 48000:
+ *n = 6144;
+ break;
+ }
+
+ *cts = ((f_tmds * *n) / (128 * fs)) * 1000;
+}
+
+static int adv7511_update_cts_n(struct adv7511 *adv7511)
+{
+ unsigned int cts = 0;
+ unsigned int n = 0;
+
+ adv7511_calc_cts_n(adv7511->f_tmds, adv7511->f_audio, &cts, &n);
+
+ regmap_write(adv7511->regmap, ADV7511_REG_N0, (n >> 16) & 0xf);
+ regmap_write(adv7511->regmap, ADV7511_REG_N1, (n >> 8) & 0xff);
+ regmap_write(adv7511->regmap, ADV7511_REG_N2, n & 0xff);
+
+ regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL0,
+ (cts >> 16) & 0xf);
+ regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL1,
+ (cts >> 8) & 0xff);
+ regmap_write(adv7511->regmap, ADV7511_REG_CTS_MANUAL2,
+ cts & 0xff);
+
+ return 0;
+}
+
+static int adv7511_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct adv7511 *adv7511 = snd_soc_codec_get_drvdata(codec);
+ unsigned int rate;
+ unsigned int len;
+ switch (params_rate(params)) {
+ case 32000:
+ rate = ADV7511_SAMPLE_FREQ_32000;
+ break;
+ case 44100:
+ rate = ADV7511_SAMPLE_FREQ_44100;
+ break;
+ case 48000:
+ rate = ADV7511_SAMPLE_FREQ_48000;
+ break;
+ case 88200:
+ rate = ADV7511_SAMPLE_FREQ_88200;
+ break;
+ case 96000:
+ rate = ADV7511_SAMPLE_FREQ_96000;
+ break;
+ case 176400:
+ rate = ADV7511_SAMPLE_FREQ_176400;
+ break;
+ case 192000:
+ rate = ADV7511_SAMPLE_FREQ_192000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ len = ADV7511_I2S_SAMPLE_LEN_16;
+ break;
+ case SNDRV_PCM_FORMAT_S18_3LE:
+ len = ADV7511_I2S_SAMPLE_LEN_18;
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ len = ADV7511_I2S_SAMPLE_LEN_20;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ len = ADV7511_I2S_SAMPLE_LEN_24;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ adv7511->f_audio = params_rate(params);
+
+ adv7511_update_cts_n(adv7511);
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CFG3,
+ ADV7511_AUDIO_CFG3_LEN_MASK, len);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG,
+ ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4);
+ regmap_write(adv7511->regmap, 0x73, 0x1);
+
+ return 0;
+}
+
+static int adv7511_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int fmt)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct adv7511 *adv7511 = snd_soc_codec_get_drvdata(codec);
+ unsigned int audio_source, i2s_format = 0;
+ unsigned int invert_clock;
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ audio_source = ADV7511_AUDIO_SOURCE_I2S;
+ i2s_format = ADV7511_I2S_FORMAT_I2S;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ audio_source = ADV7511_AUDIO_SOURCE_I2S;
+ i2s_format = ADV7511_I2S_FORMAT_RIGHT_J;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ audio_source = ADV7511_AUDIO_SOURCE_I2S;
+ i2s_format = ADV7511_I2S_FORMAT_LEFT_J;
+ break;
+// case SND_SOC_DAIFMT_SPDIF:
+// audio_source = ADV7511_AUDIO_SOURCE_SPDIF;
+// break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBS_CFS:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ invert_clock = 0;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ invert_clock = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_SOURCE, 0x70,
+ audio_source << 4);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG, BIT(6),
+ invert_clock << 6);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_I2S_CONFIG, 0x03,
+ i2s_format);
+
+ adv7511->audio_source = audio_source;
+
+ return 0;
+}
+
+static int adv7511_set_bias_level(struct snd_soc_codec *codec,
+ enum snd_soc_bias_level level)
+{
+ struct adv7511 *adv7511 = snd_soc_codec_get_drvdata(codec);
+ struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
+
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ switch (adv7511->audio_source) {
+ case ADV7511_AUDIO_SOURCE_I2S:
+ break;
+ case ADV7511_AUDIO_SOURCE_SPDIF:
+ regmap_update_bits(adv7511->regmap,
+ ADV7511_REG_AUDIO_CONFIG, BIT(7),
+ BIT(7));
+ break;
+ }
+ break;
+ case SND_SOC_BIAS_PREPARE:
+ if (dapm->bias_level == SND_SOC_BIAS_STANDBY) {
+ adv7511_packet_enable(adv7511,
+ ADV7511_PACKET_ENABLE_AUDIO_SAMPLE);
+ adv7511_packet_enable(adv7511,
+ ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME);
+ adv7511_packet_enable(adv7511,
+ ADV7511_PACKET_ENABLE_N_CTS);
+ } else {
+ adv7511_packet_disable(adv7511,
+ ADV7511_PACKET_ENABLE_AUDIO_SAMPLE);
+ adv7511_packet_disable(adv7511,
+ ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME);
+ adv7511_packet_disable(adv7511,
+ ADV7511_PACKET_ENABLE_N_CTS);
+ }
+ break;
+ case SND_SOC_BIAS_STANDBY:
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
+ BIT(7), 0);
+ break;
+ case SND_SOC_BIAS_OFF:
+ break;
+ }
+ dapm->bias_level = level;
+ return 0;
+}
+
+#define ADV7511_RATES (SNDRV_PCM_RATE_32000 |\
+ SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\
+ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 |\
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000)
+
+#define ADV7511_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S18_3LE |\
+ SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE)
+
+static const struct snd_soc_dai_ops adv7511_dai_ops = {
+ .hw_params = adv7511_hw_params,
+ /*.set_sysclk = adv7511_set_dai_sysclk,*/
+ .set_fmt = adv7511_set_dai_fmt,
+};
+
+static struct snd_soc_dai_driver adv7511_dai = {
+ .name = "adv7511",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = ADV7511_RATES,
+ .formats = ADV7511_FORMATS,
+ },
+ .ops = &adv7511_dai_ops,
+};
+
+static int adv7511_suspend(struct snd_soc_codec *codec)
+{
+ return adv7511_set_bias_level(codec, SND_SOC_BIAS_OFF);
+}
+
+static int adv7511_resume(struct snd_soc_codec *codec)
+{
+ return adv7511_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+}
+
+static int adv7511_probe(struct snd_soc_codec *codec)
+{
+ return adv7511_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+}
+
+static int adv7511_remove(struct snd_soc_codec *codec)
+{
+ adv7511_set_bias_level(codec, SND_SOC_BIAS_OFF);
+ return 0;
+}
+
+static struct snd_soc_codec_driver adv7511_codec_driver = {
+ .probe = adv7511_probe,
+ .remove = adv7511_remove,
+ .suspend = adv7511_suspend,
+ .resume = adv7511_resume,
+ .set_bias_level = adv7511_set_bias_level,
+
+ .dapm_widgets = adv7511_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(adv7511_dapm_widgets),
+ .dapm_routes = adv7511_routes,
+ .num_dapm_routes = ARRAY_SIZE(adv7511_routes),
+};
+
+int adv7511_audio_init(struct device *dev)
+{
+ return snd_soc_register_codec(dev, &adv7511_codec_driver,
+ &adv7511_dai, 1);
+}
+
+void adv7511_audio_exit(struct device *dev)
+{
+ snd_soc_unregister_codec(dev);
+}
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 8d82973fe9db..d62e11eb3a55 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -131,6 +131,7 @@ struct msm_dsi_host {
enum mipi_dsi_pixel_format format;
unsigned long mode_flags;
+ u32 dlane_swap;
u32 dma_cmd_ctrl_restore;
bool registered;
@@ -684,19 +685,9 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
data = DSI_CTRL_CLK_EN;
DBG("lane number=%d", msm_host->lanes);
- if (msm_host->lanes == 2) {
- data |= DSI_CTRL_LANE1 | DSI_CTRL_LANE2;
- /* swap lanes for 2-lane panel for better performance */
- dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
- DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_1230));
- } else {
- /* Take 4 lanes as default */
- data |= DSI_CTRL_LANE0 | DSI_CTRL_LANE1 | DSI_CTRL_LANE2 |
- DSI_CTRL_LANE3;
- /* Do not swap lanes for 4-lane panel */
- dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
- DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(LANE_SWAP_0123));
- }
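+ /* Enable the lane-enable bits for all configured lanes */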
+ data |= ((DSI_CTRL_LANE0 << msm_host->lanes) - DSI_CTRL_LANE0);
+ dsi_write(msm_host, REG_DSI_LANE_SWAP_CTRL,
+ DSI_LANE_SWAP_CTRL_DLN_SWAP_SEL(msm_host->dlane_swap));
if (!(flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
dsi_write(msm_host, REG_DSI_LANE_CTRL,
@@ -765,7 +756,9 @@ static void dsi_sw_reset(struct msm_dsi_host *msm_host)
dsi_write(msm_host, REG_DSI_RESET, 1);
wmb(); /* make sure reset happens */
+ mdelay(100);
dsi_write(msm_host, REG_DSI_RESET, 0);
+ wmb();
}
static void dsi_op_mode_config(struct msm_dsi_host *msm_host,
@@ -1289,20 +1282,21 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
int ret;
+ if (dsi->lanes > 4 || dsi->channel > 3)
+ return -EINVAL;
+
msm_host->channel = dsi->channel;
msm_host->lanes = dsi->lanes;
msm_host->format = dsi->format;
msm_host->mode_flags = dsi->mode_flags;
- WARN_ON(dsi->dev.of_node != msm_host->device_node);
-
/* Some gpios defined in panel DT need to be controlled by host */
ret = dsi_host_init_panel_gpios(msm_host, &dsi->dev);
if (ret)
return ret;
DBG("id=%d", msm_host->id);
- if (msm_host->dev)
+ if (msm_host->dev && of_drm_find_panel(msm_host->device_node))
drm_helper_hpd_irq_event(msm_host->dev);
return 0;
@@ -1316,7 +1310,7 @@ static int dsi_host_detach(struct mipi_dsi_host *host,
msm_host->device_node = NULL;
DBG("id=%d", msm_host->id);
- if (msm_host->dev)
+ if (msm_host->dev && of_drm_find_panel(msm_host->device_node))
drm_helper_hpd_irq_event(msm_host->dev);
return 0;
@@ -1344,6 +1338,33 @@ static struct mipi_dsi_host_ops dsi_host_ops = {
.transfer = dsi_host_transfer,
};
+static void dsi_parse_dlane_swap(struct msm_dsi_host *msm_host,
+ struct device_node *np)
+{
+ const char *lane_swap;
+
+ lane_swap = of_get_property(np, "qcom,dsi-logical-lane-swap", NULL);
+
+ if (!lane_swap)
+ msm_host->dlane_swap = LANE_SWAP_0123;
+ else if (!strncmp(lane_swap, "3012", 5))
+ msm_host->dlane_swap = LANE_SWAP_3012;
+ else if (!strncmp(lane_swap, "2301", 5))
+ msm_host->dlane_swap = LANE_SWAP_2301;
+ else if (!strncmp(lane_swap, "1230", 5))
+ msm_host->dlane_swap = LANE_SWAP_1230;
+ else if (!strncmp(lane_swap, "0321", 5))
+ msm_host->dlane_swap = LANE_SWAP_0321;
+ else if (!strncmp(lane_swap, "1032", 5))
+ msm_host->dlane_swap = LANE_SWAP_1032;
+ else if (!strncmp(lane_swap, "2103", 5))
+ msm_host->dlane_swap = LANE_SWAP_2103;
+ else if (!strncmp(lane_swap, "3210", 5))
+ msm_host->dlane_swap = LANE_SWAP_3210;
+ else
+ msm_host->dlane_swap = LANE_SWAP_0123;
+}
+
static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
{
struct device *dev = &msm_host->pdev->dev;
@@ -1358,6 +1379,8 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
return ret;
}
+ dsi_parse_dlane_swap(msm_host, np);
+
/*
* Get the first endpoint node. In our case, dsi has one output port
* to which the panel is connected. Don't return an error if a port
@@ -1862,7 +1885,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host)
dsi_phy_sw_reset(msm_host);
ret = msm_dsi_manager_phy_enable(msm_host->id,
msm_host->byte_clk_rate * 8,
- clk_get_rate(msm_host->esc_clk),
+ 19200000,/*clk_get_rate(msm_host->esc_clk),*/
&clk_pre, &clk_post);
dsi_bus_clk_disable(msm_host);
if (ret) {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 077f7521a971..5e812b60a69d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -182,9 +182,20 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
}
+static const char *iommu_ports[] = {
+ "mdp_port0_cb0", "mdp_port1_cb0",
+};
+
static void mdp4_destroy(struct msm_kms *kms)
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ struct msm_mmu *mmu = mdp4_kms->mmu;
+
+ if (mmu) {
+ mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
+ mmu->funcs->destroy(mmu);
+ }
+
if (mdp4_kms->blank_cursor_iova)
msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
if (mdp4_kms->blank_cursor_bo)
@@ -262,6 +273,11 @@ static struct drm_panel *detect_panel(struct drm_device *dev)
of_node_put(endpoint);
+ if (!of_device_is_available(panel_node)) {
+ dev_err(dev->dev, "panel is not enabled in DT\n");
+ return ERR_PTR(-ENODEV);
+ }
+
panel = of_drm_find_panel(panel_node);
if (!panel) {
of_node_put(panel_node);
@@ -313,45 +329,55 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
if (IS_ERR(panel)) {
ret = PTR_ERR(panel);
dev_err(dev->dev, "failed to detect LVDS panel: %d\n", ret);
- goto fail;
- }
+ /*
+ * Only fail if a panel is present but not yet ready;
+ * continue with the rest of the setup when no panel is connected.
+ */
+ if (ret == -EPROBE_DEFER)
+ goto fail;
+ } else {
+ plane = mdp4_plane_init(dev, RGB2, true);
+ if (IS_ERR(plane)) {
+ dev_err(dev->dev,
+ "failed to construct plane for RGB2\n");
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
- plane = mdp4_plane_init(dev, RGB2, true);
- if (IS_ERR(plane)) {
- dev_err(dev->dev, "failed to construct plane for RGB2\n");
- ret = PTR_ERR(plane);
- goto fail;
- }
+ crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 0, DMA_P);
+ if (IS_ERR(crtc)) {
+ dev_err(dev->dev,
+ "failed to construct crtc for DMA_P\n");
+ ret = PTR_ERR(crtc);
+ goto fail;
+ }
- crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 0, DMA_P);
- if (IS_ERR(crtc)) {
- dev_err(dev->dev, "failed to construct crtc for DMA_P\n");
- ret = PTR_ERR(crtc);
- goto fail;
- }
+ encoder = mdp4_lcdc_encoder_init(dev, panel);
+ if (IS_ERR(encoder)) {
+ dev_err(dev->dev,
+ "failed to construct LCDC encoder\n");
+ ret = PTR_ERR(encoder);
+ goto fail;
+ }
- encoder = mdp4_lcdc_encoder_init(dev, panel);
- if (IS_ERR(encoder)) {
- dev_err(dev->dev, "failed to construct LCDC encoder\n");
- ret = PTR_ERR(encoder);
- goto fail;
- }
+ /* LCDC can be hooked to DMA_P: */
+ encoder->possible_crtcs = 1 << priv->num_crtcs;
- /* LCDC can be hooked to DMA_P: */
- encoder->possible_crtcs = 1 << priv->num_crtcs;
+ priv->crtcs[priv->num_crtcs++] = crtc;
+ priv->encoders[priv->num_encoders++] = encoder;
- priv->crtcs[priv->num_crtcs++] = crtc;
- priv->encoders[priv->num_encoders++] = encoder;
+ connector = mdp4_lvds_connector_init(dev, panel, encoder);
+ if (IS_ERR(connector)) {
+ ret = PTR_ERR(connector);
+ dev_err(dev->dev,
+ "failed to initialize LVDS connector: %d\n",
+ ret);
+ goto fail;
+ }
- connector = mdp4_lvds_connector_init(dev, panel, encoder);
- if (IS_ERR(connector)) {
- ret = PTR_ERR(connector);
- dev_err(dev->dev, "failed to initialize LVDS connector: %d\n", ret);
- goto fail;
+ priv->connectors[priv->num_connectors++] = connector;
}
- priv->connectors[priv->num_connectors++] = connector;
-
/*
* Setup DTV/HDMI path: RGB1 -> DMA_E -> DTV -> HDMI:
*/
@@ -398,10 +424,6 @@ fail:
return ret;
}
-static const char *iommu_ports[] = {
- "mdp_port0_cb0", "mdp_port1_cb0",
-};
-
struct msm_kms *mdp4_kms_init(struct drm_device *dev)
{
struct platform_device *pdev = dev->platformdev;
@@ -506,6 +528,8 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
ARRAY_SIZE(iommu_ports));
if (ret)
goto fail;
+
+ mdp4_kms->mmu = mmu;
} else {
dev_info(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 8a7f6e1e2bca..b3ab4ea0ecf0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -45,6 +45,7 @@ struct mdp4_kms {
struct clk *pclk;
struct clk *lut_clk;
struct clk *axi_clk;
+ struct msm_mmu *mmu;
struct mdp_irq error_handler;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 047cb0433ccb..b50cc5c65b65 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -32,6 +32,7 @@ static int mdp5_hw_init(struct msm_kms *kms)
unsigned long flags;
pm_runtime_get_sync(dev->dev);
+ mdp5_enable(mdp5_kms);
/* Magic unknown register writes:
*
@@ -63,7 +64,8 @@ static int mdp5_hw_init(struct msm_kms *kms)
mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
- pm_runtime_put_sync(dev->dev);
+ mdp5_disable(mdp5_kms);
+ //pm_runtime_put_sync(dev->dev);
return 0;
}
@@ -578,7 +580,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
continue;
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
}
- mdp5_disable(mdp5_kms);
+ /* TODO: Remove this after runtime pm adaptation */
+ //mdp5_disable(mdp5_kms);
mdelay(16);
if (config->platform.iommu) {
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 0339c5d82d37..28c9a2aeb314 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -415,7 +415,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
pm_runtime_get_sync(dev->dev);
ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
- pm_runtime_put_sync(dev->dev);
+ //pm_runtime_put_sync(dev->dev);
if (ret < 0) {
dev_err(dev->dev, "failed to install IRQ handler\n");
goto fail;
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 7ac2f1997e4a..f033d48cd1f3 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -18,6 +18,8 @@
#include "msm_drv.h"
#include "msm_mmu.h"
+#define DUMMY_CONTEXT 0x1
+
struct msm_iommu {
struct msm_mmu base;
struct iommu_domain *domain;
@@ -33,14 +35,51 @@ static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
{
+ struct device *dev = mmu->dev;
struct msm_iommu *iommu = to_msm_iommu(mmu);
- return iommu_attach_device(iommu->domain, mmu->dev);
+ int i, ret;
+
+ for (i = 0; i < cnt; i++) {
+ struct device *msm_iommu_get_ctx(const char *ctx_name);
+ struct device *ctx = msm_iommu_get_ctx(names[i]);
+ if (IS_ERR_OR_NULL(ctx)) {
+ dev_warn(dev, "couldn't get %s context", names[i]);
+ continue;
+ }
+
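+ /* msm_iommu_get_ctx() may return a dummy context; in that case attach the domain to the client device itself */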
+ if (ctx == (struct device *)DUMMY_CONTEXT) {
+ return iommu_attach_device(iommu->domain, mmu->dev);
+ } else {
+ ret = iommu_attach_device(iommu->domain, ctx);
+ }
+
+ if (ret) {
+ dev_warn(dev, "could not attach iommu to %s", names[i]);
+ return ret;
+ }
+ }
+
+ return 0;
}
static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- iommu_detach_device(iommu->domain, mmu->dev);
+ int i;
+
+ for (i = 0; i < cnt; i++) {
+ struct device *msm_iommu_get_ctx(const char *ctx_name);
+ struct device *ctx = msm_iommu_get_ctx(names[i]);
+ if (IS_ERR_OR_NULL(ctx))
+ continue;
+
+ if (ctx == (struct device *)DUMMY_CONTEXT) {
+ iommu_detach_device(iommu->domain, mmu->dev);
+ break;
+ } else {
+ iommu_detach_device(iommu->domain, ctx);
+ }
+ }
}
static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index f97b73ec4713..34d73a32e78a 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -32,6 +32,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
+#include <drm/drm_edid.h>
#include <video/display_timing.h>
#include <video/videomode.h>
@@ -70,6 +71,18 @@ struct panel_desc {
u32 bus_format;
};
+#define PANEL_PICKER_ENTRY(vend, pid, pdesc) \
+ .vendor = vend, \
+ .product_id = (pid), \
+ .data = (pdesc)
+
+/* Panel picker entry with vendor and product id */
+struct panel_picker_entry {
+ char vendor[4]; /* Vendor string */
+ int product_id; /* product id field */
+ const struct panel_desc *data;
+};
+
struct panel_simple {
struct drm_panel base;
bool prepared;
@@ -84,6 +97,8 @@ struct panel_simple {
struct gpio_desc *enable_gpio;
};
+static const struct panel_desc *panel_picker_find_panel(struct edid *edid);
+
static inline struct panel_simple *to_panel_simple(struct drm_panel *panel)
{
return container_of(panel, struct panel_simple, base);
@@ -276,11 +291,28 @@ static const struct drm_panel_funcs panel_simple_funcs = {
.get_timings = panel_simple_get_timings,
};
+static void __init simple_panel_node_disable(struct device_node *node)
+{
+ struct property *prop;
+
+ prop = kzalloc(sizeof(*prop), GFP_KERNEL);
+ if (!prop)
+ return;
+
+ prop->name = "status";
+ prop->value = "disabled";
+ prop->length = strlen((char *)prop->value)+1;
+
+ of_update_property(node, prop);
+}
+
+
static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
{
struct device_node *backlight, *ddc;
struct panel_simple *panel;
int err;
+ struct edid *edid;
panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
if (!panel)
@@ -288,7 +320,6 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
panel->enabled = false;
panel->prepared = false;
- panel->desc = desc;
panel->supply = devm_regulator_get(dev, "power");
if (IS_ERR(panel->supply))
@@ -316,7 +347,25 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
panel->ddc = of_find_i2c_adapter_by_node(ddc);
of_node_put(ddc);
- if (!panel->ddc) {
+ if (panel->ddc) {
+ /* detect panel presence */
+ if (!drm_probe_ddc(panel->ddc)) {
+ err = -ENODEV;
+ goto nodev;
+ }
+
+ /* get panel from edid */
+ if (of_device_is_compatible(dev->of_node,
+ "panel-simple")) {
+ edid = drm_get_edid_early(panel->ddc);
+ if (edid) {
+ desc = panel_picker_find_panel(edid);
+ } else {
+ err = -ENODEV;
+ goto nodev;
+ }
+ }
+ } else {
err = -EPROBE_DEFER;
goto free_backlight;
}
@@ -325,6 +374,7 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
drm_panel_init(&panel->base);
panel->base.dev = dev;
panel->base.funcs = &panel_simple_funcs;
+ panel->desc = desc;
err = drm_panel_add(&panel->base);
if (err < 0)
@@ -334,6 +384,10 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
return 0;
+nodev:
+ /* mark the dt as disabled */
+ simple_panel_node_disable(dev->of_node);
+
free_ddc:
if (panel->ddc)
put_device(&panel->ddc->dev);
@@ -1096,6 +1150,10 @@ static const struct panel_desc shelly_sca07010_bfn_lnn = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct panel_picker_entry panel_picker_list[] = {
+ { PANEL_PICKER_ENTRY("AUO", 0x10dc, &auo_b101xtn01) },
+};
+
static const struct of_device_id platform_of_match[] = {
{
.compatible = "ampire,am800480r3tmqwa1h",
@@ -1191,11 +1249,32 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "shelly,sca07010-bfn-lnn",
.data = &shelly_sca07010_bfn_lnn,
}, {
+ /* Panel-picker entry: the panel is identified at probe time by its EDID vendor and product ID */
+ .compatible = "panel-simple",
+ }, {
/* sentinel */
}
};
MODULE_DEVICE_TABLE(of, platform_of_match);
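+/* Look up a panel_desc by matching the EDID vendor string and product id against panel_picker_list */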
+static const struct panel_desc *panel_picker_find_panel(struct edid *edid)
+{
+ int i;
+ const struct panel_desc *desc = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(panel_picker_list); i++) {
+ const struct panel_picker_entry *vp = &panel_picker_list[i];
+
+ if (edid_vendor(edid, (char *)vp->vendor) &&
+ (EDID_PRODUCT_ID(edid) == vp->product_id)) {
+ desc = vp->data;
+ break;
+ }
+ }
+
+ return desc;
+}
+
static int panel_simple_platform_probe(struct platform_device *pdev)
{
const struct of_device_id *id;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 70a11ac38119..47031883d8d1 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2043,6 +2043,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LILLIPUT, USB_PROD_ID_LILLIPUT) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index f769208276ae..e10908b7ec88 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -1047,4 +1047,8 @@
#define USB_DEVICE_ID_RAPHNET_2NES2SNES 0x0002
#define USB_DEVICE_ID_RAPHNET_4NES4SNES 0x0003
+/* Lilliput Capacitive TouchScreen */
+#define USB_VENDOR_ID_LILLIPUT 0x1391
+#define USB_PROD_ID_LILLIPUT 0x2112
+
#endif
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 5c6795509001..0d5880021590 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1169,6 +1169,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_CJTOUCH,
USB_DEVICE_ID_CJTOUCH_MULTI_TOUCH_0040) },
+ /* Lilliput multitouch panels */
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_LILLIPUT,
+ USB_PROD_ID_LILLIPUT) },
+
/* CVTouch panels */
{ .driver_data = MT_CLS_NSMU,
MT_USB_DEVICE(USB_VENDOR_ID_CVTOUCH,
diff --git a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c
index fdcbdab808e9..810b021c7ea0 100644
--- a/drivers/i2c/busses/i2c-qup.c
+++ b/drivers/i2c/busses/i2c-qup.c
@@ -24,6 +24,11 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/atomic.h>
+#include <linux/dmaengine.h>
+#include <linux/dmapool.h>
/* QUP Registers */
#define QUP_CONFIG 0x000
@@ -33,6 +38,7 @@
#define QUP_OPERATIONAL 0x018
#define QUP_ERROR_FLAGS 0x01c
#define QUP_ERROR_FLAGS_EN 0x020
+#define QUP_OPERATIONAL_MASK 0x028
#define QUP_HW_VERSION 0x030
#define QUP_MX_OUTPUT_CNT 0x100
#define QUP_OUT_FIFO_BASE 0x110
@@ -43,6 +49,8 @@
#define QUP_I2C_CLK_CTL 0x400
#define QUP_I2C_STATUS 0x404
+#define QUP_I2C_MASTER_GEN 0x408
+
/* QUP States and reset values */
#define QUP_RESET_STATE 0
#define QUP_RUN_STATE 1
@@ -51,6 +59,7 @@
#define QUP_STATE_VALID BIT(2)
#define QUP_I2C_MAST_GEN BIT(4)
+#define QUP_I2C_FLUSH BIT(6)
#define QUP_OPERATIONAL_RESET 0x000ff0
#define QUP_I2C_STATUS_RESET 0xfffffc
@@ -69,16 +78,22 @@
#define QUP_CLOCK_AUTO_GATE BIT(13)
#define I2C_MINI_CORE (2 << 8)
#define I2C_N_VAL 15
+#define I2C_N_VAL_V2 7
+
/* Most significant word offset in FIFO port */
#define QUP_MSW_SHIFT (I2C_N_VAL + 1)
/* Packing/Unpacking words in FIFOs, and IO modes */
#define QUP_OUTPUT_BLK_MODE (1 << 10)
+#define QUP_OUTPUT_BAM_MODE (3 << 10)
#define QUP_INPUT_BLK_MODE (1 << 12)
+#define QUP_INPUT_BAM_MODE (3 << 12)
+#define QUP_BAM_MODE (QUP_OUTPUT_BAM_MODE | QUP_INPUT_BAM_MODE)
#define QUP_UNPACK_EN BIT(14)
#define QUP_PACK_EN BIT(15)
#define QUP_REPACK_EN (QUP_UNPACK_EN | QUP_PACK_EN)
+#define QUP_V2_TAGS_EN 1
#define QUP_OUTPUT_BLOCK_SIZE(x)(((x) >> 0) & 0x03)
#define QUP_OUTPUT_FIFO_SIZE(x) (((x) >> 2) & 0x07)
@@ -90,6 +105,15 @@
#define QUP_TAG_DATA (2 << 8)
#define QUP_TAG_STOP (3 << 8)
#define QUP_TAG_REC (4 << 8)
+#define QUP_BAM_INPUT_EOT 0x93
+#define QUP_BAM_FLUSH_STOP 0x96
+
+/* QUP v2 tags */
+#define QUP_TAG_V2_START 0x81
+#define QUP_TAG_V2_DATAWR 0x82
+#define QUP_TAG_V2_DATAWR_STOP 0x83
+#define QUP_TAG_V2_DATARD 0x85
+#define QUP_TAG_V2_DATARD_STOP 0x87
/* Status, Error flags */
#define I2C_STATUS_WR_BUFFER_FULL BIT(0)
@@ -98,6 +122,43 @@
#define QUP_STATUS_ERROR_FLAGS 0x7c
#define QUP_READ_LIMIT 256
+#define SET_BIT 0x1
+#define RESET_BIT 0x0
+#define ONE_BYTE 0x1
+#define QUP_I2C_MX_CONFIG_DURING_RUN BIT(31)
+
+#define MX_TX_RX_LEN SZ_64K
+#define MX_BLOCKS (MX_TX_RX_LEN / QUP_READ_LIMIT)
+
+/* Max timeout in ms for 32k bytes */
+#define TOUT_MAX 300
+
+struct qup_i2c_block {
+ int count;
+ int pos;
+ int tx_tag_len;
+ int rx_tag_len;
+ int data_len;
+ u8 tags[6];
+ int config_run;
+};
+
+struct qup_i2c_tag {
+ u8 *start;
+ dma_addr_t addr;
+};
+
+struct qup_i2c_bam_rx {
+ struct qup_i2c_tag scratch_tag;
+ struct dma_chan *dma_rx;
+ struct scatterlist *sg_rx;
+};
+
+struct qup_i2c_bam_tx {
+ struct qup_i2c_tag footer_tag;
+ struct dma_chan *dma_tx;
+ struct scatterlist *sg_tx;
+};
struct qup_i2c_dev {
struct device *dev;
@@ -114,6 +175,7 @@ struct qup_i2c_dev {
int in_blk_sz;
unsigned long one_byte_t;
+ struct qup_i2c_block blk;
struct i2c_msg *msg;
/* Current position in user message buffer */
@@ -123,6 +185,24 @@ struct qup_i2c_dev {
/* QUP core errors */
u32 qup_err;
+ int use_v2_tags;
+
+ int (*qup_i2c_write_one)(struct qup_i2c_dev *qup,
+ struct i2c_msg *msg);
+ int (*qup_i2c_read_one)(struct qup_i2c_dev *qup,
+ struct i2c_msg *msg);
+
+ /* Current i2c_msg in i2c_msgs */
+ int cmsg;
+ /* total num of i2c_msgs */
+ int num;
+
+ /* dma parameters */
+ bool is_dma;
+ struct dma_pool *dpool;
+ struct qup_i2c_tag start_tag;
+ struct qup_i2c_bam_rx brx;
+ struct qup_i2c_bam_tx btx;
struct completion xfer;
};
@@ -199,6 +279,14 @@ static int qup_i2c_poll_state(struct qup_i2c_dev *qup, u32 req_state)
return qup_i2c_poll_state_mask(qup, req_state, QUP_STATE_MASK);
}
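+/* Set the I2C flush bit in the QUP state register (used to flush the transaction after an error on the BAM path) */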
+static void qup_i2c_flush(struct qup_i2c_dev *qup)
+{
+ u32 val = readl(qup->base + QUP_STATE);
+
+ val |= QUP_I2C_FLUSH;
+ writel(val, qup->base + QUP_STATE);
+}
+
static int qup_i2c_poll_state_valid(struct qup_i2c_dev *qup)
{
return qup_i2c_poll_state_mask(qup, 0, 0);
@@ -221,53 +309,72 @@ static int qup_i2c_change_state(struct qup_i2c_dev *qup, u32 state)
return 0;
}
-static int qup_i2c_wait_writeready(struct qup_i2c_dev *qup)
+/**
+ * qup_i2c_wait_ready - wait for a given number of bytes in the TX/RX path
+ * @qup: The qup_i2c_dev device
+ * @op: The bit/event to wait on
+ * @val: value of the bit to wait on, 0 or 1
+ * @len: The number of bytes to be transferred
+ */
+static int qup_i2c_wait_ready(struct qup_i2c_dev *qup, int op, bool val,
+ int len)
{
unsigned long timeout;
u32 opflags;
u32 status;
+ u32 shift = __ffs(op);
- timeout = jiffies + HZ;
+ len *= qup->one_byte_t;
+ /* timeout after a wait of twice the max time */
+ timeout = jiffies + len * 4;
for (;;) {
opflags = readl(qup->base + QUP_OPERATIONAL);
status = readl(qup->base + QUP_I2C_STATUS);
- if (!(opflags & QUP_OUT_NOT_EMPTY) &&
- !(status & I2C_STATUS_BUS_ACTIVE))
- return 0;
+ if (((opflags & op) >> shift) == val) {
+ if ((op == QUP_OUT_NOT_EMPTY) &&
+ (qup->cmsg == (qup->num - 1))) {
+ if (!(status & I2C_STATUS_BUS_ACTIVE))
+ return 0;
+ } else {
+ return 0;
+ }
+ }
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
- usleep_range(qup->one_byte_t, qup->one_byte_t * 2);
+ usleep_range(len, len * 2);
}
}
static void qup_i2c_set_write_mode(struct qup_i2c_dev *qup, struct i2c_msg *msg)
{
/* Number of entries to shift out, including the start */
- int total = msg->len + 1;
+ int total = msg->len + qup->blk.tx_tag_len;
if (total < qup->out_fifo_sz) {
/* FIFO mode */
writel(QUP_REPACK_EN, qup->base + QUP_IO_MODE);
- writel(total, qup->base + QUP_MX_WRITE_CNT);
+ writel(total | qup->blk.config_run,
+ qup->base + QUP_MX_WRITE_CNT);
} else {
/* BLOCK mode (transfer data on chunks) */
writel(QUP_OUTPUT_BLK_MODE | QUP_REPACK_EN,
qup->base + QUP_IO_MODE);
- writel(total, qup->base + QUP_MX_OUTPUT_CNT);
+ writel(total | qup->blk.config_run,
+ qup->base + QUP_MX_OUTPUT_CNT);
}
}
-static void qup_i2c_issue_write(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+static int qup_i2c_issue_write(struct qup_i2c_dev *qup, struct i2c_msg *msg)
{
u32 addr = msg->addr << 1;
u32 qup_tag;
- u32 opflags;
int idx;
u32 val;
+ int ret = 0;
if (qup->pos == 0) {
val = QUP_TAG_START | addr;
@@ -279,9 +386,10 @@ static void qup_i2c_issue_write(struct qup_i2c_dev *qup, struct i2c_msg *msg)
while (qup->pos < msg->len) {
/* Check that there's space in the FIFO for our pair */
- opflags = readl(qup->base + QUP_OPERATIONAL);
- if (opflags & QUP_OUT_FULL)
- break;
+ ret = qup_i2c_wait_ready(qup, QUP_OUT_FULL, RESET_BIT,
+ 4 * ONE_BYTE);
+ if (ret)
+ return ret;
if (qup->pos == msg->len - 1)
qup_tag = QUP_TAG_STOP;
@@ -300,55 +408,238 @@ static void qup_i2c_issue_write(struct qup_i2c_dev *qup, struct i2c_msg *msg)
qup->pos++;
idx++;
}
+
+ return ret;
}
-static int qup_i2c_write_one(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+static void qup_i2c_get_blk_data(struct qup_i2c_dev *qup,
+ struct i2c_msg *msg)
{
- unsigned long left;
+ memset(&qup->blk, 0, sizeof(qup->blk));
+
+ if (!qup->use_v2_tags) {
+ if (!(msg->flags & I2C_M_RD))
+ qup->blk.tx_tag_len = 1;
+ return;
+ }
+
+ qup->blk.data_len = msg->len;
+ qup->blk.count = (msg->len + QUP_READ_LIMIT - 1) / QUP_READ_LIMIT;
+
+ /* 4 tag bytes for the first block and 2 for each remaining block */
+ qup->blk.tx_tag_len = 4 + (qup->blk.count - 1) * 2;
+
+ /* There are 2 tag bytes that are read into the FIFO for every block */
+ if (msg->flags & I2C_M_RD)
+ qup->blk.rx_tag_len = qup->blk.count * 2;
+
+ if (qup->cmsg)
+ qup->blk.config_run = QUP_I2C_MX_CONFIG_DURING_RUN;
+}
+
+static int qup_i2c_send_data(struct qup_i2c_dev *qup, int tlen, u8 *tbuf,
+ int dlen, u8 *dbuf)
+{
+ u32 val = 0, idx = 0, pos = 0, i = 0, t;
+ int len = tlen + dlen;
+ u8 *buf = tbuf;
+ int ret = 0;
+
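+ /* Pack the tag bytes followed by the data bytes into 32-bit words and write them to the output FIFO */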
+ while (len > 0) {
+ ret = qup_i2c_wait_ready(qup, QUP_OUT_FULL,
+ RESET_BIT, 4 * ONE_BYTE);
+ if (ret) {
+ dev_err(qup->dev, "timeout for fifo out full");
+ return ret;
+ }
+
+ t = (len >= 4) ? 4 : len;
+
+ while (idx < t) {
+ if (!i && (pos >= tlen)) {
+ buf = dbuf;
+ pos = 0;
+ i = 1;
+ }
+ val |= buf[pos++] << (idx++ * 8);
+ }
+
+ writel(val, qup->base + QUP_OUT_FIFO_BASE);
+ idx = 0;
+ val = 0;
+ len -= 4;
+ }
+
+ return ret;
+}
+
+static int qup_i2c_get_data_len(struct qup_i2c_dev *qup)
+{
+ int data_len;
+
+ if (qup->blk.data_len > QUP_READ_LIMIT)
+ data_len = QUP_READ_LIMIT;
+ else
+ data_len = qup->blk.data_len;
+
+ return data_len;
+}
+
+static int qup_i2c_get_tags(u8 *tags, struct qup_i2c_dev *qup,
+ struct i2c_msg *msg, int is_dma)
+{
+ u16 addr = (msg->addr << 1) | ((msg->flags & I2C_M_RD) == I2C_M_RD);
+ int len = 0;
+ int data_len;
+
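+ /* "last" is set only for the final block of the final message in the transfer */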
+ int last = (qup->blk.pos == (qup->blk.count - 1)) &&
+ (qup->cmsg == (qup->num - 1));
+
+ if (qup->blk.pos == 0) {
+ tags[len++] = QUP_TAG_V2_START;
+ tags[len++] = addr & 0xff;
+
+ if (msg->flags & I2C_M_TEN)
+ tags[len++] = addr >> 8;
+ }
+
+ /* Send _STOP commands for the last block */
+ if (last) {
+ if (msg->flags & I2C_M_RD)
+ tags[len++] = QUP_TAG_V2_DATARD_STOP;
+ else
+ tags[len++] = QUP_TAG_V2_DATAWR_STOP;
+ } else {
+ if (msg->flags & I2C_M_RD)
+ tags[len++] = QUP_TAG_V2_DATARD;
+ else
+ tags[len++] = QUP_TAG_V2_DATAWR;
+ }
+
+ data_len = qup_i2c_get_data_len(qup);
+
+ /* 0 implies 256 bytes */
+ if (data_len == QUP_READ_LIMIT)
+ tags[len++] = 0;
+ else
+ tags[len++] = data_len;
+
+ if ((msg->flags & I2C_M_RD) && last && is_dma) {
+ tags[len++] = QUP_BAM_INPUT_EOT;
+ tags[len++] = QUP_BAM_FLUSH_STOP;
+ }
+
+ return len;
+}
+
+static int qup_i2c_issue_xfer_v2(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ int data_len = 0, tag_len, index;
int ret;
- qup->msg = msg;
- qup->pos = 0;
+ tag_len = qup_i2c_get_tags(qup->blk.tags, qup, msg, 0);
+ index = msg->len - qup->blk.data_len;
- enable_irq(qup->irq);
+ /* only tags are written for read */
+ if (!(msg->flags & I2C_M_RD))
+ data_len = qup_i2c_get_data_len(qup);
- qup_i2c_set_write_mode(qup, msg);
+ ret = qup_i2c_send_data(qup, tag_len, qup->blk.tags,
+ data_len, &msg->buf[index]);
+ qup->blk.data_len -= data_len;
- ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
- if (ret)
- goto err;
+ return ret;
+}
- writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL);
+static int qup_i2c_wait_for_complete(struct qup_i2c_dev *qup,
+ struct i2c_msg *msg)
+{
+ unsigned long left;
+ int ret = 0;
+
+ left = wait_for_completion_timeout(&qup->xfer, HZ);
+ if (!left) {
+ writel(1, qup->base + QUP_SW_RESET);
+ ret = -ETIMEDOUT;
+ }
+
+ if (qup->bus_err || qup->qup_err) {
+ if (qup->bus_err & QUP_I2C_NACK_FLAG) {
+ dev_err(qup->dev, "NACK from %x\n", msg->addr);
+ ret = -EIO;
+ }
+ }
+
+ return ret;
+}
+
+static int qup_i2c_write_one_v2(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ int ret = 0;
do {
- ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE);
+ ret = qup_i2c_issue_xfer_v2(qup, msg);
if (ret)
goto err;
- qup_i2c_issue_write(qup, msg);
-
- ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ ret = qup_i2c_wait_for_complete(qup, msg);
if (ret)
goto err;
- left = wait_for_completion_timeout(&qup->xfer, HZ);
- if (!left) {
- writel(1, qup->base + QUP_SW_RESET);
- ret = -ETIMEDOUT;
- goto err;
- }
+ qup->blk.pos++;
+ } while (qup->blk.pos < qup->blk.count);
- if (qup->bus_err || qup->qup_err) {
- if (qup->bus_err & QUP_I2C_NACK_FLAG)
- dev_err(qup->dev, "NACK from %x\n", msg->addr);
- ret = -EIO;
- goto err;
- }
+err:
+ return ret;
+}
+
+static int qup_i2c_write_one_v1(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ int ret = 0;
+
+ do {
+ ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE);
+ if (ret)
+ return ret;
+
+ ret = qup_i2c_issue_write(qup, msg);
+ if (ret)
+ return ret;
+
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ if (ret)
+ return ret;
+
+ ret = qup_i2c_wait_for_complete(qup, msg);
+ if (ret)
+ return ret;
} while (qup->pos < msg->len);
- /* Wait for the outstanding data in the fifo to drain */
- ret = qup_i2c_wait_writeready(qup);
+ return ret;
+}
+
+static int qup_i2c_write(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ int ret;
+
+ qup->msg = msg;
+ qup->pos = 0;
+ enable_irq(qup->irq);
+ qup_i2c_get_blk_data(qup, msg);
+
+ qup_i2c_set_write_mode(qup, msg);
+
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ if (ret)
+ goto err;
+
+ writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL);
+ ret = qup->qup_i2c_write_one(qup, msg);
+ if (ret)
+ goto err;
+
+ ret = qup_i2c_wait_ready(qup, QUP_OUT_NOT_EMPTY, RESET_BIT, ONE_BYTE);
err:
disable_irq(qup->irq);
qup->msg = NULL;
@@ -358,15 +649,22 @@ err:
static void qup_i2c_set_read_mode(struct qup_i2c_dev *qup, int len)
{
+ int tx_len = qup->blk.tx_tag_len;
+
+ len += qup->blk.rx_tag_len;
+ tx_len |= qup->blk.config_run;
+
if (len < qup->in_fifo_sz) {
/* FIFO mode */
writel(QUP_REPACK_EN, qup->base + QUP_IO_MODE);
- writel(len, qup->base + QUP_MX_READ_CNT);
+ writel(tx_len, qup->base + QUP_MX_WRITE_CNT);
+ writel(len | qup->blk.config_run, qup->base + QUP_MX_READ_CNT);
} else {
/* BLOCK mode (transfer data on chunks) */
writel(QUP_INPUT_BLK_MODE | QUP_REPACK_EN,
qup->base + QUP_IO_MODE);
- writel(len, qup->base + QUP_MX_INPUT_CNT);
+ writel(tx_len, qup->base + QUP_MX_OUTPUT_CNT);
+ writel(len | qup->blk.config_run, qup->base + QUP_MX_INPUT_CNT);
}
}
@@ -383,19 +681,19 @@ static void qup_i2c_issue_read(struct qup_i2c_dev *qup, struct i2c_msg *msg)
writel(val, qup->base + QUP_OUT_FIFO_BASE);
}
-
-static void qup_i2c_read_fifo(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+static int qup_i2c_read_fifo(struct qup_i2c_dev *qup, struct i2c_msg *msg)
{
- u32 opflags;
u32 val = 0;
int idx;
+ int ret = 0;
for (idx = 0; qup->pos < msg->len; idx++) {
if ((idx & 1) == 0) {
/* Check that FIFO have data */
- opflags = readl(qup->base + QUP_OPERATIONAL);
- if (!(opflags & QUP_IN_NOT_EMPTY))
- break;
+ ret = qup_i2c_wait_ready(qup, QUP_IN_NOT_EMPTY,
+ SET_BIT, 4 * ONE_BYTE);
+ if (ret)
+ return ret;
/* Reading 2 words at time */
val = readl(qup->base + QUP_IN_FIFO_BASE);
@@ -405,17 +703,117 @@ static void qup_i2c_read_fifo(struct qup_i2c_dev *qup, struct i2c_msg *msg)
msg->buf[qup->pos++] = val >> QUP_MSW_SHIFT;
}
}
+
+ return ret;
}
-static int qup_i2c_read_one(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+static int qup_i2c_read_fifo_v2(struct qup_i2c_dev *qup,
+ struct i2c_msg *msg)
+{
+ u32 val;
+ int idx, pos = 0, ret = 0, total;
+
+ total = qup_i2c_get_data_len(qup);
+
+ /* 2 extra bytes for read tags */
+ while (pos < (total + 2)) {
+ /* Check that the FIFO has data */
+ ret = qup_i2c_wait_ready(qup, QUP_IN_NOT_EMPTY,
+ SET_BIT, 4 * ONE_BYTE);
+ if (ret) {
+ dev_err(qup->dev, "timeout for fifo not empty");
+ return ret;
+ }
+ val = readl(qup->base + QUP_IN_FIFO_BASE);
+
+ for (idx = 0; idx < 4; idx++, val >>= 8, pos++) {
+ /* first 2 bytes are tag bytes */
+ if (pos < 2)
+ continue;
+
+ if (pos >= (total + 2))
+ goto out;
+
+ msg->buf[qup->pos++] = val & 0xff;
+ }
+ }
+
+out:
+ qup->blk.data_len -= total;
+
+ return ret;
+}
+
+static int qup_i2c_read_one_v2(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ int ret = 0;
+
+ do {
+ ret = qup_i2c_issue_xfer_v2(qup, msg);
+ if (ret)
+ goto err;
+
+ ret = qup_i2c_wait_for_complete(qup, msg);
+ if (ret)
+ goto err;
+
+ ret = qup_i2c_read_fifo_v2(qup, msg);
+ if (ret)
+ goto err;
+
+ qup->blk.pos++;
+ } while (qup->blk.pos < qup->blk.count);
+
+err:
+ return ret;
+}
+
+static int qup_i2c_read_one_v1(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ int ret = 0;
+
+ /*
+ * The QUP block will issue a NACK and STOP on the bus when reaching
+ * the end of the read; the length of the read is specified as one byte,
+ * which limits the possible read to 256 (QUP_READ_LIMIT) bytes.
+ */
+ if (msg->len > QUP_READ_LIMIT) {
+ dev_err(qup->dev, "HW not capable of reads over %d bytes\n",
+ QUP_READ_LIMIT);
+ return -EINVAL;
+ }
+
+ ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE);
+ if (ret)
+ return ret;
+
+ qup_i2c_issue_read(qup, msg);
+
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ if (ret)
+ return ret;
+
+ do {
+ ret = qup_i2c_wait_for_complete(qup, msg);
+ if (ret)
+ return ret;
+ ret = qup_i2c_read_fifo(qup, msg);
+ if (ret)
+ return ret;
+ } while (qup->pos < msg->len);
+
+ return ret;
+}
+
+static int qup_i2c_read(struct qup_i2c_dev *qup, struct i2c_msg *msg)
{
- unsigned long left;
int ret;
qup->msg = msg;
qup->pos = 0;
enable_irq(qup->irq);
+ qup_i2c_get_blk_data(qup, msg);
qup_i2c_set_read_mode(qup, msg->len);
@@ -425,38 +823,263 @@ static int qup_i2c_read_one(struct qup_i2c_dev *qup, struct i2c_msg *msg)
writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL);
- ret = qup_i2c_change_state(qup, QUP_PAUSE_STATE);
- if (ret)
- goto err;
+ ret = qup->qup_i2c_read_one(qup, msg);
- qup_i2c_issue_read(qup, msg);
+err:
+ disable_irq(qup->irq);
+ qup->msg = NULL;
- ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
- if (ret)
- goto err;
+ return ret;
+}
- do {
- left = wait_for_completion_timeout(&qup->xfer, HZ);
- if (!left) {
- writel(1, qup->base + QUP_SW_RESET);
- ret = -ETIMEDOUT;
- goto err;
+static void qup_i2c_bam_cb(void *data)
+{
+ struct qup_i2c_dev *qup = data;
+
+ complete(&qup->xfer);
+}
+
+void qup_sg_set_buf(struct scatterlist *sg, void *buf, struct qup_i2c_tag *tg,
+ unsigned int buflen, struct qup_i2c_dev *qup,
+ int map, int dir)
+{
+ sg_set_buf(sg, buf, buflen);
+ dma_map_sg(qup->dev, sg, 1, dir);
+
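+ /* Tag buffers come from the DMA pool, so derive the bus address from the pool mapping rather than dma_map_sg() */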
+ if (!map)
+ sg_dma_address(sg) = tg->addr + ((u8 *)buf - tg->start);
+}
+
+static void qup_i2c_rel_dma(struct qup_i2c_dev *qup)
+{
+ if (qup->btx.dma_tx)
+ dma_release_channel(qup->btx.dma_tx);
+ if (qup->brx.dma_rx)
+ dma_release_channel(qup->brx.dma_rx);
+ qup->btx.dma_tx = NULL;
+ qup->brx.dma_rx = NULL;
+}
+
+static int qup_i2c_req_dma(struct qup_i2c_dev *qup)
+{
+ if (!qup->btx.dma_tx) {
+ qup->btx.dma_tx = dma_request_slave_channel(qup->dev, "tx");
+ if (!qup->btx.dma_tx) {
+ dev_err(qup->dev, "\n tx channel not available");
+ return -ENODEV;
+ }
+ }
+
+ if (!qup->brx.dma_rx) {
+ qup->brx.dma_rx = dma_request_slave_channel(qup->dev, "rx");
+ if (!qup->brx.dma_rx) {
+ dev_err(qup->dev, "\n rx channel not available");
+ qup_i2c_rel_dma(qup);
+ return -ENODEV;
}
+ }
+ return 0;
+}
- if (qup->bus_err || qup->qup_err) {
- if (qup->bus_err & QUP_I2C_NACK_FLAG)
- dev_err(qup->dev, "NACK from %x\n", msg->addr);
+static int bam_do_xfer(struct qup_i2c_dev *qup, struct i2c_msg *msg)
+{
+ struct dma_async_tx_descriptor *txd, *rxd = NULL;
+ int ret = 0;
+ dma_cookie_t cookie_rx, cookie_tx;
+ u32 rx_nents = 0, tx_nents = 0, len, blocks, rem;
+ u32 i, tlen, tx_len, tx_buf = 0, rx_buf = 0, off = 0;
+ u8 *tags;
+
+ while (qup->cmsg < qup->num) {
+ blocks = (msg->len + QUP_READ_LIMIT) / QUP_READ_LIMIT;
+ rem = msg->len % QUP_READ_LIMIT;
+ tx_len = 0, len = 0, i = 0;
+
+ qup_i2c_get_blk_data(qup, msg);
+
+ if (msg->flags & I2C_M_RD) {
+ rx_nents += (blocks * 2) + 1;
+ tx_nents += 1;
+
+ while (qup->blk.pos < blocks) {
+ /* length set to '0' implies 256 bytes */
+ tlen = (i == (blocks - 1)) ? rem : 0;
+ tags = &qup->start_tag.start[off + len];
+ len += qup_i2c_get_tags(tags, qup, msg, 1);
+
+ /* scratch buf to read the start and len tags */
+ qup_sg_set_buf(&qup->brx.sg_rx[rx_buf++],
+ &qup->brx.scratch_tag.start[0],
+ &qup->brx.scratch_tag,
+ 2, qup, 0, 0);
+
+ qup_sg_set_buf(&qup->brx.sg_rx[rx_buf++],
+ &msg->buf[QUP_READ_LIMIT * i],
+ NULL, tlen, qup,
+ 1, DMA_FROM_DEVICE);
+ i++;
+ qup->blk.pos = i;
+ }
+ qup_sg_set_buf(&qup->btx.sg_tx[tx_buf++],
+ &qup->start_tag.start[off],
+ &qup->start_tag, len, qup, 0, 0);
+ off += len;
+ /* scratch buf to read the BAM EOT and FLUSH tags */
+ qup_sg_set_buf(&qup->brx.sg_rx[rx_buf++],
+ &qup->brx.scratch_tag.start[0],
+ &qup->brx.scratch_tag, 2,
+ qup, 0, 0);
+ } else {
+ tx_nents += (blocks * 2);
+
+ while (qup->blk.pos < blocks) {
+ tlen = (i == (blocks - 1)) ? rem : 0;
+ tags = &qup->start_tag.start[off + tx_len];
+ len = qup_i2c_get_tags(tags, qup, msg, 1);
+
+ qup_sg_set_buf(&qup->btx.sg_tx[tx_buf++],
+ tags,
+ &qup->start_tag, len,
+ qup, 0, 0);
+
+ tx_len += len;
+ qup_sg_set_buf(&qup->btx.sg_tx[tx_buf++],
+ &msg->buf[QUP_READ_LIMIT * i],
+ NULL, tlen, qup, 1,
+ DMA_TO_DEVICE);
+ i++;
+ qup->blk.pos = i;
+ }
+ off += tx_len;
+
+ if (qup->cmsg == (qup->num - 1)) {
+ qup->btx.footer_tag.start[0] =
+ QUP_BAM_FLUSH_STOP;
+ qup->btx.footer_tag.start[1] =
+ QUP_BAM_FLUSH_STOP;
+ qup_sg_set_buf(&qup->btx.sg_tx[tx_buf++],
+ &qup->btx.footer_tag.start[0],
+ &qup->btx.footer_tag, 2,
+ qup, 0, 0);
+ tx_nents += 1;
+ }
+ }
+ qup->cmsg++;
+ msg++;
+ }
+
+ txd = dmaengine_prep_slave_sg(qup->btx.dma_tx, qup->btx.sg_tx, tx_nents,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_PREP_FENCE);
+ if (!txd) {
+ dev_err(qup->dev, "failed to get tx desc\n");
+ ret = -EINVAL;
+ goto desc_err;
+ }
+
+ if (!rx_nents) {
+ txd->callback = qup_i2c_bam_cb;
+ txd->callback_param = qup;
+ }
+
+ cookie_tx = dmaengine_submit(txd);
+ dma_async_issue_pending(qup->btx.dma_tx);
+
+ if (rx_nents) {
+ rxd = dmaengine_prep_slave_sg(qup->brx.dma_rx, qup->brx.sg_rx,
+ rx_nents, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!rxd) {
+ dev_err(qup->dev, "failed to get rx desc\n");
+ ret = -EINVAL;
+
+ /* abort TX descriptors */
+ dmaengine_terminate_all(qup->btx.dma_tx);
+ goto desc_err;
+ }
+
+ rxd->callback = qup_i2c_bam_cb;
+ rxd->callback_param = qup;
+ cookie_rx = dmaengine_submit(rxd);
+ dma_async_issue_pending(qup->brx.dma_rx);
+ }
+
+ if (!wait_for_completion_timeout(&qup->xfer, TOUT_MAX * HZ)) {
+ dev_err(qup->dev, "normal trans timed out\n");
+ ret = -ETIMEDOUT;
+ }
+
+ if (ret || qup->bus_err || qup->qup_err) {
+ if (qup->bus_err & QUP_I2C_NACK_FLAG) {
+ msg--;
+ dev_err(qup->dev, "NACK from %x\n", msg->addr);
ret = -EIO;
- goto err;
+
+ if (qup_i2c_change_state(qup, QUP_RUN_STATE)) {
+ dev_err(qup->dev, "change to run state timed out");
+ return ret;
+ }
+
+ if (rx_nents)
+ writel(QUP_BAM_INPUT_EOT,
+ qup->base + QUP_OUT_FIFO_BASE);
+
+ writel(QUP_BAM_FLUSH_STOP,
+ qup->base + QUP_OUT_FIFO_BASE);
+
+ qup_i2c_flush(qup);
+
+ /* wait for remaining interrupts to occur */
+ if (!wait_for_completion_timeout(&qup->xfer, HZ))
+ dev_err(qup->dev, "flush timed out\n");
+
+ qup_i2c_rel_dma(qup);
}
+ }
- qup_i2c_read_fifo(qup, msg);
- } while (qup->pos < msg->len);
+ dma_unmap_sg(qup->dev, qup->btx.sg_tx, tx_nents, DMA_TO_DEVICE);
-err:
+ if (rx_nents)
+ dma_unmap_sg(qup->dev, qup->brx.sg_rx, rx_nents,
+ DMA_FROM_DEVICE);
+desc_err:
+ return ret;
+}
+
+static int qup_bam_xfer(struct i2c_adapter *adap, struct i2c_msg *msg)
+{
+ struct qup_i2c_dev *qup = i2c_get_adapdata(adap);
+ int ret = 0;
+
+ enable_irq(qup->irq);
+ if (qup_i2c_req_dma(qup))
+ goto out;
+
+ qup->bus_err = 0;
+ qup->qup_err = 0;
+
+ writel(0, qup->base + QUP_MX_INPUT_CNT);
+ writel(0, qup->base + QUP_MX_OUTPUT_CNT);
+
+ /* set BAM mode */
+ writel(QUP_REPACK_EN | QUP_BAM_MODE, qup->base + QUP_IO_MODE);
+
+ /* mask fifo irqs */
+ writel((0x3 << 8), qup->base + QUP_OPERATIONAL_MASK);
+
+ /* set RUN STATE */
+ ret = qup_i2c_change_state(qup, QUP_RUN_STATE);
+ if (ret)
+ goto out;
+
+ writel(qup->clk_ctl, qup->base + QUP_I2C_CLK_CTL);
+
+ qup->msg = msg;
+ ret = bam_do_xfer(qup, qup->msg);
+out:
disable_irq(qup->irq);
- qup->msg = NULL;
+ qup->msg = NULL;
return ret;
}
@@ -467,6 +1090,8 @@ static int qup_i2c_xfer(struct i2c_adapter *adap,
struct qup_i2c_dev *qup = i2c_get_adapdata(adap);
int ret, idx;
+ qup->num = 1;
+
ret = pm_runtime_get_sync(qup->dev);
if (ret < 0)
goto out;
@@ -491,9 +1116,9 @@ static int qup_i2c_xfer(struct i2c_adapter *adap,
}
if (msgs[idx].flags & I2C_M_RD)
- ret = qup_i2c_read_one(qup, &msgs[idx]);
+ ret = qup_i2c_read(qup, &msgs[idx]);
else
- ret = qup_i2c_write_one(qup, &msgs[idx]);
+ ret = qup_i2c_write(qup, &msgs[idx]);
if (ret)
break;
@@ -513,6 +1138,87 @@ out:
return ret;
}
+static int qup_i2c_xfer_v2(struct i2c_adapter *adap,
+ struct i2c_msg msgs[],
+ int num)
+{
+ struct qup_i2c_dev *qup = i2c_get_adapdata(adap);
+ int ret, idx, len, use_dma = 0;
+
+ qup->num = num;
+ qup->cmsg = 0;
+
+ ret = pm_runtime_get_sync(qup->dev);
+ if (ret < 0)
+ goto out;
+
+ writel(1, qup->base + QUP_SW_RESET);
+ ret = qup_i2c_poll_state(qup, QUP_RESET_STATE);
+ if (ret)
+ goto out;
+
+ /* Configure QUP as I2C mini core */
+ writel(I2C_MINI_CORE | I2C_N_VAL_V2, qup->base + QUP_CONFIG);
+ writel(QUP_V2_TAGS_EN, qup->base + QUP_I2C_MASTER_GEN);
+
+ if ((qup->is_dma)) {
+ /* All i2c_msgs should be transferred using either dma or cpu */
+ for (idx = 0; idx < num; idx++) {
+ if (msgs[idx].len == 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ len = (msgs[idx].len > qup->out_fifo_sz) ||
+ (msgs[idx].len > qup->in_fifo_sz);
+
+ if ((!is_vmalloc_addr(msgs[idx].buf)) && len) {
+ use_dma = 1;
+ } else {
+ use_dma = 0;
+ break;
+ }
+ }
+ }
+
+ for (idx = 0; idx < num; idx++) {
+ if (qup_i2c_poll_state_i2c_master(qup)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ reinit_completion(&qup->xfer);
+
+ len = msgs[idx].len;
+
+ if (use_dma) {
+ ret = qup_bam_xfer(adap, &msgs[idx]);
+ idx = num;
+ } else {
+ if (msgs[idx].flags & I2C_M_RD)
+ ret = qup_i2c_read(qup, &msgs[idx]);
+ else
+ ret = qup_i2c_write(qup, &msgs[idx]);
+ }
+
+ if (ret)
+ break;
+
+ qup->cmsg++;
+ }
+
+ if (!ret)
+ ret = qup_i2c_change_state(qup, QUP_RESET_STATE);
+
+ if (ret == 0)
+ ret = num;
+out:
+ pm_runtime_mark_last_busy(qup->dev);
+ pm_runtime_put_autosuspend(qup->dev);
+
+ return ret;
+}
+
static u32 qup_i2c_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
@@ -532,6 +1238,11 @@ static struct i2c_adapter_quirks qup_i2c_quirks = {
.max_read_len = QUP_READ_LIMIT,
};
+static const struct i2c_algorithm qup_i2c_algo_v2 = {
+ .master_xfer = qup_i2c_xfer_v2,
+ .functionality = qup_i2c_func,
+};
+
static void qup_i2c_enable_clocks(struct qup_i2c_dev *qup)
{
clk_prepare_enable(qup->clk);
@@ -561,6 +1272,7 @@ static int qup_i2c_probe(struct platform_device *pdev)
int ret, fs_div, hs_div;
int src_clk_freq;
u32 clk_freq = 100000;
+ int blocks;
qup = devm_kzalloc(&pdev->dev, sizeof(*qup), GFP_KERNEL);
if (!qup)
@@ -572,6 +1284,69 @@ static int qup_i2c_probe(struct platform_device *pdev)
of_property_read_u32(node, "clock-frequency", &clk_freq);
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,i2c-qup-v1.1.1")) {
+ qup->adap.algo = &qup_i2c_algo;
+ qup->qup_i2c_write_one = qup_i2c_write_one_v1;
+ qup->qup_i2c_read_one = qup_i2c_read_one_v1;
+ } else {
+ qup->adap.algo = &qup_i2c_algo_v2;
+ qup->qup_i2c_write_one = qup_i2c_write_one_v2;
+ qup->qup_i2c_read_one = qup_i2c_read_one_v2;
+ qup->use_v2_tags = 1;
+
+ if (qup_i2c_req_dma(qup))
+ goto nodma;
+
+ blocks = (MX_BLOCKS << 1) + 1;
+ qup->btx.sg_tx = devm_kzalloc(&pdev->dev,
+ sizeof(*qup->btx.sg_tx) * blocks,
+ GFP_KERNEL);
+ if (!qup->btx.sg_tx) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ sg_init_table(qup->btx.sg_tx, blocks);
+
+ qup->brx.sg_rx = devm_kzalloc(&pdev->dev,
+ sizeof(*qup->btx.sg_tx) * blocks,
+ GFP_KERNEL);
+ if (!qup->brx.sg_rx) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ sg_init_table(qup->brx.sg_rx, blocks);
+
+ /* 2 tag bytes for each block + 5 for start, stop tags */
+ size = blocks * 2 + 5;
+ qup->dpool = dma_pool_create("qup_i2c-dma-pool", &pdev->dev,
+ size, 4, 0);
+
+ qup->start_tag.start = dma_pool_alloc(qup->dpool, GFP_KERNEL,
+ &qup->start_tag.addr);
+ if (!qup->start_tag.start) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ qup->brx.scratch_tag.start = dma_pool_alloc(qup->dpool,
+ GFP_KERNEL,
+ &qup->brx.scratch_tag.addr);
+ if (!qup->brx.scratch_tag.start) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ qup->btx.footer_tag.start = dma_pool_alloc(qup->dpool,
+ GFP_KERNEL,
+ &qup->btx.footer_tag.addr);
+ if (!qup->btx.footer_tag.start) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ qup->is_dma = 1;
+ }
+
+nodma:
/* We support frequencies up to FAST Mode (400KHz) */
if (!clk_freq || clk_freq > 400000) {
dev_err(qup->dev, "clock frequency not supported %d\n",
@@ -667,7 +1442,6 @@ static int qup_i2c_probe(struct platform_device *pdev)
qup->out_blk_sz, qup->out_fifo_sz);
i2c_set_adapdata(&qup->adap, qup);
- qup->adap.algo = &qup_i2c_algo;
qup->adap.quirks = &qup_i2c_quirks;
qup->adap.dev.parent = qup->dev;
qup->adap.dev.of_node = pdev->dev.of_node;
@@ -688,6 +1462,11 @@ fail_runtime:
pm_runtime_disable(qup->dev);
pm_runtime_set_suspended(qup->dev);
fail:
+ if (qup->btx.dma_tx)
+ dma_release_channel(qup->btx.dma_tx);
+ if (qup->brx.dma_rx)
+ dma_release_channel(qup->brx.dma_rx);
+
qup_i2c_disable_clocks(qup);
return ret;
}
@@ -696,6 +1475,18 @@ static int qup_i2c_remove(struct platform_device *pdev)
{
struct qup_i2c_dev *qup = platform_get_drvdata(pdev);
+ if (qup->is_dma) {
+ dma_pool_free(qup->dpool, qup->start_tag.start,
+ qup->start_tag.addr);
+ dma_pool_free(qup->dpool, qup->brx.scratch_tag.start,
+ qup->brx.scratch_tag.addr);
+ dma_pool_free(qup->dpool, qup->btx.footer_tag.start,
+ qup->btx.footer_tag.addr);
+ dma_pool_destroy(qup->dpool);
+ dma_release_channel(qup->btx.dma_tx);
+ dma_release_channel(qup->brx.dma_rx);
+ }
+
disable_irq(qup->irq);
qup_i2c_disable_clocks(qup);
i2c_del_adapter(&qup->adap);
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index cbe6a890a93a..2cc40e352c6a 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -64,7 +64,6 @@ config MSM_IOMMU
bool "MSM IOMMU Support"
depends on ARM
depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST
- depends on BROKEN
select IOMMU_API
help
Support for the IOMMUs found on certain Qualcomm SOCs.
@@ -77,6 +76,8 @@ config IOMMU_PGTABLES_L2
def_bool y
depends on MSM_IOMMU && MMU && SMP && CPU_DCACHE_DISABLE=n
+source "drivers/iommu/qcom/Kconfig"
+
# AMD IOMMU support
config AMD_IOMMU
bool "AMD IOMMU support"
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index c6dcc513d711..c665bbb5704d 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -5,7 +5,8 @@ obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
obj-$(CONFIG_IOMMU_IOVA) += iova.o
obj-$(CONFIG_OF_IOMMU) += of_iommu.o
-obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
+obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
+obj-$(CONFIG_QCOM_IOMMU_V1) += qcom/
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index e321fa517a45..cf871ffc2aa3 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -27,6 +27,8 @@
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/of_iommu.h>
#include <asm/cacheflush.h>
#include <asm/sizes.h>
@@ -39,15 +41,12 @@ __asm__ __volatile__ ( \
" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
: "=r" (reg))
-#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
-#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)
-
/* bitmap of the page sizes currently supported */
#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
-
-static int msm_iommu_tex_class[4];
+#define DUMMY_CONTEXT 1
DEFINE_SPINLOCK(msm_iommu_lock);
+static LIST_HEAD(qcom_iommu_devices);
struct msm_priv {
unsigned long *pgtable;
@@ -55,40 +54,87 @@ struct msm_priv {
struct iommu_domain domain;
};
+struct device *msm_iommu_get_ctx(const char *ctx_name)
+{
+ return (struct device *) DUMMY_CONTEXT;
+}
+EXPORT_SYMBOL(msm_iommu_get_ctx);
+
static struct msm_priv *to_msm_priv(struct iommu_domain *dom)
{
return container_of(dom, struct msm_priv, domain);
}
-static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
+static int __enable_clocks(struct msm_iommu_dev *iommu)
{
int ret;
- ret = clk_enable(drvdata->pclk);
+ ret = clk_enable(iommu->pclk);
if (ret)
goto fail;
- if (drvdata->clk) {
- ret = clk_enable(drvdata->clk);
+ if (iommu->clk) {
+ ret = clk_enable(iommu->clk);
if (ret)
- clk_disable(drvdata->pclk);
+ clk_disable(iommu->pclk);
}
fail:
return ret;
}
-static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
+static void __disable_clocks(struct msm_iommu_dev *iommu)
{
- clk_disable(drvdata->clk);
- clk_disable(drvdata->pclk);
+ if (iommu->clk)
+ clk_disable(iommu->clk);
+ clk_disable(iommu->pclk);
+}
+
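+/* Clear the global registers and every context bank back to a known state */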
+static void msm_iommu_reset(void __iomem *base, int ncb)
+{
+ int ctx;
+
+ SET_RPUE(base, 0);
+ SET_RPUEIE(base, 0);
+ SET_ESRRESTORE(base, 0);
+ SET_TBE(base, 0);
+ SET_CR(base, 0);
+ SET_SPDMBE(base, 0);
+ SET_TESTBUSCR(base, 0);
+ SET_TLBRSW(base, 0);
+ SET_GLOBAL_TLBIALL(base, 0);
+ SET_RPU_ACR(base, 0);
+ SET_TLBLKCRWE(base, 1);
+
+ for (ctx = 0; ctx < ncb; ctx++) {
+ SET_BPRCOSH(base, ctx, 0);
+ SET_BPRCISH(base, ctx, 0);
+ SET_BPRCNSH(base, ctx, 0);
+ SET_BPSHCFG(base, ctx, 0);
+ SET_BPMTCFG(base, ctx, 0);
+ SET_ACTLR(base, ctx, 0);
+ SET_SCTLR(base, ctx, 0);
+ SET_FSRRESTORE(base, ctx, 0);
+ SET_TTBR0(base, ctx, 0);
+ SET_TTBR1(base, ctx, 0);
+ SET_TTBCR(base, ctx, 0);
+ SET_BFBCR(base, ctx, 0);
+ SET_PAR(base, ctx, 0);
+ SET_FAR(base, ctx, 0);
+ SET_CTX_TLBIALL(base, ctx, 0);
+ SET_TLBFLPTER(base, ctx, 0);
+ SET_TLBSLPTER(base, ctx, 0);
+ SET_TLBLKCR(base, ctx, 0);
+ SET_CONTEXTIDR(base, ctx, 0);
+ }
}
static int __flush_iotlb(struct iommu_domain *domain)
{
struct msm_priv *priv = to_msm_priv(domain);
- struct msm_iommu_drvdata *iommu_drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ struct msm_iommu_dev *iommu = NULL;
+ struct msm_iommu_ctx_dev *master;
int ret = 0;
+
#ifndef CONFIG_IOMMU_PGTABLES_L2
unsigned long *fl_table = priv->pgtable;
int i;
@@ -105,24 +151,67 @@ static int __flush_iotlb(struct iommu_domain *domain)
}
#endif
- list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
-
- BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
-
- iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
- BUG_ON(!iommu_drvdata);
-
- ret = __enable_clocks(iommu_drvdata);
+ list_for_each_entry(iommu, &priv->list_attached, dom_node) {
+ ret = __enable_clocks(iommu);
if (ret)
goto fail;
- SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0);
- __disable_clocks(iommu_drvdata);
+ list_for_each_entry(master, &iommu->ctx_list, list)
+ SET_CTX_TLBIALL(iommu->base, master->num, 0);
+
+ __disable_clocks(iommu);
}
fail:
return ret;
}
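+/* Atomically claim a free context bank number from the allocation bitmap */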
+static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
+{
+ int idx;
+
+ do {
+ idx = find_next_zero_bit(map, end, start);
+ if (idx == end)
+ return -ENOSPC;
+ } while (test_and_set_bit(idx, map));
+
+ return idx;
+}
+
+static void msm_iommu_free_ctx(unsigned long *map, int idx)
+{
+ clear_bit(idx, map);
+}
+
+static void config_mids(struct msm_iommu_dev *iommu,
+ struct msm_iommu_ctx_dev *master)
+{
+ int mid, ctx, i;
+
+ for (i = 0; i < master->num_mids; i++) {
+ mid = master->mids[i];
+ ctx = master->num;
+
+ SET_M2VCBR_N(iommu->base, mid, 0);
+ SET_CBACR_N(iommu->base, ctx, 0);
+
+ /* Set VMID = 0 */
+ SET_VMID(iommu->base, mid, 0);
+
+ /* Set the context number for that MID to this context */
+ SET_CBNDX(iommu->base, mid, ctx);
+
+ /* Set MID associated with this context bank to 0 */
+ SET_CBVMID(iommu->base, ctx, 0);
+
+ /* Set the ASID for TLB tagging for this context */
+ SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);
+
+ /* Set security bit override to be Non-secure */
+ SET_NSCFG(iommu->base, mid, 3);
+ }
+}
+
static void __reset_context(void __iomem *base, int ctx)
{
SET_BPRCOSH(base, ctx, 0);
@@ -143,13 +232,10 @@ static void __reset_context(void __iomem *base, int ctx)
SET_TLBFLPTER(base, ctx, 0);
SET_TLBSLPTER(base, ctx, 0);
SET_TLBLKCR(base, ctx, 0);
- SET_PRRR(base, ctx, 0);
- SET_NMRR(base, ctx, 0);
}
static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
{
- unsigned int prrr, nmrr;
__reset_context(base, ctx);
/* Set up HTW mode */
@@ -179,15 +265,6 @@ static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable)
SET_RCOSH(base, ctx, 1);
SET_RCNSH(base, ctx, 1);
- /* Turn on TEX Remap */
- SET_TRE(base, ctx, 1);
-
- /* Set TEX remap attributes */
- RCP15_PRRR(prrr);
- RCP15_NMRR(nmrr);
- SET_PRRR(base, ctx, prrr);
- SET_NMRR(base, ctx, nmrr);
-
/* Turn on BFB prefetch */
SET_BFBDFE(base, ctx, 1);
@@ -272,94 +349,76 @@ static void msm_iommu_domain_free(struct iommu_domain *domain)
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
- struct msm_priv *priv;
- struct msm_iommu_ctx_dev *ctx_dev;
- struct msm_iommu_drvdata *iommu_drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
- struct msm_iommu_ctx_drvdata *tmp_drvdata;
int ret = 0;
unsigned long flags;
+ struct msm_iommu_dev *iommu;
+ struct msm_priv *priv = to_msm_priv(domain);
+ struct msm_iommu_ctx_dev *master;
spin_lock_irqsave(&msm_iommu_lock, flags);
-
- priv = to_msm_priv(domain);
-
- if (!dev) {
- ret = -EINVAL;
- goto fail;
- }
-
- iommu_drvdata = dev_get_drvdata(dev->parent);
- ctx_drvdata = dev_get_drvdata(dev);
- ctx_dev = dev->platform_data;
-
- if (!iommu_drvdata || !ctx_drvdata || !ctx_dev) {
- ret = -EINVAL;
- goto fail;
- }
-
- if (!list_empty(&ctx_drvdata->attached_elm)) {
- ret = -EBUSY;
- goto fail;
- }
-
- list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
- if (tmp_drvdata == ctx_drvdata) {
- ret = -EBUSY;
- goto fail;
+ list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
+ master = list_first_entry(&iommu->ctx_list,
+ struct msm_iommu_ctx_dev,
+ list);
+ if (master->of_node == dev->of_node) {
+ ret = __enable_clocks(iommu);
+ if (ret)
+ goto fail;
+
+ list_for_each_entry(master, &iommu->ctx_list, list) {
+ if (master->num) {
+ dev_err(dev, "domain already attached");
+ ret = -EEXIST;
+ goto fail;
+ }
+ master->num =
+ msm_iommu_alloc_ctx(iommu->context_map,
+ 0, iommu->ncb);
+ if (IS_ERR_VALUE(master->num)) {
+ ret = -ENODEV;
+ goto fail;
+ }
+ config_mids(iommu, master);
+ __program_context(iommu->base, master->num,
+ __pa(priv->pgtable));
+ }
+ __disable_clocks(iommu);
+ list_add(&iommu->dom_node, &priv->list_attached);
}
+ }
- ret = __enable_clocks(iommu_drvdata);
- if (ret)
- goto fail;
-
- __program_context(iommu_drvdata->base, ctx_dev->num,
- __pa(priv->pgtable));
-
- __disable_clocks(iommu_drvdata);
- list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
ret = __flush_iotlb(domain);
-
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
+
return ret;
}
static void msm_iommu_detach_dev(struct iommu_domain *domain,
struct device *dev)
{
- struct msm_priv *priv;
- struct msm_iommu_ctx_dev *ctx_dev;
- struct msm_iommu_drvdata *iommu_drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ struct msm_priv *priv = to_msm_priv(domain);
unsigned long flags;
+ struct msm_iommu_dev *iommu;
+ struct msm_iommu_ctx_dev *master;
int ret;
spin_lock_irqsave(&msm_iommu_lock, flags);
- priv = to_msm_priv(domain);
-
- if (!dev)
- goto fail;
-
- iommu_drvdata = dev_get_drvdata(dev->parent);
- ctx_drvdata = dev_get_drvdata(dev);
- ctx_dev = dev->platform_data;
-
- if (!iommu_drvdata || !ctx_drvdata || !ctx_dev)
- goto fail;
-
ret = __flush_iotlb(domain);
if (ret)
goto fail;
- ret = __enable_clocks(iommu_drvdata);
- if (ret)
- goto fail;
-
- __reset_context(iommu_drvdata->base, ctx_dev->num);
- __disable_clocks(iommu_drvdata);
- list_del_init(&ctx_drvdata->attached_elm);
+ list_for_each_entry(iommu, &priv->list_attached, dom_node) {
+ ret = __enable_clocks(iommu);
+ if (ret)
+ goto fail;
+ list_for_each_entry(master, &iommu->ctx_list, list) {
+ msm_iommu_free_ctx(iommu->context_map, master->num);
+ __reset_context(iommu->base, master->num);
+ }
+ __disable_clocks(iommu);
+ }
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
}
@@ -376,12 +435,14 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
unsigned long *sl_pte;
unsigned long sl_offset;
unsigned int pgprot;
- int ret = 0, tex, sh;
+ int ret = 0, tex = 0, sh;
spin_lock_irqsave(&msm_iommu_lock, flags);
sh = (prot & MSM_IOMMU_ATTR_SH) ? 1 : 0;
- tex = msm_iommu_tex_class[prot & MSM_IOMMU_CP_MASK];
+
+ if (prot & IOMMU_CACHE)
+ tex = FL_BUFFERABLE | FL_CACHEABLE;
if (tex < 0 || tex > NUM_TEX_CLASS - 1) {
ret = -EINVAL;
@@ -424,12 +485,12 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
int i = 0;
for (i = 0; i < 16; i++)
*(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION |
- FL_AP_READ | FL_AP_WRITE | FL_TYPE_SECT |
+ FL_AP1 | FL_AP0 | FL_TYPE_SECT |
FL_SHARED | FL_NG | pgprot;
}
if (len == SZ_1M)
- *fl_pte = (pa & 0xFFF00000) | FL_AP_READ | FL_AP_WRITE | FL_NG |
+ *fl_pte = (pa & 0xFFF00000) | FL_AP1 | FL_AP0 | FL_NG |
FL_TYPE_SECT | FL_SHARED | pgprot;
/* Need a 2nd level table */
@@ -555,47 +616,46 @@ static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t va)
{
struct msm_priv *priv;
- struct msm_iommu_drvdata *iommu_drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ struct msm_iommu_dev *iommu;
+ struct msm_iommu_ctx_dev *master;
unsigned int par;
unsigned long flags;
- void __iomem *base;
phys_addr_t ret = 0;
- int ctx;
spin_lock_irqsave(&msm_iommu_lock, flags);
priv = to_msm_priv(domain);
- if (list_empty(&priv->list_attached))
- goto fail;
+ iommu = list_first_entry(&priv->list_attached,
+ struct msm_iommu_dev, dom_node);
- ctx_drvdata = list_entry(priv->list_attached.next,
- struct msm_iommu_ctx_drvdata, attached_elm);
- iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
+ if (list_empty(&iommu->ctx_list))
+ goto fail;
- base = iommu_drvdata->base;
- ctx = ctx_drvdata->num;
+ master = list_first_entry(&iommu->ctx_list,
+ struct msm_iommu_ctx_dev, list);
+ if (!master)
+ goto fail;
- ret = __enable_clocks(iommu_drvdata);
+ ret = __enable_clocks(iommu);
if (ret)
goto fail;
/* Invalidate context TLB */
- SET_CTX_TLBIALL(base, ctx, 0);
- SET_V2PPR(base, ctx, va & V2Pxx_VA);
+ SET_CTX_TLBIALL(iommu->base, master->num, 0);
+ SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);
- par = GET_PAR(base, ctx);
+ par = GET_PAR(iommu->base, master->num);
/* We are dealing with a supersection */
- if (GET_NOFAULT_SS(base, ctx))
+ if (GET_NOFAULT_SS(iommu->base, master->num))
ret = (par & 0xFF000000) | (va & 0x00FFFFFF);
else /* Upper 20 bits from PAR, lower 12 from VA */
ret = (par & 0xFFFFF000) | (va & 0x00000FFF);
- if (GET_FAULT(base, ctx))
+ if (GET_FAULT(iommu->base, master->num))
ret = 0;
- __disable_clocks(iommu_drvdata);
+ __disable_clocks(iommu);
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
return ret;
@@ -629,49 +689,82 @@ static void print_ctx_regs(void __iomem *base, int ctx)
GET_TTBR0(base, ctx), GET_TTBR1(base, ctx));
pr_err("SCTLR = %08x ACTLR = %08x\n",
GET_SCTLR(base, ctx), GET_ACTLR(base, ctx));
- pr_err("PRRR = %08x NMRR = %08x\n",
- GET_PRRR(base, ctx), GET_NMRR(base, ctx));
+}
+
+static void insert_iommu_master(struct device *dev,
+ struct msm_iommu_dev *iommu,
+ struct of_phandle_args *spec)
+{
+ struct msm_iommu_ctx_dev *master;
+ int sid;
+
+	/* called with msm_iommu_lock (a spinlock) held, so no sleeping allocation */
+	master = kzalloc(sizeof(*master), GFP_ATOMIC);
+ master->of_node = dev->of_node;
+ list_add(&master->list, &iommu->ctx_list);
+
+ for (sid = 0; sid < spec->args_count; sid++)
+ master->mids[sid] = spec->args[sid];
+
+ master->num_mids = spec->args_count;
+}
+
+static int qcom_iommu_of_xlate(struct device *dev,
+ struct of_phandle_args *spec)
+{
+ struct msm_iommu_dev *iommu;
+ unsigned long flags;
+
+ spin_lock_irqsave(&msm_iommu_lock, flags);
+ list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
+ if (iommu->dev->of_node == spec->np)
+ break;
+ }
+
+	if (!iommu || (iommu->dev->of_node != spec->np)) {
+		spin_unlock_irqrestore(&msm_iommu_lock, flags);
+		return -ENODEV;
+	}
+
+ insert_iommu_master(dev, iommu, spec);
+ spin_unlock_irqrestore(&msm_iommu_lock, flags);
+
+ return 0;
}
irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id)
{
- struct msm_iommu_drvdata *drvdata = dev_id;
- void __iomem *base;
+ struct msm_iommu_dev *iommu = dev_id;
unsigned int fsr;
int i, ret;
spin_lock(&msm_iommu_lock);
- if (!drvdata) {
+ if (!iommu) {
pr_err("Invalid device ID in context interrupt handler\n");
goto fail;
}
- base = drvdata->base;
-
pr_err("Unexpected IOMMU page fault!\n");
- pr_err("base = %08x\n", (unsigned int) base);
+ pr_err("base = %08x\n", (unsigned int)iommu->base);
- ret = __enable_clocks(drvdata);
+ ret = __enable_clocks(iommu);
if (ret)
goto fail;
- for (i = 0; i < drvdata->ncb; i++) {
- fsr = GET_FSR(base, i);
+ for (i = 0; i < iommu->ncb; i++) {
+ fsr = GET_FSR(iommu->base, i);
if (fsr) {
pr_err("Fault occurred in context %d.\n", i);
pr_err("Interesting registers:\n");
- print_ctx_regs(base, i);
- SET_FSR(base, i, 0x4000000F);
+ print_ctx_regs(iommu->base, i);
+ SET_FSR(iommu->base, i, 0x4000000F);
}
}
- __disable_clocks(drvdata);
+ __disable_clocks(iommu);
fail:
spin_unlock(&msm_iommu_lock);
return 0;
}
-static const struct iommu_ops msm_iommu_ops = {
+static struct iommu_ops msm_iommu_ops = {
.capable = msm_iommu_capable,
.domain_alloc = msm_iommu_domain_alloc,
.domain_free = msm_iommu_domain_free,
@@ -682,54 +775,163 @@ static const struct iommu_ops msm_iommu_ops = {
.map_sg = default_iommu_map_sg,
.iova_to_phys = msm_iommu_iova_to_phys,
.pgsize_bitmap = MSM_IOMMU_PGSIZES,
+ .of_xlate = qcom_iommu_of_xlate,
};
-static int __init get_tex_class(int icp, int ocp, int mt, int nos)
+static int msm_iommu_probe(struct platform_device *pdev)
{
- int i = 0;
- unsigned int prrr = 0;
- unsigned int nmrr = 0;
- int c_icp, c_ocp, c_mt, c_nos;
-
- RCP15_PRRR(prrr);
- RCP15_NMRR(nmrr);
-
- for (i = 0; i < NUM_TEX_CLASS; i++) {
- c_nos = PRRR_NOS(prrr, i);
- c_mt = PRRR_MT(prrr, i);
- c_icp = NMRR_ICP(nmrr, i);
- c_ocp = NMRR_OCP(nmrr, i);
-
- if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
- return i;
+ struct resource *r;
+ struct msm_iommu_dev *iommu;
+ int ret, par, val;
+
+ iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
+ if (!iommu)
+ return -ENODEV;
+
+ iommu->dev = &pdev->dev;
+ INIT_LIST_HEAD(&iommu->ctx_list);
+
+ iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
+ if (IS_ERR(iommu->pclk)) {
+ dev_err(iommu->dev, "could not get smmu_pclk\n");
+ return PTR_ERR(iommu->pclk);
+ }
+ ret = clk_prepare(iommu->pclk);
+ if (ret) {
+ dev_err(iommu->dev, "could not prepare smmu_pclk\n");
+ return ret;
+ }
+
+ iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
+ if (IS_ERR(iommu->clk)) {
+ dev_err(iommu->dev, "could not get iommu_clk\n");
+ clk_unprepare(iommu->pclk);
+ return PTR_ERR(iommu->clk);
+ }
+ ret = clk_prepare(iommu->clk);
+ if (ret) {
+ dev_err(iommu->dev, "could not prepare iommu_clk\n");
+ clk_unprepare(iommu->pclk);
+ return ret;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ iommu->base = devm_ioremap_resource(iommu->dev, r);
+ if (IS_ERR(iommu->base)) {
+ dev_err(iommu->dev, "could not get iommu base\n");
+ ret = PTR_ERR(iommu->base);
+ goto fail;
+ }
+
+ iommu->irq = platform_get_irq(pdev, 0);
+ if (iommu->irq < 0) {
+ dev_err(iommu->dev, "could not get iommu irq\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ ret = of_property_read_u32(iommu->dev->of_node, "ncb", &val);
+ if (ret) {
+ dev_err(iommu->dev, "could not get ncb\n");
+ goto fail;
+ }
+ iommu->ncb = val;
+
+ msm_iommu_reset(iommu->base, iommu->ncb);
+ SET_M(iommu->base, 0, 1);
+ SET_PAR(iommu->base, 0, 0);
+ SET_V2PCFG(iommu->base, 0, 1);
+ SET_V2PPR(iommu->base, 0, 0);
+ par = GET_PAR(iommu->base, 0);
+ SET_V2PCFG(iommu->base, 0, 0);
+ SET_M(iommu->base, 0, 0);
+
+ if (!par) {
+ pr_err("Invalid PAR value detected\n");
+ ret = -ENODEV;
+ goto fail;
}
- return -ENODEV;
+ ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
+ msm_iommu_fault_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
+ "msm_iommu_secure_irpt_handler",
+ iommu);
+ if (ret) {
+ pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
+ goto fail;
+ }
+
+ list_add(&iommu->dev_node, &qcom_iommu_devices);
+ of_iommu_set_ops(pdev->dev.of_node, &msm_iommu_ops);
+
+ platform_set_drvdata(pdev, iommu);
+
+ pr_info("device mapped at %p, irq %d with %d ctx banks\n",
+ iommu->base, iommu->irq, iommu->ncb);
+
+ return 0;
+fail:
+ clk_unprepare(iommu->clk);
+ clk_unprepare(iommu->pclk);
+ return ret;
}
-static void __init setup_iommu_tex_classes(void)
+static const struct of_device_id msm_iommu_dt_match[] = {
+ { .compatible = "qcom,iommu-v0"},
+ {}
+};
+
+static int msm_iommu_remove(struct platform_device *pdev)
+{
+ struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);
+
+ clk_unprepare(iommu->clk);
+ clk_unprepare(iommu->pclk);
+ return 0;
+}
+
+static struct platform_driver msm_iommu_driver = {
+ .driver = {
+ .name = "msm_iommu",
+ .of_match_table = msm_iommu_dt_match,
+ },
+ .probe = msm_iommu_probe,
+ .remove = msm_iommu_remove,
+};
+
+static int __init msm_iommu_driver_init(void)
{
- msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
- get_tex_class(CP_NONCACHED, CP_NONCACHED, MT_NORMAL, 1);
+ int ret;
- msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
- get_tex_class(CP_WB_WA, CP_WB_WA, MT_NORMAL, 1);
+ ret = platform_driver_register(&msm_iommu_driver);
+ if (ret != 0)
+ pr_err("Failed to register IOMMU driver\n");
- msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
- get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_NORMAL, 1);
+ return ret;
+}
- msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
- get_tex_class(CP_WT, CP_WT, MT_NORMAL, 1);
+static void __exit msm_iommu_driver_exit(void)
+{
+ platform_driver_unregister(&msm_iommu_driver);
}
+subsys_initcall(msm_iommu_driver_init);
+module_exit(msm_iommu_driver_exit);
+
static int __init msm_iommu_init(void)
{
- setup_iommu_tex_classes();
bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
return 0;
}
-subsys_initcall(msm_iommu_init);
+static int __init msm_iommu_of_setup(struct device_node *np)
+{
+ msm_iommu_init();
+ return 0;
+}
+
+IOMMU_OF_DECLARE(msm_iommu_of, "qcom,iommu-v0", msm_iommu_of_setup);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
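
The reworked msm_iommu.c above keys everything off two lists: each probed IOMMU instance is added to the global qcom_iommu_devices list, and qcom_iommu_of_xlate() records one msm_iommu_ctx_dev master per client device node on that instance's ctx_list. A minimal sketch of the lookup implied by the attach path, factored into a helper; the helper name is hypothetical and not part of the patch, and callers are assumed to hold msm_iommu_lock, as the attach/detach paths do:

static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
{
	struct msm_iommu_dev *iommu;
	struct msm_iommu_ctx_dev *master;

	/* Walk every probed IOMMU instance ... */
	list_for_each_entry(iommu, &qcom_iommu_devices, dev_node)
		/* ... and every master recorded for it by of_xlate() ... */
		list_for_each_entry(master, &iommu->ctx_list, list)
			/* ... until one was registered for this client node. */
			if (master->of_node == dev->of_node)
				return iommu;

	return NULL;
}
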
diff --git a/drivers/iommu/msm_iommu.h b/drivers/iommu/msm_iommu.h
index 5c7c955e6d25..4ca25d50d679 100644
--- a/drivers/iommu/msm_iommu.h
+++ b/drivers/iommu/msm_iommu.h
@@ -42,74 +42,53 @@
*/
#define MAX_NUM_MIDS 32
+/* Maximum number of context banks that can be present in IOMMU */
+#define IOMMU_MAX_CBS 128
+
/**
* struct msm_iommu_dev - a single IOMMU hardware instance
- * name Human-readable name given to this IOMMU HW instance
* ncb Number of context banks present on this IOMMU HW instance
+ * dev: IOMMU device
+ * irq: Interrupt number
+ * clk: The bus clock for this IOMMU hardware instance
+ * pclk: The clock for the IOMMU bus interconnect
+ * dev_node:	list head in qcom_iommu_devices
+ * dom_node: list head for domain
+ * ctx_list: list of 'struct msm_iommu_ctx_dev'
+ * context_map: Bitmap to track allocated context banks
*/
struct msm_iommu_dev {
- const char *name;
+ void __iomem *base;
int ncb;
+ struct device *dev;
+ int irq;
+ struct clk *clk;
+ struct clk *pclk;
+ struct list_head dev_node;
+ struct list_head dom_node;
+ struct list_head ctx_list;
+ DECLARE_BITMAP(context_map, IOMMU_MAX_CBS);
};
/**
* struct msm_iommu_ctx_dev - an IOMMU context bank instance
- * name Human-readable name given to this context bank
+ * of_node	device node pointer of the client device
* num Index of this context bank within the hardware
* mids List of Machine IDs that are to be mapped into this context
* bank, terminated by -1. The MID is a set of signals on the
* AXI bus that identifies the function associated with a specific
* memory request. (See ARM spec).
+ * num_mids Total number of mids
+ * list		list head in ctx_list
*/
struct msm_iommu_ctx_dev {
- const char *name;
+ struct device_node *of_node;
int num;
int mids[MAX_NUM_MIDS];
+ int num_mids;
+ struct list_head list;
};
-
-/**
- * struct msm_iommu_drvdata - A single IOMMU hardware instance
- * @base: IOMMU config port base address (VA)
- * @ncb The number of contexts on this IOMMU
- * @irq: Interrupt number
- * @clk: The bus clock for this IOMMU hardware instance
- * @pclk: The clock for the IOMMU bus interconnect
- *
- * A msm_iommu_drvdata holds the global driver data about a single piece
- * of an IOMMU hardware instance.
- */
-struct msm_iommu_drvdata {
- void __iomem *base;
- int irq;
- int ncb;
- struct clk *clk;
- struct clk *pclk;
-};
-
-/**
- * struct msm_iommu_ctx_drvdata - an IOMMU context bank instance
- * @num: Hardware context number of this context
- * @pdev: Platform device associated wit this HW instance
- * @attached_elm: List element for domains to track which devices are
- * attached to them
- *
- * A msm_iommu_ctx_drvdata holds the driver data for a single context bank
- * within each IOMMU hardware instance
- */
-struct msm_iommu_ctx_drvdata {
- int num;
- struct platform_device *pdev;
- struct list_head attached_elm;
-};
-
-/*
- * Look up an IOMMU context device by its context name. NULL if none found.
- * Useful for testing and drivers that do not yet fully have IOMMU stuff in
- * their platform devices.
- */
-struct device *msm_iommu_get_ctx(const char *ctx_name);
-
/*
* Interrupt handler for the IOMMU context fault interrupt. Hooking the
* interrupt is not supported in the API yet, but this will print an error
diff --git a/drivers/iommu/msm_iommu_dev.c b/drivers/iommu/msm_iommu_dev.c
deleted file mode 100644
index b6d01f97e537..000000000000
--- a/drivers/iommu/msm_iommu_dev.c
+++ /dev/null
@@ -1,392 +0,0 @@
-/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/iommu.h>
-#include <linux/interrupt.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-
-#include "msm_iommu_hw-8xxx.h"
-#include "msm_iommu.h"
-
-struct iommu_ctx_iter_data {
- /* input */
- const char *name;
-
- /* output */
- struct device *dev;
-};
-
-static struct platform_device *msm_iommu_root_dev;
-
-static int each_iommu_ctx(struct device *dev, void *data)
-{
- struct iommu_ctx_iter_data *res = data;
- struct msm_iommu_ctx_dev *c = dev->platform_data;
-
- if (!res || !c || !c->name || !res->name)
- return -EINVAL;
-
- if (!strcmp(res->name, c->name)) {
- res->dev = dev;
- return 1;
- }
- return 0;
-}
-
-static int each_iommu(struct device *dev, void *data)
-{
- return device_for_each_child(dev, data, each_iommu_ctx);
-}
-
-struct device *msm_iommu_get_ctx(const char *ctx_name)
-{
- struct iommu_ctx_iter_data r;
- int found;
-
- if (!msm_iommu_root_dev) {
- pr_err("No root IOMMU device.\n");
- goto fail;
- }
-
- r.name = ctx_name;
- found = device_for_each_child(&msm_iommu_root_dev->dev, &r, each_iommu);
-
- if (!found) {
- pr_err("Could not find context <%s>\n", ctx_name);
- goto fail;
- }
-
- return r.dev;
-fail:
- return NULL;
-}
-EXPORT_SYMBOL(msm_iommu_get_ctx);
-
-static void msm_iommu_reset(void __iomem *base, int ncb)
-{
- int ctx;
-
- SET_RPUE(base, 0);
- SET_RPUEIE(base, 0);
- SET_ESRRESTORE(base, 0);
- SET_TBE(base, 0);
- SET_CR(base, 0);
- SET_SPDMBE(base, 0);
- SET_TESTBUSCR(base, 0);
- SET_TLBRSW(base, 0);
- SET_GLOBAL_TLBIALL(base, 0);
- SET_RPU_ACR(base, 0);
- SET_TLBLKCRWE(base, 1);
-
- for (ctx = 0; ctx < ncb; ctx++) {
- SET_BPRCOSH(base, ctx, 0);
- SET_BPRCISH(base, ctx, 0);
- SET_BPRCNSH(base, ctx, 0);
- SET_BPSHCFG(base, ctx, 0);
- SET_BPMTCFG(base, ctx, 0);
- SET_ACTLR(base, ctx, 0);
- SET_SCTLR(base, ctx, 0);
- SET_FSRRESTORE(base, ctx, 0);
- SET_TTBR0(base, ctx, 0);
- SET_TTBR1(base, ctx, 0);
- SET_TTBCR(base, ctx, 0);
- SET_BFBCR(base, ctx, 0);
- SET_PAR(base, ctx, 0);
- SET_FAR(base, ctx, 0);
- SET_CTX_TLBIALL(base, ctx, 0);
- SET_TLBFLPTER(base, ctx, 0);
- SET_TLBSLPTER(base, ctx, 0);
- SET_TLBLKCR(base, ctx, 0);
- SET_PRRR(base, ctx, 0);
- SET_NMRR(base, ctx, 0);
- SET_CONTEXTIDR(base, ctx, 0);
- }
-}
-
-static int msm_iommu_probe(struct platform_device *pdev)
-{
- struct resource *r;
- struct clk *iommu_clk;
- struct clk *iommu_pclk;
- struct msm_iommu_drvdata *drvdata;
- struct msm_iommu_dev *iommu_dev = dev_get_platdata(&pdev->dev);
- void __iomem *regs_base;
- int ret, irq, par;
-
- if (pdev->id == -1) {
- msm_iommu_root_dev = pdev;
- return 0;
- }
-
- drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
-
- if (!drvdata) {
- ret = -ENOMEM;
- goto fail;
- }
-
- if (!iommu_dev) {
- ret = -ENODEV;
- goto fail;
- }
-
- iommu_pclk = clk_get(NULL, "smmu_pclk");
- if (IS_ERR(iommu_pclk)) {
- ret = -ENODEV;
- goto fail;
- }
-
- ret = clk_prepare_enable(iommu_pclk);
- if (ret)
- goto fail_enable;
-
- iommu_clk = clk_get(&pdev->dev, "iommu_clk");
-
- if (!IS_ERR(iommu_clk)) {
- if (clk_get_rate(iommu_clk) == 0)
- clk_set_rate(iommu_clk, 1);
-
- ret = clk_prepare_enable(iommu_clk);
- if (ret) {
- clk_put(iommu_clk);
- goto fail_pclk;
- }
- } else
- iommu_clk = NULL;
-
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "physbase");
- regs_base = devm_ioremap_resource(&pdev->dev, r);
- if (IS_ERR(regs_base)) {
- ret = PTR_ERR(regs_base);
- goto fail_clk;
- }
-
- irq = platform_get_irq_byname(pdev, "secure_irq");
- if (irq < 0) {
- ret = -ENODEV;
- goto fail_clk;
- }
-
- msm_iommu_reset(regs_base, iommu_dev->ncb);
-
- SET_M(regs_base, 0, 1);
- SET_PAR(regs_base, 0, 0);
- SET_V2PCFG(regs_base, 0, 1);
- SET_V2PPR(regs_base, 0, 0);
- par = GET_PAR(regs_base, 0);
- SET_V2PCFG(regs_base, 0, 0);
- SET_M(regs_base, 0, 0);
-
- if (!par) {
- pr_err("%s: Invalid PAR value detected\n", iommu_dev->name);
- ret = -ENODEV;
- goto fail_clk;
- }
-
- ret = request_irq(irq, msm_iommu_fault_handler, 0,
- "msm_iommu_secure_irpt_handler", drvdata);
- if (ret) {
- pr_err("Request IRQ %d failed with ret=%d\n", irq, ret);
- goto fail_clk;
- }
-
-
- drvdata->pclk = iommu_pclk;
- drvdata->clk = iommu_clk;
- drvdata->base = regs_base;
- drvdata->irq = irq;
- drvdata->ncb = iommu_dev->ncb;
-
- pr_info("device %s mapped at %p, irq %d with %d ctx banks\n",
- iommu_dev->name, regs_base, irq, iommu_dev->ncb);
-
- platform_set_drvdata(pdev, drvdata);
-
- clk_disable(iommu_clk);
-
- clk_disable(iommu_pclk);
-
- return 0;
-fail_clk:
- if (iommu_clk) {
- clk_disable(iommu_clk);
- clk_put(iommu_clk);
- }
-fail_pclk:
- clk_disable_unprepare(iommu_pclk);
-fail_enable:
- clk_put(iommu_pclk);
-fail:
- kfree(drvdata);
- return ret;
-}
-
-static int msm_iommu_remove(struct platform_device *pdev)
-{
- struct msm_iommu_drvdata *drv = NULL;
-
- drv = platform_get_drvdata(pdev);
- if (drv) {
- if (drv->clk) {
- clk_unprepare(drv->clk);
- clk_put(drv->clk);
- }
- clk_unprepare(drv->pclk);
- clk_put(drv->pclk);
- memset(drv, 0, sizeof(*drv));
- kfree(drv);
- }
- return 0;
-}
-
-static int msm_iommu_ctx_probe(struct platform_device *pdev)
-{
- struct msm_iommu_ctx_dev *c = dev_get_platdata(&pdev->dev);
- struct msm_iommu_drvdata *drvdata;
- struct msm_iommu_ctx_drvdata *ctx_drvdata;
- int i, ret;
-
- if (!c || !pdev->dev.parent)
- return -EINVAL;
-
- drvdata = dev_get_drvdata(pdev->dev.parent);
- if (!drvdata)
- return -ENODEV;
-
- ctx_drvdata = kzalloc(sizeof(*ctx_drvdata), GFP_KERNEL);
- if (!ctx_drvdata)
- return -ENOMEM;
-
- ctx_drvdata->num = c->num;
- ctx_drvdata->pdev = pdev;
-
- INIT_LIST_HEAD(&ctx_drvdata->attached_elm);
- platform_set_drvdata(pdev, ctx_drvdata);
-
- ret = clk_prepare_enable(drvdata->pclk);
- if (ret)
- goto fail;
-
- if (drvdata->clk) {
- ret = clk_prepare_enable(drvdata->clk);
- if (ret) {
- clk_disable_unprepare(drvdata->pclk);
- goto fail;
- }
- }
-
- /* Program the M2V tables for this context */
- for (i = 0; i < MAX_NUM_MIDS; i++) {
- int mid = c->mids[i];
- if (mid == -1)
- break;
-
- SET_M2VCBR_N(drvdata->base, mid, 0);
- SET_CBACR_N(drvdata->base, c->num, 0);
-
- /* Set VMID = 0 */
- SET_VMID(drvdata->base, mid, 0);
-
- /* Set the context number for that MID to this context */
- SET_CBNDX(drvdata->base, mid, c->num);
-
- /* Set MID associated with this context bank to 0*/
- SET_CBVMID(drvdata->base, c->num, 0);
-
- /* Set the ASID for TLB tagging for this context */
- SET_CONTEXTIDR_ASID(drvdata->base, c->num, c->num);
-
- /* Set security bit override to be Non-secure */
- SET_NSCFG(drvdata->base, mid, 3);
- }
-
- clk_disable(drvdata->clk);
- clk_disable(drvdata->pclk);
-
- dev_info(&pdev->dev, "context %s using bank %d\n", c->name, c->num);
- return 0;
-fail:
- kfree(ctx_drvdata);
- return ret;
-}
-
-static int msm_iommu_ctx_remove(struct platform_device *pdev)
-{
- struct msm_iommu_ctx_drvdata *drv = NULL;
- drv = platform_get_drvdata(pdev);
- if (drv) {
- memset(drv, 0, sizeof(struct msm_iommu_ctx_drvdata));
- kfree(drv);
- }
- return 0;
-}
-
-static struct platform_driver msm_iommu_driver = {
- .driver = {
- .name = "msm_iommu",
- },
- .probe = msm_iommu_probe,
- .remove = msm_iommu_remove,
-};
-
-static struct platform_driver msm_iommu_ctx_driver = {
- .driver = {
- .name = "msm_iommu_ctx",
- },
- .probe = msm_iommu_ctx_probe,
- .remove = msm_iommu_ctx_remove,
-};
-
-static int __init msm_iommu_driver_init(void)
-{
- int ret;
- ret = platform_driver_register(&msm_iommu_driver);
- if (ret != 0) {
- pr_err("Failed to register IOMMU driver\n");
- goto error;
- }
-
- ret = platform_driver_register(&msm_iommu_ctx_driver);
- if (ret != 0) {
- platform_driver_unregister(&msm_iommu_driver);
- pr_err("Failed to register IOMMU context driver\n");
- goto error;
- }
-
-error:
- return ret;
-}
-
-static void __exit msm_iommu_driver_exit(void)
-{
- platform_driver_unregister(&msm_iommu_ctx_driver);
- platform_driver_unregister(&msm_iommu_driver);
-}
-
-subsys_initcall(msm_iommu_driver_init);
-module_exit(msm_iommu_driver_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
diff --git a/drivers/iommu/msm_iommu_hw-8xxx.h b/drivers/iommu/msm_iommu_hw-8xxx.h
index fc160101dead..84ba573927d1 100644
--- a/drivers/iommu/msm_iommu_hw-8xxx.h
+++ b/drivers/iommu/msm_iommu_hw-8xxx.h
@@ -61,8 +61,9 @@ do { \
#define FL_TYPE_TABLE (1 << 0)
#define FL_TYPE_SECT (2 << 0)
#define FL_SUPERSECTION (1 << 18)
-#define FL_AP_WRITE (1 << 10)
-#define FL_AP_READ (1 << 11)
+#define FL_AP0 (1 << 10)
+#define FL_AP1 (1 << 11)
+#define FL_AP2 (1 << 15)
#define FL_SHARED (1 << 16)
#define FL_BUFFERABLE (1 << 2)
#define FL_CACHEABLE (1 << 3)
@@ -77,6 +78,7 @@ do { \
#define SL_TYPE_SMALL (2 << 0)
#define SL_AP0 (1 << 4)
#define SL_AP1 (2 << 4)
+#define SL_AP2 (1 << 9)
#define SL_SHARED (1 << 10)
#define SL_BUFFERABLE (1 << 2)
#define SL_CACHEABLE (1 << 3)
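
The rename above (FL_AP_WRITE/FL_AP_READ to FL_AP0/FL_AP1) does not change the encoded value: bits 10 and 11 are exactly the AP[0] and AP[1] fields of an ARMv7 short-descriptor section entry, and msm_iommu_map() still sets both to grant read/write access. The newly defined FL_AP2 (bit 15) is the AP[2] bit which, per the ARMv7 short-descriptor access-permission encoding (with AFE disabled), would turn a mapping read-only if set. A small illustration of the section descriptor composed in msm_iommu_map(), using the renamed bits; the macro name is hypothetical:

/* 1MB section descriptor with full (read/write) access, as built in
 * msm_iommu_map(); ORing in FL_AP2 would make it read-only instead. */
#define MSM_EXAMPLE_SECTION_DESC(pa, pgprot) \
	(((pa) & 0xFFF00000) | FL_AP1 | FL_AP0 | FL_NG | \
	 FL_TYPE_SECT | FL_SHARED | (pgprot))
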
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 60ba238090d9..37b7c3e4c0b6 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -133,6 +133,19 @@ struct iommu_ops *of_iommu_get_ops(struct device_node *np)
return ops;
}
+/**
+ * of_iommu_configure - Configure and return the IOMMU for a device
+ * @dev: device for which to configure the IOMMU
+ * @master_np: device node of the bus master connected to the IOMMU
+ *
+ * The master_np parameter specifies the device node of the bus master seen by
+ * the IOMMU. This is usually the device node of the dev device, but can be the
+ * device node of a bridge when the device is dynamically discovered and
+ * instantiated and thus has no device node (such as PCI devices for instance).
+ *
+ * Return a pointer to the iommu_ops for the device, NULL if the device isn't
+ * connected to an IOMMU, or an ERR_PTR-encoded error (such as -EPROBE_DEFER)
+ * if an error occurs.
+ */
struct iommu_ops *of_iommu_configure(struct device *dev,
struct device_node *master_np)
{
@@ -159,8 +172,18 @@ struct iommu_ops *of_iommu_configure(struct device *dev,
np = iommu_spec.np;
ops = of_iommu_get_ops(np);
- if (!ops || !ops->of_xlate || ops->of_xlate(dev, &iommu_spec))
+ if (!ops) {
+ const struct of_device_id *oid;
+
+ oid = of_match_node(&__iommu_of_table, np);
+ ops = oid ? ERR_PTR(-EPROBE_DEFER) : NULL;
goto err_put_node;
+ }
+
+ if (!ops->of_xlate || ops->of_xlate(dev, &iommu_spec)) {
+ ops = NULL;
+ goto err_put_node;
+ }
of_node_put(np);
idx++;
@@ -170,7 +193,7 @@ struct iommu_ops *of_iommu_configure(struct device *dev,
err_put_node:
of_node_put(np);
- return NULL;
+ return ops;
}
void __init of_iommu_init(void)
@@ -181,7 +204,7 @@ void __init of_iommu_init(void)
for_each_matching_node_and_match(np, matches, &match) {
const of_iommu_init_fn init_fn = match->data;
- if (init_fn(np))
+ if (init_fn && init_fn(np))
pr_err("Failed to initialise IOMMU %s\n",
of_node_full_name(np));
}
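
With the change above, of_iommu_configure() can return three kinds of values: valid ops, NULL (no IOMMU upstream of the master), or an ERR_PTR such as -EPROBE_DEFER when the IOMMU is listed in __iommu_of_table but its driver has not probed yet. A minimal sketch of how a caller is expected to distinguish the three cases; the wrapper function is hypothetical (in the kernel the real caller is the bus/arch DMA-configuration code), and <linux/err.h> plus <linux/of_iommu.h> are assumed to be included:

static int example_configure_iommu(struct device *dev)
{
	struct iommu_ops *ops;

	ops = of_iommu_configure(dev, dev->of_node);
	if (IS_ERR(ops))
		return PTR_ERR(ops);	/* e.g. -EPROBE_DEFER: retry the probe later */
	if (!ops)
		return 0;		/* no IOMMU translates this master */

	/* ops is valid here: arch code would install IOMMU-backed DMA ops */
	return 0;
}
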
diff --git a/drivers/iommu/qcom/Kconfig b/drivers/iommu/qcom/Kconfig
new file mode 100644
index 000000000000..a0e41bb305af
--- /dev/null
+++ b/drivers/iommu/qcom/Kconfig
@@ -0,0 +1,43 @@
+# Qualcomm IOMMU support
+
+# QCOM IOMMUv1 support
+config QCOM_IOMMU_V1
+ bool "Qualcomm IOMMUv1 Support"
+ depends on ARCH_QCOM
+ select IOMMU_API
+ select ARM_DMA_USE_IOMMU if ARM
+ select ARM64_DMA_USE_IOMMU if ARM64
+ help
+ Support for the IOMMUs (v1) found on certain Qualcomm SOCs.
+ These IOMMUs allow virtualization of the address space used by most
+ cores within the multimedia subsystem.
+
+ If unsure, say N here.
+
+config MMU500_ACTIVE_PREFETCH_BUG_WITH_SECTION_MAPPING
+ bool "Don't align virtual address at 1MB boundary"
+ depends on QCOM_IOMMU_V1
+ help
+ Say Y here if the MMU500 revision has a bug in active prefetch
+ which can cause TLB corruptions due to 1MB alignment of a buffer.
+	  Here is the sequence that will surface this bug:
+	  1) Create a 2-level mapping in v7S format for a 1MB buffer. The
+	  start of the buffer should be at an even MB boundary.
+	  2) Create a section mapping for a 1MB buffer adjacent to the
+	  mapping from step 1.
+	  3) Access the last page of the 2-level mapping, followed by an
+	  access into the section-mapped area.
+	  4) Step 3 results in TLB corruption, and this corruption can lead
+	  to misbehavior (such as a permission fault) on subsequent
+	  transactions.
+
+	  If unsure, say Y here as long as IOMMU mappings will not exhaust
+	  the VA space.
+
+config IOMMU_PGTABLES_L2
+ bool "Allow SMMU page tables in the L2 cache (Experimental)"
+ depends on QCOM_IOMMU_V1 && MMU && SMP && CPU_DCACHE_DISABLE=n
+ help
+ Improves TLB miss latency at the expense of potential L2 pollution.
+ However, with large multimedia buffers, the TLB should mostly contain
+ section mappings and TLB misses should be quite infrequent.
+ Most people can probably say Y here.
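
The sequence described in the MMU500_ACTIVE_PREFETCH_BUG_WITH_SECTION_MAPPING help text above can be expressed with the generic IOMMU API. This is only an illustrative sketch, not part of the patch: the domain, IOVA and physical address are assumptions, and return values are ignored for brevity. Mapping the first megabyte 4K at a time forces a second-level v7S table, while the single SZ_1M call for the adjacent buffer produces a section mapping.

#include <linux/iommu.h>
#include <linux/sizes.h>

static void mmu500_prefetch_sequence(struct iommu_domain *domain,
				     phys_addr_t pa)
{
	unsigned long iova = 0x40000000;	/* even-MB-aligned IOVA (step 1) */
	size_t off;

	/* Step 1: 1MB buffer mapped with 4K pages -> 2-level mapping */
	for (off = 0; off < SZ_1M; off += SZ_4K)
		iommu_map(domain, iova + off, pa + off, SZ_4K,
			  IOMMU_READ | IOMMU_WRITE);

	/* Step 2: adjacent 1MB buffer mapped in one call -> section mapping */
	iommu_map(domain, iova + SZ_1M, pa + SZ_1M, SZ_1M,
		  IOMMU_READ | IOMMU_WRITE);

	/* Steps 3 and 4: the bus master then accesses the last 4K page of
	 * the first buffer followed by the section-mapped area, which can
	 * corrupt the TLB on affected MMU-500 revisions. */
}
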
diff --git a/drivers/iommu/qcom/Makefile b/drivers/iommu/qcom/Makefile
new file mode 100644
index 000000000000..e0b1159227be
--- /dev/null
+++ b/drivers/iommu/qcom/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_QCOM_IOMMU_V1) += qcom-iommu.o
+
+qcom-iommu-y += msm_iommu.o
+qcom-iommu-y += msm_iommu-v1.o
+qcom-iommu-y += msm_iommu_dev-v1.o
+qcom-iommu-y += msm_iommu_sec.o
+qcom-iommu-y += msm_iommu_pagetable.o
diff --git a/drivers/iommu/qcom/msm_iommu-v1.c b/drivers/iommu/qcom/msm_iommu-v1.c
new file mode 100644
index 000000000000..82c7f2f16658
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu-v1.c
@@ -0,0 +1,1538 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/msm-bus.h>
+#include <linux/clk.h>
+#include <linux/scatterlist.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/notifier.h>
+#include <linux/iopoll.h>
+#include <linux/qcom_iommu.h>
+#include <asm/sizes.h>
+
+#include "msm_iommu_hw-v1.h"
+#include "msm_iommu_priv.h"
+#include "msm_iommu_perfmon.h"
+#include "msm_iommu_pagetable.h"
+
+#ifdef CONFIG_IOMMU_LPAE
+/* bitmap of the page sizes currently supported */
+#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_2M | SZ_32M | SZ_1G)
+#else
+/* bitmap of the page sizes currently supported */
+#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+#endif
+
+#define IOMMU_MSEC_STEP 10
+#define IOMMU_MSEC_TIMEOUT 5000
+
+static DEFINE_MUTEX(msm_iommu_lock);
+struct dump_regs_tbl_entry dump_regs_tbl[MAX_DUMP_REGS];
+
+static int __enable_regulators(struct msm_iommu_drvdata *drvdata)
+{
+ ++drvdata->powered_on;
+
+ return 0;
+}
+
+static void __disable_regulators(struct msm_iommu_drvdata *drvdata)
+{
+ --drvdata->powered_on;
+}
+
+static int apply_bus_vote(struct msm_iommu_drvdata *drvdata, unsigned int vote)
+{
+ int ret = 0;
+
+ if (drvdata->bus_client) {
+ ret = msm_bus_scale_client_update_request(drvdata->bus_client,
+ vote);
+ if (ret)
+ pr_err("%s: Failed to vote for bus: %d\n", __func__,
+ vote);
+ }
+
+ return ret;
+}
+
+static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
+{
+ int ret;
+
+ ret = clk_prepare_enable(drvdata->iface);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(drvdata->core);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ clk_disable_unprepare(drvdata->iface);
+ return ret;
+}
+
+static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
+{
+ clk_disable_unprepare(drvdata->core);
+ clk_disable_unprepare(drvdata->iface);
+}
+
+static void _iommu_lock_acquire(unsigned int need_extra_lock)
+{
+ mutex_lock(&msm_iommu_lock);
+}
+
+static void _iommu_lock_release(unsigned int need_extra_lock)
+{
+ mutex_unlock(&msm_iommu_lock);
+}
+
+struct iommu_access_ops iommu_access_ops_v1 = {
+ .iommu_power_on = __enable_regulators,
+ .iommu_power_off = __disable_regulators,
+ .iommu_bus_vote = apply_bus_vote,
+ .iommu_clk_on = __enable_clocks,
+ .iommu_clk_off = __disable_clocks,
+ .iommu_lock_acquire = _iommu_lock_acquire,
+ .iommu_lock_release = _iommu_lock_release,
+};
+
+static BLOCKING_NOTIFIER_HEAD(msm_iommu_notifier_list);
+
+void msm_iommu_register_notify(struct notifier_block *nb)
+{
+ blocking_notifier_chain_register(&msm_iommu_notifier_list, nb);
+}
+EXPORT_SYMBOL(msm_iommu_register_notify);
+
+#ifdef CONFIG_MSM_IOMMU_VBIF_CHECK
+
+#define VBIF_XIN_HALT_CTRL0 0x200
+#define VBIF_XIN_HALT_CTRL1 0x204
+#define VBIF_AXI_HALT_CTRL0 0x208
+#define VBIF_AXI_HALT_CTRL1 0x20C
+
+static void __halt_vbif_xin(void __iomem *vbif_base)
+{
+ pr_err("Halting VBIF_XIN\n");
+ writel_relaxed(0xFFFFFFFF, vbif_base + VBIF_XIN_HALT_CTRL0);
+}
+
+static void __dump_vbif_state(void __iomem *base, void __iomem *vbif_base)
+{
+ unsigned int reg_val;
+
+ reg_val = readl_relaxed(base + MICRO_MMU_CTRL);
+ pr_err("Value of SMMU_IMPLDEF_MICRO_MMU_CTRL = 0x%x\n", reg_val);
+
+ reg_val = readl_relaxed(vbif_base + VBIF_XIN_HALT_CTRL0);
+ pr_err("Value of VBIF_XIN_HALT_CTRL0 = 0x%x\n", reg_val);
+ reg_val = readl_relaxed(vbif_base + VBIF_XIN_HALT_CTRL1);
+ pr_err("Value of VBIF_XIN_HALT_CTRL1 = 0x%x\n", reg_val);
+ reg_val = readl_relaxed(vbif_base + VBIF_AXI_HALT_CTRL0);
+ pr_err("Value of VBIF_AXI_HALT_CTRL0 = 0x%x\n", reg_val);
+ reg_val = readl_relaxed(vbif_base + VBIF_AXI_HALT_CTRL1);
+ pr_err("Value of VBIF_AXI_HALT_CTRL1 = 0x%x\n", reg_val);
+}
+
+static int __check_vbif_state(struct msm_iommu_drvdata const *drvdata)
+{
+ phys_addr_t addr = (phys_addr_t) (drvdata->phys_base
+ - (phys_addr_t) 0x4000);
+ void __iomem *base = ioremap(addr, 0x1000);
+ int ret = 0;
+
+ if (base) {
+ __dump_vbif_state(drvdata->base, base);
+ __halt_vbif_xin(base);
+ __dump_vbif_state(drvdata->base, base);
+ iounmap(base);
+ } else {
+ pr_err("%s: Unable to ioremap\n", __func__);
+ ret = -ENOMEM;
+ }
+ return ret;
+}
+
+static void check_halt_state(struct msm_iommu_drvdata const *drvdata)
+{
+ int res;
+ unsigned int val;
+ void __iomem *base = drvdata->base;
+ char const *name = drvdata->name;
+
+ pr_err("Timed out waiting for IOMMU halt to complete for %s\n", name);
+ res = __check_vbif_state(drvdata);
+ if (res)
+ BUG();
+
+ pr_err("Checking if IOMMU halt completed for %s\n", name);
+
+ res = readl_poll_timeout(GLB_REG(MICRO_MMU_CTRL, base), val,
+ (val & MMU_CTRL_IDLE) == MMU_CTRL_IDLE, 0, 5000000);
+
+ if (res) {
+ pr_err("Timed out (again) waiting for IOMMU halt to complete for %s\n",
+ name);
+ } else {
+ pr_err("IOMMU halt completed. VBIF FIFO most likely not getting drained by master\n");
+ }
+ BUG();
+}
+
+static void check_tlb_sync_state(struct msm_iommu_drvdata const *drvdata,
+ int ctx, struct msm_iommu_priv *priv)
+{
+ int res;
+ unsigned int val;
+ void __iomem *base = drvdata->cb_base;
+ char const *name = drvdata->name;
+
+ pr_err("Timed out waiting for TLB SYNC to complete for %s (client: %s)\n",
+ name, priv->client_name);
+ blocking_notifier_call_chain(&msm_iommu_notifier_list, TLB_SYNC_TIMEOUT,
+ (void *) priv->client_name);
+ res = __check_vbif_state(drvdata);
+ if (res)
+ BUG();
+
+ pr_err("Checking if TLB sync completed for %s\n", name);
+
+ res = readl_poll_timeout(CTX_REG(CB_TLBSTATUS, base, ctx), val,
+ (val & CB_TLBSTATUS_SACTIVE) == 0, 0, 5000000);
+ if (res) {
+ pr_err("Timed out (again) waiting for TLB SYNC to complete for %s\n",
+ name);
+ } else {
+ pr_err("TLB Sync completed. VBIF FIFO most likely not getting drained by master\n");
+ }
+ BUG();
+}
+
+#else
+
+/*
+ * For targets without a VBIF, or for targets with the VBIF check disabled,
+ * just crash immediately to capture the issue.
+ */
+static void check_halt_state(struct msm_iommu_drvdata const *drvdata)
+{
+ BUG();
+}
+
+static void check_tlb_sync_state(struct msm_iommu_drvdata const *drvdata,
+ int ctx, struct msm_iommu_priv *priv)
+{
+ BUG();
+}
+
+#endif
+
+void iommu_halt(struct msm_iommu_drvdata const *iommu_drvdata)
+{
+ void __iomem *base = iommu_drvdata->base;
+ unsigned int val;
+ int res;
+
+ if (!iommu_drvdata->halt_enabled)
+ return;
+
+ SET_MICRO_MMU_CTRL_HALT_REQ(base, 1);
+ res = readl_poll_timeout(GLB_REG(MICRO_MMU_CTRL, base), val,
+ (val & MMU_CTRL_IDLE) == MMU_CTRL_IDLE, 0, 5000000);
+ if (res)
+ check_halt_state(iommu_drvdata);
+
+ /* Ensure device is idle before continuing */
+ mb();
+}
+
+void iommu_resume(const struct msm_iommu_drvdata *iommu_drvdata)
+{
+ if (!iommu_drvdata->halt_enabled)
+ return;
+
+ /* Ensure transactions have completed before releasing the halt */
+ mb();
+
+ SET_MICRO_MMU_CTRL_HALT_REQ(iommu_drvdata->base, 0);
+
+ /*
+ * Ensure write is complete before continuing to ensure
+ * we don't turn off clocks while transaction is still
+ * pending.
+ */
+ mb();
+}
+
+static void __sync_tlb(struct msm_iommu_drvdata *iommu_drvdata, int ctx,
+ struct msm_iommu_priv *priv)
+{
+ void __iomem *base = iommu_drvdata->cb_base;
+ unsigned int val;
+ unsigned int res;
+
+ SET_TLBSYNC(base, ctx, 0);
+ /* No barrier needed due to read dependency */
+
+ res = readl_poll_timeout(CTX_REG(CB_TLBSTATUS, base, ctx), val,
+ (val & CB_TLBSTATUS_SACTIVE) == 0, 0, 5000000);
+ if (res)
+ check_tlb_sync_state(iommu_drvdata, ctx, priv);
+}
+
+#ifdef CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP
+static int __flush_iotlb_va(struct iommu_domain *domain, unsigned int va)
+{
+ struct msm_iommu_priv *priv = to_msm_priv(domain);
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ int ret = 0;
+
+ list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
+ BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
+
+ iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
+ BUG_ON(!iommu_drvdata);
+
+ ret = __enable_clocks(iommu_drvdata);
+ if (ret)
+ goto fail;
+
+ SET_TLBIVA(iommu_drvdata->cb_base, ctx_drvdata->num,
+ ctx_drvdata->asid | (va & CB_TLBIVA_VA));
+ mb();
+ __sync_tlb(iommu_drvdata, ctx_drvdata->num, priv);
+ __disable_clocks(iommu_drvdata);
+ }
+
+fail:
+ return ret;
+}
+#endif
+
+static int __flush_iotlb(struct iommu_domain *domain)
+{
+ struct msm_iommu_priv *priv = to_msm_priv(domain);
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ int ret = 0;
+
+ list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
+ BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
+
+ iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
+ BUG_ON(!iommu_drvdata);
+
+ ret = __enable_clocks(iommu_drvdata);
+ if (ret)
+ goto fail;
+
+ SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num,
+ ctx_drvdata->asid);
+ __sync_tlb(iommu_drvdata, ctx_drvdata->num, priv);
+ __disable_clocks(iommu_drvdata);
+ }
+
+fail:
+ return ret;
+}
+
+/*
+ * May only be called for non-secure iommus
+ */
+static void __reset_iommu(struct msm_iommu_drvdata *iommu_drvdata)
+{
+ void __iomem *base = iommu_drvdata->base;
+ int i, smt_size, res;
+ unsigned long val;
+
+	/* SMMU_ACR is an implementation-defined register.
+	 * Resetting it is not required for some implementations.
+ */
+ if (iommu_drvdata->model != MMU_500)
+ SET_ACR(base, 0);
+ SET_CR2(base, 0);
+ SET_GFAR(base, 0);
+ SET_GFSRRESTORE(base, 0);
+
+ /* Invalidate the entire non-secure TLB */
+ SET_TLBIALLNSNH(base, 0);
+ SET_TLBGSYNC(base, 0);
+ res = readl_poll_timeout(GLB_REG(TLBGSTATUS, base), val,
+ (val & TLBGSTATUS_GSACTIVE) == 0, 0, 5000000);
+ if (res)
+ BUG();
+
+ smt_size = GET_IDR0_NUMSMRG(base);
+
+ for (i = 0; i < smt_size; i++)
+ SET_SMR_VALID(base, i, 0);
+
+ mb();
+}
+
+static void __reset_iommu_secure(struct msm_iommu_drvdata *iommu_drvdata)
+{
+ void __iomem *base = iommu_drvdata->base;
+
+ if (iommu_drvdata->model != MMU_500)
+ SET_NSACR(base, 0);
+ SET_NSCR2(base, 0);
+ SET_NSGFAR(base, 0);
+ SET_NSGFSRRESTORE(base, 0);
+ mb();
+}
+
+static void __program_iommu_secure(struct msm_iommu_drvdata *iommu_drvdata)
+{
+ void __iomem *base = iommu_drvdata->base;
+
+ if (iommu_drvdata->model == MMU_500) {
+ SET_NSACR_SMTNMC_BPTLBEN(base, 1);
+ SET_NSACR_MMUDIS_BPTLBEN(base, 1);
+ SET_NSACR_S2CR_BPTLBEN(base, 1);
+ }
+ SET_NSCR0_SMCFCFG(base, 1);
+ SET_NSCR0_USFCFG(base, 1);
+ SET_NSCR0_STALLD(base, 1);
+ SET_NSCR0_GCFGFIE(base, 1);
+ SET_NSCR0_GCFGFRE(base, 1);
+ SET_NSCR0_GFIE(base, 1);
+ SET_NSCR0_GFRE(base, 1);
+ SET_NSCR0_CLIENTPD(base, 0);
+}
+
+/*
+ * May only be called for non-secure iommus
+ */
+static void __program_iommu(struct msm_iommu_drvdata *drvdata)
+{
+ __reset_iommu(drvdata);
+
+ if (!msm_iommu_get_scm_call_avail())
+ __reset_iommu_secure(drvdata);
+
+ if (drvdata->model == MMU_500) {
+ SET_ACR_SMTNMC_BPTLBEN(drvdata->base, 1);
+ SET_ACR_MMUDIS_BPTLBEN(drvdata->base, 1);
+ SET_ACR_S2CR_BPTLBEN(drvdata->base, 1);
+ }
+ SET_CR0_SMCFCFG(drvdata->base, 1);
+ SET_CR0_USFCFG(drvdata->base, 1);
+ SET_CR0_STALLD(drvdata->base, 1);
+ SET_CR0_GCFGFIE(drvdata->base, 1);
+ SET_CR0_GCFGFRE(drvdata->base, 1);
+ SET_CR0_GFIE(drvdata->base, 1);
+ SET_CR0_GFRE(drvdata->base, 1);
+ SET_CR0_CLIENTPD(drvdata->base, 0);
+
+ if (!msm_iommu_get_scm_call_avail())
+ __program_iommu_secure(drvdata);
+
+ if (drvdata->smmu_local_base)
+ writel_relaxed(0xFFFFFFFF,
+ drvdata->smmu_local_base + SMMU_INTR_SEL_NS);
+
+ mb(); /* Make sure writes complete before returning */
+}
+
+void program_iommu_bfb_settings(void __iomem *base,
+ const struct msm_iommu_bfb_settings *bfb_settings)
+{
+ unsigned int i;
+
+ if (bfb_settings)
+ for (i = 0; i < bfb_settings->length; i++)
+ SET_GLOBAL_REG(base, bfb_settings->regs[i],
+ bfb_settings->data[i]);
+
+ /* Make sure writes complete before returning */
+ mb();
+}
+
+static void __reset_context(struct msm_iommu_drvdata *iommu_drvdata, int ctx)
+{
+ void __iomem *base = iommu_drvdata->cb_base;
+
+	/* Don't set ACTLR to zero: even when the context bank is in
+	 * bypass mode (say, after iommu_detach), the ACTLR value still
+	 * matters for micro-TLB caching.
+ */
+ if (iommu_drvdata->model != MMU_500)
+ SET_ACTLR(base, ctx, 0);
+ SET_FAR(base, ctx, 0);
+ SET_FSRRESTORE(base, ctx, 0);
+ SET_NMRR(base, ctx, 0);
+ SET_PAR(base, ctx, 0);
+ SET_PRRR(base, ctx, 0);
+ SET_SCTLR(base, ctx, 0);
+ SET_TTBCR(base, ctx, 0);
+ SET_TTBR0(base, ctx, 0);
+ SET_TTBR1(base, ctx, 0);
+ mb();
+}
+
+static void __release_smg(void __iomem *base)
+{
+ int i, smt_size;
+ smt_size = GET_IDR0_NUMSMRG(base);
+
+ /* Invalidate all SMGs */
+ for (i = 0; i < smt_size; i++)
+ if (GET_SMR_VALID(base, i))
+ SET_SMR_VALID(base, i, 0);
+}
+
+#ifdef CONFIG_IOMMU_LPAE
+static void msm_iommu_set_ASID(void __iomem *base, unsigned int ctx_num,
+ unsigned int asid)
+{
+ SET_CB_TTBR0_ASID(base, ctx_num, asid);
+}
+#else
+static void msm_iommu_set_ASID(void __iomem *base, unsigned int ctx_num,
+ unsigned int asid)
+{
+ SET_CB_CONTEXTIDR_ASID(base, ctx_num, asid);
+}
+#endif
+
+static void msm_iommu_assign_ASID(const struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *curr_ctx,
+ struct msm_iommu_priv *priv)
+{
+ void __iomem *cb_base = iommu_drvdata->cb_base;
+
+ curr_ctx->asid = curr_ctx->num;
+ msm_iommu_set_ASID(cb_base, curr_ctx->num, curr_ctx->asid);
+}
+
+#ifdef CONFIG_IOMMU_LPAE
+static void msm_iommu_setup_ctx(void __iomem *base, unsigned int ctx)
+{
+ SET_CB_TTBCR_EAE(base, ctx, 1); /* Extended Address Enable (EAE) */
+}
+
+static void msm_iommu_setup_memory_remap(void __iomem *base, unsigned int ctx)
+{
+ SET_CB_MAIR0(base, ctx, msm_iommu_get_mair0());
+ SET_CB_MAIR1(base, ctx, msm_iommu_get_mair1());
+}
+
+static void msm_iommu_setup_pg_l2_redirect(void __iomem *base, unsigned int ctx)
+{
+ /*
+ * Configure page tables as inner-cacheable and shareable to reduce
+ * the TLB miss penalty.
+ */
+ SET_CB_TTBCR_SH0(base, ctx, 3); /* Inner shareable */
+	SET_CB_TTBCR_ORGN0(base, ctx, 1); /* outer cacheable */
+	SET_CB_TTBCR_IRGN0(base, ctx, 1); /* inner cacheable */
+ SET_CB_TTBCR_T0SZ(base, ctx, 0); /* 0GB-4GB */
+
+
+ SET_CB_TTBCR_SH1(base, ctx, 3); /* Inner shareable */
+	SET_CB_TTBCR_ORGN1(base, ctx, 1); /* outer cacheable */
+	SET_CB_TTBCR_IRGN1(base, ctx, 1); /* inner cacheable */
+ SET_CB_TTBCR_T1SZ(base, ctx, 0); /* TTBR1 not used */
+}
+
+#else
+
+static void msm_iommu_setup_ctx(void __iomem *base, unsigned int ctx)
+{
+ /* Turn on TEX Remap */
+ SET_CB_SCTLR_TRE(base, ctx, 1);
+}
+
+static void msm_iommu_setup_memory_remap(void __iomem *base, unsigned int ctx)
+{
+ SET_PRRR(base, ctx, msm_iommu_get_prrr());
+ SET_NMRR(base, ctx, msm_iommu_get_nmrr());
+}
+
+static void msm_iommu_setup_pg_l2_redirect(void __iomem *base, unsigned int ctx)
+{
+ /* Configure page tables as inner-cacheable and shareable to reduce
+ * the TLB miss penalty.
+ */
+ SET_CB_TTBR0_S(base, ctx, 1);
+ SET_CB_TTBR0_NOS(base, ctx, 1);
+ SET_CB_TTBR0_IRGN1(base, ctx, 0); /* WB, WA */
+ SET_CB_TTBR0_IRGN0(base, ctx, 1);
+ SET_CB_TTBR0_RGN(base, ctx, 1); /* WB, WA */
+}
+
+#endif
+
+static int program_m2v_table(struct device *dev, void __iomem *base)
+{
+ struct msm_iommu_ctx_drvdata *ctx_drvdata = dev_get_drvdata(dev);
+ u32 *sids = ctx_drvdata->sids;
+ u32 *sid_mask = ctx_drvdata->sid_mask;
+ unsigned int ctx = ctx_drvdata->num;
+ int num = 0, i, smt_size;
+ int len = ctx_drvdata->nsid;
+
+ smt_size = GET_IDR0_NUMSMRG(base);
+
+ /* Program the M2V tables for this context */
+ for (i = 0; i < len / sizeof(*sids); i++) {
+ for (; num < smt_size; num++)
+ if (GET_SMR_VALID(base, num) == 0)
+ break;
+ BUG_ON(num >= smt_size);
+
+ SET_SMR_VALID(base, num, 1);
+ SET_SMR_MASK(base, num, sid_mask[i]);
+ SET_SMR_ID(base, num, sids[i]);
+
+ SET_S2CR_N(base, num, 0);
+ SET_S2CR_CBNDX(base, num, ctx);
+ SET_S2CR_MEMATTR(base, num, 0x0A);
+ /* Set security bit override to be Non-secure */
+ SET_S2CR_NSCFG(base, num, 3);
+ }
+
+ return 0;
+}
+
+static void program_all_m2v_tables(struct msm_iommu_drvdata *iommu_drvdata)
+{
+ device_for_each_child(iommu_drvdata->dev, iommu_drvdata->base,
+ program_m2v_table);
+}
+
+static void __program_context(struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata,
+ struct msm_iommu_priv *priv, bool is_secure,
+ bool program_m2v)
+{
+ phys_addr_t pn;
+ void __iomem *base = iommu_drvdata->base;
+ void __iomem *cb_base = iommu_drvdata->cb_base;
+ unsigned int ctx = ctx_drvdata->num;
+ phys_addr_t pgtable = __pa(priv->pt.fl_table);
+
+ __reset_context(iommu_drvdata, ctx);
+ msm_iommu_setup_ctx(cb_base, ctx);
+
+ if (priv->pt.redirect)
+ msm_iommu_setup_pg_l2_redirect(cb_base, ctx);
+
+ msm_iommu_setup_memory_remap(cb_base, ctx);
+
+ pn = pgtable >> CB_TTBR0_ADDR_SHIFT;
+ SET_CB_TTBR0_ADDR(cb_base, ctx, pn);
+
+ /* Enable context fault interrupt */
+ SET_CB_SCTLR_CFIE(cb_base, ctx, 1);
+
+ if (iommu_drvdata->model != MMU_500) {
+ /* Redirect all cacheable requests to L2 slave port. */
+ SET_CB_ACTLR_BPRCISH(cb_base, ctx, 1);
+ SET_CB_ACTLR_BPRCOSH(cb_base, ctx, 1);
+ SET_CB_ACTLR_BPRCNSH(cb_base, ctx, 1);
+ }
+
+ /* Enable private ASID namespace */
+ SET_CB_SCTLR_ASIDPNE(cb_base, ctx, 1);
+
+ if (!is_secure) {
+ if (program_m2v)
+ program_all_m2v_tables(iommu_drvdata);
+
+ SET_CBAR_N(base, ctx, 0);
+
+ /* Stage 1 Context with Stage 2 bypass */
+ SET_CBAR_TYPE(base, ctx, 1);
+
+ /* Route page faults to the non-secure interrupt */
+ SET_CBAR_IRPTNDX(base, ctx, 1);
+
+ /* Set VMID to non-secure HLOS */
+ SET_CBAR_VMID(base, ctx, 3);
+
+ /* Bypass is treated as inner-shareable */
+ SET_CBAR_BPSHCFG(base, ctx, 2);
+
+ /* Do not downgrade memory attributes */
+ SET_CBAR_MEMATTR(base, ctx, 0x0A);
+
+ }
+
+ msm_iommu_assign_ASID(iommu_drvdata, ctx_drvdata, priv);
+
+	/* Ensure that the ASID assignment has completed before we use the
+	 * ASID for TLB invalidation. The mb() is required here because the
+	 * two registers are separated by more than 1KB.
+	 */
+ mb();
+
+ SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num,
+ ctx_drvdata->asid);
+ __sync_tlb(iommu_drvdata, ctx_drvdata->num, priv);
+
+ /* Enable the MMU */
+ SET_CB_SCTLR_M(cb_base, ctx, 1);
+ mb();
+}
+
+#ifdef CONFIG_IOMMU_PGTABLES_L2
+#define INITIAL_REDIRECT_VAL 1
+#else
+#define INITIAL_REDIRECT_VAL 0
+#endif
+
+static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
+{
+ struct msm_iommu_priv *priv;
+ int ret = -ENOMEM;
+ struct iommu_domain *domain;
+
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ goto fail_nomem;
+
+ priv->pt.redirect = INITIAL_REDIRECT_VAL;
+
+ INIT_LIST_HEAD(&priv->list_attached);
+
+ ret = msm_iommu_pagetable_alloc(&priv->pt);
+ if (ret)
+ goto fail_nomem;
+
+ domain = &priv->domain;
+
+ return domain;
+
+fail_nomem:
+ kfree(priv);
+	return NULL;
+}
+
+static void msm_iommu_domain_free(struct iommu_domain *domain)
+{
+ struct msm_iommu_priv *priv;
+
+ mutex_lock(&msm_iommu_lock);
+ priv = to_msm_priv(domain);
+
+ if (priv)
+ msm_iommu_pagetable_free(&priv->pt);
+
+ kfree(priv);
+ mutex_unlock(&msm_iommu_lock);
+}
+
+static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+ struct msm_iommu_priv *priv;
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ struct msm_iommu_ctx_drvdata *tmp_drvdata;
+ int ret = 0;
+ int is_secure;
+ bool set_m2v = false;
+
+ mutex_lock(&msm_iommu_lock);
+
+ priv = to_msm_priv(domain);
+ if (!priv || !dev) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ iommu_drvdata = dev_get_drvdata(dev->parent);
+ ctx_drvdata = dev_get_drvdata(dev);
+ if (!iommu_drvdata || !ctx_drvdata) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ++ctx_drvdata->attach_count;
+
+ if (ctx_drvdata->attach_count > 1)
+ goto already_attached;
+
+ if (!list_empty(&ctx_drvdata->attached_elm)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
+ if (tmp_drvdata == ctx_drvdata) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ is_secure = iommu_drvdata->sec_id != -1;
+
+ ret = __enable_regulators(iommu_drvdata);
+ if (ret)
+ goto unlock;
+
+ ret = apply_bus_vote(iommu_drvdata, 1);
+ if (ret)
+ goto unlock;
+
+ ret = __enable_clocks(iommu_drvdata);
+ if (ret) {
+ __disable_regulators(iommu_drvdata);
+ goto unlock;
+ }
+
+ /* We can only do this once */
+ if (!iommu_drvdata->ctx_attach_count) {
+ if (!is_secure) {
+ iommu_halt(iommu_drvdata);
+ __program_iommu(iommu_drvdata);
+ iommu_resume(iommu_drvdata);
+ } else {
+ ret = msm_iommu_sec_program_iommu(iommu_drvdata,
+ ctx_drvdata);
+ if (ret) {
+ __disable_regulators(iommu_drvdata);
+ __disable_clocks(iommu_drvdata);
+ goto unlock;
+ }
+ }
+ program_iommu_bfb_settings(iommu_drvdata->base,
+ iommu_drvdata->bfb_settings);
+ set_m2v = true;
+ }
+
+ iommu_halt(iommu_drvdata);
+
+ __program_context(iommu_drvdata, ctx_drvdata, priv, is_secure, set_m2v);
+
+ iommu_resume(iommu_drvdata);
+
+ /* Ensure TLB is clear */
+ if (iommu_drvdata->model != MMU_500) {
+ SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num,
+ ctx_drvdata->asid);
+ __sync_tlb(iommu_drvdata, ctx_drvdata->num, priv);
+ }
+
+ __disable_clocks(iommu_drvdata);
+
+ list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
+ ctx_drvdata->attached_domain = domain;
+ ++iommu_drvdata->ctx_attach_count;
+
+already_attached:
+ mutex_unlock(&msm_iommu_lock);
+
+ msm_iommu_attached(dev->parent);
+ return ret;
+unlock:
+ mutex_unlock(&msm_iommu_lock);
+ return ret;
+}
+
+static void msm_iommu_detach_dev(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct msm_iommu_priv *priv;
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ int ret;
+ int is_secure;
+
+ if (!dev)
+ return;
+
+ msm_iommu_detached(dev->parent);
+
+ mutex_lock(&msm_iommu_lock);
+ priv = to_msm_priv(domain);
+ if (!priv)
+ goto unlock;
+
+ iommu_drvdata = dev_get_drvdata(dev->parent);
+ ctx_drvdata = dev_get_drvdata(dev);
+ if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
+ goto unlock;
+
+ --ctx_drvdata->attach_count;
+ BUG_ON(ctx_drvdata->attach_count < 0);
+
+ if (ctx_drvdata->attach_count > 0)
+ goto unlock;
+
+ ret = __enable_clocks(iommu_drvdata);
+ if (ret)
+ goto unlock;
+
+ is_secure = iommu_drvdata->sec_id != -1;
+
+ if (iommu_drvdata->model == MMU_500) {
+ SET_TLBIASID(iommu_drvdata->cb_base, ctx_drvdata->num,
+ ctx_drvdata->asid);
+ __sync_tlb(iommu_drvdata, ctx_drvdata->num, priv);
+ }
+
+ ctx_drvdata->asid = -1;
+
+ __reset_context(iommu_drvdata, ctx_drvdata->num);
+
+	/* Only reset the M2V tables on the very last detach */
+ if (!is_secure && iommu_drvdata->ctx_attach_count == 1) {
+ iommu_halt(iommu_drvdata);
+ __release_smg(iommu_drvdata->base);
+ iommu_resume(iommu_drvdata);
+ }
+
+ __disable_clocks(iommu_drvdata);
+
+ apply_bus_vote(iommu_drvdata, 0);
+
+ __disable_regulators(iommu_drvdata);
+
+ list_del_init(&ctx_drvdata->attached_elm);
+ ctx_drvdata->attached_domain = NULL;
+ BUG_ON(iommu_drvdata->ctx_attach_count == 0);
+ --iommu_drvdata->ctx_attach_count;
+unlock:
+ mutex_unlock(&msm_iommu_lock);
+}
+
+static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
+ phys_addr_t pa, size_t len, int prot)
+{
+ struct msm_iommu_priv *priv;
+ int ret = 0;
+
+ mutex_lock(&msm_iommu_lock);
+
+ priv = to_msm_priv(domain);
+ if (!priv) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ ret = msm_iommu_pagetable_map(&priv->pt, va, pa, len, prot);
+ if (ret)
+ goto fail;
+
+#ifdef CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP
+ ret = __flush_iotlb_va(domain, va);
+#endif
+
+fail:
+ mutex_unlock(&msm_iommu_lock);
+ return ret;
+}
+
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+ size_t len)
+{
+ struct msm_iommu_priv *priv;
+ int ret = -ENODEV;
+
+ mutex_lock(&msm_iommu_lock);
+
+ priv = to_msm_priv(domain);
+ if (!priv)
+ goto fail;
+
+ ret = msm_iommu_pagetable_unmap(&priv->pt, va, len);
+ if (ret < 0)
+ goto fail;
+
+ ret = __flush_iotlb(domain);
+
+ msm_iommu_pagetable_free_tables(&priv->pt, va, len);
+fail:
+ mutex_unlock(&msm_iommu_lock);
+
+ /* the IOMMU API requires us to return how many bytes were unmapped */
+ len = ret ? 0 : len;
+ return len;
+}
+
+static size_t msm_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg, unsigned int nents,
+ int prot)
+{
+ struct msm_iommu_priv *priv;
+ struct scatterlist *tmp;
+ unsigned int len = 0;
+ int ret, i;
+
+ mutex_lock(&msm_iommu_lock);
+
+ priv = to_msm_priv(domain);
+ if (!priv) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ for_each_sg(sg, tmp, nents, i)
+ len += tmp->length;
+
+ ret = msm_iommu_pagetable_map_range(&priv->pt, iova, sg, len, prot);
+ if (ret)
+ goto fail;
+
+#ifdef CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP
+ __flush_iotlb(domain);
+#endif
+
+fail:
+ mutex_unlock(&msm_iommu_lock);
+ return ret;
+}
+
+static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
+ unsigned int len)
+{
+ struct msm_iommu_priv *priv;
+
+ mutex_lock(&msm_iommu_lock);
+
+ priv = to_msm_priv(domain);
+ msm_iommu_pagetable_unmap_range(&priv->pt, va, len);
+
+ __flush_iotlb(domain);
+
+ msm_iommu_pagetable_free_tables(&priv->pt, va, len);
+ mutex_unlock(&msm_iommu_lock);
+
+ return 0;
+}
+
+#ifdef CONFIG_IOMMU_LPAE
+static phys_addr_t msm_iommu_get_phy_from_PAR(unsigned long va, u64 par)
+{
+ phys_addr_t phy;
+
+ /* Upper 28 bits from PAR, lower 12 from VA */
+ phy = (par & 0xFFFFFFF000ULL) | (va & 0x00000FFF);
+
+ return phy;
+}
+#else
+static phys_addr_t msm_iommu_get_phy_from_PAR(unsigned long va, u64 par)
+{
+ phys_addr_t phy;
+
+ /* We are dealing with a supersection */
+ if (par & CB_PAR_SS)
+ phy = (par & 0xFF000000) | (va & 0x00FFFFFF);
+ else /* Upper 20 bits from PAR, lower 12 from VA */
+ phy = (par & 0xFFFFF000) | (va & 0x00000FFF);
+
+ return phy;
+}
+#endif
+
+static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
+ phys_addr_t va)
+{
+ struct msm_iommu_priv *priv;
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ u64 par;
+ void __iomem *base;
+ phys_addr_t ret = 0;
+ int ctx;
+ int i;
+
+ mutex_lock(&msm_iommu_lock);
+
+ priv = to_msm_priv(domain);
+ if (list_empty(&priv->list_attached))
+ goto fail;
+
+ ctx_drvdata = list_entry(priv->list_attached.next,
+ struct msm_iommu_ctx_drvdata, attached_elm);
+ iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
+
+ if (iommu_drvdata->model == MMU_500) {
+ ret = msm_iommu_iova_to_phys_soft(domain, va);
+ mutex_unlock(&msm_iommu_lock);
+ return ret;
+ }
+
+ base = iommu_drvdata->cb_base;
+ ctx = ctx_drvdata->num;
+
+ ret = __enable_clocks(iommu_drvdata);
+ if (ret) {
+ ret = 0; /* 0 indicates translation failed */
+ goto fail;
+ }
+
+ SET_ATS1PR(base, ctx, va & CB_ATS1PR_ADDR);
+ mb();
+ for (i = 0; i < IOMMU_MSEC_TIMEOUT; i += IOMMU_MSEC_STEP)
+ if (GET_CB_ATSR_ACTIVE(base, ctx) == 0)
+ break;
+ else
+ msleep(IOMMU_MSEC_STEP);
+
+ if (i >= IOMMU_MSEC_TIMEOUT) {
+ pr_err("%s: iova to phys timed out on %pa for %s (%s)\n",
+ __func__, &va, iommu_drvdata->name, ctx_drvdata->name);
+ ret = 0;
+ goto fail;
+ }
+
+ par = GET_PAR(base, ctx);
+ __disable_clocks(iommu_drvdata);
+
+ if (par & CB_PAR_F) {
+ unsigned int level = (par & CB_PAR_PLVL) >> CB_PAR_PLVL_SHIFT;
+ pr_err("IOMMU translation fault!\n");
+ pr_err("name = %s\n", iommu_drvdata->name);
+ pr_err("context = %s (%d)\n", ctx_drvdata->name,
+ ctx_drvdata->num);
+ pr_err("Interesting registers:\n");
+ pr_err("PAR = %16llx [%s%s%s%s%s%s%s%sPLVL%u %s]\n", par,
+ (par & CB_PAR_F) ? "F " : "",
+ (par & CB_PAR_TF) ? "TF " : "",
+ (par & CB_PAR_AFF) ? "AFF " : "",
+ (par & CB_PAR_PF) ? "PF " : "",
+ (par & CB_PAR_EF) ? "EF " : "",
+ (par & CB_PAR_TLBMCF) ? "TLBMCF " : "",
+ (par & CB_PAR_TLBLKF) ? "TLBLKF " : "",
+ (par & CB_PAR_ATOT) ? "ATOT " : "",
+ level,
+ (par & CB_PAR_STAGE) ? "S2 " : "S1 ");
+ ret = 0;
+ } else {
+ ret = msm_iommu_get_phy_from_PAR(va, par);
+ }
+
+fail:
+ mutex_unlock(&msm_iommu_lock);
+
+ return ret;
+}
+
+#ifdef CONFIG_IOMMU_LPAE
+static inline void print_ctx_mem_attr_regs(struct msm_iommu_context_reg regs[])
+{
+ pr_err("MAIR0 = %08x MAIR1 = %08x\n",
+ regs[DUMP_REG_MAIR0].val, regs[DUMP_REG_MAIR1].val);
+}
+#else
+static inline void print_ctx_mem_attr_regs(struct msm_iommu_context_reg regs[])
+{
+ pr_err("PRRR = %08x NMRR = %08x\n",
+ regs[DUMP_REG_PRRR].val, regs[DUMP_REG_NMRR].val);
+}
+#endif
+
+void print_ctx_regs(struct msm_iommu_context_reg regs[])
+{
+ uint32_t fsr = regs[DUMP_REG_FSR].val;
+ u64 ttbr;
+ enum dump_reg iter;
+
+ pr_err("FAR = %016llx\n",
+ COMBINE_DUMP_REG(
+ regs[DUMP_REG_FAR1].val,
+ regs[DUMP_REG_FAR0].val));
+ pr_err("PAR = %016llx\n",
+ COMBINE_DUMP_REG(
+ regs[DUMP_REG_PAR1].val,
+ regs[DUMP_REG_PAR0].val));
+ pr_err("FSR = %08x [%s%s%s%s%s%s%s%s%s]\n", fsr,
+ (fsr & 0x02) ? "TF " : "",
+ (fsr & 0x04) ? "AFF " : "",
+ (fsr & 0x08) ? "PF " : "",
+ (fsr & 0x10) ? "EF " : "",
+ (fsr & 0x20) ? "TLBMCF " : "",
+ (fsr & 0x40) ? "TLBLKF " : "",
+ (fsr & 0x80) ? "MHF " : "",
+ (fsr & 0x40000000) ? "SS " : "",
+ (fsr & 0x80000000) ? "MULTI " : "");
+
+ pr_err("FSYNR0 = %08x FSYNR1 = %08x\n",
+ regs[DUMP_REG_FSYNR0].val, regs[DUMP_REG_FSYNR1].val);
+
+ ttbr = COMBINE_DUMP_REG(regs[DUMP_REG_TTBR0_1].val,
+ regs[DUMP_REG_TTBR0_0].val);
+ if (regs[DUMP_REG_TTBR0_1].valid)
+ pr_err("TTBR0 = %016llx\n", ttbr);
+ else
+ pr_err("TTBR0 = %016llx (32b)\n", ttbr);
+
+ ttbr = COMBINE_DUMP_REG(regs[DUMP_REG_TTBR1_1].val,
+ regs[DUMP_REG_TTBR1_0].val);
+
+ if (regs[DUMP_REG_TTBR1_1].valid)
+ pr_err("TTBR1 = %016llx\n", ttbr);
+ else
+ pr_err("TTBR1 = %016llx (32b)\n", ttbr);
+
+ pr_err("SCTLR = %08x ACTLR = %08x\n",
+ regs[DUMP_REG_SCTLR].val, regs[DUMP_REG_ACTLR].val);
+ pr_err("CBAR = %08x CBFRSYNRA = %08x\n",
+ regs[DUMP_REG_CBAR_N].val, regs[DUMP_REG_CBFRSYNRA_N].val);
+ print_ctx_mem_attr_regs(regs);
+
+ for (iter = DUMP_REG_FIRST; iter < MAX_DUMP_REGS; ++iter)
+ if (!regs[iter].valid)
+ pr_err("NOTE: Value actually unknown for %s\n",
+ dump_regs_tbl[iter].name);
+}
+
+static void __print_ctx_regs(struct msm_iommu_drvdata *drvdata, int ctx,
+ unsigned int fsr)
+{
+ void __iomem *base = drvdata->base;
+ void __iomem *cb_base = drvdata->cb_base;
+ bool is_secure = drvdata->sec_id != -1;
+
+ struct msm_iommu_context_reg regs[MAX_DUMP_REGS];
+ unsigned int i;
+ memset(regs, 0, sizeof(regs));
+
+ for (i = DUMP_REG_FIRST; i < MAX_DUMP_REGS; ++i) {
+ struct msm_iommu_context_reg *r = &regs[i];
+ unsigned long regaddr = dump_regs_tbl[i].reg_offset;
+ if (is_secure &&
+ dump_regs_tbl[i].dump_reg_type != DRT_CTX_REG) {
+ r->valid = 0;
+ continue;
+ }
+ r->valid = 1;
+ switch (dump_regs_tbl[i].dump_reg_type) {
+ case DRT_CTX_REG:
+ r->val = GET_CTX_REG(regaddr, cb_base, ctx);
+ break;
+ case DRT_GLOBAL_REG:
+ r->val = GET_GLOBAL_REG(regaddr, base);
+ break;
+ case DRT_GLOBAL_REG_N:
+ r->val = GET_GLOBAL_REG_N(regaddr, ctx, base);
+ break;
+ default:
+ pr_info("Unknown dump_reg_type...\n");
+ r->valid = 0;
+ break;
+ }
+ }
+ print_ctx_regs(regs);
+}
+
+static void print_global_regs(void __iomem *base, unsigned int gfsr)
+{
+ pr_err("GFAR = %016llx\n", GET_GFAR(base));
+
+ pr_err("GFSR = %08x [%s%s%s%s%s%s%s%s%s%s]\n", gfsr,
+ (gfsr & 0x01) ? "ICF " : "",
+ (gfsr & 0x02) ? "USF " : "",
+ (gfsr & 0x04) ? "SMCF " : "",
+ (gfsr & 0x08) ? "UCBF " : "",
+ (gfsr & 0x10) ? "UCIF " : "",
+ (gfsr & 0x20) ? "CAF " : "",
+ (gfsr & 0x40) ? "EF " : "",
+ (gfsr & 0x80) ? "PF " : "",
+ (gfsr & 0x40000000) ? "SS " : "",
+ (gfsr & 0x80000000) ? "MULTI " : "");
+
+ pr_err("GFSYNR0 = %08x\n", GET_GFSYNR0(base));
+ pr_err("GFSYNR1 = %08x\n", GET_GFSYNR1(base));
+ pr_err("GFSYNR2 = %08x\n", GET_GFSYNR2(base));
+}
+
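+/*
+ * Handler for global (non-context-bank) faults: read GFSR and, if a fault
+ * is pending, dump the global fault registers and clear the status.
+ * Secure or powered-off IOMMUs are reported but otherwise left untouched.
+ */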
+irqreturn_t msm_iommu_global_fault_handler(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ struct msm_iommu_drvdata *drvdata;
+ unsigned int gfsr;
+ int ret;
+
+ mutex_lock(&msm_iommu_lock);
+ BUG_ON(!pdev);
+
+ drvdata = dev_get_drvdata(&pdev->dev);
+ BUG_ON(!drvdata);
+
+ if (!drvdata->powered_on) {
+ pr_err("Unexpected IOMMU global fault !!\n");
+ pr_err("name = %s\n", drvdata->name);
+ pr_err("Power is OFF. Can't read global fault information\n");
+ ret = IRQ_HANDLED;
+ goto fail;
+ }
+
+ if (drvdata->sec_id != -1) {
+ pr_err("NON-secure interrupt from secure %s\n", drvdata->name);
+ ret = IRQ_HANDLED;
+ goto fail;
+ }
+
+ ret = __enable_clocks(drvdata);
+ if (ret) {
+ ret = IRQ_NONE;
+ goto fail;
+ }
+
+ gfsr = GET_GFSR(drvdata->base);
+ if (gfsr) {
+ pr_err("Unexpected %s global fault !!\n", drvdata->name);
+ print_global_regs(drvdata->base, gfsr);
+ SET_GFSR(drvdata->base, gfsr);
+ ret = IRQ_HANDLED;
+ } else
+ ret = IRQ_NONE;
+
+ __disable_clocks(drvdata);
+fail:
+ mutex_unlock(&msm_iommu_lock);
+
+ return ret;
+}
+
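+/*
+ * Per-context fault handler: read the context bank's FSR and, if a fault
+ * is pending, report it through report_iommu_fault(). When nobody handles
+ * it (-ENOSYS) the context registers and the software page-table walk
+ * result are dumped for debugging. The FSR is cleared unless the client
+ * returned -EBUSY.
+ */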
+irqreturn_t msm_iommu_fault_handler_v2(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ struct msm_iommu_drvdata *drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ unsigned int fsr;
+ int ret;
+
+ phys_addr_t pagetable_phys;
+ u64 faulty_iova = 0;
+
+ mutex_lock(&msm_iommu_lock);
+
+ BUG_ON(!pdev);
+
+ drvdata = dev_get_drvdata(pdev->dev.parent);
+ BUG_ON(!drvdata);
+
+ ctx_drvdata = dev_get_drvdata(&pdev->dev);
+ BUG_ON(!ctx_drvdata);
+
+ if (!drvdata->powered_on) {
+ pr_err("Unexpected IOMMU page fault!\n");
+ pr_err("name = %s\n", drvdata->name);
+ pr_err("Power is OFF. Unable to read page fault information\n");
+ /*
+ * We cannot determine which context bank caused the issue, so
+ * just return IRQ_HANDLED here to keep the IRQ handling code
+ * happy.
+ */
+ ret = IRQ_HANDLED;
+ goto fail;
+ }
+
+ ret = __enable_clocks(drvdata);
+ if (ret) {
+ ret = IRQ_NONE;
+ goto fail;
+ }
+
+ fsr = GET_FSR(drvdata->cb_base, ctx_drvdata->num);
+ if (fsr) {
+ if (!ctx_drvdata->attached_domain) {
+ pr_err("Bad domain in interrupt handler\n");
+ ret = -ENOSYS;
+ } else {
+ faulty_iova =
+ GET_FAR(drvdata->cb_base, ctx_drvdata->num);
+ ret = report_iommu_fault(ctx_drvdata->attached_domain,
+ &ctx_drvdata->pdev->dev,
+ faulty_iova, 0);
+
+ }
+ if (ret == -ENOSYS) {
+ pr_err("Unexpected IOMMU page fault!\n");
+ pr_err("name = %s\n", drvdata->name);
+ pr_err("context = %s (%d)\n", ctx_drvdata->name,
+ ctx_drvdata->num);
+ pr_err("Interesting registers:\n");
+ __print_ctx_regs(drvdata,
+ ctx_drvdata->num, fsr);
+
+ if (ctx_drvdata->attached_domain) {
+ pagetable_phys = msm_iommu_iova_to_phys_soft(
+ ctx_drvdata->attached_domain,
+ faulty_iova);
+ pr_err("Page table in DDR shows PA = %x\n",
+ (unsigned int) pagetable_phys);
+ }
+ }
+
+ if (ret != -EBUSY)
+ SET_FSR(drvdata->cb_base, ctx_drvdata->num, fsr);
+ ret = IRQ_HANDLED;
+ } else
+ ret = IRQ_NONE;
+
+ __disable_clocks(drvdata);
+fail:
+ mutex_unlock(&msm_iommu_lock);
+
+ return ret;
+}
+
+static phys_addr_t msm_iommu_get_pt_base_addr(struct iommu_domain *domain)
+{
+ struct msm_iommu_priv *priv = to_msm_priv(domain);
+
+ return __pa(priv->pt.fl_table);
+}
+
+#define DUMP_REG_INIT(dump_reg, cb_reg, mbp, drt) \
+ do { \
+ dump_regs_tbl[dump_reg].reg_offset = cb_reg; \
+ dump_regs_tbl[dump_reg].name = #cb_reg; \
+ dump_regs_tbl[dump_reg].must_be_present = mbp; \
+ dump_regs_tbl[dump_reg].dump_reg_type = drt; \
+ } while (0)
+
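+/*
+ * Populate dump_regs_tbl with the offset, name, dump type and
+ * "must be present" flag of every register printed on a fault.
+ */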
+static void msm_iommu_build_dump_regs_table(void)
+{
+ DUMP_REG_INIT(DUMP_REG_FAR0, CB_FAR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_FAR1, CB_FAR + 4, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_PAR0, CB_PAR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_PAR1, CB_PAR + 4, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_FSR, CB_FSR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_FSYNR0, CB_FSYNR0, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_FSYNR1, CB_FSYNR1, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_TTBR0_0, CB_TTBR0, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_TTBR0_1, CB_TTBR0 + 4, 0, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_TTBR1_0, CB_TTBR1, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_TTBR1_1, CB_TTBR1 + 4, 0, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_SCTLR, CB_SCTLR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_ACTLR, CB_ACTLR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_PRRR, CB_PRRR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_NMRR, CB_NMRR, 1, DRT_CTX_REG);
+ DUMP_REG_INIT(DUMP_REG_CBAR_N, CBAR, 1, DRT_GLOBAL_REG_N);
+ DUMP_REG_INIT(DUMP_REG_CBFRSYNRA_N, CBFRSYNRA, 1, DRT_GLOBAL_REG_N);
+}
+
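+/*
+ * pt.redirect mirrors the inverse of the DOMAIN_ATTR_QCOM_COHERENT_HTW_DISABLE
+ * domain attribute. Without CONFIG_IOMMU_PGTABLES_L2 the attribute is
+ * silently ignored.
+ */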
+#ifdef CONFIG_IOMMU_PGTABLES_L2
+static void __do_set_redirect(struct iommu_domain *domain, void *data)
+{
+ struct msm_iommu_priv *priv;
+ int *no_redirect = data;
+
+ mutex_lock(&msm_iommu_lock);
+ priv = to_msm_priv(domain);
+ priv->pt.redirect = !(*no_redirect);
+ mutex_unlock(&msm_iommu_lock);
+}
+
+static void __do_get_redirect(struct iommu_domain *domain, void *data)
+{
+ struct msm_iommu_priv *priv;
+ int *no_redirect = data;
+
+ mutex_lock(&msm_iommu_lock);
+ priv = to_msm_priv(domain);
+ *no_redirect = !priv->pt.redirect;
+ mutex_unlock(&msm_iommu_lock);
+}
+
+#else
+
+static void __do_set_redirect(struct iommu_domain *domain, void *data)
+{
+}
+
+static void __do_get_redirect(struct iommu_domain *domain, void *data)
+{
+}
+#endif
+
+static int msm_iommu_domain_set_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ switch (attr) {
+ case DOMAIN_ATTR_QCOM_COHERENT_HTW_DISABLE:
+ __do_set_redirect(domain, data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int msm_iommu_domain_get_attr(struct iommu_domain *domain,
+ enum iommu_attr attr, void *data)
+{
+ switch (attr) {
+ case DOMAIN_ATTR_QCOM_COHERENT_HTW_DISABLE:
+ __do_get_redirect(domain, data);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static struct iommu_ops msm_iommu_ops = {
+ .domain_alloc = msm_iommu_domain_alloc,
+ .domain_free = msm_iommu_domain_free,
+ .attach_dev = msm_iommu_attach_dev,
+ .detach_dev = msm_iommu_detach_dev,
+ .map = msm_iommu_map,
+ .unmap = msm_iommu_unmap,
+ .map_sg = msm_iommu_map_sg,
+/* .unmap_range = msm_iommu_unmap_range,*/
+ .iova_to_phys = msm_iommu_iova_to_phys,
+/* .get_pt_base_addr = msm_iommu_get_pt_base_addr,*/
+ .pgsize_bitmap = MSM_IOMMU_PGSIZES,
+ .domain_set_attr = msm_iommu_domain_set_attr,
+ .domain_get_attr = msm_iommu_domain_get_attr,
+};
+
+static int __init msm_iommu_init(void)
+{
+ int ret;
+
+ msm_iommu_pagetable_init();
+
+ ret = bus_set_iommu(&platform_bus_type, &msm_iommu_ops);
+ if (ret)
+ return ret;
+
+ msm_iommu_build_dump_regs_table();
+
+ return 0;
+}
+
+subsys_initcall(msm_iommu_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM SMMU v2 Driver");
diff --git a/drivers/iommu/qcom/msm_iommu.c b/drivers/iommu/qcom/msm_iommu.c
new file mode 100644
index 000000000000..08d8d10da2c4
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu.c
@@ -0,0 +1,206 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/export.h>
+#include <linux/iommu.h>
+#include <linux/qcom_iommu.h>
+
+static DEFINE_MUTEX(iommu_list_lock);
+static LIST_HEAD(iommu_list);
+
+#define MRC(reg, processor, op1, crn, crm, op2) \
+__asm__ __volatile__ ( \
+" mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \
+: "=r" (reg))
+
+#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
+#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)
+
+#define RCP15_MAIR0(reg) MRC(reg, p15, 0, c10, c2, 0)
+#define RCP15_MAIR1(reg) MRC(reg, p15, 0, c10, c2, 1)
+
+/* These values come from proc-v7-2level.S */
+#define PRRR_VALUE 0xff0a81a8
+#define NMRR_VALUE 0x40e040e0
+
+/* These values come from proc-v7-3level.S */
+#define MAIR0_VALUE 0xeeaa4400
+#define MAIR1_VALUE 0xff000004
+
+static struct iommu_access_ops *iommu_access_ops;
+
+struct bus_type msm_iommu_sec_bus_type = {
+ .name = "msm_iommu_sec_bus",
+};
+
+void msm_set_iommu_access_ops(struct iommu_access_ops *ops)
+{
+ iommu_access_ops = ops;
+}
+
+struct iommu_access_ops *msm_get_iommu_access_ops(void)
+{
+ BUG_ON(iommu_access_ops == NULL);
+ return iommu_access_ops;
+}
+EXPORT_SYMBOL(msm_get_iommu_access_ops);
+
+void msm_iommu_add_drv(struct msm_iommu_drvdata *drv)
+{
+ mutex_lock(&iommu_list_lock);
+ list_add(&drv->list, &iommu_list);
+ mutex_unlock(&iommu_list_lock);
+}
+
+void msm_iommu_remove_drv(struct msm_iommu_drvdata *drv)
+{
+ mutex_lock(&iommu_list_lock);
+ list_del(&drv->list);
+ mutex_unlock(&iommu_list_lock);
+}
+
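+/*
+ * Context lookup helpers: walk the global IOMMU list and search each
+ * IOMMU's child devices for a context whose "label" matches ctx_name.
+ */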
+static int find_iommu_ctx(struct device *dev, void *data)
+{
+ struct msm_iommu_ctx_drvdata *c;
+
+ c = dev_get_drvdata(dev);
+ if (!c || !c->name)
+ return 0;
+
+ return !strcmp(data, c->name);
+}
+
+static struct device *find_context(struct device *dev, const char *name)
+{
+ return device_find_child(dev, (void *)name, find_iommu_ctx);
+}
+
+struct device *msm_iommu_get_ctx(const char *ctx_name)
+{
+ struct msm_iommu_drvdata *drv;
+ struct device *dev = NULL;
+
+ mutex_lock(&iommu_list_lock);
+ list_for_each_entry(drv, &iommu_list, list) {
+ dev = find_context(drv->dev, ctx_name);
+ if (dev)
+ break;
+ }
+ mutex_unlock(&iommu_list_lock);
+
+ put_device(dev);
+
+ if (!dev || !dev_get_drvdata(dev)) {
+ pr_debug("Could not find context <%s>\n", ctx_name);
+ dev = ERR_PTR(-EPROBE_DEFER);
+ }
+
+ return dev;
+}
+EXPORT_SYMBOL(msm_iommu_get_ctx);
+
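+/*
+ * Memory attribute helpers. Depending on whether the CPU (CONFIG_ARM_LPAE)
+ * and the IOMMU driver (CONFIG_IOMMU_LPAE) use the LPAE format, the
+ * PRRR/NMRR or MAIR0/MAIR1 values are either read from the coprocessor
+ * registers or taken from the hardcoded proc-v7 values above.
+ */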
+#ifdef CONFIG_ARM
+#ifdef CONFIG_IOMMU_LPAE
+#ifdef CONFIG_ARM_LPAE
+/*
+ * If CONFIG_ARM_LPAE and CONFIG_IOMMU_LPAE are both enabled, we can read
+ * the MAIR registers directly.
+ */
+u32 msm_iommu_get_mair0(void)
+{
+ unsigned int mair0;
+
+ RCP15_MAIR0(mair0);
+ return mair0;
+}
+
+u32 msm_iommu_get_mair1(void)
+{
+ unsigned int mair1;
+
+ RCP15_MAIR1(mair1);
+ return mair1;
+}
+#else
+/*
+ * However, if CONFIG_ARM_LPAE is not enabled but CONFIG_IOMMU_LPAE is
+ * enabled, we just use the hardcoded values directly.
+ */
+u32 msm_iommu_get_mair0(void)
+{
+ return MAIR0_VALUE;
+}
+
+u32 msm_iommu_get_mair1(void)
+{
+ return MAIR1_VALUE;
+}
+#endif
+
+#else
+#ifdef CONFIG_ARM_LPAE
+/*
+ * If CONFIG_ARM_LPAE is enabled and CONFIG_IOMMU_LPAE is disabled,
+ * we must use the hardcoded values.
+ */
+u32 msm_iommu_get_prrr(void)
+{
+ return PRRR_VALUE;
+}
+
+u32 msm_iommu_get_nmrr(void)
+{
+ return NMRR_VALUE;
+}
+#else
+/*
+ * If both CONFIG_ARM_LPAE and CONFIG_IOMMU_LPAE are disabled,
+ * we can read the registers directly.
+ */
+#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
+#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)
+
+u32 msm_iommu_get_prrr(void)
+{
+ u32 prrr;
+
+ RCP15_PRRR(prrr);
+ return prrr;
+}
+
+u32 msm_iommu_get_nmrr(void)
+{
+ u32 nmrr;
+
+ RCP15_NMRR(nmrr);
+ return nmrr;
+}
+#endif
+#endif
+#endif
+#ifdef CONFIG_ARM64
+u32 msm_iommu_get_prrr(void)
+{
+ return PRRR_VALUE;
+}
+
+u32 msm_iommu_get_nmrr(void)
+{
+ return NMRR_VALUE;
+}
+#endif
diff --git a/drivers/iommu/qcom/msm_iommu_dev-v1.c b/drivers/iommu/qcom/msm_iommu_dev-v1.c
new file mode 100644
index 000000000000..9520cc296e3f
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_dev-v1.c
@@ -0,0 +1,627 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/iommu.h>
+#include <linux/interrupt.h>
+#include <linux/msm-bus.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+
+#include "msm_iommu_hw-v1.h"
+#include <linux/qcom_iommu.h>
+#include "msm_iommu_perfmon.h"
+
+static const struct of_device_id msm_iommu_ctx_match_table[];
+
+#ifdef CONFIG_IOMMU_LPAE
+static const char *BFB_REG_NODE_NAME = "qcom,iommu-lpae-bfb-regs";
+static const char *BFB_DATA_NODE_NAME = "qcom,iommu-lpae-bfb-data";
+#else
+static const char *BFB_REG_NODE_NAME = "qcom,iommu-bfb-regs";
+static const char *BFB_DATA_NODE_NAME = "qcom,iommu-bfb-data";
+#endif
+
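+/*
+ * Read the BFB register/value pairs from the device tree. The two
+ * properties must have the same length; the result is stored in
+ * drvdata->bfb_settings.
+ */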
+static int msm_iommu_parse_bfb_settings(struct platform_device *pdev,
+ struct msm_iommu_drvdata *drvdata)
+{
+ struct msm_iommu_bfb_settings *bfb_settings;
+ u32 nreg, nval;
+ int ret;
+
+ /*
+ * It is not valid for a device to have the BFB_REG_NODE_NAME
+ * property but not the BFB_DATA_NODE_NAME property, and vice versa.
+ */
+ if (!of_get_property(pdev->dev.of_node, BFB_REG_NODE_NAME, &nreg)) {
+ if (of_get_property(pdev->dev.of_node, BFB_DATA_NODE_NAME,
+ &nval))
+ return -EINVAL;
+ return 0;
+ }
+
+ if (!of_get_property(pdev->dev.of_node, BFB_DATA_NODE_NAME, &nval))
+ return -EINVAL;
+
+ if (nreg >= sizeof(bfb_settings->regs))
+ return -EINVAL;
+
+ if (nval >= sizeof(bfb_settings->data))
+ return -EINVAL;
+
+ if (nval != nreg)
+ return -EINVAL;
+
+ bfb_settings = devm_kzalloc(&pdev->dev, sizeof(*bfb_settings),
+ GFP_KERNEL);
+ if (!bfb_settings)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(pdev->dev.of_node,
+ BFB_REG_NODE_NAME,
+ bfb_settings->regs,
+ nreg / sizeof(*bfb_settings->regs));
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_array(pdev->dev.of_node,
+ BFB_DATA_NODE_NAME,
+ bfb_settings->data,
+ nval / sizeof(*bfb_settings->data));
+ if (ret)
+ return ret;
+
+ bfb_settings->length = nreg / sizeof(*bfb_settings->regs);
+
+ drvdata->bfb_settings = bfb_settings;
+
+ return 0;
+}
+
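+/*
+ * Register a bus-scaling client if the node carries "qcom,msm-bus,name";
+ * nodes without bus scaling data are simply skipped.
+ */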
+static int __get_bus_vote_client(struct platform_device *pdev,
+ struct msm_iommu_drvdata *drvdata)
+{
+ int ret = 0;
+ struct msm_bus_scale_pdata *bs_table;
+ const char *dummy;
+
+ /* Check whether bus scaling has been specified for this node */
+ ret = of_property_read_string(pdev->dev.of_node, "qcom,msm-bus,name",
+ &dummy);
+ if (ret)
+ return 0;
+
+ bs_table = msm_bus_cl_get_pdata(pdev);
+ if (bs_table) {
+ drvdata->bus_client = msm_bus_scale_register_client(bs_table);
+ if (IS_ERR(&drvdata->bus_client)) {
+ pr_err("%s(): Bus client register failed.\n", __func__);
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+static void __put_bus_vote_client(struct msm_iommu_drvdata *drvdata)
+{
+ msm_bus_scale_unregister_client(drvdata->bus_client);
+ drvdata->bus_client = 0;
+}
+
+/*
+ * CONFIG_IOMMU_NON_SECURE allows us to override the secure designation of
+ * SMMUs in the device tree. With this config enabled, all SMMUs are
+ * programmed by this driver.
+ */
+#ifdef CONFIG_IOMMU_NON_SECURE
+static inline void get_secure_id(struct device_node *node,
+ struct msm_iommu_drvdata *drvdata)
+{
+}
+
+static inline void get_secure_ctx(struct device_node *node,
+ struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+ ctx_drvdata->secure_context = 0;
+}
+#else
+static void get_secure_id(struct device_node *node,
+ struct msm_iommu_drvdata *drvdata)
+{
+ if (msm_iommu_get_scm_call_avail())
+ of_property_read_u32(node, "qcom,iommu-secure-id",
+ &drvdata->sec_id);
+}
+
+static void get_secure_ctx(struct device_node *node,
+ struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+ u32 secure_ctx = 0;
+
+ if (msm_iommu_get_scm_call_avail())
+ secure_ctx = of_property_read_bool(node, "qcom,secure-context");
+
+ ctx_drvdata->secure_context = secure_ctx;
+}
+#endif
+
+static int msm_iommu_parse_dt(struct platform_device *pdev,
+ struct msm_iommu_drvdata *drvdata)
+{
+ struct device_node *child;
+ int ret;
+
+ drvdata->dev = &pdev->dev;
+
+ ret = __get_bus_vote_client(pdev, drvdata);
+ if (ret)
+ goto fail;
+
+ ret = msm_iommu_parse_bfb_settings(pdev, drvdata);
+ if (ret)
+ goto fail;
+
+ for_each_available_child_of_node(pdev->dev.of_node, child)
+ drvdata->ncb++;
+
+ ret = of_property_read_string(pdev->dev.of_node, "label",
+ &drvdata->name);
+ if (ret)
+ goto fail;
+
+ drvdata->sec_id = -1;
+ get_secure_id(pdev->dev.of_node, drvdata);
+
+ drvdata->halt_enabled = of_property_read_bool(pdev->dev.of_node,
+ "qcom,iommu-enable-halt");
+
+ msm_iommu_add_drv(drvdata);
+
+ return 0;
+
+fail:
+ __put_bus_vote_client(drvdata);
+ return ret;
+}
+
+static int msm_iommu_pmon_parse_dt(struct platform_device *pdev,
+ struct iommu_pmon *pmon_info)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+ unsigned int cls_prop_size;
+ int ret, irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0 && irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (irq <= 0) {
+ pmon_info->iommu.evt_irq = -1;
+ return irq;
+ }
+
+ pmon_info->iommu.evt_irq = irq;
+
+ ret = of_property_read_u32(np, "qcom,iommu-pmu-ngroups",
+ &pmon_info->num_groups);
+ if (ret) {
+ dev_err(dev, "Error reading qcom,iommu-pmu-ngroups\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(np, "qcom,iommu-pmu-ncounters",
+ &pmon_info->num_counters);
+ if (ret) {
+ dev_err(dev, "Error reading qcom,iommu-pmu-ncounters\n");
+ return ret;
+ }
+
+ if (!of_get_property(np, "qcom,iommu-pmu-event-classes",
+ &cls_prop_size)) {
+ dev_err(dev, "Error reading qcom,iommu-pmu-event-classes\n");
+ return -EINVAL;
+ }
+
+ pmon_info->event_cls_supported = devm_kzalloc(dev, cls_prop_size,
+ GFP_KERNEL);
+ if (!pmon_info->event_cls_supported) {
+ dev_err(dev, "Unable to get memory for event class array\n");
+ return -ENOMEM;
+ }
+
+ pmon_info->nevent_cls_supported = cls_prop_size / sizeof(u32);
+
+ ret = of_property_read_u32_array(np, "qcom,iommu-pmu-event-classes",
+ pmon_info->event_cls_supported,
+ pmon_info->nevent_cls_supported);
+ if (ret) {
+ dev_err(dev, "Error reading qcom,iommu-pmu-event-classes\n");
+ return ret;
+ }
+
+ return 0;
+}
+
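+/*
+ * Probe: map "iommu_base" (and the optional "smmu_local_base"), grab the
+ * iface/core clocks, locate the context bank space (default offset 0x8000),
+ * parse the DT, hook up the performance monitor and global fault IRQs, and
+ * finally populate the child context-bank devices.
+ */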
+static int msm_iommu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = pdev->dev.of_node;
+ struct iommu_pmon *pmon_info;
+ struct msm_iommu_drvdata *drvdata;
+ struct resource *res;
+ int ret;
+ int global_cfg_irq, global_client_irq;
+ u32 temp;
+ unsigned long rate;
+
+ drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
+ drvdata->dev = dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iommu_base");
+ drvdata->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(drvdata->base))
+ return PTR_ERR(drvdata->base);
+
+ drvdata->glb_base = drvdata->base;
+ drvdata->phys_base = res->start;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "smmu_local_base");
+ drvdata->smmu_local_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(drvdata->smmu_local_base) &&
+ PTR_ERR(drvdata->smmu_local_base) != -EPROBE_DEFER)
+ drvdata->smmu_local_base = NULL;
+
+ if (of_device_is_compatible(np, "qcom,msm-mmu-500"))
+ drvdata->model = MMU_500;
+
+ drvdata->iface = devm_clk_get(dev, "iface_clk");
+ if (IS_ERR(drvdata->iface))
+ return PTR_ERR(drvdata->iface);
+
+ drvdata->core = devm_clk_get(dev, "core_clk");
+ if (IS_ERR(drvdata->core))
+ return PTR_ERR(drvdata->core);
+
+ if (!of_property_read_u32(np, "qcom,cb-base-offset", &temp))
+ drvdata->cb_base = drvdata->base + temp;
+ else
+ drvdata->cb_base = drvdata->base + 0x8000;
+
+ rate = clk_get_rate(drvdata->core);
+ if (!rate) {
+ rate = clk_round_rate(drvdata->core, 1000);
+ clk_set_rate(drvdata->core, rate);
+ }
+
+ dev_info(&pdev->dev, "iface: %lu, core: %lu\n",
+ clk_get_rate(drvdata->iface), clk_get_rate(drvdata->core));
+
+ ret = msm_iommu_parse_dt(pdev, drvdata);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "device %s (model: %d) mapped at %p, with %d ctx banks\n",
+ drvdata->name, drvdata->model, drvdata->base, drvdata->ncb);
+
+ platform_set_drvdata(pdev, drvdata);
+
+ pmon_info = msm_iommu_pm_alloc(dev);
+ if (pmon_info) {
+ ret = msm_iommu_pmon_parse_dt(pdev, pmon_info);
+ if (ret) {
+ msm_iommu_pm_free(dev);
+ dev_info(dev, "%s: pmon not available\n",
+ drvdata->name);
+ } else {
+ pmon_info->iommu.base = drvdata->base;
+ pmon_info->iommu.ops = msm_get_iommu_access_ops();
+ pmon_info->iommu.hw_ops = iommu_pm_get_hw_ops_v1();
+ pmon_info->iommu.iommu_name = drvdata->name;
+ ret = msm_iommu_pm_iommu_register(pmon_info);
+ if (ret) {
+ dev_err(dev, "%s iommu register fail\n",
+ drvdata->name);
+ msm_iommu_pm_free(dev);
+ } else {
+ dev_dbg(dev, "%s iommu registered for pmon\n",
+ pmon_info->iommu.iommu_name);
+ }
+ }
+ }
+
+ global_cfg_irq = platform_get_irq_byname(pdev, "global_cfg_NS_irq");
+ if (global_cfg_irq < 0 && global_cfg_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (global_cfg_irq > 0) {
+ ret = devm_request_threaded_irq(dev, global_cfg_irq,
+ NULL,
+ msm_iommu_global_fault_handler,
+ IRQF_ONESHOT | IRQF_SHARED |
+ IRQF_TRIGGER_RISING,
+ "msm_iommu_global_cfg_irq",
+ pdev);
+ if (ret < 0)
+ dev_err(dev, "Request Global CFG IRQ %d failed with ret=%d\n",
+ global_cfg_irq, ret);
+ }
+
+ global_client_irq =
+ platform_get_irq_byname(pdev, "global_client_NS_irq");
+ if (global_client_irq < 0 && global_client_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (global_client_irq > 0) {
+ ret = devm_request_threaded_irq(dev, global_client_irq,
+ NULL,
+ msm_iommu_global_fault_handler,
+ IRQF_ONESHOT | IRQF_SHARED |
+ IRQF_TRIGGER_RISING,
+ "msm_iommu_global_client_irq",
+ pdev);
+ if (ret < 0)
+ dev_err(dev, "Request Global Client IRQ %d failed with ret=%d\n",
+ global_client_irq, ret);
+ }
+
+ ret = of_platform_populate(np, msm_iommu_ctx_match_table, NULL, dev);
+ if (ret)
+ dev_err(dev, "Failed to create iommu context device\n");
+
+ return ret;
+}
+
+static int msm_iommu_remove(struct platform_device *pdev)
+{
+ struct msm_iommu_drvdata *drv;
+
+ msm_iommu_pm_iommu_unregister(&pdev->dev);
+ msm_iommu_pm_free(&pdev->dev);
+
+ drv = platform_get_drvdata(pdev);
+ if (drv) {
+ __put_bus_vote_client(drv);
+ msm_iommu_remove_drv(drv);
+ platform_set_drvdata(pdev, NULL);
+ }
+
+ return 0;
+}
+
+static int msm_iommu_ctx_parse_dt(struct platform_device *pdev,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+ struct resource *r, rp;
+ int irq = 0, ret = 0;
+ struct msm_iommu_drvdata *drvdata;
+ u32 nsid;
+ u32 n_sid_mask;
+ unsigned long cb_offset;
+
+ drvdata = dev_get_drvdata(pdev->dev.parent);
+
+ get_secure_ctx(pdev->dev.of_node, drvdata, ctx_drvdata);
+
+ if (ctx_drvdata->secure_context) {
+ irq = platform_get_irq(pdev, 1);
+ if (irq < 0 && irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (irq > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ msm_iommu_secure_fault_handler_v2,
+ IRQF_ONESHOT | IRQF_SHARED,
+ "msm_iommu_secure_irq", pdev);
+ if (ret) {
+ pr_err("Request IRQ %d failed with ret=%d\n",
+ irq, ret);
+ return ret;
+ }
+ }
+ } else {
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0 && irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (irq > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ msm_iommu_fault_handler_v2,
+ IRQF_ONESHOT | IRQF_SHARED,
+ "msm_iommu_nonsecure_irq", pdev);
+ if (ret) {
+ pr_err("Request IRQ %d failed with ret=%d\n",
+ irq, ret);
+ goto out;
+ }
+ }
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = of_address_to_resource(pdev->dev.parent->of_node, 0, &rp);
+ if (ret)
+ goto out;
+
+ /*
+ * Calculate the context bank number using the base addresses.
+ * Typically CB0 sits at offset 0x8000 from the IOMMU base when the
+ * number of CBs is <= 8, so assume 0x8000 unless the offset is
+ * stated explicitly.
+ */
+ cb_offset = drvdata->cb_base - drvdata->base;
+ ctx_drvdata->num = (r->start - rp.start - cb_offset) >> CTX_SHIFT;
+
+ if (of_property_read_string(pdev->dev.of_node, "label",
+ &ctx_drvdata->name))
+ ctx_drvdata->name = dev_name(&pdev->dev);
+
+ if (!of_get_property(pdev->dev.of_node, "qcom,iommu-ctx-sids", &nsid)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (nsid >= sizeof(ctx_drvdata->sids)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (of_property_read_u32_array(pdev->dev.of_node, "qcom,iommu-ctx-sids",
+ ctx_drvdata->sids,
+ nsid / sizeof(*ctx_drvdata->sids))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ctx_drvdata->nsid = nsid;
+ ctx_drvdata->asid = -1;
+
+ if (!of_get_property(pdev->dev.of_node, "qcom,iommu-sid-mask",
+ &n_sid_mask)) {
+ memset(ctx_drvdata->sid_mask, 0, MAX_NUM_SMR);
+ goto out;
+ }
+
+ if (n_sid_mask != nsid) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (of_property_read_u32_array(pdev->dev.of_node, "qcom,iommu-sid-mask",
+ ctx_drvdata->sid_mask,
+ n_sid_mask / sizeof(*ctx_drvdata->sid_mask))) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ctx_drvdata->n_sid_mask = n_sid_mask;
+
+out:
+ return ret;
+}
+
+static int msm_iommu_ctx_probe(struct platform_device *pdev)
+{
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ int ret;
+
+ if (!pdev->dev.parent)
+ return -EINVAL;
+
+ ctx_drvdata = devm_kzalloc(&pdev->dev, sizeof(*ctx_drvdata),
+ GFP_KERNEL);
+ if (!ctx_drvdata)
+ return -ENOMEM;
+
+ ctx_drvdata->pdev = pdev;
+ INIT_LIST_HEAD(&ctx_drvdata->attached_elm);
+
+ ret = msm_iommu_ctx_parse_dt(pdev, ctx_drvdata);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, ctx_drvdata);
+
+ dev_info(&pdev->dev, "context %s using bank %d\n",
+ ctx_drvdata->name, ctx_drvdata->num);
+
+ return 0;
+}
+
+static int msm_iommu_ctx_remove(struct platform_device *pdev)
+{
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id msm_iommu_match_table[] = {
+ { .compatible = "qcom,msm-smmu-v1", },
+ { .compatible = "qcom,msm-smmu-v2", },
+ {}
+};
+
+static struct platform_driver msm_iommu_driver = {
+ .driver = {
+ .name = "msm_iommu",
+ .of_match_table = msm_iommu_match_table,
+ },
+ .probe = msm_iommu_probe,
+ .remove = msm_iommu_remove,
+};
+
+static const struct of_device_id msm_iommu_ctx_match_table[] = {
+ { .compatible = "qcom,msm-smmu-v1-ctx", },
+ { .compatible = "qcom,msm-smmu-v2-ctx", },
+ {}
+};
+
+static struct platform_driver msm_iommu_ctx_driver = {
+ .driver = {
+ .name = "msm_iommu_ctx",
+ .of_match_table = msm_iommu_ctx_match_table,
+ },
+ .probe = msm_iommu_ctx_probe,
+ .remove = msm_iommu_ctx_remove,
+};
+
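+/*
+ * Register the IOMMU driver and its context-bank companion driver; the
+ * first is unregistered again if the second fails to register.
+ */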
+static int __init msm_iommu_driver_init(void)
+{
+ int ret;
+
+ msm_iommu_check_scm_call_avail();
+ msm_set_iommu_access_ops(&iommu_access_ops_v1);
+ msm_iommu_sec_set_access_ops(&iommu_access_ops_v1);
+
+ ret = platform_driver_register(&msm_iommu_driver);
+ if (ret) {
+ pr_err("Failed to register IOMMU driver\n");
+ return ret;
+ }
+
+ ret = platform_driver_register(&msm_iommu_ctx_driver);
+ if (ret) {
+ pr_err("Failed to register IOMMU context driver\n");
+ platform_driver_unregister(&msm_iommu_driver);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit msm_iommu_driver_exit(void)
+{
+ platform_driver_unregister(&msm_iommu_ctx_driver);
+ platform_driver_unregister(&msm_iommu_driver);
+}
+
+subsys_initcall(msm_iommu_driver_init);
+module_exit(msm_iommu_driver_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/qcom/msm_iommu_hw-v1.h b/drivers/iommu/qcom/msm_iommu_hw-v1.h
new file mode 100644
index 000000000000..53e2f4874adb
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_hw-v1.h
@@ -0,0 +1,2320 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_IOMMU_HW_V2_H
+#define __ARCH_ARM_MACH_MSM_IOMMU_HW_V2_H
+
+#define CTX_SHIFT 12
+
+#define CTX_REG(reg, base, ctx) \
+ ((base) + (reg) + ((ctx) << CTX_SHIFT))
+#define GLB_REG(reg, base) \
+ ((base) + (reg))
+#define GLB_REG_N(b, n, r) GLB_REG(b, ((r) + ((n) << 2)))
+#define GLB_FIELD(b, r) ((b) + (r))
+#define GLB_CTX_FIELD(b, c, r) (GLB_FIELD(b, r) + ((c) << CTX_SHIFT))
+#define GLB_FIELD_N(b, n, r) (GLB_FIELD(b, r) + ((n) << 2))
+
+
+#define GET_GLOBAL_REG(reg, base) (readl_relaxed(GLB_REG(reg, base)))
+#define GET_GLOBAL_REG_Q(reg, base) (readq_relaxed(GLB_REG(reg, base)))
+#define GET_CTX_REG(reg, base, ctx) (readl_relaxed(CTX_REG(reg, base, ctx)))
+#define GET_CTX_REG_Q(reg, base, ctx) (readq_relaxed(CTX_REG(reg, base, ctx)))
+
+#define SET_GLOBAL_REG(reg, base, val) writel_relaxed((val), GLB_REG(reg, base))
+#define SET_GLOBAL_REG_Q(reg, base, val) \
+ (writeq_relaxed((val), GLB_REG(reg, base)))
+
+#define SET_CTX_REG(reg, base, ctx, val) \
+ writel_relaxed((val), (CTX_REG(reg, base, ctx)))
+#define SET_CTX_REG_Q(reg, base, ctx, val) \
+ writeq_relaxed((val), CTX_REG(reg, base, ctx))
+
+/* Wrappers for numbered registers */
+#define SET_GLOBAL_REG_N(b, n, r, v) writel_relaxed(((v)), GLB_REG_N(b, n, r))
+#define GET_GLOBAL_REG_N(b, n, r) (readl_relaxed(GLB_REG_N(b, n, r)))
+
+/* Field wrappers */
+#define GET_GLOBAL_FIELD(b, r, F) \
+ GET_FIELD(GLB_FIELD(b, r), r##_##F##_MASK, r##_##F##_SHIFT)
+#define GET_CONTEXT_FIELD(b, c, r, F) \
+ GET_FIELD(GLB_CTX_FIELD(b, c, r), \
+ r##_##F##_MASK, r##_##F##_SHIFT)
+#define GET_CONTEXT_FIELD_Q(b, c, r, F) \
+ GET_FIELD_Q(GLB_CTX_FIELD(b, c, r), \
+ r##_##F##_MASK, r##_##F##_SHIFT)
+
+#define SET_GLOBAL_FIELD(b, r, F, v) \
+ SET_FIELD(GLB_FIELD(b, r), r##_##F##_MASK, r##_##F##_SHIFT, (v))
+#define SET_CONTEXT_FIELD(b, c, r, F, v) \
+ SET_FIELD(GLB_CTX_FIELD(b, c, r), \
+ r##_##F##_MASK, r##_##F##_SHIFT, (v))
+#define SET_CONTEXT_FIELD_Q(b, c, r, F, v) \
+ SET_FIELD_Q(GLB_CTX_FIELD(b, c, r), \
+ r##_##F##_MASK, r##_##F##_SHIFT, (v))
+
+/* Wrappers for numbered field registers */
+#define SET_GLOBAL_FIELD_N(b, n, r, F, v) \
+ SET_FIELD(GLB_FIELD_N(b, n, r), r##_##F##_MASK, r##_##F##_SHIFT, v)
+#define GET_GLOBAL_FIELD_N(b, n, r, F) \
+ GET_FIELD(GLB_FIELD_N(b, n, r), r##_##F##_MASK, r##_##F##_SHIFT)
+
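+/*
+ * Low-level field accessors: relaxed read-modify-write of a
+ * (mask << shift) bitfield.
+ */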
+#define GET_FIELD(addr, mask, shift) ((readl_relaxed(addr) >> (shift)) & (mask))
+#define GET_FIELD_Q(addr, mask, shift) \
+ ((readq_relaxed(addr) >> (shift)) & (mask))
+
+#define SET_FIELD(addr, mask, shift, v) \
+do { \
+ int t = readl_relaxed(addr); \
+ writel_relaxed((t & ~((mask) << (shift))) + (((v) & \
+ (mask)) << (shift)), addr); \
+} while (0)
+
+#define SET_FIELD_Q(addr, mask, shift, v) \
+do { \
+ u64 t = readq_relaxed(addr); \
+ writeq_relaxed((t & ~(((u64) mask) << (shift))) + (((v) & \
+ ((u64) mask)) << (shift)), addr); \
+} while (0)
+
+
+/* Global register space 0 setters / getters */
+#define SET_CR0(b, v) SET_GLOBAL_REG(CR0, (b), (v))
+#define SET_SCR1(b, v) SET_GLOBAL_REG(SCR1, (b), (v))
+#define SET_CR2(b, v) SET_GLOBAL_REG(CR2, (b), (v))
+#define SET_ACR(b, v) SET_GLOBAL_REG(ACR, (b), (v))
+#define SET_IDR0(b, N, v) SET_GLOBAL_REG(IDR0, (b), (v))
+#define SET_IDR1(b, N, v) SET_GLOBAL_REG(IDR1, (b), (v))
+#define SET_IDR2(b, N, v) SET_GLOBAL_REG(IDR2, (b), (v))
+#define SET_IDR7(b, N, v) SET_GLOBAL_REG(IDR7, (b), (v))
+#define SET_GFAR(b, v) SET_GLOBAL_REG_Q(GFAR, (b), (v))
+#define SET_GFSR(b, v) SET_GLOBAL_REG(GFSR, (b), (v))
+#define SET_GFSRRESTORE(b, v) SET_GLOBAL_REG(GFSRRESTORE, (b), (v))
+#define SET_GFSYNR0(b, v) SET_GLOBAL_REG(GFSYNR0, (b), (v))
+#define SET_GFSYNR1(b, v) SET_GLOBAL_REG(GFSYNR1, (b), (v))
+#define SET_GFSYNR2(b, v) SET_GLOBAL_REG(GFSYNR2, (b), (v))
+#define SET_TLBIVMID(b, v) SET_GLOBAL_REG(TLBIVMID, (b), (v))
+#define SET_TLBIALLNSNH(b, v) SET_GLOBAL_REG(TLBIALLNSNH, (b), (v))
+#define SET_TLBIALLH(b, v) SET_GLOBAL_REG(TLBIALLH, (b), (v))
+#define SET_TLBGSYNC(b, v) SET_GLOBAL_REG(TLBGSYNC, (b), (v))
+#define SET_TLBGSTATUS(b, v) SET_GLOBAL_REG(TLBGSTATUS, (b), (v))
+#define SET_TLBIVAH(b, v) SET_GLOBAL_REG(TLBIVAH, (b), (v))
+#define SET_GATS1UR(b, v) SET_GLOBAL_REG(GATS1UR, (b), (v))
+#define SET_GATS1UW(b, v) SET_GLOBAL_REG(GATS1UW, (b), (v))
+#define SET_GATS1PR(b, v) SET_GLOBAL_REG(GATS1PR, (b), (v))
+#define SET_GATS1PW(b, v) SET_GLOBAL_REG(GATS1PW, (b), (v))
+#define SET_GATS12UR(b, v) SET_GLOBAL_REG(GATS12UR, (b), (v))
+#define SET_GATS12UW(b, v) SET_GLOBAL_REG(GATS12UW, (b), (v))
+#define SET_GATS12PR(b, v) SET_GLOBAL_REG(GATS12PR, (b), (v))
+#define SET_GATS12PW(b, v) SET_GLOBAL_REG(GATS12PW, (b), (v))
+#define SET_GPAR(b, v) SET_GLOBAL_REG(GPAR, (b), (v))
+#define SET_GATSR(b, v) SET_GLOBAL_REG(GATSR, (b), (v))
+#define SET_NSCR0(b, v) SET_GLOBAL_REG(NSCR0, (b), (v))
+#define SET_NSCR2(b, v) SET_GLOBAL_REG(NSCR2, (b), (v))
+#define SET_NSACR(b, v) SET_GLOBAL_REG(NSACR, (b), (v))
+#define SET_NSGFAR(b, v) SET_GLOBAL_REG(NSGFAR, (b), (v))
+#define SET_NSGFSRRESTORE(b, v) SET_GLOBAL_REG(NSGFSRRESTORE, (b), (v))
+#define SET_PMCR(b, v) SET_GLOBAL_REG(PMCR, (b), (v))
+#define SET_SMR_N(b, N, v) SET_GLOBAL_REG_N(SMR, N, (b), (v))
+#define SET_S2CR_N(b, N, v) SET_GLOBAL_REG_N(S2CR, N, (b), (v))
+
+#define GET_CR0(b) GET_GLOBAL_REG(CR0, (b))
+#define GET_SCR1(b) GET_GLOBAL_REG(SCR1, (b))
+#define GET_CR2(b) GET_GLOBAL_REG(CR2, (b))
+#define GET_ACR(b) GET_GLOBAL_REG(ACR, (b))
+#define GET_IDR0(b, N) GET_GLOBAL_REG(IDR0, (b))
+#define GET_IDR1(b, N) GET_GLOBAL_REG(IDR1, (b))
+#define GET_IDR2(b, N) GET_GLOBAL_REG(IDR2, (b))
+#define GET_IDR7(b, N) GET_GLOBAL_REG(IDR7, (b))
+#define GET_GFAR(b) GET_GLOBAL_REG_Q(GFAR, (b))
+#define GET_GFSR(b) GET_GLOBAL_REG(GFSR, (b))
+#define GET_GFSRRESTORE(b) GET_GLOBAL_REG(GFSRRESTORE, (b))
+#define GET_GFSYNR0(b) GET_GLOBAL_REG(GFSYNR0, (b))
+#define GET_GFSYNR1(b) GET_GLOBAL_REG(GFSYNR1, (b))
+#define GET_GFSYNR2(b) GET_GLOBAL_REG(GFSYNR2, (b))
+#define GET_TLBIVMID(b) GET_GLOBAL_REG(TLBIVMID, (b))
+#define GET_TLBIALLNSNH(b) GET_GLOBAL_REG(TLBIALLNSNH, (b))
+#define GET_TLBIALLH(b) GET_GLOBAL_REG(TLBIALLH, (b))
+#define GET_TLBGSYNC(b) GET_GLOBAL_REG(TLBGSYNC, (b))
+#define GET_TLBGSTATUS(b) GET_GLOBAL_REG(TLBGSTATUS, (b))
+#define GET_TLBIVAH(b) GET_GLOBAL_REG(TLBIVAH, (b))
+#define GET_GATS1UR(b) GET_GLOBAL_REG(GATS1UR, (b))
+#define GET_GATS1UW(b) GET_GLOBAL_REG(GATS1UW, (b))
+#define GET_GATS1PR(b) GET_GLOBAL_REG(GATS1PR, (b))
+#define GET_GATS1PW(b) GET_GLOBAL_REG(GATS1PW, (b))
+#define GET_GATS12UR(b) GET_GLOBAL_REG(GATS12UR, (b))
+#define GET_GATS12UW(b) GET_GLOBAL_REG(GATS12UW, (b))
+#define GET_GATS12PR(b) GET_GLOBAL_REG(GATS12PR, (b))
+#define GET_GATS12PW(b) GET_GLOBAL_REG(GATS12PW, (b))
+#define GET_GPAR(b) GET_GLOBAL_REG(GPAR, (b))
+#define GET_GATSR(b) GET_GLOBAL_REG(GATSR, (b))
+#define GET_NSCR0(b) GET_GLOBAL_REG(NSCR0, (b))
+#define GET_NSCR2(b) GET_GLOBAL_REG(NSCR2, (b))
+#define GET_NSACR(b) GET_GLOBAL_REG(NSACR, (b))
+#define GET_PMCR(b) GET_GLOBAL_REG(PMCR, (b))
+#define GET_SMR_N(b, N) GET_GLOBAL_REG_N(SMR, N, (b))
+#define GET_S2CR_N(b, N) GET_GLOBAL_REG_N(S2CR, N, (b))
+
+/* Global register space 1 setters / getters */
+#define SET_CBAR_N(b, N, v) SET_GLOBAL_REG_N(CBAR, N, (b), (v))
+#define SET_CBFRSYNRA_N(b, N, v) SET_GLOBAL_REG_N(CBFRSYNRA, N, (b), (v))
+
+#define GET_CBAR_N(b, N) GET_GLOBAL_REG_N(CBAR, N, (b))
+#define GET_CBFRSYNRA_N(b, N) GET_GLOBAL_REG_N(CBFRSYNRA, N, (b))
+
+/* Implementation defined register setters/getters */
+#define SET_MICRO_MMU_CTRL_HALT_REQ(b, v) \
+ SET_GLOBAL_FIELD(b, MICRO_MMU_CTRL, HALT_REQ, v)
+#define GET_MICRO_MMU_CTRL_IDLE(b) \
+ GET_GLOBAL_FIELD(b, MICRO_MMU_CTRL, IDLE)
+#define SET_MICRO_MMU_CTRL_RESERVED(b, v) \
+ SET_GLOBAL_FIELD(b, MICRO_MMU_CTRL, RESERVED, v)
+
+#define MMU_CTRL_IDLE (MICRO_MMU_CTRL_IDLE_MASK << MICRO_MMU_CTRL_IDLE_SHIFT)
+
+#define SET_PREDICTIONDIS0(b, v) SET_GLOBAL_REG(PREDICTIONDIS0, (b), (v))
+#define SET_PREDICTIONDIS1(b, v) SET_GLOBAL_REG(PREDICTIONDIS1, (b), (v))
+#define SET_S1L1BFBLP0(b, v) SET_GLOBAL_REG(S1L1BFBLP0, (b), (v))
+
+/* SSD register setters/getters */
+#define SET_SSDR_N(b, N, v) SET_GLOBAL_REG_N(SSDR_N, N, (b), (v))
+
+#define GET_SSDR_N(b, N) GET_GLOBAL_REG_N(SSDR_N, N, (b))
+
+/* Context bank register setters/getters */
+#define SET_SCTLR(b, c, v) SET_CTX_REG(CB_SCTLR, (b), (c), (v))
+#define SET_ACTLR(b, c, v) SET_CTX_REG(CB_ACTLR, (b), (c), (v))
+#define SET_RESUME(b, c, v) SET_CTX_REG(CB_RESUME, (b), (c), (v))
+#define SET_TTBCR(b, c, v) SET_CTX_REG(CB_TTBCR, (b), (c), (v))
+#define SET_CONTEXTIDR(b, c, v) SET_CTX_REG(CB_CONTEXTIDR, (b), (c), (v))
+#define SET_PRRR(b, c, v) SET_CTX_REG(CB_PRRR, (b), (c), (v))
+#define SET_NMRR(b, c, v) SET_CTX_REG(CB_NMRR, (b), (c), (v))
+#define SET_PAR(b, c, v) SET_CTX_REG(CB_PAR, (b), (c), (v))
+#define SET_FSR(b, c, v) SET_CTX_REG(CB_FSR, (b), (c), (v))
+#define SET_FSRRESTORE(b, c, v) SET_CTX_REG(CB_FSRRESTORE, (b), (c), (v))
+#define SET_FAR(b, c, v) SET_CTX_REG(CB_FAR, (b), (c), (v))
+#define SET_FSYNR0(b, c, v) SET_CTX_REG(CB_FSYNR0, (b), (c), (v))
+#define SET_FSYNR1(b, c, v) SET_CTX_REG(CB_FSYNR1, (b), (c), (v))
+#define SET_TLBIVA(b, c, v) SET_CTX_REG(CB_TLBIVA, (b), (c), (v))
+#define SET_TLBIVAA(b, c, v) SET_CTX_REG(CB_TLBIVAA, (b), (c), (v))
+#define SET_TLBIASID(b, c, v) SET_CTX_REG(CB_TLBIASID, (b), (c), (v))
+#define SET_TLBIALL(b, c, v) SET_CTX_REG(CB_TLBIALL, (b), (c), (v))
+#define SET_TLBIVAL(b, c, v) SET_CTX_REG(CB_TLBIVAL, (b), (c), (v))
+#define SET_TLBIVAAL(b, c, v) SET_CTX_REG(CB_TLBIVAAL, (b), (c), (v))
+#define SET_TLBSYNC(b, c, v) SET_CTX_REG(CB_TLBSYNC, (b), (c), (v))
+#define SET_TLBSTATUS(b, c, v) SET_CTX_REG(CB_TLBSTATUS, (b), (c), (v))
+#define SET_ATS1PR(b, c, v) SET_CTX_REG(CB_ATS1PR, (b), (c), (v))
+#define SET_ATS1PW(b, c, v) SET_CTX_REG(CB_ATS1PW, (b), (c), (v))
+#define SET_ATS1UR(b, c, v) SET_CTX_REG(CB_ATS1UR, (b), (c), (v))
+#define SET_ATS1UW(b, c, v) SET_CTX_REG(CB_ATS1UW, (b), (c), (v))
+#define SET_ATSR(b, c, v) SET_CTX_REG(CB_ATSR, (b), (c), (v))
+
+#define GET_SCTLR(b, c) GET_CTX_REG(CB_SCTLR, (b), (c))
+#define GET_ACTLR(b, c) GET_CTX_REG(CB_ACTLR, (b), (c))
+#define GET_RESUME(b, c) GET_CTX_REG(CB_RESUME, (b), (c))
+#define GET_TTBR0(b, c) GET_CTX_REG(CB_TTBR0, (b), (c))
+#define GET_TTBR1(b, c) GET_CTX_REG(CB_TTBR1, (b), (c))
+#define GET_TTBCR(b, c) GET_CTX_REG(CB_TTBCR, (b), (c))
+#define GET_CONTEXTIDR(b, c) GET_CTX_REG(CB_CONTEXTIDR, (b), (c))
+#define GET_PRRR(b, c) GET_CTX_REG(CB_PRRR, (b), (c))
+#define GET_NMRR(b, c) GET_CTX_REG(CB_NMRR, (b), (c))
+#define GET_PAR(b, c) GET_CTX_REG_Q(CB_PAR, (b), (c))
+#define GET_FSR(b, c) GET_CTX_REG(CB_FSR, (b), (c))
+#define GET_FSRRESTORE(b, c) GET_CTX_REG(CB_FSRRESTORE, (b), (c))
+#define GET_FAR(b, c) GET_CTX_REG_Q(CB_FAR, (b), (c))
+#define GET_FSYNR0(b, c) GET_CTX_REG(CB_FSYNR0, (b), (c))
+#define GET_FSYNR1(b, c) GET_CTX_REG(CB_FSYNR1, (b), (c))
+#define GET_TLBIVA(b, c) GET_CTX_REG(CB_TLBIVA, (b), (c))
+#define GET_TLBIVAA(b, c) GET_CTX_REG(CB_TLBIVAA, (b), (c))
+#define GET_TLBIASID(b, c) GET_CTX_REG(CB_TLBIASID, (b), (c))
+#define GET_TLBIALL(b, c) GET_CTX_REG(CB_TLBIALL, (b), (c))
+#define GET_TLBIVAL(b, c) GET_CTX_REG(CB_TLBIVAL, (b), (c))
+#define GET_TLBIVAAL(b, c) GET_CTX_REG(CB_TLBIVAAL, (b), (c))
+#define GET_TLBSYNC(b, c) GET_CTX_REG(CB_TLBSYNC, (b), (c))
+#define GET_TLBSTATUS(b, c) GET_CTX_REG(CB_TLBSTATUS, (b), (c))
+#define GET_ATS1PR(b, c) GET_CTX_REG(CB_ATS1PR, (b), (c))
+#define GET_ATS1PW(b, c) GET_CTX_REG(CB_ATS1PW, (b), (c))
+#define GET_ATS1UR(b, c) GET_CTX_REG(CB_ATS1UR, (b), (c))
+#define GET_ATS1UW(b, c) GET_CTX_REG(CB_ATS1UW, (b), (c))
+#define GET_ATSR(b, c) GET_CTX_REG(CB_ATSR, (b), (c))
+
+/* Global Register field setters / getters */
+/* Configuration Register: CR0/NSCR0 */
+#define SET_CR0_NSCFG(b, v) SET_GLOBAL_FIELD(b, CR0, NSCFG, v)
+#define SET_CR0_WACFG(b, v) SET_GLOBAL_FIELD(b, CR0, WACFG, v)
+#define SET_CR0_RACFG(b, v) SET_GLOBAL_FIELD(b, CR0, RACFG, v)
+#define SET_CR0_SHCFG(b, v) SET_GLOBAL_FIELD(b, CR0, SHCFG, v)
+#define SET_CR0_SMCFCFG(b, v) SET_GLOBAL_FIELD(b, CR0, SMCFCFG, v)
+#define SET_NSCR0_SMCFCFG(b, v) SET_GLOBAL_FIELD(b, NSCR0, SMCFCFG, v)
+#define SET_CR0_MTCFG(b, v) SET_GLOBAL_FIELD(b, CR0, MTCFG, v)
+#define SET_CR0_BSU(b, v) SET_GLOBAL_FIELD(b, CR0, BSU, v)
+#define SET_CR0_FB(b, v) SET_GLOBAL_FIELD(b, CR0, FB, v)
+#define SET_CR0_PTM(b, v) SET_GLOBAL_FIELD(b, CR0, PTM, v)
+#define SET_CR0_VMIDPNE(b, v) SET_GLOBAL_FIELD(b, CR0, VMIDPNE, v)
+#define SET_CR0_USFCFG(b, v) SET_GLOBAL_FIELD(b, CR0, USFCFG, v)
+#define SET_NSCR0_USFCFG(b, v) SET_GLOBAL_FIELD(b, NSCR0, USFCFG, v)
+#define SET_CR0_GSE(b, v) SET_GLOBAL_FIELD(b, CR0, GSE, v)
+#define SET_CR0_STALLD(b, v) SET_GLOBAL_FIELD(b, CR0, STALLD, v)
+#define SET_NSCR0_STALLD(b, v) SET_GLOBAL_FIELD(b, NSCR0, STALLD, v)
+#define SET_CR0_TRANSIENTCFG(b, v) SET_GLOBAL_FIELD(b, CR0, TRANSIENTCFG, v)
+#define SET_CR0_GCFGFIE(b, v) SET_GLOBAL_FIELD(b, CR0, GCFGFIE, v)
+#define SET_NSCR0_GCFGFIE(b, v) SET_GLOBAL_FIELD(b, NSCR0, GCFGFIE, v)
+#define SET_CR0_GCFGFRE(b, v) SET_GLOBAL_FIELD(b, CR0, GCFGFRE, v)
+#define SET_NSCR0_GCFGFRE(b, v) SET_GLOBAL_FIELD(b, NSCR0, GCFGFRE, v)
+#define SET_CR0_GFIE(b, v) SET_GLOBAL_FIELD(b, CR0, GFIE, v)
+#define SET_NSCR0_GFIE(b, v) SET_GLOBAL_FIELD(b, NSCR0, GFIE, v)
+#define SET_CR0_GFRE(b, v) SET_GLOBAL_FIELD(b, CR0, GFRE, v)
+#define SET_NSCR0_GFRE(b, v) SET_GLOBAL_FIELD(b, NSCR0, GFRE, v)
+#define SET_CR0_CLIENTPD(b, v) SET_GLOBAL_FIELD(b, CR0, CLIENTPD, v)
+#define SET_NSCR0_CLIENTPD(b, v) SET_GLOBAL_FIELD(b, NSCR0, CLIENTPD, v)
+
+#define SET_ACR_SMTNMC_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, ACR, SMTNMC_BPTLBEN, v)
+#define SET_ACR_MMUDIS_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, ACR, MMUDIS_BPTLBEN, v)
+#define SET_ACR_S2CR_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, ACR, S2CR_BPTLBEN, v)
+
+#define SET_NSACR_SMTNMC_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, NSACR, SMTNMC_BPTLBEN, v)
+#define SET_NSACR_MMUDIS_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, NSACR, MMUDIS_BPTLBEN, v)
+#define SET_NSACR_S2CR_BPTLBEN(b, v)\
+ SET_GLOBAL_FIELD(b, NSACR, S2CR_BPTLBEN, v)
+
+#define GET_CR0_NSCFG(b) GET_GLOBAL_FIELD(b, CR0, NSCFG)
+#define GET_CR0_WACFG(b) GET_GLOBAL_FIELD(b, CR0, WACFG)
+#define GET_CR0_RACFG(b) GET_GLOBAL_FIELD(b, CR0, RACFG)
+#define GET_CR0_SHCFG(b) GET_GLOBAL_FIELD(b, CR0, SHCFG)
+#define GET_CR0_SMCFCFG(b) GET_GLOBAL_FIELD(b, CR0, SMCFCFG)
+#define GET_CR0_MTCFG(b) GET_GLOBAL_FIELD(b, CR0, MTCFG)
+#define GET_CR0_BSU(b) GET_GLOBAL_FIELD(b, CR0, BSU)
+#define GET_CR0_FB(b) GET_GLOBAL_FIELD(b, CR0, FB)
+#define GET_CR0_PTM(b) GET_GLOBAL_FIELD(b, CR0, PTM)
+#define GET_CR0_VMIDPNE(b) GET_GLOBAL_FIELD(b, CR0, VMIDPNE)
+#define GET_CR0_USFCFG(b) GET_GLOBAL_FIELD(b, CR0, USFCFG)
+#define GET_CR0_GSE(b) GET_GLOBAL_FIELD(b, CR0, GSE)
+#define GET_CR0_STALLD(b) GET_GLOBAL_FIELD(b, CR0, STALLD)
+#define GET_CR0_TRANSIENTCFG(b) GET_GLOBAL_FIELD(b, CR0, TRANSIENTCFG)
+#define GET_CR0_GCFGFIE(b) GET_GLOBAL_FIELD(b, CR0, GCFGFIE)
+#define GET_CR0_GCFGFRE(b) GET_GLOBAL_FIELD(b, CR0, GCFGFRE)
+#define GET_CR0_GFIE(b) GET_GLOBAL_FIELD(b, CR0, GFIE)
+#define GET_CR0_GFRE(b) GET_GLOBAL_FIELD(b, CR0, GFRE)
+#define GET_CR0_CLIENTPD(b) GET_GLOBAL_FIELD(b, CR0, CLIENTPD)
+
+/* Configuration Register: CR2 */
+#define SET_CR2_BPVMID(b, v) SET_GLOBAL_FIELD(b, CR2, BPVMID, v)
+
+#define GET_CR2_BPVMID(b) GET_GLOBAL_FIELD(b, CR2, BPVMID)
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define SET_GATS1PR_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS1PR, ADDR, v)
+#define SET_GATS1PR_NDX(b, v) SET_GLOBAL_FIELD(b, GATS1PR, NDX, v)
+
+#define GET_GATS1PR_ADDR(b) GET_GLOBAL_FIELD(b, GATS1PR, ADDR)
+#define GET_GATS1PR_NDX(b) GET_GLOBAL_FIELD(b, GATS1PR, NDX)
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define SET_GATS1PW_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS1PW, ADDR, v)
+#define SET_GATS1PW_NDX(b, v) SET_GLOBAL_FIELD(b, GATS1PW, NDX, v)
+
+#define GET_GATS1PW_ADDR(b) GET_GLOBAL_FIELD(b, GATS1PW, ADDR)
+#define GET_GATS1PW_NDX(b) GET_GLOBAL_FIELD(b, GATS1PW, NDX)
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define SET_GATS1UR_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS1UR, ADDR, v)
+#define SET_GATS1UR_NDX(b, v) SET_GLOBAL_FIELD(b, GATS1UR, NDX, v)
+
+#define GET_GATS1UR_ADDR(b) GET_GLOBAL_FIELD(b, GATS1UR, ADDR)
+#define GET_GATS1UR_NDX(b) GET_GLOBAL_FIELD(b, GATS1UR, NDX)
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define SET_GATS1UW_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS1UW, ADDR, v)
+#define SET_GATS1UW_NDX(b, v) SET_GLOBAL_FIELD(b, GATS1UW, NDX, v)
+
+#define GET_GATS1UW_ADDR(b) GET_GLOBAL_FIELD(b, GATS1UW, ADDR)
+#define GET_GATS1UW_NDX(b) GET_GLOBAL_FIELD(b, GATS1UW, NDX)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define SET_GATS12PR_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS12PR, ADDR, v)
+#define SET_GATS12PR_NDX(b, v) SET_GLOBAL_FIELD(b, GATS12PR, NDX, v)
+
+#define GET_GATS12PR_ADDR(b) GET_GLOBAL_FIELD(b, GATS12PR, ADDR)
+#define GET_GATS12PR_NDX(b) GET_GLOBAL_FIELD(b, GATS12PR, NDX)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define SET_GATS12PW_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS12PW, ADDR, v)
+#define SET_GATS12PW_NDX(b, v) SET_GLOBAL_FIELD(b, GATS12PW, NDX, v)
+
+#define GET_GATS12PW_ADDR(b) GET_GLOBAL_FIELD(b, GATS12PW, ADDR)
+#define GET_GATS12PW_NDX(b) GET_GLOBAL_FIELD(b, GATS12PW, NDX)
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define SET_GATS12UR_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS12UR, ADDR, v)
+#define SET_GATS12UR_NDX(b, v) SET_GLOBAL_FIELD(b, GATS12UR, NDX, v)
+
+#define GET_GATS12UR_ADDR(b) GET_GLOBAL_FIELD(b, GATS12UR, ADDR)
+#define GET_GATS12UR_NDX(b) GET_GLOBAL_FIELD(b, GATS12UR, NDX)
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define SET_GATS12UW_ADDR(b, v) SET_GLOBAL_FIELD(b, GATS12UW, ADDR, v)
+#define SET_GATS12UW_NDX(b, v) SET_GLOBAL_FIELD(b, GATS12UW, NDX, v)
+
+#define GET_GATS12UW_ADDR(b) GET_GLOBAL_FIELD(b, GATS12UW, ADDR)
+#define GET_GATS12UW_NDX(b) GET_GLOBAL_FIELD(b, GATS12UW, NDX)
+
+/* Global Address Translation Status Register: GATSR */
+#define SET_GATSR_ACTIVE(b, v) SET_GLOBAL_FIELD(b, GATSR, ACTIVE, v)
+
+#define GET_GATSR_ACTIVE(b) GET_GLOBAL_FIELD(b, GATSR, ACTIVE)
+
+/* Global Fault Address Register: GFAR */
+#define SET_GFAR_FADDR(b, v) SET_GLOBAL_FIELD(b, GFAR, FADDR, v)
+
+#define GET_GFAR_FADDR(b) GET_GLOBAL_FIELD(b, GFAR, FADDR)
+
+/* Global Fault Status Register: GFSR */
+#define SET_GFSR_ICF(b, v) SET_GLOBAL_FIELD(b, GFSR, ICF, v)
+#define SET_GFSR_USF(b, v) SET_GLOBAL_FIELD(b, GFSR, USF, v)
+#define SET_GFSR_SMCF(b, v) SET_GLOBAL_FIELD(b, GFSR, SMCF, v)
+#define SET_GFSR_UCBF(b, v) SET_GLOBAL_FIELD(b, GFSR, UCBF, v)
+#define SET_GFSR_UCIF(b, v) SET_GLOBAL_FIELD(b, GFSR, UCIF, v)
+#define SET_GFSR_CAF(b, v) SET_GLOBAL_FIELD(b, GFSR, CAF, v)
+#define SET_GFSR_EF(b, v) SET_GLOBAL_FIELD(b, GFSR, EF, v)
+#define SET_GFSR_PF(b, v) SET_GLOBAL_FIELD(b, GFSR, PF, v)
+#define SET_GFSR_MULTI(b, v) SET_GLOBAL_FIELD(b, GFSR, MULTI, v)
+
+#define GET_GFSR_ICF(b) GET_GLOBAL_FIELD(b, GFSR, ICF)
+#define GET_GFSR_USF(b) GET_GLOBAL_FIELD(b, GFSR, USF)
+#define GET_GFSR_SMCF(b) GET_GLOBAL_FIELD(b, GFSR, SMCF)
+#define GET_GFSR_UCBF(b) GET_GLOBAL_FIELD(b, GFSR, UCBF)
+#define GET_GFSR_UCIF(b) GET_GLOBAL_FIELD(b, GFSR, UCIF)
+#define GET_GFSR_CAF(b) GET_GLOBAL_FIELD(b, GFSR, CAF)
+#define GET_GFSR_EF(b) GET_GLOBAL_FIELD(b, GFSR, EF)
+#define GET_GFSR_PF(b) GET_GLOBAL_FIELD(b, GFSR, PF)
+#define GET_GFSR_MULTI(b) GET_GLOBAL_FIELD(b, GFSR, MULTI)
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define SET_GFSYNR0_NESTED(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, NESTED, v)
+#define SET_GFSYNR0_WNR(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, WNR, v)
+#define SET_GFSYNR0_PNU(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, PNU, v)
+#define SET_GFSYNR0_IND(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, IND, v)
+#define SET_GFSYNR0_NSSTATE(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, NSSTATE, v)
+#define SET_GFSYNR0_NSATTR(b, v) SET_GLOBAL_FIELD(b, GFSYNR0, NSATTR, v)
+
+#define GET_GFSYNR0_NESTED(b) GET_GLOBAL_FIELD(b, GFSYNR0, NESTED)
+#define GET_GFSYNR0_WNR(b) GET_GLOBAL_FIELD(b, GFSYNR0, WNR)
+#define GET_GFSYNR0_PNU(b) GET_GLOBAL_FIELD(b, GFSYNR0, PNU)
+#define GET_GFSYNR0_IND(b) GET_GLOBAL_FIELD(b, GFSYNR0, IND)
+#define GET_GFSYNR0_NSSTATE(b) GET_GLOBAL_FIELD(b, GFSYNR0, NSSTATE)
+#define GET_GFSYNR0_NSATTR(b) GET_GLOBAL_FIELD(b, GFSYNR0, NSATTR)
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define SET_GFSYNR1_SID(b, v) SET_GLOBAL_FIELD(b, GFSYNR1, SID, v)
+
+#define GET_GFSYNR1_SID(b) GET_GLOBAL_FIELD(b, GFSYNR1, SID)
+
+/* Global Physical Address Register: GPAR */
+#define SET_GPAR_F(b, v) SET_GLOBAL_FIELD(b, GPAR, F, v)
+#define SET_GPAR_SS(b, v) SET_GLOBAL_FIELD(b, GPAR, SS, v)
+#define SET_GPAR_OUTER(b, v) SET_GLOBAL_FIELD(b, GPAR, OUTER, v)
+#define SET_GPAR_INNER(b, v) SET_GLOBAL_FIELD(b, GPAR, INNER, v)
+#define SET_GPAR_SH(b, v) SET_GLOBAL_FIELD(b, GPAR, SH, v)
+#define SET_GPAR_NS(b, v) SET_GLOBAL_FIELD(b, GPAR, NS, v)
+#define SET_GPAR_NOS(b, v) SET_GLOBAL_FIELD(b, GPAR, NOS, v)
+#define SET_GPAR_PA(b, v) SET_GLOBAL_FIELD(b, GPAR, PA, v)
+#define SET_GPAR_TF(b, v) SET_GLOBAL_FIELD(b, GPAR, TF, v)
+#define SET_GPAR_AFF(b, v) SET_GLOBAL_FIELD(b, GPAR, AFF, v)
+#define SET_GPAR_PF(b, v) SET_GLOBAL_FIELD(b, GPAR, PF, v)
+#define SET_GPAR_EF(b, v) SET_GLOBAL_FIELD(b, GPAR, EF, v)
+#define SET_GPAR_TLCMCF(b, v) SET_GLOBAL_FIELD(b, GPAR, TLCMCF, v)
+#define SET_GPAR_TLBLKF(b, v) SET_GLOBAL_FIELD(b, GPAR, TLBLKF, v)
+#define SET_GPAR_UCBF(b, v) SET_GLOBAL_FIELD(b, GPAR, UCBF, v)
+
+#define GET_GPAR_F(b) GET_GLOBAL_FIELD(b, GPAR, F)
+#define GET_GPAR_SS(b) GET_GLOBAL_FIELD(b, GPAR, SS)
+#define GET_GPAR_OUTER(b) GET_GLOBAL_FIELD(b, GPAR, OUTER)
+#define GET_GPAR_INNER(b) GET_GLOBAL_FIELD(b, GPAR, INNER)
+#define GET_GPAR_SH(b) GET_GLOBAL_FIELD(b, GPAR, SH)
+#define GET_GPAR_NS(b) GET_GLOBAL_FIELD(b, GPAR, NS)
+#define GET_GPAR_NOS(b) GET_GLOBAL_FIELD(b, GPAR, NOS)
+#define GET_GPAR_PA(b) GET_GLOBAL_FIELD(b, GPAR, PA)
+#define GET_GPAR_TF(b) GET_GLOBAL_FIELD(b, GPAR, TF)
+#define GET_GPAR_AFF(b) GET_GLOBAL_FIELD(b, GPAR, AFF)
+#define GET_GPAR_PF(b) GET_GLOBAL_FIELD(b, GPAR, PF)
+#define GET_GPAR_EF(b) GET_GLOBAL_FIELD(b, GPAR, EF)
+#define GET_GPAR_TLCMCF(b) GET_GLOBAL_FIELD(b, GPAR, TLCMCF)
+#define GET_GPAR_TLBLKF(b) GET_GLOBAL_FIELD(b, GPAR, TLBLKF)
+#define GET_GPAR_UCBF(b) GET_GLOBAL_FIELD(b, GPAR, UCBF)
+
+/* Identification Register: IDR0 */
+#define SET_IDR0_NUMSMRG(b, v) SET_GLOBAL_FIELD(b, IDR0, NUMSMRG, v)
+#define SET_IDR0_NUMSIDB(b, v) SET_GLOBAL_FIELD(b, IDR0, NUMSIDB, v)
+#define SET_IDR0_BTM(b, v) SET_GLOBAL_FIELD(b, IDR0, BTM, v)
+#define SET_IDR0_CTTW(b, v) SET_GLOBAL_FIELD(b, IDR0, CTTW, v)
+#define SET_IDR0_NUMIRPT(b, v) SET_GLOBAL_FIELD(b, IDR0, NUMIRPT, v)
+#define SET_IDR0_PTFS(b, v) SET_GLOBAL_FIELD(b, IDR0, PTFS, v)
+#define SET_IDR0_SMS(b, v) SET_GLOBAL_FIELD(b, IDR0, SMS, v)
+#define SET_IDR0_NTS(b, v) SET_GLOBAL_FIELD(b, IDR0, NTS, v)
+#define SET_IDR0_S2TS(b, v) SET_GLOBAL_FIELD(b, IDR0, S2TS, v)
+#define SET_IDR0_S1TS(b, v) SET_GLOBAL_FIELD(b, IDR0, S1TS, v)
+#define SET_IDR0_SES(b, v) SET_GLOBAL_FIELD(b, IDR0, SES, v)
+
+#define GET_IDR0_NUMSMRG(b) GET_GLOBAL_FIELD(b, IDR0, NUMSMRG)
+#define GET_IDR0_NUMSIDB(b) GET_GLOBAL_FIELD(b, IDR0, NUMSIDB)
+#define GET_IDR0_BTM(b) GET_GLOBAL_FIELD(b, IDR0, BTM)
+#define GET_IDR0_CTTW(b) GET_GLOBAL_FIELD(b, IDR0, CTTW)
+#define GET_IDR0_NUMIRPT(b) GET_GLOBAL_FIELD(b, IDR0, NUMIRPT)
+#define GET_IDR0_PTFS(b) GET_GLOBAL_FIELD(b, IDR0, PTFS)
+#define GET_IDR0_SMS(b) GET_GLOBAL_FIELD(b, IDR0, SMS)
+#define GET_IDR0_NTS(b) GET_GLOBAL_FIELD(b, IDR0, NTS)
+#define GET_IDR0_S2TS(b) GET_GLOBAL_FIELD(b, IDR0, S2TS)
+#define GET_IDR0_S1TS(b) GET_GLOBAL_FIELD(b, IDR0, S1TS)
+#define GET_IDR0_SES(b) GET_GLOBAL_FIELD(b, IDR0, SES)
+
+/* Identification Register: IDR1 */
+#define SET_IDR1_NUMCB(b, v) SET_GLOBAL_FIELD(b, IDR1, NUMCB, v)
+#define SET_IDR1_NUMSSDNDXB(b, v) SET_GLOBAL_FIELD(b, IDR1, NUMSSDNDXB, v)
+#define SET_IDR1_SSDTP(b, v) SET_GLOBAL_FIELD(b, IDR1, SSDTP, v)
+#define SET_IDR1_SMCD(b, v) SET_GLOBAL_FIELD(b, IDR1, SMCD, v)
+#define SET_IDR1_NUMS2CB(b, v) SET_GLOBAL_FIELD(b, IDR1, NUMS2CB, v)
+#define SET_IDR1_NUMPAGENDXB(b, v) SET_GLOBAL_FIELD(b, IDR1, NUMPAGENDXB, v)
+#define SET_IDR1_PAGESIZE(b, v) SET_GLOBAL_FIELD(b, IDR1, PAGESIZE, v)
+
+#define GET_IDR1_NUMCB(b) GET_GLOBAL_FIELD(b, IDR1, NUMCB)
+#define GET_IDR1_NUMSSDNDXB(b) GET_GLOBAL_FIELD(b, IDR1, NUMSSDNDXB)
+#define GET_IDR1_SSDTP(b) GET_GLOBAL_FIELD(b, IDR1, SSDTP)
+#define GET_IDR1_SMCD(b) GET_GLOBAL_FIELD(b, IDR1, SMCD)
+#define GET_IDR1_NUMS2CB(b) GET_GLOBAL_FIELD(b, IDR1, NUMS2CB)
+#define GET_IDR1_NUMPAGENDXB(b) GET_GLOBAL_FIELD(b, IDR1, NUMPAGENDXB)
+#define GET_IDR1_PAGESIZE(b) GET_GLOBAL_FIELD(b, IDR1, PAGESIZE)
+
+/* Identification Register: IDR2 */
+#define SET_IDR2_IAS(b, v) SET_GLOBAL_FIELD(b, IDR2, IAS, v)
+#define SET_IDR2_OAS(b, v) SET_GLOBAL_FIELD(b, IDR2, OAS, v)
+
+#define GET_IDR2_IAS(b) GET_GLOBAL_FIELD(b, IDR2, IAS)
+#define GET_IDR2_OAS(b) GET_GLOBAL_FIELD(b, IDR2, OAS)
+
+/* Identification Register: IDR7 */
+#define SET_IDR7_MINOR(b, v) SET_GLOBAL_FIELD(b, IDR7, MINOR, v)
+#define SET_IDR7_MAJOR(b, v) SET_GLOBAL_FIELD(b, IDR7, MAJOR, v)
+
+#define GET_IDR7_MINOR(b) GET_GLOBAL_FIELD(b, IDR7, MINOR)
+#define GET_IDR7_MAJOR(b) GET_GLOBAL_FIELD(b, IDR7, MAJOR)
+
+/* Stream to Context Register: S2CR_N */
+#define SET_S2CR_CBNDX(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, CBNDX, v)
+#define SET_S2CR_SHCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, SHCFG, v)
+#define SET_S2CR_MTCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, MTCFG, v)
+#define SET_S2CR_MEMATTR(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, MEMATTR, v)
+#define SET_S2CR_TYPE(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, TYPE, v)
+#define SET_S2CR_NSCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, NSCFG, v)
+#define SET_S2CR_RACFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, RACFG, v)
+#define SET_S2CR_WACFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, WACFG, v)
+#define SET_S2CR_PRIVCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, PRIVCFG, v)
+#define SET_S2CR_INSTCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, INSTCFG, v)
+#define SET_S2CR_TRANSIENTCFG(b, n, v) \
+ SET_GLOBAL_FIELD_N(b, n, S2CR, TRANSIENTCFG, v)
+#define SET_S2CR_VMID(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, VMID, v)
+#define SET_S2CR_BSU(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, BSU, v)
+#define SET_S2CR_FB(b, n, v) SET_GLOBAL_FIELD_N(b, n, S2CR, FB, v)
+
+#define GET_S2CR_CBNDX(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, CBNDX)
+#define GET_S2CR_SHCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, SHCFG)
+#define GET_S2CR_MTCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, MTCFG)
+#define GET_S2CR_MEMATTR(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, MEMATTR)
+#define GET_S2CR_TYPE(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, TYPE)
+#define GET_S2CR_NSCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, NSCFG)
+#define GET_S2CR_RACFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, RACFG)
+#define GET_S2CR_WACFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, WACFG)
+#define GET_S2CR_PRIVCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, PRIVCFG)
+#define GET_S2CR_INSTCFG(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, INSTCFG)
+#define GET_S2CR_TRANSIENTCFG(b, n) \
+ GET_GLOBAL_FIELD_N(b, n, S2CR, TRANSIENTCFG)
+#define GET_S2CR_VMID(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, VMID)
+#define GET_S2CR_BSU(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, BSU)
+#define GET_S2CR_FB(b, n) GET_GLOBAL_FIELD_N(b, n, S2CR, FB)
+
+/* Stream Match Register: SMR_N */
+#define SET_SMR_ID(b, n, v) SET_GLOBAL_FIELD_N(b, n, SMR, ID, v)
+#define SET_SMR_MASK(b, n, v) SET_GLOBAL_FIELD_N(b, n, SMR, MASK, v)
+#define SET_SMR_VALID(b, n, v) SET_GLOBAL_FIELD_N(b, n, SMR, VALID, v)
+
+#define GET_SMR_ID(b, n) GET_GLOBAL_FIELD_N(b, n, SMR, ID)
+#define GET_SMR_MASK(b, n) GET_GLOBAL_FIELD_N(b, n, SMR, MASK)
+#define GET_SMR_VALID(b, n) GET_GLOBAL_FIELD_N(b, n, SMR, VALID)
+
+/* Global TLB Status: TLBGSTATUS */
+#define SET_TLBGSTATUS_GSACTIVE(b, v) \
+ SET_GLOBAL_FIELD(b, TLBGSTATUS, GSACTIVE, v)
+
+#define GET_TLBGSTATUS_GSACTIVE(b) \
+ GET_GLOBAL_FIELD(b, TLBGSTATUS, GSACTIVE)
+
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define SET_TLBIVAH_ADDR(b, v) SET_GLOBAL_FIELD(b, TLBIVAH, ADDR, v)
+
+#define GET_TLBIVAH_ADDR(b) GET_GLOBAL_FIELD(b, TLBIVAH, ADDR)
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define SET_TLBIVMID_VMID(b, v) SET_GLOBAL_FIELD(b, TLBIVMID, VMID, v)
+
+#define GET_TLBIVMID_VMID(b) GET_GLOBAL_FIELD(b, TLBIVMID, VMID)
+
+/* Global Register Space 1 Field setters/getters */
+/* Context Bank Attribute Register: CBAR_N */
+#define SET_CBAR_VMID(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, VMID, v)
+#define SET_CBAR_CBNDX(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, CBNDX, v)
+#define SET_CBAR_BPSHCFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, BPSHCFG, v)
+#define SET_CBAR_HYPC(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, HYPC, v)
+#define SET_CBAR_FB(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, FB, v)
+#define SET_CBAR_MEMATTR(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, MEMATTR, v)
+#define SET_CBAR_TYPE(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, TYPE, v)
+#define SET_CBAR_BSU(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, BSU, v)
+#define SET_CBAR_RACFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, RACFG, v)
+#define SET_CBAR_WACFG(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, WACFG, v)
+#define SET_CBAR_IRPTNDX(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBAR, IRPTNDX, v)
+
+#define GET_CBAR_VMID(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, VMID)
+#define GET_CBAR_CBNDX(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, CBNDX)
+#define GET_CBAR_BPSHCFG(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, BPSHCFG)
+#define GET_CBAR_HYPC(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, HYPC)
+#define GET_CBAR_FB(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, FB)
+#define GET_CBAR_MEMATTR(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, MEMATTR)
+#define GET_CBAR_TYPE(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, TYPE)
+#define GET_CBAR_BSU(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, BSU)
+#define GET_CBAR_RACFG(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, RACFG)
+#define GET_CBAR_WACFG(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, WACFG)
+#define GET_CBAR_IRPTNDX(b, n) GET_GLOBAL_FIELD_N(b, n, CBAR, IRPTNDX)
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA_N */
+#define SET_CBFRSYNRA_SID(b, n, v) SET_GLOBAL_FIELD_N(b, n, CBFRSYNRA, SID, v)
+
+#define GET_CBFRSYNRA_SID(b, n) GET_GLOBAL_FIELD_N(b, n, CBFRSYNRA, SID)
+
+/* Stage 1 Context Bank Format Fields */
+#define SET_CB_ACTLR_REQPRIORITY(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITY, v)
+#define SET_CB_ACTLR_REQPRIORITYCFG(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITYCFG, v)
+#define SET_CB_ACTLR_PRIVCFG(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_ACTLR, PRIVCFG, v)
+#define SET_CB_ACTLR_BPRCOSH(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCOSH, v)
+#define SET_CB_ACTLR_BPRCISH(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCISH, v)
+#define SET_CB_ACTLR_BPRCNSH(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCNSH, v)
+
+#define GET_CB_ACTLR_REQPRIORITY(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITY)
+#define GET_CB_ACTLR_REQPRIORITYCFG(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_ACTLR, REQPRIORITYCFG)
+#define GET_CB_ACTLR_PRIVCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_ACTLR, PRIVCFG)
+#define GET_CB_ACTLR_BPRCOSH(b, c) GET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCOSH)
+#define GET_CB_ACTLR_BPRCISH(b, c) GET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCISH)
+#define GET_CB_ACTLR_BPRCNSH(b, c) GET_CONTEXT_FIELD(b, c, CB_ACTLR, BPRCNSH)
+
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define SET_CB_ATS1PR_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1PR, ADDR, v)
+
+#define GET_CB_ATS1PR_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_ATS1PR, ADDR)
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define SET_CB_ATS1PW_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1PW, ADDR, v)
+
+#define GET_CB_ATS1PW_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_ATS1PW, ADDR)
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define SET_CB_ATS1UR_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1UR, ADDR, v)
+
+#define GET_CB_ATS1UR_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_ATS1UR, ADDR)
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define SET_CB_ATS1UW_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATS1UW, ADDR, v)
+
+#define GET_CB_ATS1UW_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_ATS1UW, ADDR)
+
+/* Address Translation Status Register: CB_ATSR */
+#define SET_CB_ATSR_ACTIVE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_ATSR, ACTIVE, v)
+
+#define GET_CB_ATSR_ACTIVE(b, c) GET_CONTEXT_FIELD(b, c, CB_ATSR, ACTIVE)
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define SET_CB_CONTEXTIDR_ASID(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, ASID, v)
+#define SET_CB_CONTEXTIDR_PROCID(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, PROCID, v)
+
+#define GET_CB_CONTEXTIDR_ASID(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, ASID)
+#define GET_CB_CONTEXTIDR_PROCID(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_CONTEXTIDR, PROCID)
+
+/* Fault Address Register: CB_FAR */
+#define SET_CB_FAR_FADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FAR, FADDR, v)
+
+#define GET_CB_FAR_FADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_FAR, FADDR)
+
+/* Fault Status Register: CB_FSR */
+#define SET_CB_FSR_TF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, TF, v)
+#define SET_CB_FSR_AFF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, AFF, v)
+#define SET_CB_FSR_PF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, PF, v)
+#define SET_CB_FSR_EF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, EF, v)
+#define SET_CB_FSR_TLBMCF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, TLBMCF, v)
+#define SET_CB_FSR_TLBLKF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, TLBLKF, v)
+#define SET_CB_FSR_SS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, SS, v)
+#define SET_CB_FSR_MULTI(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSR, MULTI, v)
+
+#define GET_CB_FSR_TF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, TF)
+#define GET_CB_FSR_AFF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, AFF)
+#define GET_CB_FSR_PF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, PF)
+#define GET_CB_FSR_EF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, EF)
+#define GET_CB_FSR_TLBMCF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, TLBMCF)
+#define GET_CB_FSR_TLBLKF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, TLBLKF)
+#define GET_CB_FSR_SS(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, SS)
+#define GET_CB_FSR_MULTI(b, c) GET_CONTEXT_FIELD(b, c, CB_FSR, MULTI)
+
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define SET_CB_FSYNR0_PLVL(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, PLVL, v)
+#define SET_CB_FSYNR0_S1PTWF(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1PTWF, v)
+#define SET_CB_FSYNR0_WNR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, WNR, v)
+#define SET_CB_FSYNR0_PNU(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, PNU, v)
+#define SET_CB_FSYNR0_IND(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, IND, v)
+#define SET_CB_FSYNR0_NSSTATE(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSSTATE, v)
+#define SET_CB_FSYNR0_NSATTR(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSATTR, v)
+#define SET_CB_FSYNR0_ATOF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, ATOF, v)
+#define SET_CB_FSYNR0_PTWF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, PTWF, v)
+#define SET_CB_FSYNR0_AFR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_FSYNR0, AFR, v)
+#define SET_CB_FSYNR0_S1CBNDX(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1CBNDX, v)
+
+#define GET_CB_FSYNR0_PLVL(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, PLVL)
+#define GET_CB_FSYNR0_S1PTWF(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1PTWF)
+#define GET_CB_FSYNR0_WNR(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, WNR)
+#define GET_CB_FSYNR0_PNU(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, PNU)
+#define GET_CB_FSYNR0_IND(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, IND)
+#define GET_CB_FSYNR0_NSSTATE(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSSTATE)
+#define GET_CB_FSYNR0_NSATTR(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_FSYNR0, NSATTR)
+#define GET_CB_FSYNR0_ATOF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, ATOF)
+#define GET_CB_FSYNR0_PTWF(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, PTWF)
+#define GET_CB_FSYNR0_AFR(b, c) GET_CONTEXT_FIELD(b, c, CB_FSYNR0, AFR)
+#define GET_CB_FSYNR0_S1CBNDX(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_FSYNR0, S1CBNDX)
+
+/* Normal Memory Remap Register: CB_NMRR */
+#define SET_CB_NMRR_IR0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR0, v)
+#define SET_CB_NMRR_IR1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR1, v)
+#define SET_CB_NMRR_IR2(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR2, v)
+#define SET_CB_NMRR_IR3(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR3, v)
+#define SET_CB_NMRR_IR4(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR4, v)
+#define SET_CB_NMRR_IR5(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR5, v)
+#define SET_CB_NMRR_IR6(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR6, v)
+#define SET_CB_NMRR_IR7(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, IR7, v)
+#define SET_CB_NMRR_OR0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR0, v)
+#define SET_CB_NMRR_OR1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR1, v)
+#define SET_CB_NMRR_OR2(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR2, v)
+#define SET_CB_NMRR_OR3(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR3, v)
+#define SET_CB_NMRR_OR4(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR4, v)
+#define SET_CB_NMRR_OR5(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR5, v)
+#define SET_CB_NMRR_OR6(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR6, v)
+#define SET_CB_NMRR_OR7(b, c, v) SET_CONTEXT_FIELD(b, c, CB_NMRR, OR7, v)
+
+#define GET_CB_NMRR_IR0(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR0)
+#define GET_CB_NMRR_IR1(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR1)
+#define GET_CB_NMRR_IR2(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR2)
+#define GET_CB_NMRR_IR3(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR3)
+#define GET_CB_NMRR_IR4(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR4)
+#define GET_CB_NMRR_IR5(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR5)
+#define GET_CB_NMRR_IR6(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR6)
+#define GET_CB_NMRR_IR7(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, IR7)
+#define GET_CB_NMRR_OR0(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR0)
+#define GET_CB_NMRR_OR1(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR1)
+#define GET_CB_NMRR_OR2(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR2)
+#define GET_CB_NMRR_OR3(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR3)
+#define GET_CB_NMRR_OR4(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR4)
+#define GET_CB_NMRR_OR5(b, c) GET_CONTEXT_FIELD(b, c, CB_NMRR, OR5)
+
+/* Physical Address Register: CB_PAR */
+#define SET_CB_PAR_F(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, F, v)
+#define SET_CB_PAR_SS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, SS, v)
+#define SET_CB_PAR_OUTER(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, OUTER, v)
+#define SET_CB_PAR_INNER(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, INNER, v)
+#define SET_CB_PAR_SH(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, SH, v)
+#define SET_CB_PAR_NS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, NS, v)
+#define SET_CB_PAR_NOS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, NOS, v)
+#define SET_CB_PAR_PA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, PA, v)
+#define SET_CB_PAR_TF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, TF, v)
+#define SET_CB_PAR_AFF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, AFF, v)
+#define SET_CB_PAR_PF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, PF, v)
+#define SET_CB_PAR_TLBMCF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, TLBMCF, v)
+#define SET_CB_PAR_TLBLKF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, TLBLKF, v)
+#define SET_CB_PAR_ATOT(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, ATOT, v)
+#define SET_CB_PAR_PLVL(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, PLVL, v)
+#define SET_CB_PAR_STAGE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PAR, STAGE, v)
+
+#define GET_CB_PAR_F(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, F)
+#define GET_CB_PAR_SS(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, SS)
+#define GET_CB_PAR_OUTER(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, OUTER)
+#define GET_CB_PAR_INNER(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, INNER)
+#define GET_CB_PAR_SH(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, SH)
+#define GET_CB_PAR_NS(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, NS)
+#define GET_CB_PAR_NOS(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, NOS)
+#define GET_CB_PAR_PA(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, PA)
+#define GET_CB_PAR_TF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, TF)
+#define GET_CB_PAR_AFF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, AFF)
+#define GET_CB_PAR_PF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, PF)
+#define GET_CB_PAR_TLBMCF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, TLBMCF)
+#define GET_CB_PAR_TLBLKF(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, TLBLKF)
+#define GET_CB_PAR_ATOT(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, ATOT)
+#define GET_CB_PAR_PLVL(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, PLVL)
+#define GET_CB_PAR_STAGE(b, c) GET_CONTEXT_FIELD(b, c, CB_PAR, STAGE)
+
+/* Primary Region Remap Register: CB_PRRR */
+#define SET_CB_PRRR_TR0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR0, v)
+#define SET_CB_PRRR_TR1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR1, v)
+#define SET_CB_PRRR_TR2(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR2, v)
+#define SET_CB_PRRR_TR3(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR3, v)
+#define SET_CB_PRRR_TR4(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR4, v)
+#define SET_CB_PRRR_TR5(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR5, v)
+#define SET_CB_PRRR_TR6(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR6, v)
+#define SET_CB_PRRR_TR7(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, TR7, v)
+#define SET_CB_PRRR_DS0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, DS0, v)
+#define SET_CB_PRRR_DS1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, DS1, v)
+#define SET_CB_PRRR_NS0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NS0, v)
+#define SET_CB_PRRR_NS1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NS1, v)
+#define SET_CB_PRRR_NOS0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS0, v)
+#define SET_CB_PRRR_NOS1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS1, v)
+#define SET_CB_PRRR_NOS2(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS2, v)
+#define SET_CB_PRRR_NOS3(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS3, v)
+#define SET_CB_PRRR_NOS4(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS4, v)
+#define SET_CB_PRRR_NOS5(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS5, v)
+#define SET_CB_PRRR_NOS6(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS6, v)
+#define SET_CB_PRRR_NOS7(b, c, v) SET_CONTEXT_FIELD(b, c, CB_PRRR, NOS7, v)
+
+#define GET_CB_PRRR_TR0(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR0)
+#define GET_CB_PRRR_TR1(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR1)
+#define GET_CB_PRRR_TR2(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR2)
+#define GET_CB_PRRR_TR3(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR3)
+#define GET_CB_PRRR_TR4(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR4)
+#define GET_CB_PRRR_TR5(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR5)
+#define GET_CB_PRRR_TR6(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR6)
+#define GET_CB_PRRR_TR7(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, TR7)
+#define GET_CB_PRRR_DS0(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, DS0)
+#define GET_CB_PRRR_DS1(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, DS1)
+#define GET_CB_PRRR_NS0(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NS0)
+#define GET_CB_PRRR_NS1(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NS1)
+#define GET_CB_PRRR_NOS0(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS0)
+#define GET_CB_PRRR_NOS1(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS1)
+#define GET_CB_PRRR_NOS2(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS2)
+#define GET_CB_PRRR_NOS3(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS3)
+#define GET_CB_PRRR_NOS4(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS4)
+#define GET_CB_PRRR_NOS5(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS5)
+#define GET_CB_PRRR_NOS6(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS6)
+#define GET_CB_PRRR_NOS7(b, c) GET_CONTEXT_FIELD(b, c, CB_PRRR, NOS7)
+
+/* Transaction Resume: CB_RESUME */
+#define SET_CB_RESUME_TNR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_RESUME, TNR, v)
+
+#define GET_CB_RESUME_TNR(b, c) GET_CONTEXT_FIELD(b, c, CB_RESUME, TNR)
+
+/* System Control Register: CB_SCTLR */
+#define SET_CB_SCTLR_M(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, M, v)
+#define SET_CB_SCTLR_TRE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, TRE, v)
+#define SET_CB_SCTLR_AFE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, AFE, v)
+#define SET_CB_SCTLR_AFFD(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, AFFD, v)
+#define SET_CB_SCTLR_E(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, E, v)
+#define SET_CB_SCTLR_CFRE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, CFRE, v)
+#define SET_CB_SCTLR_CFIE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, CFIE, v)
+#define SET_CB_SCTLR_CFCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, CFCFG, v)
+#define SET_CB_SCTLR_HUPCF(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, HUPCF, v)
+#define SET_CB_SCTLR_WXN(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, WXN, v)
+#define SET_CB_SCTLR_UWXN(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, UWXN, v)
+#define SET_CB_SCTLR_ASIDPNE(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_SCTLR, ASIDPNE, v)
+#define SET_CB_SCTLR_TRANSIENTCFG(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_SCTLR, TRANSIENTCFG, v)
+#define SET_CB_SCTLR_MEMATTR(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_SCTLR, MEMATTR, v)
+#define SET_CB_SCTLR_MTCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, MTCFG, v)
+#define SET_CB_SCTLR_SHCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, SHCFG, v)
+#define SET_CB_SCTLR_RACFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, RACFG, v)
+#define SET_CB_SCTLR_WACFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, WACFG, v)
+#define SET_CB_SCTLR_NSCFG(b, c, v) SET_CONTEXT_FIELD(b, c, CB_SCTLR, NSCFG, v)
+
+#define GET_CB_SCTLR_M(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, M)
+#define GET_CB_SCTLR_TRE(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, TRE)
+#define GET_CB_SCTLR_AFE(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, AFE)
+#define GET_CB_SCTLR_AFFD(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, AFFD)
+#define GET_CB_SCTLR_E(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, E)
+#define GET_CB_SCTLR_CFRE(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, CFRE)
+#define GET_CB_SCTLR_CFIE(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, CFIE)
+#define GET_CB_SCTLR_CFCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, CFCFG)
+#define GET_CB_SCTLR_HUPCF(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, HUPCF)
+#define GET_CB_SCTLR_WXN(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, WXN)
+#define GET_CB_SCTLR_UWXN(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, UWXN)
+#define GET_CB_SCTLR_ASIDPNE(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_SCTLR, ASIDPNE)
+#define GET_CB_SCTLR_TRANSIENTCFG(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_SCTLR, TRANSIENTCFG)
+#define GET_CB_SCTLR_MEMATTR(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_SCTLR, MEMATTR)
+#define GET_CB_SCTLR_MTCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, MTCFG)
+#define GET_CB_SCTLR_SHCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, SHCFG)
+#define GET_CB_SCTLR_RACFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, RACFG)
+#define GET_CB_SCTLR_WACFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, WACFG)
+#define GET_CB_SCTLR_NSCFG(b, c) GET_CONTEXT_FIELD(b, c, CB_SCTLR, NSCFG)
+
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define SET_CB_TLBIASID_ASID(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_TLBIASID, ASID, v)
+
+#define GET_CB_TLBIASID_ASID(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_TLBIASID, ASID)
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define SET_CB_TLBIVA_ASID(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVA, ASID, v)
+#define SET_CB_TLBIVA_VA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVA, VA, v)
+
+#define GET_CB_TLBIVA_ASID(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVA, ASID)
+#define GET_CB_TLBIVA_VA(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVA, VA)
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define SET_CB_TLBIVAA_VA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVAA, VA, v)
+
+#define GET_CB_TLBIVAA_VA(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVAA, VA)
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define SET_CB_TLBIVAAL_VA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVAAL, VA, v)
+
+#define GET_CB_TLBIVAAL_VA(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVAAL, VA)
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define SET_CB_TLBIVAL_ASID(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_TLBIVAL, ASID, v)
+#define SET_CB_TLBIVAL_VA(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TLBIVAL, VA, v)
+
+#define GET_CB_TLBIVAL_ASID(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_TLBIVAL, ASID)
+#define GET_CB_TLBIVAL_VA(b, c) GET_CONTEXT_FIELD(b, c, CB_TLBIVAL, VA)
+
+/* TLB Status: CB_TLBSTATUS */
+#define SET_CB_TLBSTATUS_SACTIVE(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_TLBSTATUS, SACTIVE, v)
+
+#define GET_CB_TLBSTATUS_SACTIVE(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_TLBSTATUS, SACTIVE)
+
+/* Translation Table Base Control Register: CB_TTBCR */
+/* These are shared between VMSA and LPAE */
+#define GET_CB_TTBCR_EAE(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, EAE)
+#define SET_CB_TTBCR_EAE(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, EAE, v)
+
+#define SET_CB_TTBCR_NSCFG0(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG0, v)
+#define SET_CB_TTBCR_NSCFG1(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG1, v)
+
+#define GET_CB_TTBCR_NSCFG0(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG0)
+#define GET_CB_TTBCR_NSCFG1(b, c) \
+ GET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG1)
+
+#ifdef CONFIG_IOMMU_LPAE
+
+/* LPAE format */
+
+/* Translation Table Base Registers: CB_TTBR0, CB_TTBR1 */
+#define SET_TTBR0(b, c, v) SET_CTX_REG_Q(CB_TTBR0, (b), (c), (v))
+#define SET_TTBR1(b, c, v) SET_CTX_REG_Q(CB_TTBR1, (b), (c), (v))
+
+#define SET_CB_TTBR0_ASID(b, c, v) SET_CONTEXT_FIELD_Q(b, c, CB_TTBR0, ASID, v)
+#define SET_CB_TTBR0_ADDR(b, c, v) SET_CONTEXT_FIELD_Q(b, c, CB_TTBR0, ADDR, v)
+
+#define GET_CB_TTBR0_ASID(b, c) GET_CONTEXT_FIELD_Q(b, c, CB_TTBR0, ASID)
+#define GET_CB_TTBR0_ADDR(b, c) GET_CONTEXT_FIELD_Q(b, c, CB_TTBR0, ADDR)
+#define GET_CB_TTBR0(b, c) GET_CTX_REG_Q(CB_TTBR0, (b), (c))
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define SET_CB_TTBCR_T0SZ(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, T0SZ, v)
+#define SET_CB_TTBCR_T1SZ(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, T1SZ, v)
+#define SET_CB_TTBCR_EPD0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, EPD0, v)
+#define SET_CB_TTBCR_EPD1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, EPD1, v)
+#define SET_CB_TTBCR_IRGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, IRGN0, v)
+#define SET_CB_TTBCR_IRGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, IRGN1, v)
+#define SET_CB_TTBCR_ORGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, ORGN0, v)
+#define SET_CB_TTBCR_ORGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, ORGN1, v)
+#define SET_CB_TTBCR_NSCFG0(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG0, v)
+#define SET_CB_TTBCR_NSCFG1(b, c, v) \
+ SET_CONTEXT_FIELD(b, c, CB_TTBCR, NSCFG1, v)
+
+#define SET_CB_TTBCR_SH0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, SH0, v)
+#define SET_CB_TTBCR_SH1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, SH1, v)
+#define SET_CB_TTBCR_A1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, A1, v)
+
+#define GET_CB_TTBCR_T0SZ(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, T0SZ)
+#define GET_CB_TTBCR_T1SZ(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, T1SZ)
+#define GET_CB_TTBCR_EPD0(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, EPD0)
+#define GET_CB_TTBCR_EPD1(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, EPD1)
+#define GET_CB_TTBCR_IRGN0(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, IRGN0)
+#define GET_CB_TTBCR_IRGN1(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, IRGN1)
+#define GET_CB_TTBCR_ORGN0(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, ORGN0)
+#define GET_CB_TTBCR_ORGN1(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBCR, ORGN1)
+
+#define SET_CB_MAIR0(b, c, v) SET_CTX_REG(CB_MAIR0, (b), (c), (v))
+#define SET_CB_MAIR1(b, c, v) SET_CTX_REG(CB_MAIR1, (b), (c), (v))
+
+#define GET_CB_MAIR0(b, c) GET_CTX_REG(CB_MAIR0, (b), (c))
+#define GET_CB_MAIR1(b, c) GET_CTX_REG(CB_MAIR1, (b), (c))
+#else
+#define SET_TTBR0(b, c, v) SET_CTX_REG(CB_TTBR0, (b), (c), (v))
+#define SET_TTBR1(b, c, v) SET_CTX_REG(CB_TTBR1, (b), (c), (v))
+
+#define SET_CB_TTBCR_PD0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, PD0, v)
+#define SET_CB_TTBCR_PD1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBCR, PD1, v)
+
+#define SET_CB_TTBR0_IRGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN1, v)
+#define SET_CB_TTBR0_S(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, S, v)
+#define SET_CB_TTBR0_RGN(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, RGN, v)
+#define SET_CB_TTBR0_NOS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, NOS, v)
+#define SET_CB_TTBR0_IRGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN0, v)
+#define SET_CB_TTBR0_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR0, ADDR, v)
+
+#define GET_CB_TTBR0_IRGN1(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN1)
+#define GET_CB_TTBR0_S(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, S)
+#define GET_CB_TTBR0_RGN(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, RGN)
+#define GET_CB_TTBR0_NOS(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, NOS)
+#define GET_CB_TTBR0_IRGN0(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, IRGN0)
+#define GET_CB_TTBR0_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR0, ADDR)
+#endif
+
+/* Translation Table Base Register 1: CB_TTBR1 */
+#define SET_CB_TTBR1_IRGN1(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN1, v)
+#define SET_CB_TTBR1_S(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, S, v)
+#define SET_CB_TTBR1_RGN(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, RGN, v)
+#define SET_CB_TTBR1_NOS(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, NOS, v)
+#define SET_CB_TTBR1_IRGN0(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN0, v)
+#define SET_CB_TTBR1_ADDR(b, c, v) SET_CONTEXT_FIELD(b, c, CB_TTBR1, ADDR, v)
+
+#define GET_CB_TTBR1_IRGN1(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN1)
+#define GET_CB_TTBR1_S(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, S)
+#define GET_CB_TTBR1_RGN(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, RGN)
+#define GET_CB_TTBR1_NOS(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, NOS)
+#define GET_CB_TTBR1_IRGN0(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, IRGN0)
+#define GET_CB_TTBR1_ADDR(b, c) GET_CONTEXT_FIELD(b, c, CB_TTBR1, ADDR)
+
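+/*
+ * Usage sketch only: one plausible way a driver could program a stream
+ * match entry with the accessors above.  'base', 'idx', 'cb' and 'sid'
+ * are hypothetical names for the remapped register base, a free SMR/S2CR
+ * index, a context bank number and the stream ID to match:
+ *
+ *	SET_SMR_ID(base, idx, sid);
+ *	SET_SMR_MASK(base, idx, 0);
+ *	SET_SMR_VALID(base, idx, 1);
+ *	SET_S2CR_CBNDX(base, idx, cb);
+ *	SET_CB_SCTLR_M(base, cb, 1);
+ */
+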
+/* Global Register Space 0 */
+#define CR0 (0x0000)
+#define SCR1 (0x0004)
+#define CR2 (0x0008)
+#define ACR (0x0010)
+#define IDR0 (0x0020)
+#define IDR1 (0x0024)
+#define IDR2 (0x0028)
+#define IDR7 (0x003C)
+#define GFAR (0x0040)
+#define GFSR (0x0048)
+#define GFSRRESTORE (0x004C)
+#define GFSYNR0 (0x0050)
+#define GFSYNR1 (0x0054)
+#define GFSYNR2 (0x0058)
+#define TLBIVMID (0x0064)
+#define TLBIALLNSNH (0x0068)
+#define TLBIALLH (0x006C)
+#define TLBGSYNC (0x0070)
+#define TLBGSTATUS (0x0074)
+#define TLBIVAH (0x0078)
+#define GATS1UR (0x0100)
+#define GATS1UW (0x0108)
+#define GATS1PR (0x0110)
+#define GATS1PW (0x0118)
+#define GATS12UR (0x0120)
+#define GATS12UW (0x0128)
+#define GATS12PR (0x0130)
+#define GATS12PW (0x0138)
+#define GPAR (0x0180)
+#define GATSR (0x0188)
+#define NSCR0 (0x0400)
+#define NSCR2 (0x0408)
+#define NSACR (0x0410)
+#define NSGFAR (0x0440)
+#define NSGFSRRESTORE (0x044C)
+#define SMR (0x0800)
+#define S2CR (0x0C00)
+
+/* SMMU_LOCAL */
+#define SMMU_INTR_SEL_NS (0x2000)
+
+/* Global Register Space 1 */
+#define CBAR (0x1000)
+#define CBFRSYNRA (0x1400)
+
+/* Implementation defined Register Space */
+#define MICRO_MMU_CTRL (0x2000)
+#define PREDICTIONDIS0 (0x204C)
+#define PREDICTIONDIS1 (0x2050)
+#define S1L1BFBLP0 (0x215C)
+
+/* Performance Monitoring Register Space */
+#define PMEVCNTR_N (0x3000)
+#define PMEVTYPER_N (0x3400)
+#define PMCGCR_N (0x3800)
+#define PMCGSMR_N (0x3A00)
+#define PMCNTENSET_N (0x3C00)
+#define PMCNTENCLR_N (0x3C20)
+#define PMINTENSET_N (0x3C40)
+#define PMINTENCLR_N (0x3C60)
+#define PMOVSCLR_N (0x3C80)
+#define PMOVSSET_N (0x3CC0)
+#define PMCFGR (0x3E00)
+#define PMCR (0x3E04)
+#define PMCEID0 (0x3E20)
+#define PMCEID1 (0x3E24)
+#define PMAUTHSTATUS (0x3FB8)
+#define PMDEVTYPE (0x3FCC)
+
+/* Secure Status Determination Address Space */
+#define SSDR_N (0x4000)
+
+/* Stage 1 Context Bank Format */
+#define CB_SCTLR (0x000)
+#define CB_ACTLR (0x004)
+#define CB_RESUME (0x008)
+#define CB_TTBR0 (0x020)
+#define CB_TTBR1 (0x028)
+#define CB_TTBCR (0x030)
+#define CB_CONTEXTIDR (0x034)
+#define CB_PRRR (0x038)
+#define CB_MAIR0 (0x038)
+#define CB_NMRR (0x03C)
+#define CB_MAIR1 (0x03C)
+#define CB_PAR (0x050)
+#define CB_FSR (0x058)
+#define CB_FSRRESTORE (0x05C)
+#define CB_FAR (0x060)
+#define CB_FSYNR0 (0x068)
+#define CB_FSYNR1 (0x06C)
+#define CB_TLBIVA (0x600)
+#define CB_TLBIVAA (0x608)
+#define CB_TLBIASID (0x610)
+#define CB_TLBIALL (0x618)
+#define CB_TLBIVAL (0x620)
+#define CB_TLBIVAAL (0x628)
+#define CB_TLBSYNC (0x7F0)
+#define CB_TLBSTATUS (0x7F4)
+#define CB_ATS1PR (0x800)
+#define CB_ATS1PW (0x808)
+#define CB_ATS1UR (0x810)
+#define CB_ATS1UW (0x818)
+#define CB_ATSR (0x8F0)
+#define CB_PMXEVCNTR_N (0xE00)
+#define CB_PMXEVTYPER_N (0xE80)
+#define CB_PMCFGR (0xF00)
+#define CB_PMCR (0xF04)
+#define CB_PMCEID0 (0xF20)
+#define CB_PMCEID1 (0xF24)
+#define CB_PMCNTENSET (0xF40)
+#define CB_PMCNTENCLR (0xF44)
+#define CB_PMINTENSET (0xF48)
+#define CB_PMINTENCLR (0xF4C)
+#define CB_PMOVSCLR (0xF50)
+#define CB_PMOVSSET (0xF58)
+#define CB_PMAUTHSTATUS (0xFB8)
+
+/* Global Register Fields */
+/* Configuration Register: CR0 */
+#define CR0_NSCFG (CR0_NSCFG_MASK << CR0_NSCFG_SHIFT)
+#define CR0_WACFG (CR0_WACFG_MASK << CR0_WACFG_SHIFT)
+#define CR0_RACFG (CR0_RACFG_MASK << CR0_RACFG_SHIFT)
+#define CR0_SHCFG (CR0_SHCFG_MASK << CR0_SHCFG_SHIFT)
+#define CR0_SMCFCFG (CR0_SMCFCFG_MASK << CR0_SMCFCFG_SHIFT)
+#define CR0_MTCFG (CR0_MTCFG_MASK << CR0_MTCFG_SHIFT)
+#define CR0_MEMATTR (CR0_MEMATTR_MASK << CR0_MEMATTR_SHIFT)
+#define CR0_BSU (CR0_BSU_MASK << CR0_BSU_SHIFT)
+#define CR0_FB (CR0_FB_MASK << CR0_FB_SHIFT)
+#define CR0_PTM (CR0_PTM_MASK << CR0_PTM_SHIFT)
+#define CR0_VMIDPNE (CR0_VMIDPNE_MASK << CR0_VMIDPNE_SHIFT)
+#define CR0_USFCFG (CR0_USFCFG_MASK << CR0_USFCFG_SHIFT)
+#define CR0_GSE (CR0_GSE_MASK << CR0_GSE_SHIFT)
+#define CR0_STALLD (CR0_STALLD_MASK << CR0_STALLD_SHIFT)
+#define CR0_TRANSIENTCFG (CR0_TRANSIENTCFG_MASK << CR0_TRANSIENTCFG_SHIFT)
+#define CR0_GCFGFIE (CR0_GCFGFIE_MASK << CR0_GCFGFIE_SHIFT)
+#define CR0_GCFGFRE (CR0_GCFGFRE_MASK << CR0_GCFGFRE_SHIFT)
+#define CR0_GFIE (CR0_GFIE_MASK << CR0_GFIE_SHIFT)
+#define CR0_GFRE (CR0_GFRE_MASK << CR0_GFRE_SHIFT)
+#define CR0_CLIENTPD (CR0_CLIENTPD_MASK << CR0_CLIENTPD_SHIFT)
+
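+/*
+ * Illustrative only: the CR0_* field defines above are (MASK << SHIFT)
+ * values, so they can also be used with plain readl/writel on the global
+ * register space ('base' is a hypothetical remapped register base):
+ *
+ *	u32 cr0 = readl(base + CR0);
+ *	cr0 &= ~CR0_CLIENTPD;
+ *	cr0 |= CR0_GFRE | CR0_GFIE | CR0_GCFGFRE | CR0_GCFGFIE;
+ *	writel(cr0, base + CR0);
+ */
+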
+/* Configuration Register: CR2 */
+#define CR2_BPVMID (CR2_BPVMID_MASK << CR2_BPVMID_SHIFT)
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define GATS1PR_ADDR (GATS1PR_ADDR_MASK << GATS1PR_ADDR_SHIFT)
+#define GATS1PR_NDX (GATS1PR_NDX_MASK << GATS1PR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define GATS1PW_ADDR (GATS1PW_ADDR_MASK << GATS1PW_ADDR_SHIFT)
+#define GATS1PW_NDX (GATS1PW_NDX_MASK << GATS1PW_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define GATS1UR_ADDR (GATS1UR_ADDR_MASK << GATS1UR_ADDR_SHIFT)
+#define GATS1UR_NDX (GATS1UR_NDX_MASK << GATS1UR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define GATS1UW_ADDR (GATS1UW_ADDR_MASK << GATS1UW_ADDR_SHIFT)
+#define GATS1UW_NDX (GATS1UW_NDX_MASK << GATS1UW_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define GATS12PR_ADDR (GATS12PR_ADDR_MASK << GATS12PR_ADDR_SHIFT)
+#define GATS12PR_NDX (GATS12PR_NDX_MASK << GATS12PR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define GATS12PW_ADDR (GATS12PW_ADDR_MASK << GATS12PW_ADDR_SHIFT)
+#define GATS12PW_NDX (GATS12PW_NDX_MASK << GATS12PW_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define GATS12UR_ADDR (GATS12UR_ADDR_MASK << GATS12UR_ADDR_SHIFT)
+#define GATS12UR_NDX (GATS12UR_NDX_MASK << GATS12UR_NDX_SHIFT)
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define GATS12UW_ADDR (GATS12UW_ADDR_MASK << GATS12UW_ADDR_SHIFT)
+#define GATS12UW_NDX (GATS12UW_NDX_MASK << GATS12UW_NDX_SHIFT)
+
+/* Global Address Translation Status Register: GATSR */
+#define GATSR_ACTIVE (GATSR_ACTIVE_MASK << GATSR_ACTIVE_SHIFT)
+
+/* Global Fault Address Register: GFAR */
+#define GFAR_FADDR (GFAR_FADDR_MASK << GFAR_FADDR_SHIFT)
+
+/* Global Fault Status Register: GFSR */
+#define GFSR_ICF (GFSR_ICF_MASK << GFSR_ICF_SHIFT)
+#define GFSR_USF (GFSR_USF_MASK << GFSR_USF_SHIFT)
+#define GFSR_SMCF (GFSR_SMCF_MASK << GFSR_SMCF_SHIFT)
+#define GFSR_UCBF (GFSR_UCBF_MASK << GFSR_UCBF_SHIFT)
+#define GFSR_UCIF (GFSR_UCIF_MASK << GFSR_UCIF_SHIFT)
+#define GFSR_CAF (GFSR_CAF_MASK << GFSR_CAF_SHIFT)
+#define GFSR_EF (GFSR_EF_MASK << GFSR_EF_SHIFT)
+#define GFSR_PF (GFSR_PF_MASK << GFSR_PF_SHIFT)
+#define GFSR_MULTI (GFSR_MULTI_MASK << GFSR_MULTI_SHIFT)
+
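+/*
+ * Fault decode sketch ('gfsr' is a hypothetical raw value read back from
+ * the GFSR register): each define above is a single status bit, so the
+ * value can be tested directly, e.g.:
+ *
+ *	if (gfsr & GFSR_USF)
+ *		pr_err("SMMU: unmatched stream fault\n");
+ *	if (gfsr & GFSR_MULTI)
+ *		pr_err("SMMU: multiple global faults\n");
+ */
+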
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define GFSYNR0_NESTED (GFSYNR0_NESTED_MASK << GFSYNR0_NESTED_SHIFT)
+#define GFSYNR0_WNR (GFSYNR0_WNR_MASK << GFSYNR0_WNR_SHIFT)
+#define GFSYNR0_PNU (GFSYNR0_PNU_MASK << GFSYNR0_PNU_SHIFT)
+#define GFSYNR0_IND (GFSYNR0_IND_MASK << GFSYNR0_IND_SHIFT)
+#define GFSYNR0_NSSTATE (GFSYNR0_NSSTATE_MASK << GFSYNR0_NSSTATE_SHIFT)
+#define GFSYNR0_NSATTR (GFSYNR0_NSATTR_MASK << GFSYNR0_NSATTR_SHIFT)
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define GFSYNR1_SID (GFSYNR1_SID_MASK << GFSYNR1_SID_SHIFT)
+
+/* Global Physical Address Register: GPAR */
+#define GPAR_F (GPAR_F_MASK << GPAR_F_SHIFT)
+#define GPAR_SS (GPAR_SS_MASK << GPAR_SS_SHIFT)
+#define GPAR_OUTER (GPAR_OUTER_MASK << GPAR_OUTER_SHIFT)
+#define GPAR_INNER (GPAR_INNER_MASK << GPAR_INNER_SHIFT)
+#define GPAR_SH (GPAR_SH_MASK << GPAR_SH_SHIFT)
+#define GPAR_NS (GPAR_NS_MASK << GPAR_NS_SHIFT)
+#define GPAR_NOS (GPAR_NOS_MASK << GPAR_NOS_SHIFT)
+#define GPAR_PA (GPAR_PA_MASK << GPAR_PA_SHIFT)
+#define GPAR_TF (GPAR_TF_MASK << GPAR_TF_SHIFT)
+#define GPAR_AFF (GPAR_AFF_MASK << GPAR_AFF_SHIFT)
+#define GPAR_PF (GPAR_PF_MASK << GPAR_PF_SHIFT)
+#define GPAR_EF (GPAR_EF_MASK << GPAR_EF_SHIFT)
+#define GPAR_TLBMCF (GPAR_TLBMCF_MASK << GPAR_TLBMCF_SHIFT)
+#define GPAR_TLBLKF (GPAR_TLBLKF_MASK << GPAR_TLBLKF_SHIFT)
+#define GPAR_UCBF (GPAR_UCBF_MASK << GPAR_UCBF_SHIFT)
+
+/* Identification Register: IDR0 */
+#define IDR0_NUMSMRG (IDR0_NUMSMRG_MASK << IDR0_NUMSMRG_SHIFT)
+#define IDR0_NUMSIDB (IDR0_NUMSIDB_MASK << IDR0_NUMSIDB_SHIFT)
+#define IDR0_BTM (IDR0_BTM_MASK << IDR0_BTM_SHIFT)
+#define IDR0_CTTW (IDR0_CTTW_MASK << IDR0_CTTW_SHIFT)
+#define IDR0_NUMIRPT (IDR0_NUMIRPT_MASK << IDR0_NUMIRPT_SHIFT)
+#define IDR0_PTFS (IDR0_PTFS_MASK << IDR0_PTFS_SHIFT)
+#define IDR0_SMS (IDR0_SMS_MASK << IDR0_SMS_SHIFT)
+#define IDR0_NTS (IDR0_NTS_MASK << IDR0_NTS_SHIFT)
+#define IDR0_S2TS (IDR0_S2TS_MASK << IDR0_S2TS_SHIFT)
+#define IDR0_S1TS (IDR0_S1TS_MASK << IDR0_S1TS_SHIFT)
+#define IDR0_SES (IDR0_SES_MASK << IDR0_SES_SHIFT)
+
+/* Identification Register: IDR1 */
+#define IDR1_NUMCB (IDR1_NUMCB_MASK << IDR1_NUMCB_SHIFT)
+#define IDR1_NUMSSDNDXB (IDR1_NUMSSDNDXB_MASK << IDR1_NUMSSDNDXB_SHIFT)
+#define IDR1_SSDTP (IDR1_SSDTP_MASK << IDR1_SSDTP_SHIFT)
+#define IDR1_SMCD (IDR1_SMCD_MASK << IDR1_SMCD_SHIFT)
+#define IDR1_NUMS2CB (IDR1_NUMS2CB_MASK << IDR1_NUMS2CB_SHIFT)
+#define IDR1_NUMPAGENDXB (IDR1_NUMPAGENDXB_MASK << IDR1_NUMPAGENDXB_SHIFT)
+#define IDR1_PAGESIZE (IDR1_PAGESIZE_MASK << IDR1_PAGESIZE_SHIFT)
+
+/* Identification Register: IDR2 */
+#define IDR2_IAS (IDR2_IAS_MASK << IDR2_IAS_SHIFT)
+#define IDR2_OAS (IDR2_OAS_MASK << IDR2_OAS_SHIFT)
+
+/* Identification Register: IDR7 */
+#define IDR7_MINOR (IDR7_MINOR_MASK << IDR7_MINOR_SHIFT)
+#define IDR7_MAJOR (IDR7_MAJOR_MASK << IDR7_MAJOR_SHIFT)
+
+/* Stream to Context Register: S2CR */
+#define S2CR_CBNDX (S2CR_CBNDX_MASK << S2CR_CBNDX_SHIFT)
+#define S2CR_SHCFG (S2CR_SHCFG_MASK << S2CR_SHCFG_SHIFT)
+#define S2CR_MTCFG (S2CR_MTCFG_MASK << S2CR_MTCFG_SHIFT)
+#define S2CR_MEMATTR (S2CR_MEMATTR_MASK << S2CR_MEMATTR_SHIFT)
+#define S2CR_TYPE (S2CR_TYPE_MASK << S2CR_TYPE_SHIFT)
+#define S2CR_NSCFG (S2CR_NSCFG_MASK << S2CR_NSCFG_SHIFT)
+#define S2CR_RACFG (S2CR_RACFG_MASK << S2CR_RACFG_SHIFT)
+#define S2CR_WACFG (S2CR_WACFG_MASK << S2CR_WACFG_SHIFT)
+#define S2CR_PRIVCFG (S2CR_PRIVCFG_MASK << S2CR_PRIVCFG_SHIFT)
+#define S2CR_INSTCFG (S2CR_INSTCFG_MASK << S2CR_INSTCFG_SHIFT)
+#define S2CR_TRANSIENTCFG (S2CR_TRANSIENTCFG_MASK << S2CR_TRANSIENTCFG_SHIFT)
+#define S2CR_VMID (S2CR_VMID_MASK << S2CR_VMID_SHIFT)
+#define S2CR_BSU (S2CR_BSU_MASK << S2CR_BSU_SHIFT)
+#define S2CR_FB (S2CR_FB_MASK << S2CR_FB_SHIFT)
+
+/* Stream Match Register: SMR */
+#define SMR_ID (SMR_ID_MASK << SMR_ID_SHIFT)
+#define SMR_MASK (SMR_MASK_MASK << SMR_MASK_SHIFT)
+#define SMR_VALID (SMR_VALID_MASK << SMR_VALID_SHIFT)
+
+/* Global TLB Status: TLBGSTATUS */
+#define TLBGSTATUS_GSACTIVE (TLBGSTATUS_GSACTIVE_MASK << \
+ TLBGSTATUS_GSACTIVE_SHIFT)
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define TLBIVAH_ADDR (TLBIVAH_ADDR_MASK << TLBIVAH_ADDR_SHIFT)
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define TLBIVMID_VMID (TLBIVMID_VMID_MASK << TLBIVMID_VMID_SHIFT)
+
+/* Context Bank Attribute Register: CBAR */
+#define CBAR_VMID (CBAR_VMID_MASK << CBAR_VMID_SHIFT)
+#define CBAR_CBNDX (CBAR_CBNDX_MASK << CBAR_CBNDX_SHIFT)
+#define CBAR_BPSHCFG (CBAR_BPSHCFG_MASK << CBAR_BPSHCFG_SHIFT)
+#define CBAR_HYPC (CBAR_HYPC_MASK << CBAR_HYPC_SHIFT)
+#define CBAR_FB (CBAR_FB_MASK << CBAR_FB_SHIFT)
+#define CBAR_MEMATTR (CBAR_MEMATTR_MASK << CBAR_MEMATTR_SHIFT)
+#define CBAR_TYPE (CBAR_TYPE_MASK << CBAR_TYPE_SHIFT)
+#define CBAR_BSU (CBAR_BSU_MASK << CBAR_BSU_SHIFT)
+#define CBAR_RACFG (CBAR_RACFG_MASK << CBAR_RACFG_SHIFT)
+#define CBAR_WACFG (CBAR_WACFG_MASK << CBAR_WACFG_SHIFT)
+#define CBAR_IRPTNDX (CBAR_IRPTNDX_MASK << CBAR_IRPTNDX_SHIFT)
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
+#define CBFRSYNRA_SID (CBFRSYNRA_SID_MASK << CBFRSYNRA_SID_SHIFT)
+
+/* Performance Monitoring Register Fields */
+
+/* Stage 1 Context Bank Format Fields */
+/* Auxiliary Control Register: CB_ACTLR */
+#define CB_ACTLR_REQPRIORITY \
+ (CB_ACTLR_REQPRIORITY_MASK << CB_ACTLR_REQPRIORITY_SHIFT)
+#define CB_ACTLR_REQPRIORITYCFG \
+ (CB_ACTLR_REQPRIORITYCFG_MASK << CB_ACTLR_REQPRIORITYCFG_SHIFT)
+#define CB_ACTLR_PRIVCFG (CB_ACTLR_PRIVCFG_MASK << CB_ACTLR_PRIVCFG_SHIFT)
+#define CB_ACTLR_BPRCOSH (CB_ACTLR_BPRCOSH_MASK << CB_ACTLR_BPRCOSH_SHIFT)
+#define CB_ACTLR_BPRCISH (CB_ACTLR_BPRCISH_MASK << CB_ACTLR_BPRCISH_SHIFT)
+#define CB_ACTLR_BPRCNSH (CB_ACTLR_BPRCNSH_MASK << CB_ACTLR_BPRCNSH_SHIFT)
+
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define CB_ATS1PR_ADDR (CB_ATS1PR_ADDR_MASK << CB_ATS1PR_ADDR_SHIFT)
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define CB_ATS1PW_ADDR (CB_ATS1PW_ADDR_MASK << CB_ATS1PW_ADDR_SHIFT)
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define CB_ATS1UR_ADDR (CB_ATS1UR_ADDR_MASK << CB_ATS1UR_ADDR_SHIFT)
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define CB_ATS1UW_ADDR (CB_ATS1UW_ADDR_MASK << CB_ATS1UW_ADDR_SHIFT)
+
+/* Address Translation Status Register: CB_ATSR */
+#define CB_ATSR_ACTIVE (CB_ATSR_ACTIVE_MASK << CB_ATSR_ACTIVE_SHIFT)
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define CB_CONTEXTIDR_ASID (CB_CONTEXTIDR_ASID_MASK << \
+ CB_CONTEXTIDR_ASID_SHIFT)
+#define CB_CONTEXTIDR_PROCID (CB_CONTEXTIDR_PROCID_MASK << \
+ CB_CONTEXTIDR_PROCID_SHIFT)
+
+/* Fault Address Register: CB_FAR */
+#define CB_FAR_FADDR (CB_FAR_FADDR_MASK << CB_FAR_FADDR_SHIFT)
+
+/* Fault Status Register: CB_FSR */
+#define CB_FSR_TF (CB_FSR_TF_MASK << CB_FSR_TF_SHIFT)
+#define CB_FSR_AFF (CB_FSR_AFF_MASK << CB_FSR_AFF_SHIFT)
+#define CB_FSR_PF (CB_FSR_PF_MASK << CB_FSR_PF_SHIFT)
+#define CB_FSR_EF (CB_FSR_EF_MASK << CB_FSR_EF_SHIFT)
+#define CB_FSR_TLBMCF (CB_FSR_TLBMCF_MASK << CB_FSR_TLBMCF_SHIFT)
+#define CB_FSR_TLBLKF (CB_FSR_TLBLKF_MASK << CB_FSR_TLBLKF_SHIFT)
+#define CB_FSR_SS (CB_FSR_SS_MASK << CB_FSR_SS_SHIFT)
+#define CB_FSR_MULTI (CB_FSR_MULTI_MASK << CB_FSR_MULTI_SHIFT)
+
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define CB_FSYNR0_PLVL (CB_FSYNR0_PLVL_MASK << CB_FSYNR0_PLVL_SHIFT)
+#define CB_FSYNR0_S1PTWF (CB_FSYNR0_S1PTWF_MASK << CB_FSYNR0_S1PTWF_SHIFT)
+#define CB_FSYNR0_WNR (CB_FSYNR0_WNR_MASK << CB_FSYNR0_WNR_SHIFT)
+#define CB_FSYNR0_PNU (CB_FSYNR0_PNU_MASK << CB_FSYNR0_PNU_SHIFT)
+#define CB_FSYNR0_IND (CB_FSYNR0_IND_MASK << CB_FSYNR0_IND_SHIFT)
+#define CB_FSYNR0_NSSTATE (CB_FSYNR0_NSSTATE_MASK << CB_FSYNR0_NSSTATE_SHIFT)
+#define CB_FSYNR0_NSATTR (CB_FSYNR0_NSATTR_MASK << CB_FSYNR0_NSATTR_SHIFT)
+#define CB_FSYNR0_ATOF (CB_FSYNR0_ATOF_MASK << CB_FSYNR0_ATOF_SHIFT)
+#define CB_FSYNR0_PTWF (CB_FSYNR0_PTWF_MASK << CB_FSYNR0_PTWF_SHIFT)
+#define CB_FSYNR0_AFR (CB_FSYNR0_AFR_MASK << CB_FSYNR0_AFR_SHIFT)
+#define CB_FSYNR0_S1CBNDX (CB_FSYNR0_S1CBNDX_MASK << CB_FSYNR0_S1CBNDX_SHIFT)
+
+/* Normal Memory Remap Register: CB_NMRR */
+#define CB_NMRR_IR0 (CB_NMRR_IR0_MASK << CB_NMRR_IR0_SHIFT)
+#define CB_NMRR_IR1 (CB_NMRR_IR1_MASK << CB_NMRR_IR1_SHIFT)
+#define CB_NMRR_IR2 (CB_NMRR_IR2_MASK << CB_NMRR_IR2_SHIFT)
+#define CB_NMRR_IR3 (CB_NMRR_IR3_MASK << CB_NMRR_IR3_SHIFT)
+#define CB_NMRR_IR4 (CB_NMRR_IR4_MASK << CB_NMRR_IR4_SHIFT)
+#define CB_NMRR_IR5 (CB_NMRR_IR5_MASK << CB_NMRR_IR5_SHIFT)
+#define CB_NMRR_IR6 (CB_NMRR_IR6_MASK << CB_NMRR_IR6_SHIFT)
+#define CB_NMRR_IR7 (CB_NMRR_IR7_MASK << CB_NMRR_IR7_SHIFT)
+#define CB_NMRR_OR0 (CB_NMRR_OR0_MASK << CB_NMRR_OR0_SHIFT)
+#define CB_NMRR_OR1 (CB_NMRR_OR1_MASK << CB_NMRR_OR1_SHIFT)
+#define CB_NMRR_OR2 (CB_NMRR_OR2_MASK << CB_NMRR_OR2_SHIFT)
+#define CB_NMRR_OR3 (CB_NMRR_OR3_MASK << CB_NMRR_OR3_SHIFT)
+#define CB_NMRR_OR4 (CB_NMRR_OR4_MASK << CB_NMRR_OR4_SHIFT)
+#define CB_NMRR_OR5 (CB_NMRR_OR5_MASK << CB_NMRR_OR5_SHIFT)
+#define CB_NMRR_OR6 (CB_NMRR_OR6_MASK << CB_NMRR_OR6_SHIFT)
+#define CB_NMRR_OR7 (CB_NMRR_OR7_MASK << CB_NMRR_OR7_SHIFT)
+
+/* Physical Address Register: CB_PAR */
+#define CB_PAR_F (CB_PAR_F_MASK << CB_PAR_F_SHIFT)
+#define CB_PAR_SS (CB_PAR_SS_MASK << CB_PAR_SS_SHIFT)
+#define CB_PAR_OUTER (CB_PAR_OUTER_MASK << CB_PAR_OUTER_SHIFT)
+#define CB_PAR_INNER (CB_PAR_INNER_MASK << CB_PAR_INNER_SHIFT)
+#define CB_PAR_SH (CB_PAR_SH_MASK << CB_PAR_SH_SHIFT)
+#define CB_PAR_NS (CB_PAR_NS_MASK << CB_PAR_NS_SHIFT)
+#define CB_PAR_NOS (CB_PAR_NOS_MASK << CB_PAR_NOS_SHIFT)
+#define CB_PAR_PA (CB_PAR_PA_MASK << CB_PAR_PA_SHIFT)
+#define CB_PAR_TF (CB_PAR_TF_MASK << CB_PAR_TF_SHIFT)
+#define CB_PAR_AFF (CB_PAR_AFF_MASK << CB_PAR_AFF_SHIFT)
+#define CB_PAR_PF (CB_PAR_PF_MASK << CB_PAR_PF_SHIFT)
+#define CB_PAR_EF (CB_PAR_EF_MASK << CB_PAR_EF_SHIFT)
+#define CB_PAR_TLBMCF (CB_PAR_TLBMCF_MASK << CB_PAR_TLBMCF_SHIFT)
+#define CB_PAR_TLBLKF (CB_PAR_TLBLKF_MASK << CB_PAR_TLBLKF_SHIFT)
+#define CB_PAR_ATOT (CB_PAR_ATOT_MASK << CB_PAR_ATOT_SHIFT)
+#define CB_PAR_PLVL (CB_PAR_PLVL_MASK << CB_PAR_PLVL_SHIFT)
+#define CB_PAR_STAGE (CB_PAR_STAGE_MASK << CB_PAR_STAGE_SHIFT)
+
+/* Primary Region Remap Register: CB_PRRR */
+#define CB_PRRR_TR0 (CB_PRRR_TR0_MASK << CB_PRRR_TR0_SHIFT)
+#define CB_PRRR_TR1 (CB_PRRR_TR1_MASK << CB_PRRR_TR1_SHIFT)
+#define CB_PRRR_TR2 (CB_PRRR_TR2_MASK << CB_PRRR_TR2_SHIFT)
+#define CB_PRRR_TR3 (CB_PRRR_TR3_MASK << CB_PRRR_TR3_SHIFT)
+#define CB_PRRR_TR4 (CB_PRRR_TR4_MASK << CB_PRRR_TR4_SHIFT)
+#define CB_PRRR_TR5 (CB_PRRR_TR5_MASK << CB_PRRR_TR5_SHIFT)
+#define CB_PRRR_TR6 (CB_PRRR_TR6_MASK << CB_PRRR_TR6_SHIFT)
+#define CB_PRRR_TR7 (CB_PRRR_TR7_MASK << CB_PRRR_TR7_SHIFT)
+#define CB_PRRR_DS0 (CB_PRRR_DS0_MASK << CB_PRRR_DS0_SHIFT)
+#define CB_PRRR_DS1 (CB_PRRR_DS1_MASK << CB_PRRR_DS1_SHIFT)
+#define CB_PRRR_NS0 (CB_PRRR_NS0_MASK << CB_PRRR_NS0_SHIFT)
+#define CB_PRRR_NS1 (CB_PRRR_NS1_MASK << CB_PRRR_NS1_SHIFT)
+#define CB_PRRR_NOS0 (CB_PRRR_NOS0_MASK << CB_PRRR_NOS0_SHIFT)
+#define CB_PRRR_NOS1 (CB_PRRR_NOS1_MASK << CB_PRRR_NOS1_SHIFT)
+#define CB_PRRR_NOS2 (CB_PRRR_NOS2_MASK << CB_PRRR_NOS2_SHIFT)
+#define CB_PRRR_NOS3 (CB_PRRR_NOS3_MASK << CB_PRRR_NOS3_SHIFT)
+#define CB_PRRR_NOS4 (CB_PRRR_NOS4_MASK << CB_PRRR_NOS4_SHIFT)
+#define CB_PRRR_NOS5 (CB_PRRR_NOS5_MASK << CB_PRRR_NOS5_SHIFT)
+#define CB_PRRR_NOS6 (CB_PRRR_NOS6_MASK << CB_PRRR_NOS6_SHIFT)
+#define CB_PRRR_NOS7 (CB_PRRR_NOS7_MASK << CB_PRRR_NOS7_SHIFT)
+
+/* Transaction Resume: CB_RESUME */
+#define CB_RESUME_TNR (CB_RESUME_TNR_MASK << CB_RESUME_TNR_SHIFT)
+
+/* System Control Register: CB_SCTLR */
+#define CB_SCTLR_M (CB_SCTLR_M_MASK << CB_SCTLR_M_SHIFT)
+#define CB_SCTLR_TRE (CB_SCTLR_TRE_MASK << CB_SCTLR_TRE_SHIFT)
+#define CB_SCTLR_AFE (CB_SCTLR_AFE_MASK << CB_SCTLR_AFE_SHIFT)
+#define CB_SCTLR_AFFD (CB_SCTLR_AFFD_MASK << CB_SCTLR_AFFD_SHIFT)
+#define CB_SCTLR_E (CB_SCTLR_E_MASK << CB_SCTLR_E_SHIFT)
+#define CB_SCTLR_CFRE (CB_SCTLR_CFRE_MASK << CB_SCTLR_CFRE_SHIFT)
+#define CB_SCTLR_CFIE (CB_SCTLR_CFIE_MASK << CB_SCTLR_CFIE_SHIFT)
+#define CB_SCTLR_CFCFG (CB_SCTLR_CFCFG_MASK << CB_SCTLR_CFCFG_SHIFT)
+#define CB_SCTLR_HUPCF (CB_SCTLR_HUPCF_MASK << CB_SCTLR_HUPCF_SHIFT)
+#define CB_SCTLR_WXN (CB_SCTLR_WXN_MASK << CB_SCTLR_WXN_SHIFT)
+#define CB_SCTLR_UWXN (CB_SCTLR_UWXN_MASK << CB_SCTLR_UWXN_SHIFT)
+#define CB_SCTLR_ASIDPNE (CB_SCTLR_ASIDPNE_MASK << CB_SCTLR_ASIDPNE_SHIFT)
+#define CB_SCTLR_TRANSIENTCFG (CB_SCTLR_TRANSIENTCFG_MASK << \
+ CB_SCTLR_TRANSIENTCFG_SHIFT)
+#define CB_SCTLR_MEMATTR (CB_SCTLR_MEMATTR_MASK << CB_SCTLR_MEMATTR_SHIFT)
+#define CB_SCTLR_MTCFG (CB_SCTLR_MTCFG_MASK << CB_SCTLR_MTCFG_SHIFT)
+#define CB_SCTLR_SHCFG (CB_SCTLR_SHCFG_MASK << CB_SCTLR_SHCFG_SHIFT)
+#define CB_SCTLR_RACFG (CB_SCTLR_RACFG_MASK << CB_SCTLR_RACFG_SHIFT)
+#define CB_SCTLR_WACFG (CB_SCTLR_WACFG_MASK << CB_SCTLR_WACFG_SHIFT)
+#define CB_SCTLR_NSCFG (CB_SCTLR_NSCFG_MASK << CB_SCTLR_NSCFG_SHIFT)
+
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define CB_TLBIASID_ASID (CB_TLBIASID_ASID_MASK << CB_TLBIASID_ASID_SHIFT)
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define CB_TLBIVA_ASID (CB_TLBIVA_ASID_MASK << CB_TLBIVA_ASID_SHIFT)
+#define CB_TLBIVA_VA (CB_TLBIVA_VA_MASK << CB_TLBIVA_VA_SHIFT)
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define CB_TLBIVAA_VA (CB_TLBIVAA_VA_MASK << CB_TLBIVAA_VA_SHIFT)
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define CB_TLBIVAAL_VA (CB_TLBIVAAL_VA_MASK << CB_TLBIVAAL_VA_SHIFT)
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define CB_TLBIVAL_ASID (CB_TLBIVAL_ASID_MASK << CB_TLBIVAL_ASID_SHIFT)
+#define CB_TLBIVAL_VA (CB_TLBIVAL_VA_MASK << CB_TLBIVAL_VA_SHIFT)
+
+/* TLB Status: CB_TLBSTATUS */
+#define CB_TLBSTATUS_SACTIVE (CB_TLBSTATUS_SACTIVE_MASK << \
+ CB_TLBSTATUS_SACTIVE_SHIFT)
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define CB_TTBCR_EAE (CB_TTBCR_EAE_MASK << CB_TTBCR_EAE_SHIFT)
+
+#define CB_TTBR0_ADDR (CB_TTBR0_ADDR_MASK << CB_TTBR0_ADDR_SHIFT)
+
+#ifdef CONFIG_IOMMU_LPAE
+/* Translation Table Base Registers: CB_TTBR0, CB_TTBR1 */
+#define CB_TTBR0_ASID (CB_TTBR0_ASID_MASK << CB_TTBR0_ASID_SHIFT)
+#define CB_TTBR1_ASID (CB_TTBR1_ASID_MASK << CB_TTBR1_ASID_SHIFT)
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define CB_TTBCR_T0SZ (CB_TTBCR_T0SZ_MASK << CB_TTBCR_T0SZ_SHIFT)
+#define CB_TTBCR_T1SZ (CB_TTBCR_T1SZ_MASK << CB_TTBCR_T1SZ_SHIFT)
+#define CB_TTBCR_EPD0 (CB_TTBCR_EPD0_MASK << CB_TTBCR_EPD0_SHIFT)
+#define CB_TTBCR_EPD1 (CB_TTBCR_EPD1_MASK << CB_TTBCR_EPD1_SHIFT)
+#define CB_TTBCR_IRGN0 (CB_TTBCR_IRGN0_MASK << CB_TTBCR_IRGN0_SHIFT)
+#define CB_TTBCR_IRGN1 (CB_TTBCR_IRGN1_MASK << CB_TTBCR_IRGN1_SHIFT)
+#define CB_TTBCR_ORGN0 (CB_TTBCR_ORGN0_MASK << CB_TTBCR_ORGN0_SHIFT)
+#define CB_TTBCR_ORGN1 (CB_TTBCR_ORGN1_MASK << CB_TTBCR_ORGN1_SHIFT)
+#define CB_TTBCR_NSCFG0 (CB_TTBCR_NSCFG0_MASK << CB_TTBCR_NSCFG0_SHIFT)
+#define CB_TTBCR_NSCFG1 (CB_TTBCR_NSCFG1_MASK << CB_TTBCR_NSCFG1_SHIFT)
+#define CB_TTBCR_SH0 (CB_TTBCR_SH0_MASK << CB_TTBCR_SH0_SHIFT)
+#define CB_TTBCR_SH1 (CB_TTBCR_SH1_MASK << CB_TTBCR_SH1_SHIFT)
+#define CB_TTBCR_A1 (CB_TTBCR_A1_MASK << CB_TTBCR_A1_SHIFT)
+
+#else
+
+/* Translation Table Base Register 0: CB_TTBR0 */
+#define CB_TTBR0_IRGN1 (CB_TTBR0_IRGN1_MASK << CB_TTBR0_IRGN1_SHIFT)
+#define CB_TTBR0_S (CB_TTBR0_S_MASK << CB_TTBR0_S_SHIFT)
+#define CB_TTBR0_RGN (CB_TTBR0_RGN_MASK << CB_TTBR0_RGN_SHIFT)
+#define CB_TTBR0_NOS (CB_TTBR0_NOS_MASK << CB_TTBR0_NOS_SHIFT)
+#define CB_TTBR0_IRGN0 (CB_TTBR0_IRGN0_MASK << CB_TTBR0_IRGN0_SHIFT)
+
+/* Translation Table Base Register 1: CB_TTBR1 */
+#define CB_TTBR1_IRGN1 (CB_TTBR1_IRGN1_MASK << CB_TTBR1_IRGN1_SHIFT)
+#define CB_TTBR1_S (CB_TTBR1_S_MASK << CB_TTBR1_S_SHIFT)
+#define CB_TTBR1_RGN (CB_TTBR1_RGN_MASK << CB_TTBR1_RGN_SHIFT)
+#define CB_TTBR1_NOS (CB_TTBR1_NOS_MASK << CB_TTBR1_NOS_SHIFT)
+#define CB_TTBR1_IRGN0 (CB_TTBR1_IRGN0_MASK << CB_TTBR1_IRGN0_SHIFT)
+#endif
+
+/* Global Register Masks */
+/* Configuration Register 0 */
+#define CR0_NSCFG_MASK 0x03
+#define CR0_WACFG_MASK 0x03
+#define CR0_RACFG_MASK 0x03
+#define CR0_SHCFG_MASK 0x03
+#define CR0_SMCFCFG_MASK 0x01
+#define NSCR0_SMCFCFG_MASK 0x01
+#define CR0_MTCFG_MASK 0x01
+#define CR0_MEMATTR_MASK 0x0F
+#define CR0_BSU_MASK 0x03
+#define CR0_FB_MASK 0x01
+#define CR0_PTM_MASK 0x01
+#define CR0_VMIDPNE_MASK 0x01
+#define CR0_USFCFG_MASK 0x01
+#define NSCR0_USFCFG_MASK 0x01
+#define CR0_GSE_MASK 0x01
+#define CR0_STALLD_MASK 0x01
+#define NSCR0_STALLD_MASK 0x01
+#define CR0_TRANSIENTCFG_MASK 0x03
+#define CR0_GCFGFIE_MASK 0x01
+#define NSCR0_GCFGFIE_MASK 0x01
+#define CR0_GCFGFRE_MASK 0x01
+#define NSCR0_GCFGFRE_MASK 0x01
+#define CR0_GFIE_MASK 0x01
+#define NSCR0_GFIE_MASK 0x01
+#define CR0_GFRE_MASK 0x01
+#define NSCR0_GFRE_MASK 0x01
+#define CR0_CLIENTPD_MASK 0x01
+#define NSCR0_CLIENTPD_MASK 0x01
+
+/* ACR */
+#define ACR_SMTNMC_BPTLBEN_MASK 0x01
+#define ACR_MMUDIS_BPTLBEN_MASK 0x01
+#define ACR_S2CR_BPTLBEN_MASK 0x01
+
+/* NSACR */
+#define NSACR_SMTNMC_BPTLBEN_MASK 0x01
+#define NSACR_MMUDIS_BPTLBEN_MASK 0x01
+#define NSACR_S2CR_BPTLBEN_MASK 0x01
+
+/* Configuration Register 2 */
+#define CR2_BPVMID_MASK 0xFF
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define GATS1PR_ADDR_MASK 0xFFFFF
+#define GATS1PR_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define GATS1PW_ADDR_MASK 0xFFFFF
+#define GATS1PW_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define GATS1UR_ADDR_MASK 0xFFFFF
+#define GATS1UR_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define GATS1UW_ADDR_MASK 0xFFFFF
+#define GATS1UW_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define GATS12PR_ADDR_MASK 0xFFFFF
+#define GATS12PR_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define GATS12PW_ADDR_MASK 0xFFFFF
+#define GATS12PW_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define GATS12UR_ADDR_MASK 0xFFFFF
+#define GATS12UR_NDX_MASK 0xFF
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define GATS12UW_ADDR_MASK 0xFFFFF
+#define GATS12UW_NDX_MASK 0xFF
+
+/* Global Address Translation Status Register: GATSR */
+#define GATSR_ACTIVE_MASK 0x01
+
+/* Global Fault Address Register: GFAR */
+#define GFAR_FADDR_MASK 0xFFFFFFFF
+
+/* Global Fault Status Register: GFSR */
+#define GFSR_ICF_MASK 0x01
+#define GFSR_USF_MASK 0x01
+#define GFSR_SMCF_MASK 0x01
+#define GFSR_UCBF_MASK 0x01
+#define GFSR_UCIF_MASK 0x01
+#define GFSR_CAF_MASK 0x01
+#define GFSR_EF_MASK 0x01
+#define GFSR_PF_MASK 0x01
+#define GFSR_MULTI_MASK 0x01
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define GFSYNR0_NESTED_MASK 0x01
+#define GFSYNR0_WNR_MASK 0x01
+#define GFSYNR0_PNU_MASK 0x01
+#define GFSYNR0_IND_MASK 0x01
+#define GFSYNR0_NSSTATE_MASK 0x01
+#define GFSYNR0_NSATTR_MASK 0x01
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define GFSYNR1_SID_MASK 0x7FFF
+#define GFSYNR1_SSD_IDX_MASK 0x7FFF
+
+/* Global Physical Address Register: GPAR */
+#define GPAR_F_MASK 0x01
+#define GPAR_SS_MASK 0x01
+#define GPAR_OUTER_MASK 0x03
+#define GPAR_INNER_MASK 0x03
+#define GPAR_SH_MASK 0x01
+#define GPAR_NS_MASK 0x01
+#define GPAR_NOS_MASK 0x01
+#define GPAR_PA_MASK 0xFFFFF
+#define GPAR_TF_MASK 0x01
+#define GPAR_AFF_MASK 0x01
+#define GPAR_PF_MASK 0x01
+#define GPAR_EF_MASK 0x01
+#define GPAR_TLBMCF_MASK 0x01
+#define GPAR_TLBLKF_MASK 0x01
+#define GPAR_UCBF_MASK 0x01
+
+/* Identification Register: IDR0 */
+#define IDR0_NUMSMRG_MASK 0xFF
+#define IDR0_NUMSIDB_MASK 0x0F
+#define IDR0_BTM_MASK 0x01
+#define IDR0_CTTW_MASK 0x01
+#define IDR0_NUMIRPT_MASK 0xFF
+#define IDR0_PTFS_MASK 0x01
+#define IDR0_SMS_MASK 0x01
+#define IDR0_NTS_MASK 0x01
+#define IDR0_S2TS_MASK 0x01
+#define IDR0_S1TS_MASK 0x01
+#define IDR0_SES_MASK 0x01
+
+/* Identification Register: IDR1 */
+#define IDR1_NUMCB_MASK 0xFF
+#define IDR1_NUMSSDNDXB_MASK 0x0F
+#define IDR1_SSDTP_MASK 0x01
+#define IDR1_SMCD_MASK 0x01
+#define IDR1_NUMS2CB_MASK 0xFF
+#define IDR1_NUMPAGENDXB_MASK 0x07
+#define IDR1_PAGESIZE_MASK 0x01
+
+/* Identification Register: IDR2 */
+#define IDR2_IAS_MASK 0x0F
+#define IDR2_OAS_MASK 0x0F
+
+/* Identification Register: IDR7 */
+#define IDR7_MINOR_MASK 0x0F
+#define IDR7_MAJOR_MASK 0x0F
+
+/* Stream to Context Register: S2CR */
+#define S2CR_CBNDX_MASK 0xFF
+#define S2CR_SHCFG_MASK 0x03
+#define S2CR_MTCFG_MASK 0x01
+#define S2CR_MEMATTR_MASK 0x0F
+#define S2CR_TYPE_MASK 0x03
+#define S2CR_NSCFG_MASK 0x03
+#define S2CR_RACFG_MASK 0x03
+#define S2CR_WACFG_MASK 0x03
+#define S2CR_PRIVCFG_MASK 0x03
+#define S2CR_INSTCFG_MASK 0x03
+#define S2CR_TRANSIENTCFG_MASK 0x03
+#define S2CR_VMID_MASK 0xFF
+#define S2CR_BSU_MASK 0x03
+#define S2CR_FB_MASK 0x01
+
+/* Stream Match Register: SMR */
+#define SMR_ID_MASK 0x7FFF
+#define SMR_MASK_MASK 0x7FFF
+#define SMR_VALID_MASK 0x01
+
+/* Global TLB Status: TLBGSTATUS */
+#define TLBGSTATUS_GSACTIVE_MASK 0x01
+
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define TLBIVAH_ADDR_MASK 0xFFFFF
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define TLBIVMID_VMID_MASK 0xFF
+
+/* Global Register Space 1 Mask */
+/* Context Bank Attribute Register: CBAR */
+#define CBAR_VMID_MASK 0xFF
+#define CBAR_CBNDX_MASK 0x03
+#define CBAR_BPSHCFG_MASK 0x03
+#define CBAR_HYPC_MASK 0x01
+#define CBAR_FB_MASK 0x01
+#define CBAR_MEMATTR_MASK 0x0F
+#define CBAR_TYPE_MASK 0x03
+#define CBAR_BSU_MASK 0x03
+#define CBAR_RACFG_MASK 0x03
+#define CBAR_WACFG_MASK 0x03
+#define CBAR_IRPTNDX_MASK 0xFF
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
+#define CBFRSYNRA_SID_MASK 0x7FFF
+
+/* Implementation defined register space masks */
+#define MICRO_MMU_CTRL_RESERVED_MASK 0x03
+#define MICRO_MMU_CTRL_HALT_REQ_MASK 0x01
+#define MICRO_MMU_CTRL_IDLE_MASK 0x01
+
+/* Stage 1 Context Bank Format Masks */
+/* Auxiliary Control Register: CB_ACTLR */
+#define CB_ACTLR_REQPRIORITY_MASK 0x3
+#define CB_ACTLR_REQPRIORITYCFG_MASK 0x1
+#define CB_ACTLR_PRIVCFG_MASK 0x3
+#define CB_ACTLR_BPRCOSH_MASK 0x1
+#define CB_ACTLR_BPRCISH_MASK 0x1
+#define CB_ACTLR_BPRCNSH_MASK 0x1
+
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define CB_ATS1PR_ADDR_MASK 0xFFFFF
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define CB_ATS1PW_ADDR_MASK 0xFFFFF
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define CB_ATS1UR_ADDR_MASK 0xFFFFF
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define CB_ATS1UW_ADDR_MASK 0xFFFFF
+
+/* Address Translation Status Register: CB_ATSR */
+#define CB_ATSR_ACTIVE_MASK 0x01
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define CB_CONTEXTIDR_ASID_MASK 0xFF
+#define CB_CONTEXTIDR_PROCID_MASK 0xFFFFFF
+
+/* Fault Address Register: CB_FAR */
+#define CB_FAR_FADDR_MASK 0xFFFFFFFF
+
+/* Fault Status Register: CB_FSR */
+#define CB_FSR_TF_MASK 0x01
+#define CB_FSR_AFF_MASK 0x01
+#define CB_FSR_PF_MASK 0x01
+#define CB_FSR_EF_MASK 0x01
+#define CB_FSR_TLBMCF_MASK 0x01
+#define CB_FSR_TLBLKF_MASK 0x01
+#define CB_FSR_SS_MASK 0x01
+#define CB_FSR_MULTI_MASK 0x01
+
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define CB_FSYNR0_PLVL_MASK 0x03
+#define CB_FSYNR0_S1PTWF_MASK 0x01
+#define CB_FSYNR0_WNR_MASK 0x01
+#define CB_FSYNR0_PNU_MASK 0x01
+#define CB_FSYNR0_IND_MASK 0x01
+#define CB_FSYNR0_NSSTATE_MASK 0x01
+#define CB_FSYNR0_NSATTR_MASK 0x01
+#define CB_FSYNR0_ATOF_MASK 0x01
+#define CB_FSYNR0_PTWF_MASK 0x01
+#define CB_FSYNR0_AFR_MASK 0x01
+#define CB_FSYNR0_S1CBNDX_MASK 0xFF
+
+/* Normal Memory Remap Register: CB_NMRR */
+#define CB_NMRR_IR0_MASK 0x03
+#define CB_NMRR_IR1_MASK 0x03
+#define CB_NMRR_IR2_MASK 0x03
+#define CB_NMRR_IR3_MASK 0x03
+#define CB_NMRR_IR4_MASK 0x03
+#define CB_NMRR_IR5_MASK 0x03
+#define CB_NMRR_IR6_MASK 0x03
+#define CB_NMRR_IR7_MASK 0x03
+#define CB_NMRR_OR0_MASK 0x03
+#define CB_NMRR_OR1_MASK 0x03
+#define CB_NMRR_OR2_MASK 0x03
+#define CB_NMRR_OR3_MASK 0x03
+#define CB_NMRR_OR4_MASK 0x03
+#define CB_NMRR_OR5_MASK 0x03
+#define CB_NMRR_OR6_MASK 0x03
+#define CB_NMRR_OR7_MASK 0x03
+
+/* Physical Address Register: CB_PAR */
+#define CB_PAR_F_MASK 0x01
+#define CB_PAR_SS_MASK 0x01
+#define CB_PAR_OUTER_MASK 0x03
+#define CB_PAR_INNER_MASK 0x07
+#define CB_PAR_SH_MASK 0x01
+#define CB_PAR_NS_MASK 0x01
+#define CB_PAR_NOS_MASK 0x01
+#define CB_PAR_PA_MASK 0xFFFFF
+#define CB_PAR_TF_MASK 0x01
+#define CB_PAR_AFF_MASK 0x01
+#define CB_PAR_PF_MASK 0x01
+#define CB_PAR_EF_MASK 0x01
+#define CB_PAR_TLBMCF_MASK 0x01
+#define CB_PAR_TLBLKF_MASK 0x01
+#define CB_PAR_ATOT_MASK 0x01ULL
+#define CB_PAR_PLVL_MASK 0x03ULL
+#define CB_PAR_STAGE_MASK 0x01ULL
+
+/* Primary Region Remap Register: CB_PRRR */
+#define CB_PRRR_TR0_MASK 0x03
+#define CB_PRRR_TR1_MASK 0x03
+#define CB_PRRR_TR2_MASK 0x03
+#define CB_PRRR_TR3_MASK 0x03
+#define CB_PRRR_TR4_MASK 0x03
+#define CB_PRRR_TR5_MASK 0x03
+#define CB_PRRR_TR6_MASK 0x03
+#define CB_PRRR_TR7_MASK 0x03
+#define CB_PRRR_DS0_MASK 0x01
+#define CB_PRRR_DS1_MASK 0x01
+#define CB_PRRR_NS0_MASK 0x01
+#define CB_PRRR_NS1_MASK 0x01
+#define CB_PRRR_NOS0_MASK 0x01
+#define CB_PRRR_NOS1_MASK 0x01
+#define CB_PRRR_NOS2_MASK 0x01
+#define CB_PRRR_NOS3_MASK 0x01
+#define CB_PRRR_NOS4_MASK 0x01
+#define CB_PRRR_NOS5_MASK 0x01
+#define CB_PRRR_NOS6_MASK 0x01
+#define CB_PRRR_NOS7_MASK 0x01
+
+/* Transaction Resume: CB_RESUME */
+#define CB_RESUME_TNR_MASK 0x01
+
+/* System Control Register: CB_SCTLR */
+#define CB_SCTLR_M_MASK 0x01
+#define CB_SCTLR_TRE_MASK 0x01
+#define CB_SCTLR_AFE_MASK 0x01
+#define CB_SCTLR_AFFD_MASK 0x01
+#define CB_SCTLR_E_MASK 0x01
+#define CB_SCTLR_CFRE_MASK 0x01
+#define CB_SCTLR_CFIE_MASK 0x01
+#define CB_SCTLR_CFCFG_MASK 0x01
+#define CB_SCTLR_HUPCF_MASK 0x01
+#define CB_SCTLR_WXN_MASK 0x01
+#define CB_SCTLR_UWXN_MASK 0x01
+#define CB_SCTLR_ASIDPNE_MASK 0x01
+#define CB_SCTLR_TRANSIENTCFG_MASK 0x03
+#define CB_SCTLR_MEMATTR_MASK 0x0F
+#define CB_SCTLR_MTCFG_MASK 0x01
+#define CB_SCTLR_SHCFG_MASK 0x03
+#define CB_SCTLR_RACFG_MASK 0x03
+#define CB_SCTLR_WACFG_MASK 0x03
+#define CB_SCTLR_NSCFG_MASK 0x03
+
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define CB_TLBIASID_ASID_MASK 0xFF
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define CB_TLBIVA_ASID_MASK 0xFF
+#define CB_TLBIVA_VA_MASK 0xFFFFF
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define CB_TLBIVAA_VA_MASK 0xFFFFF
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define CB_TLBIVAAL_VA_MASK 0xFFFFF
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define CB_TLBIVAL_ASID_MASK 0xFF
+#define CB_TLBIVAL_VA_MASK 0xFFFFF
+
+/* TLB Status: CB_TLBSTATUS */
+#define CB_TLBSTATUS_SACTIVE_MASK 0x01
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define CB_TTBCR_T0SZ_MASK 0x07
+#define CB_TTBCR_T1SZ_MASK 0x07
+#define CB_TTBCR_EPD0_MASK 0x01
+#define CB_TTBCR_EPD1_MASK 0x01
+#define CB_TTBCR_IRGN0_MASK 0x03
+#define CB_TTBCR_IRGN1_MASK 0x03
+#define CB_TTBCR_ORGN0_MASK 0x03
+#define CB_TTBCR_ORGN1_MASK 0x03
+#define CB_TTBCR_NSCFG0_MASK 0x01
+#define CB_TTBCR_NSCFG1_MASK 0x01
+#define CB_TTBCR_SH0_MASK 0x03
+#define CB_TTBCR_SH1_MASK 0x03
+#define CB_TTBCR_A1_MASK 0x01
+#define CB_TTBCR_EAE_MASK 0x01
+
+/* Translation Table Base Register 0/1: CB_TTBR */
+#ifdef CONFIG_IOMMU_LPAE
+#define CB_TTBR0_ADDR_MASK 0x7FFFFFFFFULL
+#define CB_TTBR0_ASID_MASK 0xFF
+#define CB_TTBR1_ASID_MASK 0xFF
+#else
+#define CB_TTBR0_IRGN1_MASK 0x01
+#define CB_TTBR0_S_MASK 0x01
+#define CB_TTBR0_RGN_MASK 0x01
+#define CB_TTBR0_NOS_MASK 0x01
+#define CB_TTBR0_IRGN0_MASK 0x01
+#define CB_TTBR0_ADDR_MASK 0xFFFFFF
+
+#define CB_TTBR1_IRGN1_MASK 0x1
+#define CB_TTBR1_S_MASK 0x1
+#define CB_TTBR1_RGN_MASK 0x1
+#define CB_TTBR1_NOS_MASK 0x1
+#define CB_TTBR1_IRGN0_MASK 0x1
+#endif
+
+/* Global Register Shifts */
+/* Configuration Register: CR0 */
+#define CR0_NSCFG_SHIFT 28
+#define CR0_WACFG_SHIFT 26
+#define CR0_RACFG_SHIFT 24
+#define CR0_SHCFG_SHIFT 22
+#define CR0_SMCFCFG_SHIFT 21
+#define NSCR0_SMCFCFG_SHIFT 21
+#define CR0_MTCFG_SHIFT 20
+#define CR0_MEMATTR_SHIFT 16
+#define CR0_BSU_SHIFT 14
+#define CR0_FB_SHIFT 13
+#define CR0_PTM_SHIFT 12
+#define CR0_VMIDPNE_SHIFT 11
+#define CR0_USFCFG_SHIFT 10
+#define NSCR0_USFCFG_SHIFT 10
+#define CR0_GSE_SHIFT 9
+#define CR0_STALLD_SHIFT 8
+#define NSCR0_STALLD_SHIFT 8
+#define CR0_TRANSIENTCFG_SHIFT 6
+#define CR0_GCFGFIE_SHIFT 5
+#define NSCR0_GCFGFIE_SHIFT 5
+#define CR0_GCFGFRE_SHIFT 4
+#define NSCR0_GCFGFRE_SHIFT 4
+#define CR0_GFIE_SHIFT 2
+#define NSCR0_GFIE_SHIFT 2
+#define CR0_GFRE_SHIFT 1
+#define NSCR0_GFRE_SHIFT 1
+#define CR0_CLIENTPD_SHIFT 0
+#define NSCR0_CLIENTPD_SHIFT 0
+
+/* ACR */
+#define ACR_SMTNMC_BPTLBEN_SHIFT 8
+#define ACR_MMUDIS_BPTLBEN_SHIFT 9
+#define ACR_S2CR_BPTLBEN_SHIFT 10
+
+/* NSACR */
+#define NSACR_SMTNMC_BPTLBEN_SHIFT 8
+#define NSACR_MMUDIS_BPTLBEN_SHIFT 9
+#define NSACR_S2CR_BPTLBEN_SHIFT 10
+
+/* Configuration Register: CR2 */
+#define CR2_BPVMID_SHIFT 0
+
+/* Global Address Translation, Stage 1, Privileged Read: GATS1PR */
+#define GATS1PR_ADDR_SHIFT 12
+#define GATS1PR_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1, Privileged Write: GATS1PW */
+#define GATS1PW_ADDR_SHIFT 12
+#define GATS1PW_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1, User Read: GATS1UR */
+#define GATS1UR_ADDR_SHIFT 12
+#define GATS1UR_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1, User Write: GATS1UW */
+#define GATS1UW_ADDR_SHIFT 12
+#define GATS1UW_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1 and 2, Privileged Read: GATS12PR */
+#define GATS12PR_ADDR_SHIFT 12
+#define GATS12PR_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1 and 2, Privileged Write: GATS12PW */
+#define GATS12PW_ADDR_SHIFT 12
+#define GATS12PW_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1 and 2, User Read: GATS12UR */
+#define GATS12UR_ADDR_SHIFT 12
+#define GATS12UR_NDX_SHIFT 0
+
+/* Global Address Translation, Stage 1 and 2, User Write: GATS12UW */
+#define GATS12UW_ADDR_SHIFT 12
+#define GATS12UW_NDX_SHIFT 0
+
+/* Global Address Translation Status Register: GATSR */
+#define GATSR_ACTIVE_SHIFT 0
+
+/* Global Fault Address Register: GFAR */
+#define GFAR_FADDR_SHIFT 0
+
+/* Global Fault Status Register: GFSR */
+#define GFSR_ICF_SHIFT 0
+#define GFSR_USF_SHIFT 1
+#define GFSR_SMCF_SHIFT 2
+#define GFSR_UCBF_SHIFT 3
+#define GFSR_UCIF_SHIFT 4
+#define GFSR_CAF_SHIFT 5
+#define GFSR_EF_SHIFT 6
+#define GFSR_PF_SHIFT 7
+#define GFSR_MULTI_SHIFT 31
+
+/* Global Fault Syndrome Register 0: GFSYNR0 */
+#define GFSYNR0_NESTED_SHIFT 0
+#define GFSYNR0_WNR_SHIFT 1
+#define GFSYNR0_PNU_SHIFT 2
+#define GFSYNR0_IND_SHIFT 3
+#define GFSYNR0_NSSTATE_SHIFT 4
+#define GFSYNR0_NSATTR_SHIFT 5
+
+/* Global Fault Syndrome Register 1: GFSYNR1 */
+#define GFSYNR1_SID_SHIFT 0
+
+/* Global Physical Address Register: GPAR */
+#define GPAR_F_SHIFT 0
+#define GPAR_SS_SHIFT 1
+#define GPAR_OUTER_SHIFT 2
+#define GPAR_INNER_SHIFT 4
+#define GPAR_SH_SHIFT 7
+#define GPAR_NS_SHIFT 9
+#define GPAR_NOS_SHIFT 10
+#define GPAR_PA_SHIFT 12
+#define GPAR_TF_SHIFT 1
+#define GPAR_AFF_SHIFT 2
+#define GPAR_PF_SHIFT 3
+#define GPAR_EF_SHIFT 4
+#define GPAR_TLBMCF_SHIFT 5
+#define GPAR_TLBLKF_SHIFT 6
+#define GPAR_UCBF_SHIFT 30
+
+/* Identification Register: IDR0 */
+#define IDR0_NUMSMRG_SHIFT 0
+#define IDR0_NUMSIDB_SHIFT 9
+#define IDR0_BTM_SHIFT 13
+#define IDR0_CTTW_SHIFT 14
+#define IDR0_NUMIRPT_SHIFT 16
+#define IDR0_PTFS_SHIFT 24
+#define IDR0_SMS_SHIFT 27
+#define IDR0_NTS_SHIFT 28
+#define IDR0_S2TS_SHIFT 29
+#define IDR0_S1TS_SHIFT 30
+#define IDR0_SES_SHIFT 31
+
+/* Identification Register: IDR1 */
+#define IDR1_NUMCB_SHIFT 0
+#define IDR1_NUMSSDNDXB_SHIFT 8
+#define IDR1_SSDTP_SHIFT 12
+#define IDR1_SMCD_SHIFT 15
+#define IDR1_NUMS2CB_SHIFT 16
+#define IDR1_NUMPAGENDXB_SHIFT 28
+#define IDR1_PAGESIZE_SHIFT 31
+
+/* Identification Register: IDR2 */
+#define IDR2_IAS_SHIFT 0
+#define IDR2_OAS_SHIFT 4
+
+/* Identification Register: IDR7 */
+#define IDR7_MINOR_SHIFT 0
+#define IDR7_MAJOR_SHIFT 4
+
+/* Stream to Context Register: S2CR */
+#define S2CR_CBNDX_SHIFT 0
+#define S2CR_SHCFG_SHIFT 8
+#define S2CR_MTCFG_SHIFT 11
+#define S2CR_MEMATTR_SHIFT 12
+#define S2CR_TYPE_SHIFT 16
+#define S2CR_NSCFG_SHIFT 18
+#define S2CR_RACFG_SHIFT 20
+#define S2CR_WACFG_SHIFT 22
+#define S2CR_PRIVCFG_SHIFT 24
+#define S2CR_INSTCFG_SHIFT 26
+#define S2CR_TRANSIENTCFG_SHIFT 28
+#define S2CR_VMID_SHIFT 0
+#define S2CR_BSU_SHIFT 24
+#define S2CR_FB_SHIFT 26
+
+/* Stream Match Register: SMR */
+#define SMR_ID_SHIFT 0
+#define SMR_MASK_SHIFT 16
+#define SMR_VALID_SHIFT 31
+
+/* Global TLB Status: TLBGSTATUS */
+#define TLBGSTATUS_GSACTIVE_SHIFT 0
+
+/* Invalidate Hyp TLB by VA: TLBIVAH */
+#define TLBIVAH_ADDR_SHIFT 12
+
+/* Invalidate TLB by VMID: TLBIVMID */
+#define TLBIVMID_VMID_SHIFT 0
+
+/* Context Bank Attribute Register: CBAR */
+#define CBAR_VMID_SHIFT 0
+#define CBAR_CBNDX_SHIFT 8
+#define CBAR_BPSHCFG_SHIFT 8
+#define CBAR_HYPC_SHIFT 10
+#define CBAR_FB_SHIFT 11
+#define CBAR_MEMATTR_SHIFT 12
+#define CBAR_TYPE_SHIFT 16
+#define CBAR_BSU_SHIFT 18
+#define CBAR_RACFG_SHIFT 20
+#define CBAR_WACFG_SHIFT 22
+#define CBAR_IRPTNDX_SHIFT 24
+
+/* Context Bank Fault Restricted Syndrome Register A: CBFRSYNRA */
+#define CBFRSYNRA_SID_SHIFT 0
+
+/* Implementation defined register space shift */
+#define MICRO_MMU_CTRL_RESERVED_SHIFT 0x00
+#define MICRO_MMU_CTRL_HALT_REQ_SHIFT 0x02
+#define MICRO_MMU_CTRL_IDLE_SHIFT 0x03
+
+/* Stage 1 Context Bank Format Shifts */
+/* Auxiliary Control Register: CB_ACTLR */
+#define CB_ACTLR_REQPRIORITY_SHIFT 0
+#define CB_ACTLR_REQPRIORITYCFG_SHIFT 4
+#define CB_ACTLR_PRIVCFG_SHIFT 8
+#define CB_ACTLR_BPRCOSH_SHIFT 28
+#define CB_ACTLR_BPRCISH_SHIFT 29
+#define CB_ACTLR_BPRCNSH_SHIFT 30
+
+/* Address Translation, Stage 1, Privileged Read: CB_ATS1PR */
+#define CB_ATS1PR_ADDR_SHIFT 12
+
+/* Address Translation, Stage 1, Privileged Write: CB_ATS1PW */
+#define CB_ATS1PW_ADDR_SHIFT 12
+
+/* Address Translation, Stage 1, User Read: CB_ATS1UR */
+#define CB_ATS1UR_ADDR_SHIFT 12
+
+/* Address Translation, Stage 1, User Write: CB_ATS1UW */
+#define CB_ATS1UW_ADDR_SHIFT 12
+
+/* Address Translation Status Register: CB_ATSR */
+#define CB_ATSR_ACTIVE_SHIFT 0
+
+/* Context ID Register: CB_CONTEXTIDR */
+#define CB_CONTEXTIDR_ASID_SHIFT 0
+#define CB_CONTEXTIDR_PROCID_SHIFT 8
+
+/* Fault Address Register: CB_FAR */
+#define CB_FAR_FADDR_SHIFT 0
+
+/* Fault Status Register: CB_FSR */
+#define CB_FSR_TF_SHIFT 1
+#define CB_FSR_AFF_SHIFT 2
+#define CB_FSR_PF_SHIFT 3
+#define CB_FSR_EF_SHIFT 4
+#define CB_FSR_TLBMCF_SHIFT 5
+#define CB_FSR_TLBLKF_SHIFT 6
+#define CB_FSR_SS_SHIFT 30
+#define CB_FSR_MULTI_SHIFT 31
+
+/* Fault Syndrome Register 0: CB_FSYNR0 */
+#define CB_FSYNR0_PLVL_SHIFT 0
+#define CB_FSYNR0_S1PTWF_SHIFT 3
+#define CB_FSYNR0_WNR_SHIFT 4
+#define CB_FSYNR0_PNU_SHIFT 5
+#define CB_FSYNR0_IND_SHIFT 6
+#define CB_FSYNR0_NSSTATE_SHIFT 7
+#define CB_FSYNR0_NSATTR_SHIFT 8
+#define CB_FSYNR0_ATOF_SHIFT 9
+#define CB_FSYNR0_PTWF_SHIFT 10
+#define CB_FSYNR0_AFR_SHIFT 11
+#define CB_FSYNR0_S1CBNDX_SHIFT 16
+
+/* Normal Memory Remap Register: CB_NMRR */
+#define CB_NMRR_IR0_SHIFT 0
+#define CB_NMRR_IR1_SHIFT 2
+#define CB_NMRR_IR2_SHIFT 4
+#define CB_NMRR_IR3_SHIFT 6
+#define CB_NMRR_IR4_SHIFT 8
+#define CB_NMRR_IR5_SHIFT 10
+#define CB_NMRR_IR6_SHIFT 12
+#define CB_NMRR_IR7_SHIFT 14
+#define CB_NMRR_OR0_SHIFT 16
+#define CB_NMRR_OR1_SHIFT 18
+#define CB_NMRR_OR2_SHIFT 20
+#define CB_NMRR_OR3_SHIFT 22
+#define CB_NMRR_OR4_SHIFT 24
+#define CB_NMRR_OR5_SHIFT 26
+#define CB_NMRR_OR6_SHIFT 28
+#define CB_NMRR_OR7_SHIFT 30
+
+/* Physical Address Register: CB_PAR */
+#define CB_PAR_F_SHIFT 0
+#define CB_PAR_SS_SHIFT 1
+#define CB_PAR_OUTER_SHIFT 2
+#define CB_PAR_INNER_SHIFT 4
+#define CB_PAR_SH_SHIFT 7
+#define CB_PAR_NS_SHIFT 9
+#define CB_PAR_NOS_SHIFT 10
+#define CB_PAR_PA_SHIFT 12
+#define CB_PAR_TF_SHIFT 1
+#define CB_PAR_AFF_SHIFT 2
+#define CB_PAR_PF_SHIFT 3
+#define CB_PAR_EF_SHIFT 4
+#define CB_PAR_TLBMCF_SHIFT 5
+#define CB_PAR_TLBLKF_SHIFT 6
+#define CB_PAR_ATOT_SHIFT 31
+#define CB_PAR_PLVL_SHIFT 32
+#define CB_PAR_STAGE_SHIFT 35
+
+/* Primary Region Remap Register: CB_PRRR */
+#define CB_PRRR_TR0_SHIFT 0
+#define CB_PRRR_TR1_SHIFT 2
+#define CB_PRRR_TR2_SHIFT 4
+#define CB_PRRR_TR3_SHIFT 6
+#define CB_PRRR_TR4_SHIFT 8
+#define CB_PRRR_TR5_SHIFT 10
+#define CB_PRRR_TR6_SHIFT 12
+#define CB_PRRR_TR7_SHIFT 14
+#define CB_PRRR_DS0_SHIFT 16
+#define CB_PRRR_DS1_SHIFT 17
+#define CB_PRRR_NS0_SHIFT 18
+#define CB_PRRR_NS1_SHIFT 19
+#define CB_PRRR_NOS0_SHIFT 24
+#define CB_PRRR_NOS1_SHIFT 25
+#define CB_PRRR_NOS2_SHIFT 26
+#define CB_PRRR_NOS3_SHIFT 27
+#define CB_PRRR_NOS4_SHIFT 28
+#define CB_PRRR_NOS5_SHIFT 29
+#define CB_PRRR_NOS6_SHIFT 30
+#define CB_PRRR_NOS7_SHIFT 31
+
+/* Transaction Resume: CB_RESUME */
+#define CB_RESUME_TNR_SHIFT 0
+
+/* System Control Register: CB_SCTLR */
+#define CB_SCTLR_M_SHIFT 0
+#define CB_SCTLR_TRE_SHIFT 1
+#define CB_SCTLR_AFE_SHIFT 2
+#define CB_SCTLR_AFFD_SHIFT 3
+#define CB_SCTLR_E_SHIFT 4
+#define CB_SCTLR_CFRE_SHIFT 5
+#define CB_SCTLR_CFIE_SHIFT 6
+#define CB_SCTLR_CFCFG_SHIFT 7
+#define CB_SCTLR_HUPCF_SHIFT 8
+#define CB_SCTLR_WXN_SHIFT 9
+#define CB_SCTLR_UWXN_SHIFT 10
+#define CB_SCTLR_ASIDPNE_SHIFT 12
+#define CB_SCTLR_TRANSIENTCFG_SHIFT 14
+#define CB_SCTLR_MEMATTR_SHIFT 16
+#define CB_SCTLR_MTCFG_SHIFT 20
+#define CB_SCTLR_SHCFG_SHIFT 22
+#define CB_SCTLR_RACFG_SHIFT 24
+#define CB_SCTLR_WACFG_SHIFT 26
+#define CB_SCTLR_NSCFG_SHIFT 28
+
+/* Invalidate TLB by ASID: CB_TLBIASID */
+#define CB_TLBIASID_ASID_SHIFT 0
+
+/* Invalidate TLB by VA: CB_TLBIVA */
+#define CB_TLBIVA_ASID_SHIFT 0
+#define CB_TLBIVA_VA_SHIFT 12
+
+/* Invalidate TLB by VA, All ASID: CB_TLBIVAA */
+#define CB_TLBIVAA_VA_SHIFT 12
+
+/* Invalidate TLB by VA, All ASID, Last Level: CB_TLBIVAAL */
+#define CB_TLBIVAAL_VA_SHIFT 12
+
+/* Invalidate TLB by VA, Last Level: CB_TLBIVAL */
+#define CB_TLBIVAL_ASID_SHIFT 0
+#define CB_TLBIVAL_VA_SHIFT 12
+
+/* TLB Status: CB_TLBSTATUS */
+#define CB_TLBSTATUS_SACTIVE_SHIFT 0
+
+/* Translation Table Base Control Register: CB_TTBCR */
+#define CB_TTBCR_T0SZ_SHIFT 0
+#define CB_TTBCR_T1SZ_SHIFT 16
+#define CB_TTBCR_EPD0_SHIFT 4
+#define CB_TTBCR_EPD1_SHIFT 5
+#define CB_TTBCR_NSCFG0_SHIFT 14
+#define CB_TTBCR_NSCFG1_SHIFT 30
+#define CB_TTBCR_EAE_SHIFT 31
+#define CB_TTBCR_IRGN0_SHIFT 8
+#define CB_TTBCR_IRGN1_SHIFT 24
+#define CB_TTBCR_ORGN0_SHIFT 10
+#define CB_TTBCR_ORGN1_SHIFT 26
+#define CB_TTBCR_A1_SHIFT 22
+#define CB_TTBCR_SH0_SHIFT 12
+#define CB_TTBCR_SH1_SHIFT 28
+
+/* Translation Table Base Register 0/1: CB_TTBR */
+#ifdef CONFIG_IOMMU_LPAE
+#define CB_TTBR0_ADDR_SHIFT 5
+#define CB_TTBR0_ASID_SHIFT 48
+#define CB_TTBR1_ASID_SHIFT 48
+#else
+#define CB_TTBR0_IRGN1_SHIFT 0
+#define CB_TTBR0_S_SHIFT 1
+#define CB_TTBR0_RGN_SHIFT 3
+#define CB_TTBR0_NOS_SHIFT 5
+#define CB_TTBR0_IRGN0_SHIFT 6
+#define CB_TTBR0_ADDR_SHIFT 14
+
+#define CB_TTBR1_IRGN1_SHIFT 0
+#define CB_TTBR1_S_SHIFT 1
+#define CB_TTBR1_RGN_SHIFT 3
+#define CB_TTBR1_NOS_SHIFT 5
+#define CB_TTBR1_IRGN0_SHIFT 6
+#define CB_TTBR1_ADDR_SHIFT 14
+#endif
+
+#endif
diff --git a/drivers/iommu/qcom/msm_iommu_pagetable.c b/drivers/iommu/qcom/msm_iommu_pagetable.c
new file mode 100644
index 000000000000..1f11abb9db7b
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_pagetable.c
@@ -0,0 +1,645 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/scatterlist.h>
+
+#include <asm/cacheflush.h>
+
+#include <linux/qcom_iommu.h>
+#include "msm_iommu_priv.h"
+#include <trace/events/kmem.h>
+#include "msm_iommu_pagetable.h"
+
+#define NUM_FL_PTE 4096
+#define NUM_SL_PTE 256
+#define GUARD_PTE 2
+#define NUM_TEX_CLASS 8
+
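+/*
+ * Sizing note (editor's illustration, not in the original source): with the
+ * ARMv7 short-descriptor format used here, the first level table holds
+ * NUM_FL_PTE = 4096 word-sized entries (4096 * 4 = 16 KB, hence the SZ_16K
+ * allocations below), each covering 1 MB of VA; a second level table holds
+ * NUM_SL_PTE = 256 word-sized entries (1 KB) plus GUARD_PTE guard entries,
+ * carved out of a full SZ_4K page.
+ */
+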
+/* First-level page table bits */
+#define FL_BASE_MASK 0xFFFFFC00
+#define FL_TYPE_TABLE (1 << 0)
+#define FL_TYPE_SECT (2 << 0)
+#define FL_SUPERSECTION (1 << 18)
+#define FL_AP0 (1 << 10)
+#define FL_AP1 (1 << 11)
+#define FL_AP2 (1 << 15)
+#define FL_SHARED (1 << 16)
+#define FL_BUFFERABLE (1 << 2)
+#define FL_CACHEABLE (1 << 3)
+#define FL_TEX0 (1 << 12)
+#define FL_OFFSET(va) (((va) & 0xFFF00000) >> 20)
+#define FL_NG (1 << 17)
+
+/* Second-level page table bits */
+#define SL_BASE_MASK_LARGE 0xFFFF0000
+#define SL_BASE_MASK_SMALL 0xFFFFF000
+#define SL_TYPE_LARGE (1 << 0)
+#define SL_TYPE_SMALL (2 << 0)
+#define SL_AP0 (1 << 4)
+#define SL_AP1 (2 << 4)
+#define SL_AP2 (1 << 9)
+#define SL_SHARED (1 << 10)
+#define SL_BUFFERABLE (1 << 2)
+#define SL_CACHEABLE (1 << 3)
+#define SL_TEX0 (1 << 6)
+#define SL_OFFSET(va) (((va) & 0xFF000) >> 12)
+#define SL_NG (1 << 11)
+
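+/*
+ * Worked example (editor's illustration): for va = 0x12345678,
+ * FL_OFFSET(va) = (va & 0xFFF00000) >> 20 = 0x123 indexes the first level
+ * table, SL_OFFSET(va) = (va & 0xFF000) >> 12 = 0x45 indexes the second
+ * level table, and the low 12 bits (0x678) are the offset within the 4 KB
+ * page.
+ */
+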
+/* Memory type and cache policy attributes */
+#define MT_SO 0
+#define MT_DEV 1
+#define MT_IOMMU_NORMAL 2
+#define CP_NONCACHED 0
+#define CP_WB_WA 1
+#define CP_WT 2
+#define CP_WB_NWA 3
+
+/* Sharability attributes of MSM IOMMU mappings */
+#define MSM_IOMMU_ATTR_NON_SH 0x0
+#define MSM_IOMMU_ATTR_SH 0x4
+
+/* Cacheability attributes of MSM IOMMU mappings */
+#define MSM_IOMMU_ATTR_NONCACHED 0x0
+#define MSM_IOMMU_ATTR_CACHED_WB_WA 0x1
+#define MSM_IOMMU_ATTR_CACHED_WB_NWA 0x2
+#define MSM_IOMMU_ATTR_CACHED_WT 0x3
+
+static int msm_iommu_tex_class[4];
+
+/* TEX Remap Registers */
+#define NMRR_ICP(nmrr, n) (((nmrr) & (3 << ((n) * 2))) >> ((n) * 2))
+#define NMRR_OCP(nmrr, n) (((nmrr) & (3 << ((n) * 2 + 16))) >> ((n) * 2 + 16))
+
+#define PRRR_NOS(prrr, n) ((prrr) & (1 << ((n) + 24)) ? 1 : 0)
+#define PRRR_MT(prrr, n) ((((prrr) & (3 << ((n) * 2))) >> ((n) * 2)))
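+
+/*
+ * Editor's note: for TEX class n, PRRR_MT() and PRRR_NOS() extract the
+ * 2-bit memory type and the NOSn bit from PRRR, while NMRR_ICP() and
+ * NMRR_OCP() extract the 2-bit inner and outer cache policies from NMRR.
+ * get_tex_class() below simply searches for the class whose decoded
+ * attributes match the requested combination.
+ */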
+
+static inline void clean_pte(u32 *start, u32 *end, int redirect)
+{
+ if (!redirect)
+ dmac_flush_range(start, end);
+}
+
+int msm_iommu_pagetable_alloc(struct msm_iommu_pt *pt)
+{
+ pt->fl_table = (u32 *)__get_free_pages(GFP_KERNEL, get_order(SZ_16K));
+ if (!pt->fl_table)
+ return -ENOMEM;
+
+ pt->fl_table_shadow = (u32 *)__get_free_pages(GFP_KERNEL,
+ get_order(SZ_16K));
+ if (!pt->fl_table_shadow) {
+ free_pages((unsigned long)pt->fl_table, get_order(SZ_16K));
+ return -ENOMEM;
+ }
+
+ memset(pt->fl_table, 0, SZ_16K);
+ memset(pt->fl_table_shadow, 0, SZ_16K);
+ clean_pte(pt->fl_table, pt->fl_table + NUM_FL_PTE, pt->redirect);
+
+ return 0;
+}
+
+void msm_iommu_pagetable_free(struct msm_iommu_pt *pt)
+{
+ u32 *fl_table;
+ u32 *fl_table_shadow;
+ int i;
+
+ fl_table = pt->fl_table;
+ fl_table_shadow = pt->fl_table_shadow;
+ for (i = 0; i < NUM_FL_PTE; i++)
+ if ((fl_table[i] & 0x03) == FL_TYPE_TABLE)
+ free_page((unsigned long) __va(((fl_table[i]) &
+ FL_BASE_MASK)));
+ free_pages((unsigned long)fl_table, get_order(SZ_16K));
+ pt->fl_table = NULL;
+
+ free_pages((unsigned long)fl_table_shadow, get_order(SZ_16K));
+ pt->fl_table_shadow = NULL;
+}
+
+void msm_iommu_pagetable_free_tables(struct msm_iommu_pt *pt, unsigned long va,
+ size_t len)
+{
+ /*
+ * Adding 2 for worst case. We could be spanning 3 second level pages
+ * if we unmapped just over 1MB.
+ */
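+ /*
+ * Worked example (editor's illustration): unmapping SZ_1M + 2 * SZ_4K
+ * starting at va = 0xFF000 touches second level tables under first
+ * level entries 0x000, 0x001 and 0x002, i.e. len / SZ_1M + 2 = 3
+ * entries, matching the worst case above.
+ */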
+ u32 n_entries = len / SZ_1M + 2;
+ u32 fl_offset = FL_OFFSET(va);
+ u32 i;
+
+ for (i = 0; i < n_entries && fl_offset < NUM_FL_PTE; ++i) {
+ u32 *fl_pte_shadow = pt->fl_table_shadow + fl_offset;
+ void *sl_table_va = __va(((*fl_pte_shadow) & ~0x1FF));
+ u32 sl_table = *fl_pte_shadow;
+
+ if (sl_table && !(sl_table & 0x1FF)) {
+ free_pages((unsigned long) sl_table_va,
+ get_order(SZ_4K));
+ *fl_pte_shadow = 0;
+ }
+ ++fl_offset;
+ }
+}
+
+static int __get_pgprot(int prot, int len)
+{
+ unsigned int pgprot;
+ int tex;
+
+ if (!(prot & (IOMMU_READ | IOMMU_WRITE))) {
+ prot |= IOMMU_READ | IOMMU_WRITE;
+ WARN_ONCE(1, "No attributes in iommu mapping; assuming RW\n");
+ }
+
+ if ((prot & IOMMU_WRITE) && !(prot & IOMMU_READ)) {
+ prot |= IOMMU_READ;
+ WARN_ONCE(1, "Write-only unsupported; falling back to RW\n");
+ }
+
+ if (prot & IOMMU_CACHE)
+ tex = (pgprot_val(PAGE_KERNEL) >> 2) & 0x07;
+ else
+ tex = msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED];
+
+ if (tex < 0 || tex > NUM_TEX_CLASS - 1)
+ return 0;
+
+ if (len == SZ_16M || len == SZ_1M) {
+ pgprot = FL_SHARED;
+ pgprot |= tex & 0x01 ? FL_BUFFERABLE : 0;
+ pgprot |= tex & 0x02 ? FL_CACHEABLE : 0;
+ pgprot |= tex & 0x04 ? FL_TEX0 : 0;
+ pgprot |= prot & IOMMU_PRIV ? FL_AP0 :
+ (FL_AP0 | FL_AP1);
+ pgprot |= prot & IOMMU_WRITE ? 0 : FL_AP2;
+ } else {
+ pgprot = SL_SHARED;
+ pgprot |= tex & 0x01 ? SL_BUFFERABLE : 0;
+ pgprot |= tex & 0x02 ? SL_CACHEABLE : 0;
+ pgprot |= tex & 0x04 ? SL_TEX0 : 0;
+ pgprot |= prot & IOMMU_PRIV ? SL_AP0 :
+ (SL_AP0 | SL_AP1);
+ pgprot |= prot & IOMMU_WRITE ? 0 : SL_AP2;
+ }
+
+ return pgprot;
+}
+
+static u32 *make_second_level(struct msm_iommu_pt *pt, u32 *fl_pte,
+ u32 *fl_pte_shadow)
+{
+ u32 *sl;
+ sl = (u32 *) __get_free_pages(GFP_KERNEL,
+ get_order(SZ_4K));
+
+ if (!sl) {
+ pr_debug("Could not allocate second level table\n");
+ goto fail;
+ }
+ memset(sl, 0, SZ_4K);
+ clean_pte(sl, sl + NUM_SL_PTE + GUARD_PTE, pt->redirect);
+
+ *fl_pte = ((((int)__pa(sl)) & FL_BASE_MASK) | FL_TYPE_TABLE);
+ *fl_pte_shadow = *fl_pte & ~0x1FF;
+
+ clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+fail:
+ return sl;
+}
+
+static int sl_4k(u32 *sl_pte, phys_addr_t pa, unsigned int pgprot)
+{
+ int ret = 0;
+
+ if (*sl_pte) {
+ ret = -EBUSY;
+ goto fail;
+ }
+
+ *sl_pte = (pa & SL_BASE_MASK_SMALL) | SL_NG | SL_SHARED
+ | SL_TYPE_SMALL | pgprot;
+fail:
+ return ret;
+}
+
+static int sl_64k(u32 *sl_pte, phys_addr_t pa, unsigned int pgprot)
+{
+ int ret = 0;
+
+ int i;
+
+ for (i = 0; i < 16; i++)
+ if (*(sl_pte+i)) {
+ ret = -EBUSY;
+ goto fail;
+ }
+
+ for (i = 0; i < 16; i++)
+ *(sl_pte+i) = (pa & SL_BASE_MASK_LARGE) | SL_NG
+ | SL_SHARED | SL_TYPE_LARGE | pgprot;
+
+fail:
+ return ret;
+}
+
+static inline int fl_1m(u32 *fl_pte, phys_addr_t pa, int pgprot)
+{
+ if (*fl_pte)
+ return -EBUSY;
+
+ *fl_pte = (pa & 0xFFF00000) | FL_NG | FL_TYPE_SECT | FL_SHARED
+ | pgprot;
+
+ return 0;
+}
+
+static inline int fl_16m(u32 *fl_pte, phys_addr_t pa, int pgprot)
+{
+ int i;
+ int ret = 0;
+ for (i = 0; i < 16; i++)
+ if (*(fl_pte+i)) {
+ ret = -EBUSY;
+ goto fail;
+ }
+ for (i = 0; i < 16; i++)
+ *(fl_pte+i) = (pa & 0xFF000000) | FL_SUPERSECTION
+ | FL_TYPE_SECT | FL_SHARED | FL_NG | pgprot;
+fail:
+ return ret;
+}
+
+int msm_iommu_pagetable_map(struct msm_iommu_pt *pt, unsigned long va,
+ phys_addr_t pa, size_t len, int prot)
+{
+ int ret;
+ struct scatterlist sg;
+
+ if (len != SZ_16M && len != SZ_1M &&
+ len != SZ_64K && len != SZ_4K) {
+ pr_debug("Bad size: %zd\n", len);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ sg_init_table(&sg, 1);
+ sg_dma_address(&sg) = pa;
+ sg.length = len;
+
+ ret = msm_iommu_pagetable_map_range(pt, va, &sg, len, prot);
+
+fail:
+ return ret;
+}
+
+size_t msm_iommu_pagetable_unmap(struct msm_iommu_pt *pt, unsigned long va,
+ size_t len)
+{
+ msm_iommu_pagetable_unmap_range(pt, va, len);
+ return len;
+}
+
+static phys_addr_t get_phys_addr(struct scatterlist *sg)
+{
+ /*
+ * Try sg_dma_address first so that we can
+ * map carveout regions that do not have a
+ * struct page associated with them.
+ */
+ phys_addr_t pa = sg_dma_address(sg);
+ if (pa == 0)
+ pa = sg_phys(sg);
+ return pa;
+}
+
+/*
+ * For debugging we may want to force mappings to be 4K only
+ */
+#ifdef CONFIG_IOMMU_FORCE_4K_MAPPINGS
+static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
+ int align)
+{
+ if (align == SZ_4K) {
+ return IS_ALIGNED(va, align) && IS_ALIGNED(pa, align)
+ && (len >= align);
+ } else {
+ return 0;
+ }
+}
+#else
+static inline int is_fully_aligned(unsigned int va, phys_addr_t pa, size_t len,
+ int align)
+{
+ return IS_ALIGNED(va, align) && IS_ALIGNED(pa, align)
+ && (len >= align);
+}
+#endif
+
+int msm_iommu_pagetable_map_range(struct msm_iommu_pt *pt, unsigned int va,
+ struct scatterlist *sg, unsigned int len, int prot)
+{
+ phys_addr_t pa;
+ unsigned int start_va = va;
+ unsigned int offset = 0;
+ u32 *fl_pte;
+ u32 *fl_pte_shadow;
+ u32 fl_offset;
+ u32 *sl_table = NULL;
+ u32 sl_offset, sl_start;
+ unsigned int chunk_size, chunk_offset = 0;
+ int ret = 0;
+ unsigned int pgprot4k, pgprot64k, pgprot1m, pgprot16m;
+
+ BUG_ON(len & (SZ_4K - 1));
+
+ pgprot4k = __get_pgprot(prot, SZ_4K);
+ pgprot64k = __get_pgprot(prot, SZ_64K);
+ pgprot1m = __get_pgprot(prot, SZ_1M);
+ pgprot16m = __get_pgprot(prot, SZ_16M);
+ if (!pgprot4k || !pgprot64k || !pgprot1m || !pgprot16m) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ fl_offset = FL_OFFSET(va); /* Upper 12 bits */
+ fl_pte = pt->fl_table + fl_offset; /* int pointers, 4 bytes */
+ fl_pte_shadow = pt->fl_table_shadow + fl_offset;
+ pa = get_phys_addr(sg);
+
+ while (offset < len) {
+ chunk_size = SZ_4K;
+
+ if (is_fully_aligned(va, pa, sg->length - chunk_offset,
+ SZ_16M))
+ chunk_size = SZ_16M;
+ else if (is_fully_aligned(va, pa, sg->length - chunk_offset,
+ SZ_1M))
+ chunk_size = SZ_1M;
+ /* 64k or 4k determined later */
+
+// trace_iommu_map_range(va, pa, sg->length, chunk_size);
+
+ /* for 1M and 16M, only first level entries are required */
+ if (chunk_size >= SZ_1M) {
+ if (chunk_size == SZ_16M) {
+ ret = fl_16m(fl_pte, pa, pgprot16m);
+ if (ret)
+ goto fail;
+ clean_pte(fl_pte, fl_pte + 16, pt->redirect);
+ fl_pte += 16;
+ fl_pte_shadow += 16;
+ } else if (chunk_size == SZ_1M) {
+ ret = fl_1m(fl_pte, pa, pgprot1m);
+ if (ret)
+ goto fail;
+ clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+ fl_pte++;
+ fl_pte_shadow++;
+ }
+
+ offset += chunk_size;
+ chunk_offset += chunk_size;
+ va += chunk_size;
+ pa += chunk_size;
+
+ if (chunk_offset >= sg->length && offset < len) {
+ chunk_offset = 0;
+ sg = sg_next(sg);
+ pa = get_phys_addr(sg);
+ }
+ continue;
+ }
+ /* for 4K or 64K, make sure there is a second level table */
+ if (*fl_pte == 0) {
+ if (!make_second_level(pt, fl_pte, fl_pte_shadow)) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ }
+ if (!(*fl_pte & FL_TYPE_TABLE)) {
+ ret = -EBUSY;
+ goto fail;
+ }
+ sl_table = __va(((*fl_pte) & FL_BASE_MASK));
+ sl_offset = SL_OFFSET(va);
+ /* Keep track of initial position so we
+ * don't clean more than we have to
+ */
+ sl_start = sl_offset;
+
+ /* Build the 2nd level page table */
+ while (offset < len && sl_offset < NUM_SL_PTE) {
+ /* Map a large 64K page if the chunk is large enough and
+ * the pa and va are aligned
+ */
+
+ if (is_fully_aligned(va, pa, sg->length - chunk_offset,
+ SZ_64K))
+ chunk_size = SZ_64K;
+ else
+ chunk_size = SZ_4K;
+
+// trace_iommu_map_range(va, pa, sg->length,
+// chunk_size);
+
+ if (chunk_size == SZ_4K) {
+ sl_4k(&sl_table[sl_offset], pa, pgprot4k);
+ sl_offset++;
+ /* Increment map count */
+ (*fl_pte_shadow)++;
+ } else {
+ BUG_ON(sl_offset + 16 > NUM_SL_PTE);
+ sl_64k(&sl_table[sl_offset], pa, pgprot64k);
+ sl_offset += 16;
+ /* Increment map count */
+ *fl_pte_shadow += 16;
+ }
+
+ offset += chunk_size;
+ chunk_offset += chunk_size;
+ va += chunk_size;
+ pa += chunk_size;
+
+ if (chunk_offset >= sg->length && offset < len) {
+ chunk_offset = 0;
+ sg = sg_next(sg);
+ pa = get_phys_addr(sg);
+ }
+ }
+
+ clean_pte(sl_table + sl_start, sl_table + sl_offset,
+ pt->redirect);
+ fl_pte++;
+ fl_pte_shadow++;
+ sl_offset = 0;
+ }
+
+fail:
+ if (ret && offset > 0)
+ msm_iommu_pagetable_unmap_range(pt, start_va, offset);
+
+ return ret;
+}
+
+void msm_iommu_pagetable_unmap_range(struct msm_iommu_pt *pt, unsigned int va,
+ unsigned int len)
+{
+ unsigned int offset = 0;
+ u32 *fl_pte;
+ u32 *fl_pte_shadow;
+ u32 fl_offset;
+ u32 *sl_table;
+ u32 sl_start, sl_end;
+ int used;
+
+ BUG_ON(len & (SZ_4K - 1));
+
+ fl_offset = FL_OFFSET(va); /* Upper 12 bits */
+ fl_pte = pt->fl_table + fl_offset; /* int pointers, 4 bytes */
+ fl_pte_shadow = pt->fl_table_shadow + fl_offset;
+
+ while (offset < len) {
+ if (*fl_pte & FL_TYPE_TABLE) {
+ unsigned int n_entries;
+
+ sl_start = SL_OFFSET(va);
+ sl_table = __va(((*fl_pte) & FL_BASE_MASK));
+ sl_end = ((len - offset) / SZ_4K) + sl_start;
+
+ if (sl_end > NUM_SL_PTE)
+ sl_end = NUM_SL_PTE;
+ n_entries = sl_end - sl_start;
+
+ memset(sl_table + sl_start, 0, n_entries * 4);
+ clean_pte(sl_table + sl_start, sl_table + sl_end,
+ pt->redirect);
+
+ offset += n_entries * SZ_4K;
+ va += n_entries * SZ_4K;
+
+ BUG_ON((*fl_pte_shadow & 0x1FF) < n_entries);
+
+ /* Decrement map count */
+ *fl_pte_shadow -= n_entries;
+ used = *fl_pte_shadow & 0x1FF;
+
+ if (!used) {
+ *fl_pte = 0;
+ clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+ }
+
+ sl_start = 0;
+ } else {
+ *fl_pte = 0;
+ *fl_pte_shadow = 0;
+
+ clean_pte(fl_pte, fl_pte + 1, pt->redirect);
+ va += SZ_1M;
+ offset += SZ_1M;
+ sl_start = 0;
+ }
+ fl_pte++;
+ fl_pte_shadow++;
+ }
+}
+
+phys_addr_t msm_iommu_iova_to_phys_soft(struct iommu_domain *domain,
+ phys_addr_t va)
+{
+ struct msm_iommu_priv *priv = to_msm_priv(domain);
+ struct msm_iommu_pt *pt = &priv->pt;
+ u32 *fl_pte;
+ u32 fl_offset;
+ u32 *sl_table = NULL;
+ u32 sl_offset;
+ u32 *sl_pte;
+
+ if (!pt->fl_table) {
+ pr_err("Page table doesn't exist\n");
+ return 0;
+ }
+
+ fl_offset = FL_OFFSET(va);
+ fl_pte = pt->fl_table + fl_offset;
+
+ if (*fl_pte & FL_TYPE_TABLE) {
+ sl_table = __va(((*fl_pte) & FL_BASE_MASK));
+ sl_offset = SL_OFFSET(va);
+ sl_pte = sl_table + sl_offset;
+ /* 64 KB large page */
+ if (*sl_pte & SL_TYPE_LARGE)
+ return (*sl_pte & 0xFFFF0000) | (va & ~0xFFFF0000);
+ /* 4 KB small page */
+ if (*sl_pte & SL_TYPE_SMALL)
+ return (*sl_pte & 0xFFFFF000) | (va & ~0xFFFFF000);
+ } else {
+ /* 16 MB supersection */
+ if (*fl_pte & FL_SUPERSECTION)
+ return (*fl_pte & 0xFF000000) | (va & ~0xFF000000);
+ /* 1 MB section */
+ if (*fl_pte & FL_TYPE_SECT)
+ return (*fl_pte & 0xFFF00000) | (va & ~0xFFF00000);
+ }
+ return 0;
+}
+
+static int __init get_tex_class(int icp, int ocp, int mt, int nos)
+{
+ int i = 0;
+ unsigned int prrr;
+ unsigned int nmrr;
+ int c_icp, c_ocp, c_mt, c_nos;
+
+ prrr = msm_iommu_get_prrr();
+ nmrr = msm_iommu_get_nmrr();
+
+ for (i = 0; i < NUM_TEX_CLASS; i++) {
+ c_nos = PRRR_NOS(prrr, i);
+ c_mt = PRRR_MT(prrr, i);
+ c_icp = NMRR_ICP(nmrr, i);
+ c_ocp = NMRR_OCP(nmrr, i);
+
+ if (icp == c_icp && ocp == c_ocp && c_mt == mt && c_nos == nos)
+ return i;
+ }
+
+ return -ENODEV;
+}
+
+static void __init setup_iommu_tex_classes(void)
+{
+ msm_iommu_tex_class[MSM_IOMMU_ATTR_NONCACHED] =
+ get_tex_class(CP_NONCACHED, CP_NONCACHED,
+ MT_IOMMU_NORMAL, 1);
+
+ msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_WA] =
+ get_tex_class(CP_WB_WA, CP_WB_WA, MT_IOMMU_NORMAL, 1);
+
+ msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WB_NWA] =
+ get_tex_class(CP_WB_NWA, CP_WB_NWA, MT_IOMMU_NORMAL, 1);
+
+ msm_iommu_tex_class[MSM_IOMMU_ATTR_CACHED_WT] =
+ get_tex_class(CP_WT, CP_WT, MT_IOMMU_NORMAL, 1);
+}
+
+void __init msm_iommu_pagetable_init(void)
+{
+ setup_iommu_tex_classes();
+}
diff --git a/drivers/iommu/qcom/msm_iommu_pagetable.h b/drivers/iommu/qcom/msm_iommu_pagetable.h
new file mode 100644
index 000000000000..12a8d274f95e
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_pagetable.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ARCH_ARM_MACH_MSM_IOMMU_PAGETABLE_H
+#define __ARCH_ARM_MACH_MSM_IOMMU_PAGETABLE_H
+
+struct msm_iommu_pt;
+
+void msm_iommu_pagetable_init(void);
+int msm_iommu_pagetable_alloc(struct msm_iommu_pt *pt);
+void msm_iommu_pagetable_free(struct msm_iommu_pt *pt);
+int msm_iommu_pagetable_map(struct msm_iommu_pt *pt, unsigned long va,
+ phys_addr_t pa, size_t len, int prot);
+size_t msm_iommu_pagetable_unmap(struct msm_iommu_pt *pt, unsigned long va,
+ size_t len);
+int msm_iommu_pagetable_map_range(struct msm_iommu_pt *pt, unsigned int va,
+ struct scatterlist *sg, unsigned int len, int prot);
+void msm_iommu_pagetable_unmap_range(struct msm_iommu_pt *pt, unsigned int va,
+ unsigned int len);
+phys_addr_t msm_iommu_iova_to_phys_soft(struct iommu_domain *domain,
+ phys_addr_t va);
+void msm_iommu_pagetable_free_tables(struct msm_iommu_pt *pt, unsigned long va,
+ size_t len);
+#endif
diff --git a/drivers/iommu/qcom/msm_iommu_perfmon.h b/drivers/iommu/qcom/msm_iommu_perfmon.h
new file mode 100644
index 000000000000..45683f4ebd88
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_perfmon.h
@@ -0,0 +1,233 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/irqreturn.h>
+
+#ifndef MSM_IOMMU_PERFMON_H
+#define MSM_IOMMU_PERFMON_H
+
+/**
+ * struct iommu_pmon_counter - container for a performance counter.
+ * @counter_no: counter number within the group
+ * @absolute_counter_no: counter number within IOMMU PMU
+ * @value: cached counter value
+ * @overflow_count: no of times counter has overflowed
+ * @enabled: indicates whether counter is enabled or not
+ * @current_event_class: current selected event class, -1 if none
+ * @counter_dir: debugfs directory for this counter
+ * @cnt_group: group this counter belongs to
+ */
+struct iommu_pmon_counter {
+ unsigned int counter_no;
+ unsigned int absolute_counter_no;
+ unsigned long value;
+ unsigned long overflow_count;
+ unsigned int enabled;
+ int current_event_class;
+ struct dentry *counter_dir;
+ struct iommu_pmon_cnt_group *cnt_group;
+};
+
+/**
+ * struct iommu_pmon_cnt_group - container for a perf mon counter group.
+ * @grp_no: group number
+ * @num_counters: number of counters in this group
+ * @counters: list of counter in this group
+ * @group_dir: debugfs directory for this group
+ * @pmon: pointer to the iommu_pmon object this group belongs to
+ */
+struct iommu_pmon_cnt_group {
+ unsigned int grp_no;
+ unsigned int num_counters;
+ struct iommu_pmon_counter *counters;
+ struct dentry *group_dir;
+ struct iommu_pmon *pmon;
+};
+
+/**
+ * struct iommu_info - container for a perf mon iommu info.
+ * @iommu_name: name of the iommu from device tree
+ * @base: virtual base address for this iommu
+ * @evt_irq: irq number for event overflow interrupt
+ * @iommu_dev: pointer to iommu device
+ * @ops: iommu access operations pointer.
+ * @hw_ops: iommu pm hw access operations pointer.
+ * @always_on: 1 if iommu is always on, 0 otherwise.
+ */
+struct iommu_info {
+ const char *iommu_name;
+ void *base;
+ int evt_irq;
+ struct device *iommu_dev;
+ struct iommu_access_ops *ops;
+ struct iommu_pm_hw_ops *hw_ops;
+ unsigned int always_on;
+};
+
+/**
+ * struct iommu_pmon - main container for a perf mon data.
+ * @iommu_dir: debugfs directory for this iommu
+ * @iommu: iommu_info instance
+ * @iommu_list: iommu_list head
+ * @cnt_grp: list of counter groups
+ * @num_groups: number of counter groups
+ * @num_counters: number of counters per group
+ * @event_cls_supported: an array of event classes supported for this PMU
+ * @nevent_cls_supported: number of event classes supported.
+ * @enabled: Indicates whether perf. mon is enabled or not
+ * @iommu_attach_count: Attach count; non-zero while the iommu is attached.
+ * @lock: mutex used to synchronize access to shared data
+ */
+struct iommu_pmon {
+ struct dentry *iommu_dir;
+ struct iommu_info iommu;
+ struct list_head iommu_list;
+ struct iommu_pmon_cnt_group *cnt_grp;
+ u32 num_groups;
+ u32 num_counters;
+ u32 *event_cls_supported;
+ u32 nevent_cls_supported;
+ unsigned int enabled;
+ unsigned int iommu_attach_count;
+ struct mutex lock;
+};
+
+/**
+ * struct iommu_hw_ops - Callbacks for accessing IOMMU HW
+ * @initialize_hw: Call to do any initialization before enabling ovf interrupts
+ * @is_hw_access_OK: Returns 1 if we can access HW, 0 otherwise
+ * @grp_enable: Call to enable a counter group
+ * @grp_disable: Call to disable a counter group
+ * @enable_pm: Call to enable PM
+ * @disable_pm: Call to disable PM
+ * @reset_counters: Call to reset counters
+ * @check_for_overflow: Call to check for overflow
+ * @evt_ovfl_int_handler: Overflow interrupt handler callback
+ * @counter_enable: Call to enable counters
+ * @counter_disable: Call to disable counters
+ * @ovfl_int_enable: Call to enable overflow interrupts
+ * @ovfl_int_disable: Call to disable overflow interrupts
+ * @set_event_class: Call to set event class
+ * @read_counter: Call to read a counter value
+ */
+struct iommu_pm_hw_ops {
+ void (*initialize_hw)(const struct iommu_pmon *);
+ unsigned int (*is_hw_access_OK)(const struct iommu_pmon *);
+ void (*grp_enable)(struct iommu_info *, unsigned int);
+ void (*grp_disable)(struct iommu_info *, unsigned int);
+ void (*enable_pm)(struct iommu_info *);
+ void (*disable_pm)(struct iommu_info *);
+ void (*reset_counters)(const struct iommu_info *);
+ void (*check_for_overflow)(struct iommu_pmon *);
+ irqreturn_t (*evt_ovfl_int_handler)(int, void *);
+ void (*counter_enable)(struct iommu_info *,
+ struct iommu_pmon_counter *);
+ void (*counter_disable)(struct iommu_info *,
+ struct iommu_pmon_counter *);
+ void (*ovfl_int_enable)(struct iommu_info *,
+ const struct iommu_pmon_counter *);
+ void (*ovfl_int_disable)(struct iommu_info *,
+ const struct iommu_pmon_counter *);
+ void (*set_event_class)(struct iommu_pmon *pmon, unsigned int,
+ unsigned int);
+ unsigned int (*read_counter)(struct iommu_pmon_counter *);
+};
+
+#define MSM_IOMMU_PMU_NO_EVENT_CLASS -1
+
+#ifdef CONFIG_MSM_IOMMU_PMON
+
+/**
+ * Get pointer to PMU hardware access functions for IOMMUv0 PMU
+ */
+struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v0(void);
+
+/**
+ * Get pointer to PMU hardware access functions for IOMMUv1 PMU
+ */
+struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v1(void);
+
+/**
+ * Allocate memory for performance monitor structure. Must
+ * be called before msm_iommu_pm_iommu_register()
+ */
+struct iommu_pmon *msm_iommu_pm_alloc(struct device *iommu_dev);
+
+/**
+ * Free memory previously allocated with msm_iommu_pm_alloc()
+ */
+void msm_iommu_pm_free(struct device *iommu_dev);
+
+/**
+ * Register iommu with the performance monitor module.
+ */
+int msm_iommu_pm_iommu_register(struct iommu_pmon *info);
+
+/**
+ * Unregister iommu with the performance monitor module.
+ */
+void msm_iommu_pm_iommu_unregister(struct device *dev);
+
+/**
+ * Called by iommu driver when attaching is complete
+ * Must NOT be called with IOMMU mutexes held.
+ * @dev: IOMMU device that is attached
+ */
+void msm_iommu_attached(struct device *dev);
+
+/**
+ * Called by iommu driver before detaching.
+ * Must NOT be called with IOMMU mutexes held.
+ * @dev: IOMMU device that is going to be detached
+ */
+void msm_iommu_detached(struct device *dev);
+#else
+static inline struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v0(void)
+{
+ return NULL;
+}
+
+static inline struct iommu_pm_hw_ops *iommu_pm_get_hw_ops_v1(void)
+{
+ return NULL;
+}
+
+static inline struct iommu_pmon *msm_iommu_pm_alloc(struct device *iommu_dev)
+{
+ return NULL;
+}
+
+static inline void msm_iommu_pm_free(struct device *iommu_dev)
+{
+ return;
+}
+
+static inline int msm_iommu_pm_iommu_register(struct iommu_pmon *info)
+{
+ return -EIO;
+}
+
+static inline void msm_iommu_pm_iommu_unregister(struct device *dev)
+{
+}
+
+static inline void msm_iommu_attached(struct device *dev)
+{
+}
+
+static inline void msm_iommu_detached(struct device *dev)
+{
+}
+#endif
+#endif
diff --git a/drivers/iommu/qcom/msm_iommu_priv.h b/drivers/iommu/qcom/msm_iommu_priv.h
new file mode 100644
index 000000000000..4de0d7ef19e6
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_priv.h
@@ -0,0 +1,71 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_IOMMU_PRIV_H
+#define MSM_IOMMU_PRIV_H
+
+/**
+ * struct msm_iommu_pt - Container for first level page table and its
+ * attributes.
+ * @fl_table: Pointer to the first level page table.
+ * @redirect: Set to 1 if L2 redirect for page tables is enabled, 0 otherwise.
+ * @unaligned_fl_table: Original address of the memory backing the page table.
+ * fl_table is manually aligned (as per spec), but the original address is
+ * needed to free the table.
+ * @fl_table_shadow: A "copy" of fl_table with one difference: instead of
+ * storing the second level page table address together with the descriptor
+ * bits, each entry stores the second level page table address and the number
+ * of second level page table entries currently in use. This count is used to
+ * decide when a second level page table can be freed, and lets the free be
+ * deferred until after a TLB invalidate, which helps catch clients that
+ * unmap an address still in use.
+ * fl_table_shadow uses the lower 9 bits for the use count and the upper
+ * bits for the second level page table address.
+ * @sl_table_shadow: Same concept as fl_table_shadow, but for LPAE second
+ * level page tables.
+ */
+#ifdef CONFIG_IOMMU_LPAE
+struct msm_iommu_pt {
+ u64 *fl_table;
+ u64 **sl_table_shadow;
+ int redirect;
+ u64 *unaligned_fl_table;
+};
+#else
+struct msm_iommu_pt {
+ u32 *fl_table;
+ int redirect;
+ u32 *fl_table_shadow;
+};
+#endif
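+
+/*
+ * Layout sketch (editor's illustration, not in the original source): a
+ * non-LPAE shadow first level entry keeps the count of in-use second level
+ * PTEs in bits [8:0] and the physical address of the second level table in
+ * the remaining upper bits, so hypothetical helpers for splitting an entry
+ * could look like:
+ *
+ *   static inline u32 fl_shadow_use_count(u32 entry) { return entry & 0x1FF; }
+ *   static inline u32 fl_shadow_sl_table(u32 entry) { return entry & ~0x1FFu; }
+ */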
+/**
+ * struct msm_iommu_priv - Container for page table attributes and other
+ * private iommu domain information.
+ * @pt: Page table attribute structure.
+ * @list_attached: List of devices (contexts) attached to this domain.
+ * @domain: Generic iommu_domain embedded in this private structure.
+ * @client_name: Name of the domain client.
+ */
+struct msm_iommu_priv {
+ struct msm_iommu_pt pt;
+ struct list_head list_attached;
+ struct iommu_domain domain;
+ const char *client_name;
+};
+
+static inline struct msm_iommu_priv *to_msm_priv(struct iommu_domain *dom)
+{
+ return container_of(dom, struct msm_iommu_priv, domain);
+}
+
+#endif
diff --git a/drivers/iommu/qcom/msm_iommu_sec.c b/drivers/iommu/qcom/msm_iommu_sec.c
new file mode 100644
index 000000000000..33a1e988ae1e
--- /dev/null
+++ b/drivers/iommu/qcom/msm_iommu_sec.c
@@ -0,0 +1,876 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/clk.h>
+#include <linux/scatterlist.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/kmemleak.h>
+#include <linux/dma-mapping.h>
+#include <linux/qcom_scm.h>
+
+#include <asm/cacheflush.h>
+#include <asm/sizes.h>
+
+#include "msm_iommu_perfmon.h"
+#include "msm_iommu_hw-v1.h"
+#include "msm_iommu_priv.h"
+#include <linux/qcom_iommu.h>
+#include <trace/events/kmem.h>
+
+/* bitmap of the page sizes currently supported */
+#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+
+/* commands for SCM_SVC_MP */
+#define IOMMU_SECURE_CFG 2
+#define IOMMU_SECURE_PTBL_SIZE 3
+#define IOMMU_SECURE_PTBL_INIT 4
+#define IOMMU_SET_CP_POOL_SIZE 5
+#define IOMMU_SECURE_MAP 6
+#define IOMMU_SECURE_UNMAP 7
+#define IOMMU_SECURE_MAP2 0x0B
+#define IOMMU_SECURE_MAP2_FLAT 0x12
+#define IOMMU_SECURE_UNMAP2 0x0C
+#define IOMMU_SECURE_UNMAP2_FLAT 0x13
+#define IOMMU_TLBINVAL_FLAG 0x00000001
+
+/* commands for SCM_SVC_UTIL */
+#define IOMMU_DUMP_SMMU_FAULT_REGS 0x0C
+#define MAXIMUM_VIRT_SIZE (300*SZ_1M)
+
+
+#define MAKE_VERSION(major, minor, patch) \
+ (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
+
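+/*
+ * Example (editor's illustration): MAKE_VERSION(1, 1, 1) evaluates to
+ * (1 << 22) | (1 << 12) | 1 = 0x00401001, the packed value compared against
+ * the TZ feature version in msm_iommu_sec_ptbl_init() below.
+ */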
+
+static struct iommu_access_ops *iommu_access_ops;
+static int is_secure;
+
+static const struct of_device_id msm_smmu_list[] = {
+ { .compatible = "qcom,msm-smmu-v1", },
+ { .compatible = "qcom,msm-smmu-v2", },
+ { }
+};
+
+struct msm_scm_paddr_list {
+ unsigned int list;
+ unsigned int list_size;
+ unsigned int size;
+};
+
+struct msm_scm_mapping_info {
+ unsigned int id;
+ unsigned int ctx_id;
+ unsigned int va;
+ unsigned int size;
+};
+
+struct msm_scm_map2_req {
+ struct msm_scm_paddr_list plist;
+ struct msm_scm_mapping_info info;
+ unsigned int flags;
+};
+
+struct msm_scm_unmap2_req {
+ struct msm_scm_mapping_info info;
+ unsigned int flags;
+};
+
+struct msm_cp_pool_size {
+ uint32_t size;
+ uint32_t spare;
+};
+
+#define NUM_DUMP_REGS 14
+/*
+ * some space to allow the number of registers returned by the secure
+ * environment to grow
+ */
+#define WIGGLE_ROOM (NUM_DUMP_REGS * 2)
+/* Each entry is a (reg_addr, reg_val) pair, hence the * 2 */
+#define SEC_DUMP_SIZE ((NUM_DUMP_REGS * 2) + WIGGLE_ROOM)
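+/*
+ * Editor's note: with NUM_DUMP_REGS = 14 this gives (14 * 2) + (14 * 2) = 56
+ * uint32_t slots, i.e. room for 28 (reg_addr, reg_val) pairs even though only
+ * 14 registers are expected today.
+ */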
+
+struct msm_scm_fault_regs_dump {
+ uint32_t dump_size;
+ uint32_t dump_data[SEC_DUMP_SIZE];
+} __aligned(PAGE_SIZE);
+
+void msm_iommu_sec_set_access_ops(struct iommu_access_ops *access_ops)
+{
+ iommu_access_ops = access_ops;
+}
+
+static int msm_iommu_dump_fault_regs(int smmu_id, int cb_num,
+ struct msm_scm_fault_regs_dump *regs)
+{
+ int ret;
+
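+ /*
+ * Editor's note: the dump buffer is cleaned to RAM before the SCM call
+ * so the secure environment sees current contents, and invalidated
+ * afterwards so the CPU observes the register values written by TZ.
+ */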
+ dmac_clean_range(regs, regs + 1);
+
+ ret = qcom_scm_iommu_dump_fault_regs(smmu_id, cb_num,
+ virt_to_phys(regs), sizeof(*regs));
+
+ dmac_inv_range(regs, regs + 1);
+
+ return ret;
+}
+
+static int msm_iommu_reg_dump_to_regs(
+ struct msm_iommu_context_reg ctx_regs[],
+ struct msm_scm_fault_regs_dump *dump, struct msm_iommu_drvdata *drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+ int i, j, ret = 0;
+ const uint32_t nvals = (dump->dump_size / sizeof(uint32_t));
+ uint32_t *it = (uint32_t *) dump->dump_data;
+ const uint32_t * const end = ((uint32_t *) dump) + nvals;
+ phys_addr_t phys_base = drvdata->phys_base;
+ int ctx = ctx_drvdata->num;
+
+ if (!nvals)
+ return -EINVAL;
+
+ for (i = 1; it < end; it += 2, i += 2) {
+ unsigned int reg_offset;
+ uint32_t addr = *it;
+ uint32_t val = *(it + 1);
+ struct msm_iommu_context_reg *reg = NULL;
+ if (addr < phys_base) {
+ pr_err("Bogus-looking register (0x%x) for Iommu with base at %pa. Skipping.\n",
+ addr, &phys_base);
+ continue;
+ }
+ reg_offset = addr - phys_base;
+
+ for (j = 0; j < MAX_DUMP_REGS; ++j) {
+ struct dump_regs_tbl_entry dump_reg = dump_regs_tbl[j];
+ void *test_reg;
+ unsigned int test_offset;
+ switch (dump_reg.dump_reg_type) {
+ case DRT_CTX_REG:
+ test_reg = CTX_REG(dump_reg.reg_offset,
+ drvdata->cb_base, ctx);
+ break;
+ case DRT_GLOBAL_REG:
+ test_reg = GLB_REG(
+ dump_reg.reg_offset, drvdata->glb_base);
+ break;
+ case DRT_GLOBAL_REG_N:
+ test_reg = GLB_REG_N(
+ drvdata->glb_base, ctx,
+ dump_reg.reg_offset);
+ break;
+ default:
+ pr_err("Unknown dump_reg_type: 0x%x\n",
+ dump_reg.dump_reg_type);
+ BUG();
+ break;
+ }
+ test_offset = test_reg - drvdata->glb_base;
+ if (test_offset == reg_offset) {
+ reg = &ctx_regs[j];
+ break;
+ }
+ }
+
+ if (reg == NULL) {
+ pr_debug("Unknown register in secure CB dump: %x\n",
+ addr);
+ continue;
+ }
+
+ if (reg->valid) {
+ WARN(1, "Invalid (repeated?) register in CB dump: %x\n",
+ addr);
+ continue;
+ }
+
+ reg->val = val;
+ reg->valid = true;
+ }
+
+ if (i != nvals) {
+ pr_err("Invalid dump! %d != %d\n", i, nvals);
+ ret = 1;
+ }
+
+ for (i = 0; i < MAX_DUMP_REGS; ++i) {
+ if (!ctx_regs[i].valid) {
+ if (dump_regs_tbl[i].must_be_present) {
+ pr_err("Register missing from dump for ctx %d: %s, 0x%x\n",
+ ctx,
+ dump_regs_tbl[i].name,
+ dump_regs_tbl[i].reg_offset);
+ ret = 1;
+ }
+ ctx_regs[i].val = 0xd00dfeed;
+ }
+ }
+
+ return ret;
+}
+
+irqreturn_t msm_iommu_secure_fault_handler_v2(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ struct msm_iommu_drvdata *drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ struct msm_scm_fault_regs_dump *regs;
+ int tmp, ret = IRQ_HANDLED;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ BUG_ON(!pdev);
+
+ drvdata = dev_get_drvdata(pdev->dev.parent);
+ BUG_ON(!drvdata);
+
+ ctx_drvdata = dev_get_drvdata(&pdev->dev);
+ BUG_ON(!ctx_drvdata);
+
+ regs = kzalloc(sizeof(*regs), GFP_KERNEL);
+ if (!regs) {
+ pr_err("%s: Couldn't allocate memory\n", __func__);
+ goto lock_release;
+ }
+
+ if (!drvdata->ctx_attach_count) {
+ pr_err("Unexpected IOMMU page fault from secure context bank!\n");
+ pr_err("name = %s\n", drvdata->name);
+ pr_err("Power is OFF. Unable to read page fault information\n");
+ /*
+ * We cannot determine which context bank caused the issue so
+ * we just return handled here to ensure IRQ handler code is
+ * happy
+ */
+ goto free_regs;
+ }
+
+ iommu_access_ops->iommu_clk_on(drvdata);
+ tmp = msm_iommu_dump_fault_regs(drvdata->sec_id,
+ ctx_drvdata->num, regs);
+ iommu_access_ops->iommu_clk_off(drvdata);
+
+ if (tmp) {
+ pr_err("%s: Couldn't dump fault registers (%d) %s, ctx: %d\n",
+ __func__, tmp, drvdata->name, ctx_drvdata->num);
+ goto free_regs;
+ } else {
+ struct msm_iommu_context_reg ctx_regs[MAX_DUMP_REGS];
+ memset(ctx_regs, 0, sizeof(ctx_regs));
+ tmp = msm_iommu_reg_dump_to_regs(
+ ctx_regs, regs, drvdata, ctx_drvdata);
+ if (tmp < 0) {
+ ret = IRQ_NONE;
+ pr_err("Incorrect response from secure environment\n");
+ goto free_regs;
+ }
+
+ if (ctx_regs[DUMP_REG_FSR].val) {
+ if (tmp)
+ pr_err("Incomplete fault register dump. Printout will be incomplete.\n");
+ if (!ctx_drvdata->attached_domain) {
+ pr_err("Bad domain in interrupt handler\n");
+ tmp = -ENOSYS;
+ } else {
+ tmp = report_iommu_fault(
+ ctx_drvdata->attached_domain,
+ &ctx_drvdata->pdev->dev,
+ COMBINE_DUMP_REG(
+ ctx_regs[DUMP_REG_FAR1].val,
+ ctx_regs[DUMP_REG_FAR0].val),
+ 0);
+ }
+
+ /* if the fault wasn't handled by someone else: */
+ if (tmp == -ENOSYS) {
+ pr_err("Unexpected IOMMU page fault from secure context bank!\n");
+ pr_err("name = %s\n", drvdata->name);
+ pr_err("context = %s (%d)\n", ctx_drvdata->name,
+ ctx_drvdata->num);
+ pr_err("Interesting registers:\n");
+ print_ctx_regs(ctx_regs);
+ }
+ } else {
+ ret = IRQ_NONE;
+ }
+ }
+free_regs:
+ kfree(regs);
+lock_release:
+ iommu_access_ops->iommu_lock_release(0);
+ return ret;
+}
+
+#define SCM_SVC_MP 0xc
+
+static int msm_iommu_sec_ptbl_init(void)
+{
+ struct device_node *np;
+ int psize[2] = {0, 0};
+ unsigned int spare = 0;
+ int ret;
+ int version;
+ /* Use a dummy device for dma_alloc_attrs allocation */
+ struct device dev = { 0 };
+ void *cpu_addr;
+ dma_addr_t paddr;
+ DEFINE_DMA_ATTRS(attrs);
+
+ for_each_matching_node(np, msm_smmu_list)
+ if (of_find_property(np, "qcom,iommu-secure-id", NULL) &&
+ of_device_is_available(np))
+ break;
+
+ if (!np)
+ return 0;
+
+ of_node_put(np);
+
+ version = qcom_scm_get_feat_version(SCM_SVC_MP);
+
+ if (version >= MAKE_VERSION(1, 1, 1)) {
+ ret = qcom_scm_iommu_set_cp_pool_size(MAXIMUM_VIRT_SIZE, 0);
+ if (ret) {
+ pr_err("scm call IOMMU_SET_CP_POOL_SIZE failed\n");
+ goto fail;
+ }
+ }
+
+ ret = qcom_scm_iommu_secure_ptbl_size(spare, psize);
+ if (ret) {
+ pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
+ goto fail;
+ }
+
+ pr_err("iommu sec: psize[0]: %d, psize[1]: %d\n", psize[0], psize[1]);
+
+ if (psize[1]) {
+ pr_err("scm call IOMMU_SECURE_PTBL_SIZE failed\n");
+ goto fail;
+ }
+
+ dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+ dev.coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+
+ cpu_addr = dma_alloc_attrs(&dev, psize[0], &paddr, GFP_KERNEL, &attrs);
+ if (!cpu_addr) {
+ pr_err("%s: Failed to allocate %d bytes for PTBL\n",
+ __func__, psize[0]);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize[0], spare);
+
+ if (ret) {
+ pr_err("scm call IOMMU_SECURE_PTBL_INIT failed (%d)\n", ret);
+ goto fail_mem;
+ }
+
+ return 0;
+
+fail_mem:
+ dma_free_attrs(&dev, psize[0], cpu_addr, paddr, &attrs);
+fail:
+ return ret;
+}
+
+int msm_iommu_sec_program_iommu(struct msm_iommu_drvdata *drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata)
+{
+ if (drvdata->smmu_local_base) {
+ writel_relaxed(0xFFFFFFFF,
+ drvdata->smmu_local_base + SMMU_INTR_SEL_NS);
+ mb();
+ }
+
+ return qcom_scm_restore_sec_cfg(drvdata->sec_id, ctx_drvdata->num);
+}
+
+static int msm_iommu_sec_map2(struct msm_scm_map2_req *map)
+{
+ u32 flags;
+
+#ifdef CONFIG_MSM_IOMMU_TLBINVAL_ON_MAP
+ flags = IOMMU_TLBINVAL_FLAG;
+#else
+ flags = 0;
+#endif
+
+ return qcom_scm_iommu_secure_map(map->plist.list,
+ map->plist.list_size,
+ map->plist.size,
+ map->info.id,
+ map->info.ctx_id,
+ map->info.va,
+ map->info.size,
+ flags);
+}
+
+static int msm_iommu_sec_ptbl_map(struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata,
+ unsigned long va, phys_addr_t pa, size_t len)
+{
+ struct msm_scm_map2_req map;
+ void *flush_va, *flush_va_end;
+ int ret = 0;
+
+ if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M) ||
+ !IS_ALIGNED(pa, SZ_1M))
+ return -EINVAL;
+ map.plist.list = virt_to_phys(&pa);
+ map.plist.list_size = 1;
+ map.plist.size = len;
+ map.info.id = iommu_drvdata->sec_id;
+ map.info.ctx_id = ctx_drvdata->num;
+ map.info.va = va;
+ map.info.size = len;
+
+ flush_va = &pa;
+ flush_va_end = (void *)
+ (((unsigned long) flush_va) + sizeof(phys_addr_t));
+
+ /*
+ * Ensure that the buffer is in RAM by the time it gets to TZ
+ */
+ dmac_clean_range(flush_va, flush_va_end);
+
+ ret = msm_iommu_sec_map2(&map);
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+static unsigned int get_phys_addr(struct scatterlist *sg)
+{
+ /*
+ * Try sg_dma_address first so that we can
+ * map carveout regions that do not have a
+ * struct page associated with them.
+ */
+ unsigned int pa = sg_dma_address(sg);
+ if (pa == 0)
+ pa = sg_phys(sg);
+ return pa;
+}
+
+static int msm_iommu_sec_ptbl_map_range(struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata,
+ unsigned long va, struct scatterlist *sg, size_t len)
+{
+ struct scatterlist *sgiter;
+ struct msm_scm_map2_req map;
+	unsigned int *pa_list = NULL;
+ unsigned int pa, cnt;
+ void *flush_va, *flush_va_end;
+ unsigned int offset = 0, chunk_offset = 0;
+ int ret;
+
+ if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
+ return -EINVAL;
+
+ map.info.id = iommu_drvdata->sec_id;
+ map.info.ctx_id = ctx_drvdata->num;
+ map.info.va = va;
+ map.info.size = len;
+
+ if (sg->length == len) {
+ /*
+ * physical address for secure mapping needs
+ * to be 1MB aligned
+ */
+ pa = get_phys_addr(sg);
+ if (!IS_ALIGNED(pa, SZ_1M))
+ return -EINVAL;
+ map.plist.list = virt_to_phys(&pa);
+ map.plist.list_size = 1;
+ map.plist.size = len;
+ flush_va = &pa;
+ } else {
+ sgiter = sg;
+ if (!IS_ALIGNED(sgiter->length, SZ_1M))
+ return -EINVAL;
+ cnt = sg->length / SZ_1M;
+ while ((sgiter = sg_next(sgiter))) {
+ if (!IS_ALIGNED(sgiter->length, SZ_1M))
+ return -EINVAL;
+ cnt += sgiter->length / SZ_1M;
+ }
+
+ pa_list = kmalloc(cnt * sizeof(*pa_list), GFP_KERNEL);
+ if (!pa_list)
+ return -ENOMEM;
+
+ sgiter = sg;
+ cnt = 0;
+ pa = get_phys_addr(sgiter);
+ if (!IS_ALIGNED(pa, SZ_1M)) {
+ kfree(pa_list);
+ return -EINVAL;
+ }
+ while (offset < len) {
+ pa += chunk_offset;
+ pa_list[cnt] = pa;
+ chunk_offset += SZ_1M;
+ offset += SZ_1M;
+ cnt++;
+
+ if (chunk_offset >= sgiter->length && offset < len) {
+ chunk_offset = 0;
+ sgiter = sg_next(sgiter);
+ pa = get_phys_addr(sgiter);
+ }
+ }
+
+ map.plist.list = virt_to_phys(pa_list);
+ map.plist.list_size = cnt;
+ map.plist.size = SZ_1M;
+ flush_va = pa_list;
+ }
+
+ /*
+ * Ensure that the buffer is in RAM by the time it gets to TZ
+ */
+ flush_va_end = (void *) (((unsigned long) flush_va) +
+ (map.plist.list_size * sizeof(*pa_list)));
+ dmac_clean_range(flush_va, flush_va_end);
+
+ ret = msm_iommu_sec_map2(&map);
+ kfree(pa_list);
+
+ return ret;
+}
+
+static int msm_iommu_sec_ptbl_unmap(struct msm_iommu_drvdata *iommu_drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata,
+ unsigned long va, size_t len)
+{
+ if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
+ return -EINVAL;
+
+ return qcom_scm_iommu_secure_unmap(iommu_drvdata->sec_id,
+ ctx_drvdata->num,
+ va,
+ len,
+ IOMMU_TLBINVAL_FLAG);
+}
+
+static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
+{
+ struct msm_iommu_priv *priv;
+ struct iommu_domain *domain;
+
+ if (type != IOMMU_DOMAIN_UNMANAGED)
+ return NULL;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return NULL;
+
+ INIT_LIST_HEAD(&priv->list_attached);
+ domain = &priv->domain;
+ return domain;
+}
+
+static void msm_iommu_domain_free(struct iommu_domain *domain)
+{
+ struct msm_iommu_priv *priv;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+ priv = to_msm_priv(domain);
+
+ kfree(priv);
+ iommu_access_ops->iommu_lock_release(0);
+}
+
+static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
+{
+ struct msm_iommu_priv *priv;
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ struct msm_iommu_ctx_drvdata *tmp_drvdata;
+ int ret = 0;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ priv = to_msm_priv(domain);
+ if (!priv || !dev) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ iommu_drvdata = dev_get_drvdata(dev->parent);
+ ctx_drvdata = dev_get_drvdata(dev);
+ if (!iommu_drvdata || !ctx_drvdata) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (!list_empty(&ctx_drvdata->attached_elm)) {
+ ret = -EBUSY;
+ goto fail;
+ }
+
+ list_for_each_entry(tmp_drvdata, &priv->list_attached, attached_elm)
+ if (tmp_drvdata == ctx_drvdata) {
+ ret = -EBUSY;
+ goto fail;
+ }
+
+ ret = iommu_access_ops->iommu_power_on(iommu_drvdata);
+ if (ret)
+ goto fail;
+
+ /* We can only do this once */
+ if (!iommu_drvdata->ctx_attach_count) {
+ ret = iommu_access_ops->iommu_clk_on(iommu_drvdata);
+ if (ret) {
+ iommu_access_ops->iommu_power_off(iommu_drvdata);
+ goto fail;
+ }
+
+ ret = msm_iommu_sec_program_iommu(iommu_drvdata,
+ ctx_drvdata);
+
+ /* bfb settings are always programmed by HLOS */
+ program_iommu_bfb_settings(iommu_drvdata->base,
+ iommu_drvdata->bfb_settings);
+
+ iommu_access_ops->iommu_clk_off(iommu_drvdata);
+ if (ret) {
+ iommu_access_ops->iommu_power_off(iommu_drvdata);
+ goto fail;
+ }
+ }
+
+ list_add(&(ctx_drvdata->attached_elm), &priv->list_attached);
+ ctx_drvdata->attached_domain = domain;
+ ++iommu_drvdata->ctx_attach_count;
+
+ iommu_access_ops->iommu_lock_release(0);
+
+ msm_iommu_attached(dev->parent);
+ return ret;
+fail:
+ iommu_access_ops->iommu_lock_release(0);
+ return ret;
+}
+
+static void msm_iommu_detach_dev(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+
+ if (!dev)
+ return;
+
+ msm_iommu_detached(dev->parent);
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ iommu_drvdata = dev_get_drvdata(dev->parent);
+ ctx_drvdata = dev_get_drvdata(dev);
+ if (!iommu_drvdata || !ctx_drvdata || !ctx_drvdata->attached_domain)
+ goto fail;
+
+ list_del_init(&ctx_drvdata->attached_elm);
+ ctx_drvdata->attached_domain = NULL;
+
+ iommu_access_ops->iommu_power_off(iommu_drvdata);
+ BUG_ON(iommu_drvdata->ctx_attach_count == 0);
+ --iommu_drvdata->ctx_attach_count;
+fail:
+ iommu_access_ops->iommu_lock_release(0);
+}
+
+static int get_drvdata(struct iommu_domain *domain,
+ struct msm_iommu_drvdata **iommu_drvdata,
+ struct msm_iommu_ctx_drvdata **ctx_drvdata)
+{
+	struct msm_iommu_priv *priv = to_msm_priv(domain);
+	struct msm_iommu_ctx_drvdata *ctx;
+	bool found = false;
+
+	list_for_each_entry(ctx, &priv->list_attached, attached_elm) {
+		if (ctx->attached_domain == domain) {
+			found = true;
+			break;
+		}
+	}
+
+	/* Never dereference the loop cursor when nothing is attached. */
+	if (!found)
+		return -EINVAL;
+
+ *ctx_drvdata = ctx;
+ *iommu_drvdata = dev_get_drvdata(ctx->pdev->dev.parent);
+ return 0;
+}
+
+static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
+ phys_addr_t pa, size_t len, int prot)
+{
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ int ret = 0;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
+ if (ret)
+ goto fail;
+
+ iommu_access_ops->iommu_clk_on(iommu_drvdata);
+ ret = msm_iommu_sec_ptbl_map(iommu_drvdata, ctx_drvdata,
+ va, pa, len);
+ iommu_access_ops->iommu_clk_off(iommu_drvdata);
+fail:
+ iommu_access_ops->iommu_lock_release(0);
+ return ret;
+}
+
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+ size_t len)
+{
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ int ret = -ENODEV;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
+ if (ret)
+ goto fail;
+
+ iommu_access_ops->iommu_clk_on(iommu_drvdata);
+ ret = msm_iommu_sec_ptbl_unmap(iommu_drvdata, ctx_drvdata,
+ va, len);
+ iommu_access_ops->iommu_clk_off(iommu_drvdata);
+fail:
+ iommu_access_ops->iommu_lock_release(0);
+
+ /* the IOMMU API requires us to return how many bytes were unmapped */
+ len = ret ? 0 : len;
+ return len;
+}
+
+static int msm_iommu_map_range(struct iommu_domain *domain, unsigned int va,
+ struct scatterlist *sg, unsigned int len,
+ int prot)
+{
+ int ret;
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
+ if (ret)
+ goto fail;
+ iommu_access_ops->iommu_clk_on(iommu_drvdata);
+ ret = msm_iommu_sec_ptbl_map_range(iommu_drvdata, ctx_drvdata,
+ va, sg, len);
+ iommu_access_ops->iommu_clk_off(iommu_drvdata);
+fail:
+ iommu_access_ops->iommu_lock_release(0);
+ return ret;
+}
+
+
+static int msm_iommu_unmap_range(struct iommu_domain *domain, unsigned int va,
+ unsigned int len)
+{
+ struct msm_iommu_drvdata *iommu_drvdata;
+ struct msm_iommu_ctx_drvdata *ctx_drvdata;
+ int ret = -EINVAL;
+
+ if (!IS_ALIGNED(va, SZ_1M) || !IS_ALIGNED(len, SZ_1M))
+ return -EINVAL;
+
+ iommu_access_ops->iommu_lock_acquire(0);
+
+ ret = get_drvdata(domain, &iommu_drvdata, &ctx_drvdata);
+ if (ret)
+ goto fail;
+
+ iommu_access_ops->iommu_clk_on(iommu_drvdata);
+ ret = msm_iommu_sec_ptbl_unmap(iommu_drvdata, ctx_drvdata, va, len);
+ iommu_access_ops->iommu_clk_off(iommu_drvdata);
+
+fail:
+ iommu_access_ops->iommu_lock_release(0);
+	return ret;
+}
+
+static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
+ phys_addr_t va)
+{
+ return 0;
+}
+
+void msm_iommu_check_scm_call_avail(void)
+{
+ is_secure = qcom_scm_is_call_available(SCM_SVC_MP, IOMMU_SECURE_CFG);
+}
+
+int msm_iommu_get_scm_call_avail(void)
+{
+ return is_secure;
+}
+
+static struct iommu_ops msm_iommu_ops = {
+ .domain_alloc = msm_iommu_domain_alloc,
+ .domain_free = msm_iommu_domain_free,
+ .attach_dev = msm_iommu_attach_dev,
+ .detach_dev = msm_iommu_detach_dev,
+ .map = msm_iommu_map,
+ .unmap = msm_iommu_unmap,
+/* .map_range = msm_iommu_map_range,*/
+ .map_sg = default_iommu_map_sg,
+/* .unmap_range = msm_iommu_unmap_range,*/
+ .iova_to_phys = msm_iommu_iova_to_phys,
+ .pgsize_bitmap = MSM_IOMMU_PGSIZES,
+};
+
+static int __init msm_iommu_sec_init(void)
+{
+ int ret;
+
+ ret = bus_register(&msm_iommu_sec_bus_type);
+ if (ret)
+ return ret;
+
+ ret = bus_set_iommu(&msm_iommu_sec_bus_type, &msm_iommu_ops);
+ if (ret) {
+ bus_unregister(&msm_iommu_sec_bus_type);
+ return ret;
+ }
+
+ ret = msm_iommu_sec_ptbl_init();
+
+ return ret;
+}
+
+subsys_initcall(msm_iommu_sec_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM SMMU Secure Driver");
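The secure page-table path above only accepts 1MB-aligned IOVAs, physical addresses and lengths, and the prot argument is not interpreted because TZ owns the page tables. A minimal, hypothetical caller of such a domain could therefore look like the sketch below (the function name is illustrative and not part of this patch; <linux/iommu.h> and <linux/sizes.h> are assumed):

/* Hypothetical client of the secure MSM IOMMU domain; 1MB granularity only. */
static int example_secure_map(struct iommu_domain *domain, unsigned long iova,
			      phys_addr_t pa, size_t len)
{
	/* The SCM interface rejects anything that is not SZ_1M aligned. */
	if (!IS_ALIGNED(iova, SZ_1M) || !IS_ALIGNED(pa, SZ_1M) ||
	    !IS_ALIGNED(len, SZ_1M))
		return -EINVAL;

	/* prot is ignored by the secure backend above. */
	return iommu_map(domain, iova, pa, len, IOMMU_READ | IOMMU_WRITE);
}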
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
index 6afc9fabd94c..63886e443a5e 100644
--- a/drivers/mfd/qcom_rpm.c
+++ b/drivers/mfd/qcom_rpm.c
@@ -373,6 +373,42 @@ static const struct of_device_id qcom_rpm_of_match[] = {
};
MODULE_DEVICE_TABLE(of, qcom_rpm_of_match);
+#define QCOM_RPM_STATUS_ID_SEQUENCE 7
+int qcom_rpm_read(struct qcom_rpm *rpm,
+ int resource,
+ u32 *val,
+ size_t count)
+{
+ const struct qcom_rpm_resource *res;
+ const struct qcom_rpm_data *data = rpm->data;
+ uint32_t seq_begin;
+ uint32_t seq_end;
+ int rc;
+ int i;
+
+ if (WARN_ON(resource < 0 || resource >= data->n_resources))
+ return -EINVAL;
+
+ res = &data->resource_table[resource];
+ if (WARN_ON(res->size != count))
+ return -EINVAL;
+
+ mutex_lock(&rpm->lock);
+ seq_begin = readl(RPM_STATUS_REG(rpm, QCOM_RPM_STATUS_ID_SEQUENCE));
+
+ for (i = 0; i < count; i++)
+ *val++ = readl(RPM_STATUS_REG(rpm, res->status_id));
+
+ seq_end = readl(RPM_STATUS_REG(rpm, QCOM_RPM_STATUS_ID_SEQUENCE));
+
+ rc = (seq_begin != seq_end || (seq_begin & 0x01)) ? -EBUSY : 0;
+
+ mutex_unlock(&rpm->lock);
+ return rc;
+
+}
+EXPORT_SYMBOL(qcom_rpm_read);
+
int qcom_rpm_write(struct qcom_rpm *rpm,
int state,
int resource,
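qcom_rpm_read() samples the RPM status sequence register before and after copying the status words and returns -EBUSY when the snapshot changed or a write was still in flight (odd sequence value), so callers are expected to retry. A hedged sketch of that usage, assuming the prototype is exported next to qcom_rpm_write() in <linux/mfd/qcom_rpm.h> and that the resource exposes a single status word:

/* Placeholder id for illustration; substitute a real QCOM_RPM_* resource. */
#define EXAMPLE_RPM_RESOURCE	0

static int example_rpm_read_stable(struct qcom_rpm *rpm, u32 *val)
{
	int retries = 5;
	int ret;

	do {
		ret = qcom_rpm_read(rpm, EXAMPLE_RPM_RESOURCE, val, 1);
	} while (ret == -EBUSY && --retries);

	return ret;
}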
diff --git a/drivers/mfd/ssbi.c b/drivers/mfd/ssbi.c
index 27986f641f7d..39734999fe40 100644
--- a/drivers/mfd/ssbi.c
+++ b/drivers/mfd/ssbi.c
@@ -330,7 +330,12 @@ static struct platform_driver ssbi_driver = {
.of_match_table = ssbi_match_table,
},
};
-module_platform_driver(ssbi_driver);
+
+static int ssbi_init(void)
+{
+ return platform_driver_register(&ssbi_driver);
+}
+subsys_initcall(ssbi_init);
MODULE_LICENSE("GPL v2");
MODULE_VERSION("1.0");
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index f2eeb38efa65..bc08c3b1335c 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -5,6 +5,8 @@
menuconfig MMC
tristate "MMC/SD/SDIO card support"
depends on HAS_IOMEM
+ select PWRSEQ_SIMPLE
+ select PWRSEQ_EMMC
help
This selects MultiMediaCard, Secure Digital and Secure
Digital I/O support.
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index f771bc3496a4..0525054f4078 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -26,3 +26,11 @@ config MMC_PARANOID_SD_INIT
about re-trying SD init requests. This can be a useful
work-around for buggy controllers and hardware. Enable
if you are experiencing issues with SD detection.
+
+config PWRSEQ_EMMC
+ tristate "PwrSeq EMMC"
+ depends on OF
+
+config PWRSEQ_SIMPLE
+ tristate "PwrSeq Simple"
+ depends on OF
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 2c25138f28b7..b281675edb92 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -8,5 +8,7 @@ mmc_core-y := core.o bus.o host.o \
sdio.o sdio_ops.o sdio_bus.o \
sdio_cis.o sdio_io.o sdio_irq.o \
quirks.o slot-gpio.o
-mmc_core-$(CONFIG_OF) += pwrseq.o pwrseq_simple.o pwrseq_emmc.o
+mmc_core-$(CONFIG_OF) += pwrseq.o
mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
+obj-$(CONFIG_PWRSEQ_SIMPLE) += pwrseq_simple.o
+obj-$(CONFIG_PWRSEQ_EMMC) += pwrseq_emmc.o
diff --git a/drivers/mmc/core/pwrseq.c b/drivers/mmc/core/pwrseq.c
index 4c1d1757dbf9..9af53a66e646 100644
--- a/drivers/mmc/core/pwrseq.c
+++ b/drivers/mmc/core/pwrseq.c
@@ -8,50 +8,28 @@
* MMC power sequence management
*/
#include <linux/kernel.h>
-#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
-
#include <linux/mmc/host.h>
-
#include "pwrseq.h"
-struct mmc_pwrseq_match {
- const char *compatible;
- struct mmc_pwrseq *(*alloc)(struct mmc_host *host, struct device *dev);
-};
-
-static struct mmc_pwrseq_match pwrseq_match[] = {
- {
- .compatible = "mmc-pwrseq-simple",
- .alloc = mmc_pwrseq_simple_alloc,
- }, {
- .compatible = "mmc-pwrseq-emmc",
- .alloc = mmc_pwrseq_emmc_alloc,
- },
-};
-
-static struct mmc_pwrseq_match *mmc_pwrseq_find(struct device_node *np)
+static DEFINE_MUTEX(pwrseq_list_mutex);
+static LIST_HEAD(pwrseq_list);
+
+static struct mmc_pwrseq *of_find_mmc_pwrseq(struct device_node *np)
{
- struct mmc_pwrseq_match *match = ERR_PTR(-ENODEV);
- int i;
+ struct mmc_pwrseq *p;
- for (i = 0; i < ARRAY_SIZE(pwrseq_match); i++) {
- if (of_device_is_compatible(np, pwrseq_match[i].compatible)) {
- match = &pwrseq_match[i];
- break;
- }
- }
+ list_for_each_entry(p, &pwrseq_list, list)
+ if (p->dev->of_node == np)
+ return p;
- return match;
+ return NULL;
}
int mmc_pwrseq_alloc(struct mmc_host *host)
{
- struct platform_device *pdev;
struct device_node *np;
- struct mmc_pwrseq_match *match;
struct mmc_pwrseq *pwrseq;
int ret = 0;
@@ -59,25 +37,18 @@ int mmc_pwrseq_alloc(struct mmc_host *host)
if (!np)
return 0;
- pdev = of_find_device_by_node(np);
- if (!pdev) {
- ret = -ENODEV;
- goto err;
- }
-
- match = mmc_pwrseq_find(np);
- if (IS_ERR(match)) {
- ret = PTR_ERR(match);
- goto err;
- }
+ pwrseq = of_find_mmc_pwrseq(np);
- pwrseq = match->alloc(host, &pdev->dev);
- if (IS_ERR(pwrseq)) {
- ret = PTR_ERR(pwrseq);
- goto err;
+ if (pwrseq && pwrseq->ops && pwrseq->ops->alloc) {
+ host->pwrseq = pwrseq;
+ ret = pwrseq->ops->alloc(host);
+ if (IS_ERR_VALUE(ret)) {
+ host->pwrseq = NULL;
+ goto err;
+ }
}
+	if (pwrseq)
+		pwrseq->users++;
- host->pwrseq = pwrseq;
dev_info(host->parent, "allocated mmc-pwrseq\n");
err:
@@ -116,5 +87,32 @@ void mmc_pwrseq_free(struct mmc_host *host)
if (pwrseq && pwrseq->ops && pwrseq->ops->free)
pwrseq->ops->free(host);
+	if (pwrseq)
+		pwrseq->users--;
host->pwrseq = NULL;
}
+
+int mmc_pwrseq_register(struct mmc_pwrseq *pwrseq)
+{
+ if (!pwrseq || !pwrseq->ops || !pwrseq->dev)
+ return -EINVAL;
+
+ mutex_lock(&pwrseq_list_mutex);
+ list_add(&pwrseq->list, &pwrseq_list);
+ mutex_unlock(&pwrseq_list_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mmc_pwrseq_register);
+
+int mmc_pwrseq_unregister(struct mmc_pwrseq *pwrseq)
+{
+ if (!pwrseq->users) {
+ mutex_lock(&pwrseq_list_mutex);
+ list_del(&pwrseq->list);
+ mutex_unlock(&pwrseq_list_mutex);
+ return 0;
+ }
+
+ return -EBUSY;
+}
+EXPORT_SYMBOL_GPL(mmc_pwrseq_unregister);
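With this rework a power sequence is no longer allocated from a hard-coded match table; a provider is an ordinary platform driver that hands a struct mmc_pwrseq to the core with mmc_pwrseq_register(), and the core looks it up by of_node when a host claims it. The pwrseq_simple and pwrseq_emmc conversions below follow this shape; a stripped-down, illustrative skeleton of the pattern (names are placeholders, built inside drivers/mmc/core next to "pwrseq.h"):

/* Sketch of a pwrseq provider under the new registration API. */
struct mmc_pwrseq_foo {
	struct mmc_pwrseq pwrseq;
};

static int mmc_pwrseq_foo_alloc(struct mmc_host *host)
{
	/* Claim GPIOs/clocks from host->pwrseq->dev here. */
	return 0;
}

static struct mmc_pwrseq_ops mmc_pwrseq_foo_ops = {
	.alloc = mmc_pwrseq_foo_alloc,
};

static int mmc_pwrseq_foo_probe(struct platform_device *pdev)
{
	struct mmc_pwrseq_foo *p;

	p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->pwrseq.ops = &mmc_pwrseq_foo_ops;
	p->pwrseq.dev = &pdev->dev;
	platform_set_drvdata(pdev, p);

	return mmc_pwrseq_register(&p->pwrseq);
}

static int mmc_pwrseq_foo_remove(struct platform_device *pdev)
{
	struct mmc_pwrseq_foo *p = platform_get_drvdata(pdev);

	return mmc_pwrseq_unregister(&p->pwrseq);
}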
diff --git a/drivers/mmc/core/pwrseq.h b/drivers/mmc/core/pwrseq.h
index 096da48c6a7e..462c0abfb651 100644
--- a/drivers/mmc/core/pwrseq.h
+++ b/drivers/mmc/core/pwrseq.h
@@ -8,7 +8,10 @@
#ifndef _MMC_CORE_PWRSEQ_H
#define _MMC_CORE_PWRSEQ_H
+#include <linux/mmc/host.h>
+
struct mmc_pwrseq_ops {
+ int (*alloc)(struct mmc_host *host);
void (*pre_power_on)(struct mmc_host *host);
void (*post_power_on)(struct mmc_host *host);
void (*power_off)(struct mmc_host *host);
@@ -17,23 +20,35 @@ struct mmc_pwrseq_ops {
struct mmc_pwrseq {
struct mmc_pwrseq_ops *ops;
+ struct device *dev;
+ struct list_head list;
+ int users;
+
};
#ifdef CONFIG_OF
+int mmc_pwrseq_register(struct mmc_pwrseq *pwrseq);
+int mmc_pwrseq_unregister(struct mmc_pwrseq *pwrseq);
+
int mmc_pwrseq_alloc(struct mmc_host *host);
void mmc_pwrseq_pre_power_on(struct mmc_host *host);
void mmc_pwrseq_post_power_on(struct mmc_host *host);
void mmc_pwrseq_power_off(struct mmc_host *host);
void mmc_pwrseq_free(struct mmc_host *host);
-struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host,
- struct device *dev);
-struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,
- struct device *dev);
-
#else
+static inline int mmc_pwrseq_register(struct mmc_pwrseq *pwrseq)
+{
+ return -ENOSYS;
+}
+
+static inline int mmc_pwrseq_unregister(struct mmc_pwrseq *pwrseq)
+{
+ return -ENOSYS;
+}
+
static inline int mmc_pwrseq_alloc(struct mmc_host *host) { return 0; }
static inline void mmc_pwrseq_pre_power_on(struct mmc_host *host) {}
static inline void mmc_pwrseq_post_power_on(struct mmc_host *host) {}
diff --git a/drivers/mmc/core/pwrseq_emmc.c b/drivers/mmc/core/pwrseq_emmc.c
index 9d6d2fb21796..0b12bd79ba8e 100644
--- a/drivers/mmc/core/pwrseq_emmc.c
+++ b/drivers/mmc/core/pwrseq_emmc.c
@@ -9,6 +9,8 @@
*/
#include <linux/delay.h>
#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -16,7 +18,6 @@
#include <linux/reboot.h>
#include <linux/mmc/host.h>
-
#include "pwrseq.h"
struct mmc_pwrseq_emmc {
@@ -25,6 +26,8 @@ struct mmc_pwrseq_emmc {
struct gpio_desc *reset_gpio;
};
+#define to_pwrseq_emmc(p) container_of(p, struct mmc_pwrseq_emmc, pwrseq)
+
static void __mmc_pwrseq_emmc_reset(struct mmc_pwrseq_emmc *pwrseq)
{
gpiod_set_value(pwrseq->reset_gpio, 1);
@@ -35,52 +38,37 @@ static void __mmc_pwrseq_emmc_reset(struct mmc_pwrseq_emmc *pwrseq)
static void mmc_pwrseq_emmc_reset(struct mmc_host *host)
{
- struct mmc_pwrseq_emmc *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_emmc, pwrseq);
+ struct mmc_pwrseq_emmc *pwrseq = to_pwrseq_emmc(host->pwrseq);
__mmc_pwrseq_emmc_reset(pwrseq);
}
static void mmc_pwrseq_emmc_free(struct mmc_host *host)
{
- struct mmc_pwrseq_emmc *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_emmc, pwrseq);
+ struct mmc_pwrseq_emmc *pwrseq = to_pwrseq_emmc(host->pwrseq);
unregister_restart_handler(&pwrseq->reset_nb);
gpiod_put(pwrseq->reset_gpio);
- kfree(pwrseq);
}
-static struct mmc_pwrseq_ops mmc_pwrseq_emmc_ops = {
- .post_power_on = mmc_pwrseq_emmc_reset,
- .free = mmc_pwrseq_emmc_free,
-};
-
static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
unsigned long mode, void *cmd)
{
struct mmc_pwrseq_emmc *pwrseq = container_of(this,
- struct mmc_pwrseq_emmc, reset_nb);
+ struct mmc_pwrseq_emmc, reset_nb);
__mmc_pwrseq_emmc_reset(pwrseq);
return NOTIFY_DONE;
}
-struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,
- struct device *dev)
+static int mmc_pwrseq_emmc_alloc(struct mmc_host *host)
{
- struct mmc_pwrseq_emmc *pwrseq;
- int ret = 0;
-
- pwrseq = kzalloc(sizeof(struct mmc_pwrseq_emmc), GFP_KERNEL);
- if (!pwrseq)
- return ERR_PTR(-ENOMEM);
-
- pwrseq->reset_gpio = gpiod_get_index(dev, "reset", 0, GPIOD_OUT_LOW);
- if (IS_ERR(pwrseq->reset_gpio)) {
- ret = PTR_ERR(pwrseq->reset_gpio);
- goto free;
- }
+ struct mmc_pwrseq_emmc *pwrseq = to_pwrseq_emmc(host->pwrseq);
+
+ pwrseq->reset_gpio = gpiod_get_index(host->pwrseq->dev,
+ "reset", 0, GPIOD_OUT_LOW);
+ if (IS_ERR(pwrseq->reset_gpio))
+ return PTR_ERR(pwrseq->reset_gpio);
/*
* register reset handler to ensure emmc reset also from
@@ -91,10 +79,53 @@ struct mmc_pwrseq *mmc_pwrseq_emmc_alloc(struct mmc_host *host,
pwrseq->reset_nb.priority = 129;
register_restart_handler(&pwrseq->reset_nb);
+ return 0;
+}
+
+static struct mmc_pwrseq_ops mmc_pwrseq_emmc_ops = {
+ .alloc = mmc_pwrseq_emmc_alloc,
+ .post_power_on = mmc_pwrseq_emmc_reset,
+ .free = mmc_pwrseq_emmc_free,
+};
+
+static int mmc_pwrseq_emmc_probe(struct platform_device *pdev)
+{
+ struct mmc_pwrseq_emmc *pwrseq;
+ struct device *dev = &pdev->dev;
+
+ pwrseq = devm_kzalloc(dev, sizeof(*pwrseq), GFP_KERNEL);
+ if (!pwrseq)
+ return -ENOMEM;
+
pwrseq->pwrseq.ops = &mmc_pwrseq_emmc_ops;
+ pwrseq->pwrseq.dev = dev;
+
+ platform_set_drvdata(pdev, pwrseq);
- return &pwrseq->pwrseq;
-free:
- kfree(pwrseq);
- return ERR_PTR(ret);
+ return mmc_pwrseq_register(&pwrseq->pwrseq);
}
+
+static int mmc_pwrseq_emmc_remove(struct platform_device *pdev)
+{
+ struct mmc_pwrseq_emmc *spwrseq = platform_get_drvdata(pdev);
+
+ return mmc_pwrseq_unregister(&spwrseq->pwrseq);
+}
+
+static const struct of_device_id mmc_pwrseq_emmc_of_match[] = {
+ { .compatible = "mmc-pwrseq-emmc",},
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, mmc_pwrseq_emmc_of_match);
+
+static struct platform_driver mmc_pwrseq_emmc_driver = {
+ .probe = mmc_pwrseq_emmc_probe,
+ .remove = mmc_pwrseq_emmc_remove,
+ .driver = {
+ .name = "pwrseq_emmc",
+ .of_match_table = mmc_pwrseq_emmc_of_match,
+ },
+};
+
+module_platform_driver(mmc_pwrseq_emmc_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c
index 0b14b83a53d6..f2a7613fd1e5 100644
--- a/drivers/mmc/core/pwrseq_simple.c
+++ b/drivers/mmc/core/pwrseq_simple.c
@@ -9,6 +9,7 @@
*/
#include <linux/clk.h>
#include <linux/kernel.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -27,6 +28,8 @@ struct mmc_pwrseq_simple {
struct gpio_desc *reset_gpios[0];
};
+#define to_pwrseq_simple(p) container_of(p, struct mmc_pwrseq_simple, pwrseq)
+
static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
int value)
{
@@ -39,8 +42,7 @@ static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq,
static void mmc_pwrseq_simple_pre_power_on(struct mmc_host *host)
{
- struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_simple, pwrseq);
+ struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
if (!IS_ERR(pwrseq->ext_clk) && !pwrseq->clk_enabled) {
clk_prepare_enable(pwrseq->ext_clk);
@@ -52,16 +54,14 @@ static void mmc_pwrseq_simple_pre_power_on(struct mmc_host *host)
static void mmc_pwrseq_simple_post_power_on(struct mmc_host *host)
{
- struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_simple, pwrseq);
+ struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
mmc_pwrseq_simple_set_gpios_value(pwrseq, 0);
}
static void mmc_pwrseq_simple_power_off(struct mmc_host *host)
{
- struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_simple, pwrseq);
+ struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
mmc_pwrseq_simple_set_gpios_value(pwrseq, 1);
@@ -73,8 +73,7 @@ static void mmc_pwrseq_simple_power_off(struct mmc_host *host)
static void mmc_pwrseq_simple_free(struct mmc_host *host)
{
- struct mmc_pwrseq_simple *pwrseq = container_of(host->pwrseq,
- struct mmc_pwrseq_simple, pwrseq);
+ struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
int i;
for (i = 0; i < pwrseq->nr_gpios; i++)
@@ -84,39 +83,22 @@ static void mmc_pwrseq_simple_free(struct mmc_host *host)
if (!IS_ERR(pwrseq->ext_clk))
clk_put(pwrseq->ext_clk);
- kfree(pwrseq);
}
-static struct mmc_pwrseq_ops mmc_pwrseq_simple_ops = {
- .pre_power_on = mmc_pwrseq_simple_pre_power_on,
- .post_power_on = mmc_pwrseq_simple_post_power_on,
- .power_off = mmc_pwrseq_simple_power_off,
- .free = mmc_pwrseq_simple_free,
-};
-
-struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host,
- struct device *dev)
+static int mmc_pwrseq_simple_alloc(struct mmc_host *host)
{
- struct mmc_pwrseq_simple *pwrseq;
- int i, nr_gpios, ret = 0;
-
- nr_gpios = of_gpio_named_count(dev->of_node, "reset-gpios");
- if (nr_gpios < 0)
- nr_gpios = 0;
-
- pwrseq = kzalloc(sizeof(struct mmc_pwrseq_simple) + nr_gpios *
- sizeof(struct gpio_desc *), GFP_KERNEL);
- if (!pwrseq)
- return ERR_PTR(-ENOMEM);
+ struct mmc_pwrseq_simple *pwrseq = to_pwrseq_simple(host->pwrseq);
+ struct device *dev = host->pwrseq->dev;
+ int i, ret = 0;
pwrseq->ext_clk = clk_get(dev, "ext_clock");
if (IS_ERR(pwrseq->ext_clk) &&
PTR_ERR(pwrseq->ext_clk) != -ENOENT) {
- ret = PTR_ERR(pwrseq->ext_clk);
- goto free;
+ return PTR_ERR(pwrseq->ext_clk);
}
- for (i = 0; i < nr_gpios; i++) {
+ for (i = 0; i < pwrseq->nr_gpios; i++) {
pwrseq->reset_gpios[i] = gpiod_get_index(dev, "reset", i,
GPIOD_OUT_HIGH);
if (IS_ERR(pwrseq->reset_gpios[i]) &&
@@ -127,18 +109,70 @@ struct mmc_pwrseq *mmc_pwrseq_simple_alloc(struct mmc_host *host,
while (i--)
gpiod_put(pwrseq->reset_gpios[i]);
- goto clk_put;
+ if (!IS_ERR(pwrseq->ext_clk))
+ clk_put(pwrseq->ext_clk);
+
+ return -EINVAL;
}
}
- pwrseq->nr_gpios = nr_gpios;
- pwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops;
- return &pwrseq->pwrseq;
-clk_put:
- if (!IS_ERR(pwrseq->ext_clk))
- clk_put(pwrseq->ext_clk);
-free:
- kfree(pwrseq);
- return ERR_PTR(ret);
+ return 0;
+}
+
+static struct mmc_pwrseq_ops mmc_pwrseq_simple_ops = {
+ .alloc = mmc_pwrseq_simple_alloc,
+ .pre_power_on = mmc_pwrseq_simple_pre_power_on,
+ .post_power_on = mmc_pwrseq_simple_post_power_on,
+ .power_off = mmc_pwrseq_simple_power_off,
+ .free = mmc_pwrseq_simple_free,
+};
+
+static const struct of_device_id mmc_pwrseq_simple_of_match[] = {
+ { .compatible = "mmc-pwrseq-simple",},
+ {/* sentinel */},
+};
+MODULE_DEVICE_TABLE(of, mmc_pwrseq_simple_of_match);
+
+static int mmc_pwrseq_simple_probe(struct platform_device *pdev)
+{
+ struct mmc_pwrseq_simple *spwrseq;
+ struct device *dev = &pdev->dev;
+ int nr_gpios;
+
+ nr_gpios = of_gpio_named_count(dev->of_node, "reset-gpios");
+ if (nr_gpios < 0)
+ nr_gpios = 0;
+
+ spwrseq = devm_kzalloc(dev, sizeof(struct mmc_pwrseq_simple) + nr_gpios *
+ sizeof(struct gpio_desc *), GFP_KERNEL);
+ if (!spwrseq)
+ return -ENOMEM;
+
+ spwrseq->pwrseq.dev = dev;
+ spwrseq->nr_gpios = nr_gpios;
+ spwrseq->pwrseq.ops = &mmc_pwrseq_simple_ops;
+
+ platform_set_drvdata(pdev, spwrseq);
+
+ return mmc_pwrseq_register(&spwrseq->pwrseq);
}
+
+static int mmc_pwrseq_simple_remove(struct platform_device *pdev)
+{
+ struct mmc_pwrseq_simple *spwrseq = platform_get_drvdata(pdev);
+
+ return mmc_pwrseq_unregister(&spwrseq->pwrseq);
+}
+
+static struct platform_driver mmc_pwrseq_simple_driver = {
+ .probe = mmc_pwrseq_simple_probe,
+ .remove = mmc_pwrseq_simple_remove,
+ .driver = {
+ .name = "pwrseq_simple",
+ .of_match_table = mmc_pwrseq_simple_of_match,
+ },
+};
+
+module_platform_driver(mmc_pwrseq_simple_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index fb266745f824..70805ba9577c 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -78,6 +78,7 @@ static unsigned int fmax = 515633;
* @qcom_fifo: enables qcom specific fifo pio read logic.
* @qcom_dml: enables qcom specific dma glue for dma transfers.
* @reversed_irq_handling: handle data irq before cmd irq.
+ * @any_blksize: true if any block size is supported
*/
struct variant_data {
unsigned int clkreg;
@@ -104,6 +105,7 @@ struct variant_data {
bool qcom_fifo;
bool qcom_dml;
bool reversed_irq_handling;
+ bool any_blksize;
};
static struct variant_data variant_arm = {
@@ -200,6 +202,7 @@ static struct variant_data variant_ux500v2 = {
.pwrreg_clkgate = true,
.busy_detect = true,
.pwrreg_nopower = true,
+ .any_blksize = true,
};
static struct variant_data variant_qcom = {
@@ -218,6 +221,7 @@ static struct variant_data variant_qcom = {
.explicit_mclk_control = true,
.qcom_fifo = true,
.qcom_dml = true,
+ .any_blksize = true,
};
static int mmci_card_busy(struct mmc_host *mmc)
@@ -245,10 +249,11 @@ static int mmci_card_busy(struct mmc_host *mmc)
static int mmci_validate_data(struct mmci_host *host,
struct mmc_data *data)
{
+ struct variant_data *variant = host->variant;
+
if (!data)
return 0;
-
- if (!is_power_of_2(data->blksz)) {
+ if (!is_power_of_2(data->blksz) && !variant->any_blksize) {
dev_err(mmc_dev(host->mmc),
"unsupported block size (%d bytes)\n", data->blksz);
return -EINVAL;
@@ -804,7 +809,6 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
writel(host->size, base + MMCIDATALENGTH);
blksz_bits = ffs(data->blksz) - 1;
- BUG_ON(1 << blksz_bits != data->blksz);
if (variant->blksz_datactrl16)
datactrl = MCI_DPSM_ENABLE | (data->blksz << 16);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 4bcee033feda..4fbbc9712e6a 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -522,6 +522,7 @@ static int sdhci_msm_probe(struct platform_device *pdev)
/* Set HC_MODE_EN bit in HC_MODE register */
writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
+ host->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET;
host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index fbc7efdddcb5..c21801a4556d 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1621,10 +1621,6 @@ static int sdhci_do_get_cd(struct sdhci_host *host)
if (host->flags & SDHCI_DEVICE_DEAD)
return 0;
- /* If nonremovable, assume that the card is always present. */
- if (host->mmc->caps & MMC_CAP_NONREMOVABLE)
- return 1;
-
/*
* Try slot gpio detect, if defined it take precedence
* over build in controller functionality
@@ -1632,8 +1628,9 @@ static int sdhci_do_get_cd(struct sdhci_host *host)
if (!IS_ERR_VALUE(gpio_cd))
return !!gpio_cd;
- /* If polling, assume that the card is always present. */
- if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+ /* If polling/nonremovable, assume that the card is always present. */
+ if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
+ (host->mmc->caps & MMC_CAP_NONREMOVABLE))
return 1;
/* Host native card detect */
diff --git a/drivers/net/wireless/ath/wcn36xx/Makefile b/drivers/net/wireless/ath/wcn36xx/Makefile
index 50c43b4382ba..9f6370f0cabc 100644
--- a/drivers/net/wireless/ath/wcn36xx/Makefile
+++ b/drivers/net/wireless/ath/wcn36xx/Makefile
@@ -1,7 +1,10 @@
-obj-$(CONFIG_WCN36XX) := wcn36xx.o
+obj-$(CONFIG_WCN36XX) := wcn36xx.o wcn36xx-platform.o
wcn36xx-y += main.o \
dxe.o \
txrx.o \
smd.o \
pmc.o \
debug.o
+
+wcn36xx-platform-y += wcn36xx-msm.o \
+ wcnss_core.o
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index 086549b732b9..196c73fe8b27 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -46,7 +46,7 @@ static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
#define wcn36xx_dxe_write_register_x(wcn, reg, reg_data) \
do { \
- if (wcn->chip_version == WCN36XX_CHIP_3680) \
+ if (wcn->chip_version != WCN36XX_CHIP_3660) \
wcn36xx_dxe_write_register(wcn, reg ## _3680, reg_data); \
else \
wcn36xx_dxe_write_register(wcn, reg ## _3660, reg_data); \
@@ -79,6 +79,7 @@ static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
struct wcn36xx_dxe_ctl *cur_ctl = NULL;
int i;
+ spin_lock_init(&ch->lock);
for (i = 0; i < ch->desc_num; i++) {
cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
if (!cur_ctl)
@@ -169,7 +170,7 @@ void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
}
-static int wcn36xx_dxe_init_descs(struct wcn36xx_dxe_ch *wcn_ch)
+static int wcn36xx_dxe_init_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
{
struct wcn36xx_dxe_desc *cur_dxe = NULL;
struct wcn36xx_dxe_desc *prev_dxe = NULL;
@@ -178,7 +179,7 @@ static int wcn36xx_dxe_init_descs(struct wcn36xx_dxe_ch *wcn_ch)
int i;
size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
- wcn_ch->cpu_addr = dma_alloc_coherent(NULL, size, &wcn_ch->dma_addr,
+ wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
GFP_KERNEL);
if (!wcn_ch->cpu_addr)
return -ENOMEM;
@@ -270,7 +271,7 @@ static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
return 0;
}
-static int wcn36xx_dxe_fill_skb(struct wcn36xx_dxe_ctl *ctl)
+static int wcn36xx_dxe_fill_skb(struct device *dev, struct wcn36xx_dxe_ctl *ctl)
{
struct wcn36xx_dxe_desc *dxe = ctl->desc;
struct sk_buff *skb;
@@ -279,7 +280,7 @@ static int wcn36xx_dxe_fill_skb(struct wcn36xx_dxe_ctl *ctl)
if (skb == NULL)
return -ENOMEM;
- dxe->dst_addr_l = dma_map_single(NULL,
+ dxe->dst_addr_l = dma_map_single(dev,
skb_tail_pointer(skb),
WCN36XX_PKT_SIZE,
DMA_FROM_DEVICE);
@@ -297,7 +298,7 @@ static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
cur_ctl = wcn_ch->head_blk_ctl;
for (i = 0; i < wcn_ch->desc_num; i++) {
- wcn36xx_dxe_fill_skb(cur_ctl);
+ wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl);
cur_ctl = cur_ctl->next;
}
@@ -345,7 +346,7 @@ void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
{
- struct wcn36xx_dxe_ctl *ctl = ch->tail_blk_ctl;
+ struct wcn36xx_dxe_ctl *ctl;
struct ieee80211_tx_info *info;
unsigned long flags;
@@ -354,23 +355,25 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
* completely full head and tail are pointing to the same element
* and while-do will not make any cycles.
*/
+ spin_lock_irqsave(&ch->lock, flags);
+ ctl = ch->tail_blk_ctl;
do {
if (ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)
break;
if (ctl->skb) {
- dma_unmap_single(NULL, ctl->desc->src_addr_l,
+ dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
ctl->skb->len, DMA_TO_DEVICE);
info = IEEE80211_SKB_CB(ctl->skb);
if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
/* Keep frame until TX status comes */
ieee80211_free_txskb(wcn->hw, ctl->skb);
}
- spin_lock_irqsave(&ctl->skb_lock, flags);
+ spin_lock(&ctl->skb_lock);
if (wcn->queues_stopped) {
wcn->queues_stopped = false;
ieee80211_wake_queues(wcn->hw);
}
- spin_unlock_irqrestore(&ctl->skb_lock, flags);
+ spin_unlock(&ctl->skb_lock);
ctl->skb = NULL;
}
@@ -379,6 +382,7 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
!(ctl->desc->ctrl & WCN36XX_DXE_CTRL_VALID_MASK));
ch->tail_blk_ctl = ctl;
+ spin_unlock_irqrestore(&ch->lock, flags);
}
static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
@@ -474,7 +478,7 @@ static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
while (!(dxe->ctrl & WCN36XX_DXE_CTRL_VALID_MASK)) {
skb = ctl->skb;
dma_addr = dxe->dst_addr_l;
- wcn36xx_dxe_fill_skb(ctl);
+ wcn36xx_dxe_fill_skb(wcn->dev, ctl);
switch (ch->ch_type) {
case WCN36XX_DXE_CH_RX_L:
@@ -491,7 +495,7 @@ static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
wcn36xx_warn("Unknown channel\n");
}
- dma_unmap_single(NULL, dma_addr, WCN36XX_PKT_SIZE,
+ dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
DMA_FROM_DEVICE);
wcn36xx_rx_skb(wcn, skb);
ctl = ctl->next;
@@ -540,7 +544,7 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
16 - (WCN36XX_BD_CHUNK_SIZE % 8);
s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
- cpu_addr = dma_alloc_coherent(NULL, s, &wcn->mgmt_mem_pool.phy_addr,
+ cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->mgmt_mem_pool.phy_addr,
GFP_KERNEL);
if (!cpu_addr)
goto out_err;
@@ -555,7 +559,7 @@ int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
16 - (WCN36XX_BD_CHUNK_SIZE % 8);
s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
- cpu_addr = dma_alloc_coherent(NULL, s, &wcn->data_mem_pool.phy_addr,
+ cpu_addr = dma_alloc_coherent(wcn->dev, s, &wcn->data_mem_pool.phy_addr,
GFP_KERNEL);
if (!cpu_addr)
goto out_err;
@@ -574,13 +578,13 @@ out_err:
void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
{
if (wcn->mgmt_mem_pool.virt_addr)
- dma_free_coherent(NULL, wcn->mgmt_mem_pool.chunk_size *
+ dma_free_coherent(wcn->dev, wcn->mgmt_mem_pool.chunk_size *
WCN36XX_DXE_CH_DESC_NUMB_TX_H,
wcn->mgmt_mem_pool.virt_addr,
wcn->mgmt_mem_pool.phy_addr);
if (wcn->data_mem_pool.virt_addr) {
- dma_free_coherent(NULL, wcn->data_mem_pool.chunk_size *
+ dma_free_coherent(wcn->dev, wcn->data_mem_pool.chunk_size *
WCN36XX_DXE_CH_DESC_NUMB_TX_L,
wcn->data_mem_pool.virt_addr,
wcn->data_mem_pool.phy_addr);
@@ -596,12 +600,14 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
struct wcn36xx_dxe_desc *desc = NULL;
struct wcn36xx_dxe_ch *ch = NULL;
unsigned long flags;
+ int ret;
ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;
+ spin_lock_irqsave(&ch->lock, flags);
ctl = ch->head_blk_ctl;
- spin_lock_irqsave(&ctl->next->skb_lock, flags);
+ spin_lock(&ctl->next->skb_lock);
/*
* If skb is not null that means that we reached the tail of the ring
@@ -611,10 +617,11 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
if (NULL != ctl->next->skb) {
ieee80211_stop_queues(wcn->hw);
wcn->queues_stopped = true;
- spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
+ spin_unlock(&ctl->next->skb_lock);
+ spin_unlock_irqrestore(&ch->lock, flags);
return -EBUSY;
}
- spin_unlock_irqrestore(&ctl->next->skb_lock, flags);
+ spin_unlock(&ctl->next->skb_lock);
ctl->skb = NULL;
desc = ctl->desc;
@@ -640,10 +647,11 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
desc = ctl->desc;
if (ctl->bd_cpu_addr) {
wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto unlock;
}
- desc->src_addr_l = dma_map_single(NULL,
+ desc->src_addr_l = dma_map_single(wcn->dev,
ctl->skb->data,
ctl->skb->len,
DMA_TO_DEVICE);
@@ -679,7 +687,10 @@ int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
ch->reg_ctrl, ch->def_ctrl);
}
- return 0;
+ ret = 0;
+unlock:
+ spin_unlock_irqrestore(&ch->lock, flags);
+ return ret;
}
int wcn36xx_dxe_init(struct wcn36xx *wcn)
@@ -696,7 +707,7 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
/***************************************/
/* Init descriptors for TX LOW channel */
/***************************************/
- wcn36xx_dxe_init_descs(&wcn->dxe_tx_l_ch);
+ wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_l_ch);
wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);
/* Write channel head to a NEXT register */
@@ -714,7 +725,7 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
/***************************************/
/* Init descriptors for TX HIGH channel */
/***************************************/
- wcn36xx_dxe_init_descs(&wcn->dxe_tx_h_ch);
+ wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_tx_h_ch);
wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);
/* Write channel head to a NEXT register */
@@ -734,7 +745,7 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
/***************************************/
/* Init descriptors for RX LOW channel */
/***************************************/
- wcn36xx_dxe_init_descs(&wcn->dxe_rx_l_ch);
+ wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_l_ch);
/* For RX we need to preallocated buffers */
wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);
@@ -764,7 +775,7 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
/***************************************/
/* Init descriptors for RX HIGH channel */
/***************************************/
- wcn36xx_dxe_init_descs(&wcn->dxe_rx_h_ch);
+ wcn36xx_dxe_init_descs(wcn->dev, &wcn->dxe_rx_h_ch);
/* For RX we need to prealocat buffers */
wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.h b/drivers/net/wireless/ath/wcn36xx/dxe.h
index 35ee7e966bd2..f869fd3a9951 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.h
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.h
@@ -243,6 +243,7 @@ struct wcn36xx_dxe_ctl {
};
struct wcn36xx_dxe_ch {
+ spinlock_t lock;
enum wcn36xx_dxe_ch_type ch_type;
void *cpu_addr;
dma_addr_t dma_addr;
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index a1f1127d7808..df4425d410e6 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -345,6 +345,10 @@ enum wcn36xx_hal_host_msg_type {
WCN36XX_HAL_DHCP_START_IND = 189,
WCN36XX_HAL_DHCP_STOP_IND = 190,
+ WCN36XX_HAL_AVOID_FREQ_RANGE_IND = 233,
+
+ WCN36XX_HAL_PRINT_REG_INFO_IND = 259,
+
WCN36XX_HAL_MSG_MAX = WCN36XX_HAL_MSG_TYPE_MAX_ENUM_SIZE
};
@@ -4381,6 +4385,20 @@ enum place_holder_in_cap_bitmap {
RTT = 20,
RATECTRL = 21,
WOW = 22,
+ WLAN_ROAM_SCAN_OFFLOAD = 23,
+ SPECULATIVE_PS_POLL = 24,
+ SCAN_SCH = 25,
+ IBSS_HEARTBEAT_OFFLOAD = 26,
+ WLAN_SCAN_OFFLOAD = 27,
+ WLAN_PERIODIC_TX_PTRN = 28,
+ ADVANCE_TDLS = 29,
+ BATCH_SCAN = 30,
+ FW_IN_TX_PATH = 31,
+ EXTENDED_NSOFFLOAD_SLOT = 32,
+ CH_SWITCH_V1 = 33,
+ HT40_OBSS_SCAN = 34,
+ UPDATE_CHANNEL_LIST = 35,
+
MAX_FEATURE_SUPPORTED = 128,
};
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 900e72a089d8..bbd4eca5e9f1 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -201,7 +201,21 @@ static const char * const wcn36xx_caps_names[] = {
"BCN_FILTER", /* 19 */
"RTT", /* 20 */
"RATECTRL", /* 21 */
- "WOW" /* 22 */
+ "WOW", /* 22 */
+ "WLAN_ROAM_SCAN_OFFLOAD", /* 23 */
+ "SPECULATIVE_PS_POLL", /* 24 */
+ "SCAN_SCH", /* 25 */
+ "IBSS_HEARTBEAT_OFFLOAD", /* 26 */
+ "WLAN_SCAN_OFFLOAD", /* 27 */
+ "WLAN_PERIODIC_TX_PTRN", /* 28 */
+ "ADVANCE_TDLS", /* 29 */
+ "BATCH_SCAN", /* 30 */
+ "FW_IN_TX_PATH", /* 31 */
+ "EXTENDED_NSOFFLOAD_SLOT", /* 32 */
+ "CH_SWITCH_V1", /* 33 */
+ "HT40_OBSS_SCAN", /* 34 */
+ "UPDATE_CHANNEL_LIST", /* 35 */
+
};
static const char *wcn36xx_get_cap_name(enum place_holder_in_cap_bitmap x)
@@ -221,17 +235,6 @@ static void wcn36xx_feat_caps_info(struct wcn36xx *wcn)
}
}
-static void wcn36xx_detect_chip_version(struct wcn36xx *wcn)
-{
- if (get_feat_caps(wcn->fw_feat_caps, DOT11AC)) {
- wcn36xx_info("Chip is 3680\n");
- wcn->chip_version = WCN36XX_CHIP_3680;
- } else {
- wcn36xx_info("Chip is 3660\n");
- wcn->chip_version = WCN36XX_CHIP_3660;
- }
-}
-
static int wcn36xx_start(struct ieee80211_hw *hw)
{
struct wcn36xx *wcn = hw->priv;
@@ -286,8 +289,6 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
wcn36xx_feat_caps_info(wcn);
}
- wcn36xx_detect_chip_version(wcn);
-
/* DMA channel initialization */
ret = wcn36xx_dxe_init(wcn);
if (ret) {
@@ -951,6 +952,10 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
ieee80211_hw_set(wcn->hw, SIGNAL_DBM);
ieee80211_hw_set(wcn->hw, HAS_RATE_CONTROL);
+ /* 3620 powersaving currently unstable */
+ if (wcn->chip_version == WCN36XX_CHIP_3620)
+ __clear_bit(IEEE80211_HW_SUPPORTS_PS, wcn->hw->flags);
+
wcn->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_ADHOC) |
@@ -1036,11 +1041,25 @@ static int wcn36xx_probe(struct platform_device *pdev)
wcn = hw->priv;
wcn->hw = hw;
wcn->dev = &pdev->dev;
- wcn->ctrl_ops = pdev->dev.platform_data;
+ wcn->dev->dma_mask = kzalloc(sizeof(*wcn->dev->dma_mask), GFP_KERNEL);
+ if (!wcn->dev->dma_mask) {
+ ret = -ENOMEM;
+ goto dma_mask_err;
+ }
+ dma_set_mask_and_coherent(wcn->dev, DMA_BIT_MASK(32));
+ wcn->wcn36xx_data = pdev->dev.platform_data;
+ wcn->ctrl_ops = &wcn->wcn36xx_data->ctrl_ops;
+ wcn->wcn36xx_data->wcn = wcn;
+ if (!wcn->ctrl_ops->get_chip_type) {
+ dev_err(&pdev->dev, "Missing ops->get_chip_type\n");
+ ret = -EINVAL;
+ goto out_wq;
+ }
+ wcn->chip_version = wcn->ctrl_ops->get_chip_type(wcn);
mutex_init(&wcn->hal_mutex);
- if (!wcn->ctrl_ops->get_hw_mac(addr)) {
+ if (!wcn->ctrl_ops->get_hw_mac(wcn, addr)) {
wcn36xx_info("mac address: %pM\n", addr);
SET_IEEE80211_PERM_ADDR(wcn->hw, addr);
}
@@ -1059,6 +1078,8 @@ static int wcn36xx_probe(struct platform_device *pdev)
out_unmap:
iounmap(wcn->mmio);
out_wq:
+ kfree(wcn->dev->dma_mask);
+dma_mask_err:
ieee80211_free_hw(hw);
out_err:
return ret;
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index c9263e1c75d4..d950f286a9d5 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -253,7 +253,7 @@ static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
init_completion(&wcn->hal_rsp_compl);
start = jiffies;
- ret = wcn->ctrl_ops->tx(wcn->hal_buf, len);
+ ret = wcn->ctrl_ops->tx(wcn, wcn->hal_buf, len);
if (ret) {
wcn36xx_err("HAL TX failed\n");
goto out;
@@ -302,6 +302,23 @@ static int wcn36xx_smd_rsp_status_check(void *buf, size_t len)
return 0;
}
+static int wcn36xx_smd_rsp_status_check_v2(struct wcn36xx *wcn, void *buf,
+ size_t len)
+{
+ struct wcn36xx_fw_msg_status_rsp_v2 *rsp;
+
+ if (wcn->chip_version != WCN36XX_CHIP_3620 ||
+ len < sizeof(struct wcn36xx_hal_msg_header) + sizeof(*rsp))
+ return wcn36xx_smd_rsp_status_check(buf, len);
+
+ rsp = buf + sizeof(struct wcn36xx_hal_msg_header);
+
+ if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->status)
+ return rsp->status;
+
+ return 0;
+}
+
int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
{
struct nv_data *nv_d;
@@ -1582,7 +1599,8 @@ int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
wcn36xx_err("Sending hal_remove_bsskey failed\n");
goto out;
}
- ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ ret = wcn36xx_smd_rsp_status_check_v2(wcn, wcn->hal_buf,
+ wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_remove_bsskey response failed err=%d\n", ret);
goto out;
@@ -1951,7 +1969,8 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
wcn36xx_err("Sending hal_trigger_ba failed\n");
goto out;
}
- ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ ret = wcn36xx_smd_rsp_status_check_v2(wcn, wcn->hal_buf,
+ wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_trigger_ba response failed err=%d\n", ret);
goto out;
@@ -2082,6 +2101,7 @@ out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
+
static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
{
struct wcn36xx_hal_msg_header *msg_header = buf;
@@ -2128,6 +2148,10 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
complete(&wcn->hal_rsp_compl);
break;
+ case WCN36XX_HAL_DEL_BA_IND:
+ case WCN36XX_HAL_PRINT_REG_INFO_IND:
+ case WCN36XX_HAL_COEX_IND:
+ case WCN36XX_HAL_AVOID_FREQ_RANGE_IND:
case WCN36XX_HAL_OTA_TX_COMPL_IND:
case WCN36XX_HAL_MISSED_BEACON_IND:
case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
@@ -2174,6 +2198,11 @@ static void wcn36xx_ind_smd_work(struct work_struct *work)
msg_header = (struct wcn36xx_hal_msg_header *)hal_ind_msg->msg;
switch (msg_header->msg_type) {
+ case WCN36XX_HAL_DEL_BA_IND:
+ case WCN36XX_HAL_PRINT_REG_INFO_IND:
+ case WCN36XX_HAL_COEX_IND:
+ case WCN36XX_HAL_AVOID_FREQ_RANGE_IND:
+ break;
case WCN36XX_HAL_OTA_TX_COMPL_IND:
wcn36xx_smd_tx_compl_ind(wcn,
hal_ind_msg->msg,
@@ -2227,7 +2256,7 @@ out:
void wcn36xx_smd_close(struct wcn36xx *wcn)
{
- wcn->ctrl_ops->close();
+ wcn->ctrl_ops->close(wcn);
destroy_workqueue(wcn->hal_ind_wq);
mutex_destroy(&wcn->hal_ind_mutex);
}
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index 008d03423dbf..8361f9e3995b 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -44,6 +44,15 @@ struct wcn36xx_fw_msg_status_rsp {
u32 status;
} __packed;
+/* wcn3620 returns this status format for trigger_ba and remove_bsskey */
+
+struct wcn36xx_fw_msg_status_rsp_v2 {
+ u8 bss_id[6];
+ u32 status __packed;
+ u16 count_following_candidates __packed;
+ /* candidate list follows */
+};
+
struct wcn36xx_hal_ind_msg {
struct list_head list;
u8 *msg;
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx-msm.c b/drivers/net/wireless/ath/wcn36xx/wcn36xx-msm.c
new file mode 100644
index 000000000000..647f623f1985
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx-msm.c
@@ -0,0 +1,314 @@
+/*
+ * Copyright (c) 2013 Eugene Krasnikov <k.eugene.e@gmail.com>
+ * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/completion.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+#include <linux/remoteproc.h>
+#include <linux/soc/qcom/smd.h>
+#include "wcn36xx.h"
+#include "wcnss_core.h"
+
+#define MAC_ADDR_0 "wlan/macaddr0"
+
+struct smd_packet_item {
+ struct list_head list;
+ void *buf;
+ size_t count;
+};
+
+static int wcn36xx_msm_smsm_change_state(u32 clear_mask, u32 set_mask)
+{
+ return 0;
+}
+
+static int wcn36xx_msm_get_hw_mac(struct wcn36xx *wcn, u8 *addr)
+{
+ const struct firmware *addr_file = NULL;
+ int status;
+ u8 tmp[18];
+ static const u8 qcom_oui[3] = {0x00, 0x0A, 0xF5};
+	static const char *files = MAC_ADDR_0;
+ struct wcn36xx_platform_data *pdata = wcn->wcn36xx_data;
+
+ status = request_firmware(&addr_file, files, &pdata->core->dev);
+
+ if (status < 0) {
+ /* Assign a random mac with Qualcomm oui */
+		dev_err(&pdata->core->dev, "Failed (%d) to read macaddress"
+			" file %s, using a random address instead\n", status, files);
+ memcpy(addr, qcom_oui, 3);
+ get_random_bytes(addr + 3, 3);
+ } else {
+ memset(tmp, 0, sizeof(tmp));
+ memcpy(tmp, addr_file->data, sizeof(tmp) - 1);
+ sscanf(tmp, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
+ &addr[0],
+ &addr[1],
+ &addr[2],
+ &addr[3],
+ &addr[4],
+ &addr[5]);
+
+ release_firmware(addr_file);
+ }
+
+ return 0;
+}
+
+static int wcn36xx_msm_smd_send_and_wait(struct wcn36xx *wcn, char *buf, size_t len)
+{
+ int ret = 0;
+ struct wcn36xx_platform_data *pdata = wcn->wcn36xx_data;
+
+ mutex_lock(&pdata->wlan_ctrl_lock);
+ ret = qcom_smd_send(pdata->wlan_ctrl_channel, buf, len);
+ if (ret) {
+ dev_err(wcn->dev, "wlan ctrl channel tx failed\n");
+ }
+ mutex_unlock(&pdata->wlan_ctrl_lock);
+
+ return ret;
+}
+
+static int wcn36xx_msm_smd_open(struct wcn36xx *wcn, void *rsp_cb)
+{
+ struct wcn36xx_platform_data *pdata = wcn->wcn36xx_data;
+
+ pdata->cb = rsp_cb;
+ return 0;
+}
+
+static void wcn36xx_msm_smd_close(struct wcn36xx *wcn)
+{
+ return;
+}
+
+static int wcn36xx_msm_get_chip_type(struct wcn36xx *wcn)
+{
+ struct wcn36xx_platform_data *pdata = wcn->wcn36xx_data;
+ return pdata->chip_type;
+}
+
+static struct wcn36xx_platform_data wcn36xx_data = {
+ .ctrl_ops = {
+ .open = wcn36xx_msm_smd_open,
+ .close = wcn36xx_msm_smd_close,
+ .tx = wcn36xx_msm_smd_send_and_wait,
+ .get_hw_mac = wcn36xx_msm_get_hw_mac,
+ .smsm_change_state = wcn36xx_msm_smsm_change_state,
+ .get_chip_type = wcn36xx_msm_get_chip_type,
+ },
+};
+
+static void wlan_ctrl_smd_process(struct work_struct *worker)
+{
+ unsigned long flags;
+ struct wcn36xx_platform_data *pdata =
+ container_of(worker,
+ struct wcn36xx_platform_data, packet_process_work);
+
+ spin_lock_irqsave(&pdata->packet_lock, flags);
+ while (!list_empty(&pdata->packet_list)) {
+ struct smd_packet_item *packet;
+
+ packet = list_first_entry(&pdata->packet_list,
+ struct smd_packet_item, list);
+ list_del(&packet->list);
+ spin_unlock_irqrestore(&pdata->packet_lock, flags);
+ pdata->cb(pdata->wcn, packet->buf, packet->count);
+ kfree(packet->buf);
+ spin_lock_irqsave(&pdata->packet_lock, flags);
+ }
+ spin_unlock_irqrestore(&pdata->packet_lock, flags);
+}
+
+static int qcom_smd_wlan_ctrl_probe(struct qcom_smd_device *sdev)
+{
+ pr_info("%s: enter\n", __func__);
+ mutex_init(&wcn36xx_data.wlan_ctrl_lock);
+ init_completion(&wcn36xx_data.wlan_ctrl_ack);
+
+ wcn36xx_data.sdev = sdev;
+ spin_lock_init(&wcn36xx_data.packet_lock);
+ INIT_LIST_HEAD(&wcn36xx_data.packet_list);
+ INIT_WORK(&wcn36xx_data.packet_process_work, wlan_ctrl_smd_process);
+
+ dev_set_drvdata(&sdev->dev, &wcn36xx_data);
+ wcn36xx_data.wlan_ctrl_channel = sdev->channel;
+
+ of_platform_populate(sdev->dev.of_node, NULL, NULL, &sdev->dev);
+
+ return 0;
+}
+
+static void qcom_smd_wlan_ctrl_remove(struct qcom_smd_device *sdev)
+{
+ of_platform_depopulate(&sdev->dev);
+}
+
+static int qcom_smd_wlan_ctrl_callback(struct qcom_smd_device *qsdev,
+ const void *data,
+ size_t count)
+{
+ unsigned long flags;
+ struct smd_packet_item *packet = NULL;
+ struct wcn36xx_platform_data *pdata = dev_get_drvdata(&qsdev->dev);
+ void *buf = kzalloc(count + sizeof(struct smd_packet_item),
+ GFP_ATOMIC);
+ if (!buf) {
+ dev_err(&pdata->core->dev, "can't allocate buffer\n");
+ return -ENOMEM;
+ }
+
+ memcpy_fromio(buf, data, count);
+ packet = buf + count;
+ packet->buf = buf;
+ packet->count = count;
+
+ spin_lock_irqsave(&pdata->packet_lock, flags);
+ list_add_tail(&packet->list, &pdata->packet_list);
+ spin_unlock_irqrestore(&pdata->packet_lock, flags);
+ schedule_work(&pdata->packet_process_work);
+
+ /* buf will be freed in workqueue */
+
+ return 0;
+}
+
+static const struct qcom_smd_id qcom_smd_wlan_ctrl_match[] = {
+ { .name = "WLAN_CTRL" },
+ {}
+};
+
+static struct qcom_smd_driver qcom_smd_wlan_ctrl_driver = {
+ .probe = qcom_smd_wlan_ctrl_probe,
+ .remove = qcom_smd_wlan_ctrl_remove,
+ .callback = qcom_smd_wlan_ctrl_callback,
+ .smd_match_table = qcom_smd_wlan_ctrl_match,
+ .driver = {
+ .name = "qcom_smd_wlan_ctrl",
+ .owner = THIS_MODULE,
+ },
+};
+
+static const struct of_device_id wcn36xx_msm_match_table[] = {
+ { .compatible = "qcom,wcn3660", .data = (void *)WCN36XX_CHIP_3660 },
+ { .compatible = "qcom,wcn3680", .data = (void *)WCN36XX_CHIP_3680 },
+ { .compatible = "qcom,wcn3620", .data = (void *)WCN36XX_CHIP_3620 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, wcn36xx_msm_match_table);
+
+static int wcn36xx_msm_probe(struct platform_device *pdev)
+{
+ int ret;
+ const struct of_device_id *of_id;
+ struct resource *r;
+ struct resource res[3];
+	static const char * const rnames[] = {
+ "wcnss_mmio", "wcnss_wlantx_irq", "wcnss_wlanrx_irq" };
+ static const int rtype[] = {
+ IORESOURCE_MEM, IORESOURCE_IRQ, IORESOURCE_IRQ };
+ struct device_node *dn;
+ int n;
+
+ wcnss_core_prepare(pdev);
+
+ dn = of_parse_phandle(pdev->dev.of_node, "rproc", 0);
+ if (!dn) {
+ dev_err(&pdev->dev, "No rproc specified in DT\n");
+ } else {
+		struct rproc *rproc = rproc_get_by_phandle(dn->phandle);
+
+		if (rproc)
+			rproc_boot(rproc);
+		else
+			dev_err(&pdev->dev, "No rproc registered\n");
+ }
+
+ qcom_smd_driver_register(&qcom_smd_wlan_ctrl_driver);
+ wcnss_core_init();
+
+ of_id = of_match_node(wcn36xx_msm_match_table, pdev->dev.of_node);
+ if (!of_id)
+ return -EINVAL;
+
+	wcn36xx_data.chip_type = (enum wcn36xx_chip_type)(unsigned long)of_id->data;
+
+ wcn36xx_data.core = platform_device_alloc("wcn36xx", -1);
+
+ for (n = 0; n < ARRAY_SIZE(rnames); n++) {
+ r = platform_get_resource_byname(pdev, rtype[n], rnames[n]);
+ if (!r) {
+ dev_err(&wcn36xx_data.core->dev,
+				"Missing resource '%s'\n", rnames[n]);
+			ret = -ENXIO;
+ return ret;
+ }
+ res[n] = *r;
+ }
+
+ platform_device_add_resources(wcn36xx_data.core, res, n);
+ wcn36xx_data.core->dev.platform_data = &wcn36xx_data;
+
+ platform_device_add(wcn36xx_data.core);
+
+ dev_info(&pdev->dev, "%s initialized\n", __func__);
+
+ return 0;
+}
+
+static int wcn36xx_msm_remove(struct platform_device *pdev)
+{
+ platform_device_del(wcn36xx_data.core);
+ platform_device_put(wcn36xx_data.core);
+ return 0;
+}
+
+static struct platform_driver wcn36xx_msm_driver = {
+ .probe = wcn36xx_msm_probe,
+ .remove = wcn36xx_msm_remove,
+ .driver = {
+ .name = "wcn36xx-msm",
+ .owner = THIS_MODULE,
+ .of_match_table = wcn36xx_msm_match_table,
+ },
+};
+
+static int __init wcn36xx_msm_init(void)
+{
+ return platform_driver_register(&wcn36xx_msm_driver);
+}
+module_init(wcn36xx_msm_init);
+
+static void __exit wcn36xx_msm_exit(void)
+{
+ platform_driver_unregister(&wcn36xx_msm_driver);
+}
+module_exit(wcn36xx_msm_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Eugene Krasnikov k.eugene.e@gmail.com");
+MODULE_FIRMWARE(MAC_ADDR_0);
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index 7b41e833e18c..f88cf7e8715b 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -103,19 +103,49 @@ struct nv_data {
u8 table;
};
+enum wcn36xx_chip_type {
+ WCN36XX_CHIP_UNKNOWN,
+ WCN36XX_CHIP_3660,
+ WCN36XX_CHIP_3680,
+ WCN36XX_CHIP_3620,
+};
+
/* Interface for platform control path
*
* @open: hook must be called when wcn36xx wants to open control channel.
* @tx: sends a buffer.
*/
struct wcn36xx_platform_ctrl_ops {
- int (*open)(void *drv_priv, void *rsp_cb);
- void (*close)(void);
- int (*tx)(char *buf, size_t len);
- int (*get_hw_mac)(u8 *addr);
+ int (*open)(struct wcn36xx *wcn, void *rsp_cb);
+ void (*close)(struct wcn36xx *wcn);
+ int (*tx)(struct wcn36xx *wcn, char *buf, size_t len);
+ int (*get_hw_mac)(struct wcn36xx *wcn, u8 *addr);
+ int (*get_chip_type)(struct wcn36xx *wcn);
int (*smsm_change_state)(u32 clear_mask, u32 set_mask);
};
+struct wcn36xx_platform_data {
+ enum wcn36xx_chip_type chip_type;
+
+ struct platform_device *core;
+
+ struct qcom_smd_device *sdev;
+ struct qcom_smd_channel *wlan_ctrl_channel;
+ struct completion wlan_ctrl_ack;
+ struct mutex wlan_ctrl_lock;
+
+ struct pinctrl *pinctrl;
+
+ struct wcn36xx *wcn;
+
+ void (*cb)(struct wcn36xx *wcn, void *buf, size_t len);
+ struct wcn36xx_platform_ctrl_ops ctrl_ops;
+
+ struct work_struct packet_process_work;
+ spinlock_t packet_lock;
+ struct list_head packet_list;
+};
+
/**
* struct wcn36xx_vif - holds VIF related fields
*
@@ -193,7 +223,7 @@ struct wcn36xx {
u8 fw_minor;
u8 fw_major;
u32 fw_feat_caps[WCN36XX_HAL_CAPS_SIZE];
- u32 chip_version;
+ enum wcn36xx_chip_type chip_version;
/* extra byte for the NULL termination */
u8 crm_version[WCN36XX_HAL_VERSION_LENGTH + 1];
@@ -204,6 +234,7 @@ struct wcn36xx {
int rx_irq;
void __iomem *mmio;
+ struct wcn36xx_platform_data *wcn36xx_data;
struct wcn36xx_platform_ctrl_ops *ctrl_ops;
/*
* smd_buf must be protected with smd_mutex to garantee
@@ -241,9 +272,6 @@ struct wcn36xx {
};
-#define WCN36XX_CHIP_3660 0
-#define WCN36XX_CHIP_3680 1
-
static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
u8 major,
u8 minor,
diff --git a/drivers/net/wireless/ath/wcn36xx/wcnss_core.c b/drivers/net/wireless/ath/wcn36xx/wcnss_core.c
new file mode 100644
index 000000000000..e7d389e87886
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/wcnss_core.c
@@ -0,0 +1,295 @@
+#include <linux/completion.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/workqueue.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/clk.h>
+#include <linux/soc/qcom/smd.h>
+#include "wcn36xx.h"
+#include "wcnss_core.h"
+
+#define WCNSS_CTRL_TIMEOUT (msecs_to_jiffies(500))
+
+static int wcnss_core_config(struct platform_device *pdev, void __iomem *base)
+{
+ int ret = 0;
+ u32 value, iris_read_v = INVALID_IRIS_REG;
+ int clk_48m = 0;
+
+ value = readl_relaxed(base + SPARE_OFFSET);
+ value |= WCNSS_FW_DOWNLOAD_ENABLE;
+ writel_relaxed(value, base + SPARE_OFFSET);
+
+ writel_relaxed(0, base + PMU_OFFSET);
+ value = readl_relaxed(base + PMU_OFFSET);
+ value |= WCNSS_PMU_CFG_GC_BUS_MUX_SEL_TOP |
+ WCNSS_PMU_CFG_IRIS_XO_EN;
+ writel_relaxed(value, base + PMU_OFFSET);
+
+ iris_read_v = readl_relaxed(base + IRIS_REG_OFFSET);
+ pr_info("iris_read_v: 0x%x\n", iris_read_v);
+
+ iris_read_v &= 0xffff;
+ iris_read_v |= 0x04;
+ writel_relaxed(iris_read_v, base + IRIS_REG_OFFSET);
+
+ value = readl_relaxed(base + PMU_OFFSET);
+ value |= WCNSS_PMU_CFG_IRIS_XO_READ;
+ writel_relaxed(value, base + PMU_OFFSET);
+
+ while (readl_relaxed(base + PMU_OFFSET) &
+ WCNSS_PMU_CFG_IRIS_XO_READ_STS)
+ cpu_relax();
+
+ iris_read_v = readl_relaxed(base + 0x1134);
+ pr_info("wcnss: IRIS Reg: 0x%08x\n", iris_read_v);
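+	/*
+	 * The top two bits of the IRIS register identify the reference
+	 * clock: non-zero means a 19.2 MHz XO, zero means 48 MHz, which
+	 * selects WCNSS_PMU_CFG_IRIS_XO_MODE_48 below.
+	 */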
+ clk_48m = (iris_read_v >> 30) ? 0 : 1;
+ value &= ~WCNSS_PMU_CFG_IRIS_XO_READ;
+
+ /* XO_MODE[b2:b1]. Clear implies 19.2MHz */
+ value &= ~WCNSS_PMU_CFG_IRIS_XO_MODE;
+
+ if (clk_48m)
+ value |= WCNSS_PMU_CFG_IRIS_XO_MODE_48;
+
+ writel_relaxed(value, base + PMU_OFFSET);
+
+ /* Reset IRIS */
+ value |= WCNSS_PMU_CFG_IRIS_RESET;
+ writel_relaxed(value, base + PMU_OFFSET);
+
+ while (readl_relaxed(base + PMU_OFFSET) &
+ WCNSS_PMU_CFG_IRIS_RESET_STS)
+ cpu_relax();
+
+ /* reset IRIS reset bit */
+ value &= ~WCNSS_PMU_CFG_IRIS_RESET;
+ writel_relaxed(value, base + PMU_OFFSET);
+
+ /* start IRIS XO configuration */
+ value |= WCNSS_PMU_CFG_IRIS_XO_CFG;
+ writel_relaxed(value, base + PMU_OFFSET);
+
+ /* Wait for XO configuration to finish */
+ while (readl_relaxed(base + PMU_OFFSET) &
+ WCNSS_PMU_CFG_IRIS_XO_CFG_STS)
+ cpu_relax();
+
+ /* Stop IRIS XO configuration */
+ value &= ~(WCNSS_PMU_CFG_GC_BUS_MUX_SEL_TOP |
+ WCNSS_PMU_CFG_IRIS_XO_CFG);
+ writel_relaxed(value, base + PMU_OFFSET);
+
+ msleep(200);
+
+ return ret;
+}
+
+int wcnss_core_prepare(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res;
+ void __iomem *wcnss_base;
+
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "pronto_phy_base");
+ if (!res) {
+ ret = -EIO;
+ dev_err(&pdev->dev, "resource pronto_phy_base failed\n");
+ return ret;
+ }
+
+ wcnss_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(wcnss_base)) {
+ dev_err(&pdev->dev, "pronto_phy_base map failed\n");
+ return PTR_ERR(wcnss_base);
+ }
+
+ ret = wcnss_core_config(pdev, wcnss_base);
+ return ret;
+}
+
+struct wcnss_platform_data {
+ struct qcom_smd_channel *channel;
+ struct completion ack;
+ struct mutex lock;
+
+ struct work_struct rx_work;
+ struct work_struct download_work;
+
+ struct qcom_smd_device *sdev;
+};
+
+static struct completion fw_ready_compl;
+#define NV_FILE_NAME "wlan/prima/WCNSS_qcom_wlan_nv.bin"
+static void wcn36xx_nv_download_work(struct work_struct *worker)
+{
+ int ret = 0, i;
+ const struct firmware *nv = NULL;
+ struct wcnss_platform_data *wcnss_data =
+ container_of(worker, struct wcnss_platform_data, download_work);
+ struct device *dev = &wcnss_data->sdev->dev;
+
+ struct nvbin_dnld_req_msg *msg;
+ const void *nv_blob_start;
+ char *pkt = NULL;
+ int nv_blob_size = 0, fragments;
+
+ ret = request_firmware(&nv, NV_FILE_NAME, dev);
+ if (ret || !nv || !nv->data || !nv->size) {
+		dev_err(dev, "failed to request firmware %s (ret = %d)\n",
+			NV_FILE_NAME, ret);
+ return;
+ }
+
+	nv_blob_start = nv->data + 4;
+	nv_blob_size = nv->size - 4;
+
+	fragments = (nv_blob_size + NV_FRAGMENT_SIZE - 1) / NV_FRAGMENT_SIZE;
+
+ pkt = kzalloc(sizeof(struct nvbin_dnld_req_msg) + NV_FRAGMENT_SIZE,
+ GFP_KERNEL);
+	if (!pkt) {
+		dev_err(dev, "failed to allocate packet for nv download\n");
+		release_firmware(nv);
+		return;
+	}
+
+ msg = (struct nvbin_dnld_req_msg *)pkt;
+ msg->hdr.msg_type = WCNSS_NV_DOWNLOAD_REQ;
+ msg->dnld_req_params.msg_flags = 0;
+
+ i = 0;
+ do {
+ int pkt_len = 0;
+
+ msg->dnld_req_params.frag_number = i;
+ if (nv_blob_size > NV_FRAGMENT_SIZE) {
+ msg->dnld_req_params.msg_flags &=
+ ~LAST_FRAGMENT;
+ pkt_len = NV_FRAGMENT_SIZE;
+ } else {
+ pkt_len = nv_blob_size;
+ msg->dnld_req_params.msg_flags |=
+ LAST_FRAGMENT | CAN_RECEIVE_CALDATA;
+ }
+
+ msg->dnld_req_params.nvbin_buffer_size = pkt_len;
+ msg->hdr.msg_len =
+ sizeof(struct nvbin_dnld_req_msg) + pkt_len;
+
+ memcpy(pkt + sizeof(struct nvbin_dnld_req_msg),
+ nv_blob_start + i * NV_FRAGMENT_SIZE, pkt_len);
+
+ ret = qcom_smd_send(wcnss_data->channel, pkt, msg->hdr.msg_len);
+ if (ret) {
+ dev_err(dev, "nv download failed\n");
+ goto out;
+ }
+
+ i++;
+ nv_blob_size -= NV_FRAGMENT_SIZE;
+ msleep(100);
+ } while (nv_blob_size > 0);
+
+out:
+ kfree(pkt);
+ release_firmware(nv);
+ return;
+}
+
+static int qcom_smd_wcnss_ctrl_callback(struct qcom_smd_device *qsdev,
+ const void *data,
+ size_t count)
+{
+ struct wcnss_platform_data *wcnss_data = dev_get_drvdata(&qsdev->dev);
+ struct smd_msg_hdr phdr;
+ const unsigned char *tmp = data;
+
+ memcpy_fromio(&phdr, data, sizeof(struct smd_msg_hdr));
+
+ switch (phdr.msg_type) {
+	/* CBC COMPLETE means the firmware is ready to go */
+ case WCNSS_CBC_COMPLETE_IND:
+ complete(&fw_ready_compl);
+ pr_info("wcnss: received WCNSS_CBC_COMPLETE_IND from FW\n");
+ break;
+
+ case WCNSS_NV_DOWNLOAD_RSP:
+ pr_info("fw_status: %d\n", tmp[sizeof(struct smd_msg_hdr)]);
+ break;
+ }
+
+ complete(&wcnss_data->ack);
+ return 0;
+}
+
+static int qcom_smd_wcnss_ctrl_probe(struct qcom_smd_device *sdev)
+{
+ struct wcnss_platform_data *wcnss_data;
+
+ wcnss_data = devm_kzalloc(&sdev->dev, sizeof(*wcnss_data), GFP_KERNEL);
+ if (!wcnss_data)
+ return -ENOMEM;
+
+ mutex_init(&wcnss_data->lock);
+ init_completion(&wcnss_data->ack);
+
+ wcnss_data->sdev = sdev;
+
+ dev_set_drvdata(&sdev->dev, wcnss_data);
+ wcnss_data->channel = sdev->channel;
+
+ INIT_WORK(&wcnss_data->download_work, wcn36xx_nv_download_work);
+
+ of_platform_populate(sdev->dev.of_node, NULL, NULL, &sdev->dev);
+
+ /* We are ready for download here */
+ schedule_work(&wcnss_data->download_work);
+ return 0;
+}
+
+static void qcom_smd_wcnss_ctrl_remove(struct qcom_smd_device *sdev)
+{
+ of_platform_depopulate(&sdev->dev);
+}
+
+static const struct qcom_smd_id qcom_smd_wcnss_ctrl_match[] = {
+ { .name = "WCNSS_CTRL" },
+ {}
+};
+
+static struct qcom_smd_driver qcom_smd_wcnss_ctrl_driver = {
+ .probe = qcom_smd_wcnss_ctrl_probe,
+ .remove = qcom_smd_wcnss_ctrl_remove,
+ .callback = qcom_smd_wcnss_ctrl_callback,
+ .smd_match_table = qcom_smd_wcnss_ctrl_match,
+ .driver = {
+ .name = "qcom_smd_wcnss_ctrl",
+ .owner = THIS_MODULE,
+ },
+};
+
+void wcnss_core_init(void)
+{
+ int ret = 0;
+
+ init_completion(&fw_ready_compl);
+ qcom_smd_driver_register(&qcom_smd_wcnss_ctrl_driver);
+
+ ret = wait_for_completion_interruptible_timeout(
+ &fw_ready_compl, msecs_to_jiffies(FW_READY_TIMEOUT));
+	if (ret <= 0)
+		pr_err("timeout waiting for wcnss firmware ready indicator\n");
+}
+
+void wcnss_core_deinit(void)
+{
+ qcom_smd_driver_unregister(&qcom_smd_wcnss_ctrl_driver);
+}
diff --git a/drivers/net/wireless/ath/wcn36xx/wcnss_core.h b/drivers/net/wireless/ath/wcn36xx/wcnss_core.h
new file mode 100644
index 000000000000..49524c8dddfb
--- /dev/null
+++ b/drivers/net/wireless/ath/wcn36xx/wcnss_core.h
@@ -0,0 +1,99 @@
+#ifndef _WCNSS_CORE_H_
+#define _WCNSS_CORE_H_
+
+#define PMU_OFFSET 0x1004
+#define SPARE_OFFSET 0x1088
+#define IRIS_REG_OFFSET 0x1134
+
+#define INVALID_IRIS_REG 0xbaadbaad
+
+#define WCNSS_PMU_CFG_IRIS_XO_CFG BIT(3)
+#define WCNSS_PMU_CFG_IRIS_XO_EN BIT(4)
+#define WCNSS_PMU_CFG_GC_BUS_MUX_SEL_TOP BIT(5)
+#define WCNSS_PMU_CFG_IRIS_XO_CFG_STS BIT(6) /* 1: in progress, 0: done */
+
+#define WCNSS_PMU_CFG_IRIS_RESET BIT(7)
+#define WCNSS_PMU_CFG_IRIS_RESET_STS BIT(8) /* 1: in progress, 0: done */
+#define WCNSS_PMU_CFG_IRIS_XO_READ BIT(9)
+#define WCNSS_PMU_CFG_IRIS_XO_READ_STS BIT(10)
+#define WCNSS_FW_DOWNLOAD_ENABLE BIT(25)
+
+#define WCNSS_PMU_CFG_IRIS_XO_MODE 0x6
+#define WCNSS_PMU_CFG_IRIS_XO_MODE_48 (3 << 1)
+
+#define NV_DOWNLOAD_TIMEOUT 500
+#define NV_FRAGMENT_SIZE 3072
+#define MAX_CALIBRATED_DATA_SIZE (64*1024)
+#define LAST_FRAGMENT (1 << 0)
+#define MESSAGE_TO_FOLLOW (1 << 1)
+#define CAN_RECEIVE_CALDATA (1 << 15)
+#define WCNSS_RESP_SUCCESS 1
+#define WCNSS_RESP_FAIL 0
+
+
+#define WCNSS_NV_DOWNLOAD_REQ 0x01000002
+#define WCNSS_NV_DOWNLOAD_RSP 0x01000003
+#define WCNSS_CBC_COMPLETE_IND 0x0100000C
+
+/* timeout (10 s) for the firmware ready indicator */
+#define FW_READY_TIMEOUT (10000)
+
+
+struct smd_msg_hdr {
+ unsigned int msg_type;
+ unsigned int msg_len;
+};
+
+struct nvbin_dnld_req_params {
+ /* Fragment sequence number of the NV bin Image. NV Bin Image
+ * might not fit into one message due to size limitation of
+ * the SMD channel FIFO so entire NV blob is chopped into
+ * multiple fragments starting with sequence number 0. The
+ * last fragment is indicated by marking is_last_fragment field
+ * to 1. At receiving side, NV blobs would be concatenated
+ * together without any padding bytes in between.
+ */
+ unsigned short frag_number;
+
+ /* bit 0: When set to 1 it indicates that no more fragments will
+ * be sent.
+ * bit 1: When set, this message will be followed by another message
+ * bit 2- bit 14: Reserved
+ * bit 15: when set, it indicates that the sender is capable of
+ * receiving Calibrated data.
+ */
+ unsigned short msg_flags;
+
+ /* NV Image size (number of bytes) */
+ unsigned int nvbin_buffer_size;
+
+ /* Following the 'nvbin_buffer_size', there should be
+ * nvbin_buffer_size bytes of NV bin Image i.e.
+ * uint8[nvbin_buffer_size].
+ */
+};
+
+struct nvbin_dnld_req_msg {
+ /* Note: The length specified in nvbin_dnld_req_msg messages
+ * should be hdr.msg_len = sizeof(nvbin_dnld_req_msg) +
+ * nvbin_buffer_size.
+ */
+ struct smd_msg_hdr hdr;
+ struct nvbin_dnld_req_params dnld_req_params;
+};
+
+struct wcnss_version {
+ struct smd_msg_hdr hdr;
+ unsigned char major;
+ unsigned char minor;
+ unsigned char version;
+ unsigned char revision;
+};
+
+
+int wcnss_core_prepare(struct platform_device *pdev);
+void wcnss_core_init(void);
+void wcnss_core_deinit(void);
+
+#endif
+
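
A minimal sketch of how one download request is sized and flagged under the
rules above; the helper name is illustrative and not part of the driver:

static size_t nv_fill_fragment(struct nvbin_dnld_req_msg *msg, char *payload,
			       const u8 *blob, size_t remaining,
			       unsigned short frag)
{
	size_t len = min_t(size_t, remaining, NV_FRAGMENT_SIZE);

	msg->hdr.msg_type = WCNSS_NV_DOWNLOAD_REQ;
	msg->dnld_req_params.frag_number = frag;
	/* only the final fragment sets LAST_FRAGMENT (and advertises that
	 * the sender can receive calibration data) */
	msg->dnld_req_params.msg_flags = (len == remaining) ?
			(LAST_FRAGMENT | CAN_RECEIVE_CALDATA) : 0;
	msg->dnld_req_params.nvbin_buffer_size = len;
	/* total SMD message length = header + params + payload bytes */
	msg->hdr.msg_len = sizeof(*msg) + len;
	memcpy(payload, blob + frag * NV_FRAGMENT_SIZE, len);

	return len;
}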
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 384574c3987c..dbfab80f4d36 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -924,8 +924,8 @@ EXPORT_SYMBOL(of_io_request_and_map);
* CPU addr (phys_addr_t) : pna cells
* size : nsize cells
*
- * It returns -ENODEV if "dma-ranges" property was not found
- * for this device in DT.
+ * Return 0 on success, -ENODEV if the "dma-ranges" property was not found for
+ * this device in DT, or -EINVAL if the CPU address or size is invalid.
*/
int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size)
{
@@ -986,6 +986,22 @@ int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *siz
*dma_addr = dmaaddr;
*size = of_read_number(ranges + naddr + pna, nsize);
+ /*
+ * DT nodes sometimes incorrectly set the size as a mask. Work around
+ * those incorrect DT by computing the size as mask + 1.
+ */
+ if (*size & 1) {
+ pr_warn("%s: size 0x%llx for dma-range in node(%s) set as mask\n",
+ __func__, *size, np->full_name);
+ *size = *size + 1;
+ }
+
+ if (!*size) {
+ pr_err("%s: invalid size zero for dma-range in node(%s)\n",
+ __func__, np->full_name);
+ ret = -EINVAL;
+ goto out;
+ }
pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
*dma_addr, *paddr, *size);
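
A concrete case of the size-as-mask workaround above:

	/*
	 * A dma-ranges entry whose size cell reads 0xffffffff is odd, so it
	 * is taken to be a mask and corrected to 0xffffffff + 1 =
	 * 0x100000000 (a true 4 GiB range).  A size that works out to zero
	 * is still rejected with -EINVAL.
	 */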
diff --git a/drivers/of/device.c b/drivers/of/device.c
index 8b91ea241b10..c4a2869c2cb0 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -70,7 +70,7 @@ int of_device_add(struct platform_device *ofdev)
}
/**
- * of_dma_configure - Setup DMA configuration
+ * of_dma_configure_masks - Setup DMA masks and offset
* @dev: Device to apply DMA configuration
* @np: Pointer to OF node having DMA configuration
*
@@ -81,13 +81,11 @@ int of_device_add(struct platform_device *ofdev)
* can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events
* to fix up DMA configuration.
*/
-void of_dma_configure(struct device *dev, struct device_node *np)
+void of_dma_configure_masks(struct device *dev, struct device_node *np)
{
- u64 dma_addr, paddr, size;
- int ret;
- bool coherent;
+ u64 dma_addr, paddr, size, range_mask;
unsigned long offset;
- struct iommu_ops *iommu;
+ int ret;
/*
* Set default coherent_dma_mask to 32 bit. Drivers are expected to
@@ -105,25 +103,11 @@ void of_dma_configure(struct device *dev, struct device_node *np)
ret = of_dma_get_range(np, &dma_addr, &paddr, &size);
if (ret < 0) {
- dma_addr = offset = 0;
- size = dev->coherent_dma_mask + 1;
+ range_mask = dev->coherent_dma_mask + 1;
+ offset = 0;
} else {
+ range_mask = DMA_BIT_MASK(ilog2(dma_addr + size));
offset = PFN_DOWN(paddr - dma_addr);
-
- /*
- * Add a work around to treat the size as mask + 1 in case
- * it is defined in DT as a mask.
- */
- if (size & 1) {
- dev_warn(dev, "Invalid size 0x%llx for dma-range\n",
- size);
- size = size + 1;
- }
-
- if (!size) {
- dev_err(dev, "Adjusted size 0x%llx invalid\n", size);
- return;
- }
dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset);
}
@@ -133,22 +117,59 @@ void of_dma_configure(struct device *dev, struct device_node *np)
* Limit coherent and dma mask based on size and default mask
* set by the driver.
*/
- dev->coherent_dma_mask = min(dev->coherent_dma_mask,
- DMA_BIT_MASK(ilog2(dma_addr + size)));
- *dev->dma_mask = min((*dev->dma_mask),
- DMA_BIT_MASK(ilog2(dma_addr + size)));
+ dev->coherent_dma_mask = min(dev->coherent_dma_mask, range_mask);
+ *dev->dma_mask = min((*dev->dma_mask), range_mask);
+}
+EXPORT_SYMBOL_GPL(of_dma_configure_masks);
+
+/**
+ * of_dma_configure_ops - Setup DMA operations
+ * @dev: Device to apply DMA configuration
+ * @np: Pointer to OF node having DMA configuration
+ *
+ * Try to get the device's DMA configuration from DT and update it
+ * accordingly.
+ */
+int of_dma_configure_ops(struct device *dev, struct device_node *np)
+{
+ u64 dma_addr, paddr, size;
+ struct iommu_ops *iommu;
+ bool coherent;
+ int ret;
+
+ ret = of_dma_get_range(np, &dma_addr, &paddr, &size);
+ if (ret < 0) {
+ dma_addr = 0;
+ size = dev->coherent_dma_mask + 1;
+ }
coherent = of_dma_is_coherent(np);
dev_dbg(dev, "device is%sdma coherent\n",
coherent ? " " : " not ");
iommu = of_iommu_configure(dev, np);
+ if (IS_ERR(iommu))
+ return PTR_ERR(iommu);
dev_dbg(dev, "device is%sbehind an iommu\n",
iommu ? " " : " not ");
arch_setup_dma_ops(dev, dma_addr, size, iommu, coherent);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_dma_configure_ops);
+
+/**
+ * of_dma_deconfigure - Clean up DMA configuration
+ * @dev: Device for which to clean up DMA configuration
+ *
+ * Clean up all configuration performed by of_dma_configure_ops() and free all
+ * resources that have been allocated.
+ */
+void of_dma_deconfigure(struct device *dev)
+{
+ arch_teardown_dma_ops(dev);
}
-EXPORT_SYMBOL_GPL(of_dma_configure);
int of_device_register(struct platform_device *pdev)
{
diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c
index 5751dc5b6494..c65803da2067 100644
--- a/drivers/of/of_pci.c
+++ b/drivers/of/of_pci.c
@@ -132,7 +132,8 @@ void of_pci_dma_configure(struct pci_dev *pci_dev)
if (!bridge->parent)
return;
- of_dma_configure(dev, bridge->parent->of_node);
+ of_dma_configure_masks(dev, bridge->parent->of_node);
+ of_dma_configure_ops(dev, bridge->parent->of_node);
pci_put_host_bridge_device(bridge);
}
EXPORT_SYMBOL_GPL(of_pci_dma_configure);
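
Bus code that used to make a single of_dma_configure() call now uses the split
interface in two steps, as the PCI host path above shows. A minimal sketch for
a hypothetical bus setup helper (the function name is made up):

static void my_bus_setup_dma(struct device *dev, struct device_node *np)
{
	/* 1) derive the DMA masks and PFN offset from dma-ranges */
	of_dma_configure_masks(dev, np);

	/* 2) install dma_map_ops (and IOMMU ops, when one is present) */
	if (of_dma_configure_ops(dev, np))
		dev_warn(dev, "failed to set up DMA ops\n");
}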
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 1001efaedcb8..338b6744ff1e 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -150,11 +150,6 @@ struct platform_device *of_device_alloc(struct device_node *np,
}
EXPORT_SYMBOL(of_device_alloc);
-static void of_dma_deconfigure(struct device *dev)
-{
- arch_teardown_dma_ops(dev);
-}
-
/**
* of_platform_device_create_pdata - Alloc, initialize and register an of_device
* @np: pointer to node to create device for
@@ -183,11 +178,10 @@ static struct platform_device *of_platform_device_create_pdata(
dev->dev.bus = &platform_bus_type;
dev->dev.platform_data = platform_data;
- of_dma_configure(&dev->dev, dev->dev.of_node);
+ of_dma_configure_masks(&dev->dev, dev->dev.of_node);
of_msi_configure(&dev->dev, dev->dev.of_node);
if (of_device_add(dev) != 0) {
- of_dma_deconfigure(&dev->dev);
platform_device_put(dev);
goto err_clear_flag;
}
@@ -247,7 +241,8 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
dev_set_name(&dev->dev, "%s", bus_id);
else
of_device_make_bus_id(&dev->dev);
- of_dma_configure(&dev->dev, dev->dev.of_node);
+ of_dma_configure_masks(&dev->dev, dev->dev.of_node);
+ of_dma_configure_ops(&dev->dev, dev->dev.of_node);
/* Allow the HW Peripheral ID to be overridden */
prop = of_get_property(node, "arm,primecell-periphid", NULL);
@@ -479,11 +474,12 @@ static int of_platform_device_destroy(struct device *dev, void *data)
if (dev->bus == &platform_bus_type)
platform_device_unregister(to_platform_device(dev));
#ifdef CONFIG_ARM_AMBA
- else if (dev->bus == &amba_bustype)
+ else if (dev->bus == &amba_bustype) {
amba_device_unregister(to_amba_device(dev));
+ of_dma_deconfigure(dev);
+ }
#endif
- of_dma_deconfigure(dev);
of_node_clear_flag(dev->of_node, OF_POPULATED);
of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
return 0;
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index d5e58bae95cf..f87b1c98292f 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -30,6 +30,11 @@ config PCI_IMX6
select PCIEPORTBUS
select PCIE_DW
+config PCI_QCOM
+	bool "Qualcomm PCIe controller"
+	default y
+	depends on ARCH_QCOM && !ARM64
+	help
+	  Say Y here to enable PCIe controller support on Qualcomm SoCs such
+	  as the IPQ8064.
+
config PCI_TEGRA
bool "NVIDIA Tegra PCIe controller"
depends on ARCH_TEGRA && !ARM64
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 140d66f796e4..451d49e3df44 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o
obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o
obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
+obj-$(CONFIG_PCI_QCOM) += pci-qcom.o
diff --git a/drivers/pci/host/pci-qcom.c b/drivers/pci/host/pci-qcom.c
new file mode 100644
index 000000000000..5a39f6368aa3
--- /dev/null
+++ b/drivers/pci/host/pci-qcom.c
@@ -0,0 +1,967 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * QCOM MSM PCIe controller driver.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_gpio.h>
+#include <linux/msi.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of_address.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/delay.h>
+
+#define INT_PCI_MSI_NR (8 * 32)
+#define MSM_PCIE_MSI_PHY 0xa0000000
+
+#define PCIE20_MSI_CTRL_ADDR (0x820)
+#define PCIE20_MSI_CTRL_UPPER_ADDR (0x824)
+#define PCIE20_MSI_CTRL_INTR_EN (0x828)
+#define PCIE20_MSI_CTRL_INTR_MASK (0x82C)
+#define PCIE20_MSI_CTRL_INTR_STATUS (0x830)
+
+#define PCIE20_MSI_CTRL_MAX 8
+/* Root Complex Port vendor/device IDs */
+#define PCIE_VENDOR_ID_RCP 0x17cb
+#define PCIE_DEVICE_ID_RCP 0x0101
+
+#define __set(v, a, b) (((v) << (b)) & GENMASK(a, b))
+
+#define PCIE20_PARF_PCS_DEEMPH 0x34
+#define PCIE20_PARF_PCS_DEEMPH_TX_DEEMPH_GEN1(x) __set(x, 21, 16)
+#define PCIE20_PARF_PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) __set(x, 13, 8)
+#define PCIE20_PARF_PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) __set(x, 5, 0)
+
+#define PCIE20_PARF_PCS_SWING 0x38
+#define PCIE20_PARF_PCS_SWING_TX_SWING_FULL(x) __set(x, 14, 8)
+#define PCIE20_PARF_PCS_SWING_TX_SWING_LOW(x) __set(x, 6, 0)
+
+#define PCIE20_PARF_PHY_CTRL 0x40
+#define PCIE20_PARF_PHY_CTRL_PHY_TX0_TERM_OFFST(x) __set(x, 20, 16)
+#define PCIE20_PARF_PHY_CTRL_PHY_LOS_LEVEL(x) __set(x, 12, 8)
+#define PCIE20_PARF_PHY_CTRL_PHY_RTUNE_REQ (1 << 4)
+#define PCIE20_PARF_PHY_CTRL_PHY_TEST_BURNIN (1 << 2)
+#define PCIE20_PARF_PHY_CTRL_PHY_TEST_BYPASS (1 << 1)
+#define PCIE20_PARF_PHY_CTRL_PHY_TEST_PWR_DOWN (1 << 0)
+
+#define PCIE20_PARF_PHY_REFCLK 0x4C
+#define PCIE20_PARF_CONFIG_BITS 0x50
+
+#define PCIE20_ELBI_SYS_CTRL 0x04
+#define PCIE20_ELBI_SYS_CTRL_LTSSM_EN 0x01
+
+#define PCIE20_CAP 0x70
+#define PCIE20_CAP_LINKCTRLSTATUS (PCIE20_CAP + 0x10)
+
+#define PCIE20_COMMAND_STATUS 0x04
+#define PCIE20_BUSNUMBERS 0x18
+#define PCIE20_MEMORY_BASE_LIMIT 0x20
+
+#define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818
+#define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c
+#define PCIE20_PLR_IATU_VIEWPORT 0x900
+#define PCIE20_PLR_IATU_CTRL1 0x904
+#define PCIE20_PLR_IATU_CTRL2 0x908
+#define PCIE20_PLR_IATU_LBAR 0x90C
+#define PCIE20_PLR_IATU_UBAR 0x910
+#define PCIE20_PLR_IATU_LAR 0x914
+#define PCIE20_PLR_IATU_LTAR 0x918
+#define PCIE20_PLR_IATU_UTAR 0x91c
+
+#define MSM_PCIE_DEV_CFG_ADDR 0x01000000
+
+#define RD 0
+#define WR 1
+
+#define MAX_RC_NUM 3
+#define PCIE_BUS_PRIV_DATA(pdev) \
+ (((struct pci_sys_data *)pdev->bus->sysdata)->private_data)
+
+/* PCIe TLP types that we are interested in */
+#define PCI_CFG0_RDWR 0x4
+#define PCI_CFG1_RDWR 0x5
+
+#define readl_poll_timeout(addr, val, cond, sleep_us, timeout_us) \
+({ \
+ unsigned long timeout = jiffies + usecs_to_jiffies(timeout_us); \
+ might_sleep_if(timeout_us); \
+ for (;;) { \
+ (val) = readl(addr); \
+ if (cond) \
+ break; \
+ if (timeout_us && time_after(jiffies, timeout)) { \
+ (val) = readl(addr); \
+ break; \
+ } \
+ if (sleep_us) \
+ usleep_range(DIV_ROUND_UP(sleep_us, 4), sleep_us); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+})
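
This local polling helper is what the probe path further down uses to wait for
link training; a usage sketch mirroring that call (pcie is a struct qcom_pcie *):

	u32 val;
	int ret;

	/* poll the link-up bit for up to 100 ms, sleeping ~10 ms per try */
	ret = readl_poll_timeout(pcie->dwc_base + PCIE20_CAP_LINKCTRLSTATUS,
				 val, val & BIT(29), 10000, 100000);
	if (ret == -ETIMEDOUT)
		dev_err(pcie->dev, "PCIe link did not come up\n");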
+
+struct qcom_msi {
+ struct msi_controller chip;
+ DECLARE_BITMAP(used, INT_PCI_MSI_NR);
+ struct irq_domain *domain;
+ unsigned long pages;
+ struct mutex lock;
+ int irq;
+};
+
+struct qcom_pcie {
+ void __iomem *elbi_base;
+ void __iomem *parf_base;
+ void __iomem *dwc_base;
+ void __iomem *cfg_base;
+ struct device *dev;
+ int reset_gpio;
+ bool ext_phy_ref_clk;
+ struct clk *iface_clk;
+ struct clk *bus_clk;
+ struct clk *phy_clk;
+ int irq_int[4];
+ struct reset_control *axi_reset;
+ struct reset_control *ahb_reset;
+ struct reset_control *por_reset;
+ struct reset_control *pci_reset;
+ struct reset_control *phy_reset;
+
+ struct resource conf;
+ struct resource io;
+ struct resource mem;
+
+ struct regulator *vdd_supply;
+ struct regulator *avdd_supply;
+ struct regulator *pcie_clk_supply;
+ struct regulator *pcie_ext3p3v_supply;
+
+ struct qcom_msi msi;
+};
+
+static int nr_controllers;
+static DEFINE_SPINLOCK(qcom_hw_pci_lock);
+
+static inline struct qcom_pcie *sys_to_pcie(struct pci_sys_data *sys)
+{
+ return sys->private_data;
+}
+
+
+static inline int is_msm_pcie_rc(struct pci_bus *bus)
+{
+ return (bus->number == 0);
+}
+
+static int qcom_pcie_is_link_up(struct qcom_pcie *dev)
+{
+ return readl_relaxed(dev->dwc_base + PCIE20_CAP_LINKCTRLSTATUS) &
+ BIT(29);
+}
+
+static inline int msm_pcie_get_cfgtype(struct pci_bus *bus)
+{
+ /*
+ * http://www.tldp.org/LDP/tlk/dd/pci.html
+ * Pass it onto the secondary bus interface unchanged if the
+ * bus number specified is greater than the secondary bus
+ * number and less than or equal to the subordinate bus
+ * number.
+ *
+ * Read/Write to the RC and Device/Switch connected to the RC
+ * are CFG0 type transactions. Rest have to be forwarded
+ * down stream as CFG1 transactions.
+ *
+ */
+ if (bus->number == 0)
+ return PCI_CFG0_RDWR;
+
+ return PCI_CFG0_RDWR;
+}
+
+static void msm_pcie_config_cfgtype(struct pci_bus *bus, u32 devfn)
+{
+ uint32_t bdf, cfgtype;
+ struct qcom_pcie *dev = sys_to_pcie(bus->sysdata);
+
+ cfgtype = msm_pcie_get_cfgtype(bus);
+
+ if (cfgtype == PCI_CFG0_RDWR) {
+ bdf = MSM_PCIE_DEV_CFG_ADDR;
+ } else {
+ /*
+ * iATU Lower Target Address Register
+ * Bits Description
+ * *-1:0 Forms bits [*:0] of the
+ * start address of the new
+ * address of the translated
+ * region. The start address
+ * must be aligned to a
+ * CX_ATU_MIN_REGION_SIZE kB
+ * boundary, so these bits are
+ * always 0. A write to this
+ * location is ignored by the
+ * PCIe core.
+ * 31:*1 Forms bits [31:*] of the of
+ * the new address of the
+ * translated region.
+ *
+ * * is log2(CX_ATU_MIN_REGION_SIZE)
+ */
+ bdf = (((bus->number & 0xff) << 24) & 0xff000000) |
+ (((devfn & 0xff) << 16) & 0x00ff0000);
+ }
+
+ writel_relaxed(0, dev->dwc_base + PCIE20_PLR_IATU_VIEWPORT);
+ wmb();
+
+ /* Program Bdf Address */
+ writel_relaxed(bdf, dev->dwc_base + PCIE20_PLR_IATU_LTAR);
+ wmb();
+
+ /* Write Config Request Type */
+ writel_relaxed(cfgtype, dev->dwc_base + PCIE20_PLR_IATU_CTRL1);
+ wmb();
+}
+
+static inline int msm_pcie_oper_conf(struct pci_bus *bus, u32 devfn, int oper,
+ int where, int size, u32 *val)
+{
+ uint32_t word_offset, byte_offset, mask;
+ uint32_t rd_val, wr_val;
+ struct qcom_pcie *dev = sys_to_pcie(bus->sysdata);
+ void __iomem *config_base;
+ int rc;
+
+ rc = is_msm_pcie_rc(bus);
+
+ /*
+ * For downstream bus, make sure link is up
+ */
+ if (rc && (devfn != 0)) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ } else if ((!rc) && (!qcom_pcie_is_link_up(dev))) {
+ *val = ~0;
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ }
+
+ msm_pcie_config_cfgtype(bus, devfn);
+
+ word_offset = where & ~0x3;
+ byte_offset = where & 0x3;
+ mask = (~0 >> (8 * (4 - size))) << (8 * byte_offset);
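+	/*
+	 * e.g. a 2-byte access at where = 0x06: word_offset = 0x04,
+	 * byte_offset = 2, mask = 0xffff0000 (the upper half-word of the
+	 * 32-bit register).
+	 */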
+
+ config_base = (rc) ? dev->dwc_base : dev->cfg_base;
+ rd_val = readl_relaxed(config_base + word_offset);
+
+ if (oper == RD) {
+ *val = ((rd_val & mask) >> (8 * byte_offset));
+ } else {
+ wr_val = (rd_val & ~mask) |
+ ((*val << (8 * byte_offset)) & mask);
+ writel_relaxed(wr_val, config_base + word_offset);
+ wmb(); /* ensure config data is written to hardware register */
+ }
+
+ return 0;
+}
+
+static int msm_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+ int size, u32 *val)
+{
+ return msm_pcie_oper_conf(bus, devfn, RD, where, size, val);
+}
+
+static int msm_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+ int where, int size, u32 val)
+{
+	/*
+	 * An attempt to reset the secondary bus causes the PCIe core to
+	 * reset, so disable the secondary bus reset functionality.
+	 */
+ if ((bus->number == 0) && (where == PCI_BRIDGE_CONTROL) &&
+ (val & PCI_BRIDGE_CTL_BUS_RESET)) {
+ pr_info("PCIE secondary bus reset not supported\n");
+ val &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ }
+
+ return msm_pcie_oper_conf(bus, devfn, WR, where, size, &val);
+}
+
+static int qcom_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ struct qcom_pcie *pcie_dev = PCIE_BUS_PRIV_DATA(dev);
+
+ return pcie_dev->irq_int[pin-1];
+}
+
+static int qcom_pcie_setup(int nr, struct pci_sys_data *sys)
+{
+ struct qcom_pcie *qcom_pcie = sys->private_data;
+
+	/*
+	 * Tell the Linux PCI framework to allocate device memory (BARs)
+	 * from this controller's memory and I/O resources.
+	 */
+ sys->mem_offset = 0;
+ sys->io_offset = 0;
+
+ pci_add_resource(&sys->resources, &qcom_pcie->mem);
+ pci_add_resource(&sys->resources, &qcom_pcie->io);
+
+ return 1;
+}
+
+static struct pci_ops qcom_pcie_ops = {
+ .read = msm_pcie_rd_conf,
+ .write = msm_pcie_wr_conf,
+};
+
+static struct hw_pci qcom_hw_pci[MAX_RC_NUM] = {
+ {
+ .ops = &qcom_pcie_ops,
+ .nr_controllers = 1,
+ .swizzle = pci_common_swizzle,
+ .setup = qcom_pcie_setup,
+ .map_irq = qcom_pcie_map_irq,
+ },
+ {
+ .ops = &qcom_pcie_ops,
+ .nr_controllers = 1,
+ .swizzle = pci_common_swizzle,
+ .setup = qcom_pcie_setup,
+ .map_irq = qcom_pcie_map_irq,
+ },
+ {
+ .ops = &qcom_pcie_ops,
+ .nr_controllers = 1,
+ .swizzle = pci_common_swizzle,
+ .setup = qcom_pcie_setup,
+ .map_irq = qcom_pcie_map_irq,
+ },
+};
+
+static inline void qcom_elbi_writel_relaxed(struct qcom_pcie *pcie,
+ u32 val, u32 reg)
+{
+ writel_relaxed(val, pcie->elbi_base + reg);
+}
+
+static inline u32 qcom_elbi_readl_relaxed(struct qcom_pcie *pcie, u32 reg)
+{
+ return readl_relaxed(pcie->elbi_base + reg);
+}
+
+static inline void qcom_parf_writel_relaxed(struct qcom_pcie *pcie,
+ u32 val, u32 reg)
+{
+ writel_relaxed(val, pcie->parf_base + reg);
+}
+
+static inline u32 qcom_parf_readl_relaxed(struct qcom_pcie *pcie, u32 reg)
+{
+ return readl_relaxed(pcie->parf_base + reg);
+}
+
+static void msm_pcie_write_mask(void __iomem *addr,
+ uint32_t clear_mask, uint32_t set_mask)
+{
+ uint32_t val;
+
+ val = (readl_relaxed(addr) & ~clear_mask) | set_mask;
+ writel_relaxed(val, addr);
+ wmb(); /* ensure data is written to hardware register */
+}
+
+static void qcom_pcie_config_controller(struct qcom_pcie *dev)
+{
+ /*
+ * program and enable address translation region 0 (device config
+ * address space); region type config;
+ * axi config address range to device config address range
+ */
+ writel_relaxed(0, dev->dwc_base + PCIE20_PLR_IATU_VIEWPORT);
+ /* ensure that hardware locks the region before programming it */
+ wmb();
+
+ writel_relaxed(4, dev->dwc_base + PCIE20_PLR_IATU_CTRL1);
+ writel_relaxed(BIT(31), dev->dwc_base + PCIE20_PLR_IATU_CTRL2);
+ writel_relaxed(dev->conf.start, dev->dwc_base + PCIE20_PLR_IATU_LBAR);
+ writel_relaxed(0, dev->dwc_base + PCIE20_PLR_IATU_UBAR);
+ writel_relaxed(dev->conf.end, dev->dwc_base + PCIE20_PLR_IATU_LAR);
+ writel_relaxed(MSM_PCIE_DEV_CFG_ADDR,
+ dev->dwc_base + PCIE20_PLR_IATU_LTAR);
+ writel_relaxed(0, dev->dwc_base + PCIE20_PLR_IATU_UTAR);
+ /* ensure that hardware registers the configuration */
+ wmb();
+
+ /*
+ * program and enable address translation region 2 (device resource
+ * address space); region type memory;
+ * axi device bar address range to device bar address range
+ */
+ writel_relaxed(2, dev->dwc_base + PCIE20_PLR_IATU_VIEWPORT);
+ /* ensure that hardware locks the region before programming it */
+ wmb();
+
+ writel_relaxed(0, dev->dwc_base + PCIE20_PLR_IATU_CTRL1);
+ writel_relaxed(BIT(31), dev->dwc_base + PCIE20_PLR_IATU_CTRL2);
+ writel_relaxed(dev->mem.start, dev->dwc_base + PCIE20_PLR_IATU_LBAR);
+ writel_relaxed(0, dev->dwc_base + PCIE20_PLR_IATU_UBAR);
+ writel_relaxed(dev->mem.end, dev->dwc_base + PCIE20_PLR_IATU_LAR);
+ writel_relaxed(dev->mem.start,
+ dev->dwc_base + PCIE20_PLR_IATU_LTAR);
+ writel_relaxed(0, dev->dwc_base + PCIE20_PLR_IATU_UTAR);
+ /* ensure that hardware registers the configuration */
+ wmb();
+
+ /* 1K PCIE buffer setting */
+ writel_relaxed(0x3, dev->dwc_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0);
+ writel_relaxed(0x1, dev->dwc_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1);
+ /* ensure that hardware registers the configuration */
+ wmb();
+}
+
+static int qcom_msi_alloc(struct qcom_msi *chip)
+{
+ int msi;
+
+ mutex_lock(&chip->lock);
+
+ msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
+ if (msi < INT_PCI_MSI_NR)
+ set_bit(msi, chip->used);
+ else
+ msi = -ENOSPC;
+
+ mutex_unlock(&chip->lock);
+
+ return msi;
+}
+
+static void qcom_msi_free(struct qcom_msi *chip, unsigned long irq)
+{
+ struct device *dev = chip->chip.dev;
+
+ mutex_lock(&chip->lock);
+
+ if (!test_bit(irq, chip->used))
+ dev_err(dev, "trying to free unused MSI#%lu\n", irq);
+ else
+ clear_bit(irq, chip->used);
+
+ mutex_unlock(&chip->lock);
+}
+
+
+static irqreturn_t handle_msi_irq(int irq, void *data)
+{
+ int i, j, index;
+ unsigned long val;
+ struct qcom_pcie *dev = data;
+ void __iomem *ctrl_status;
+ struct qcom_msi *msi = &dev->msi;
+
+	/*
+	 * Check for set status bits, clear each by writing it back, and
+	 * dispatch the corresponding irq.
+	 */
+ for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++) {
+ ctrl_status = dev->dwc_base +
+ PCIE20_MSI_CTRL_INTR_STATUS + (i * 12);
+
+ val = readl_relaxed(ctrl_status);
+ while (val) {
+ j = find_first_bit(&val, 32);
+ index = j + (32 * i);
+ writel_relaxed(BIT(j), ctrl_status);
+ /* ensure that interrupt is cleared (acked) */
+ wmb();
+
+ irq = irq_find_mapping(msi->domain, index);
+ if (irq) {
+ if (test_bit(index, msi->used))
+ generic_handle_irq(irq);
+ else
+ dev_info(dev->dev, "unhandled MSI\n");
+ }
+ val = readl_relaxed(ctrl_status);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static inline struct qcom_msi *to_qcom_msi(struct msi_controller *chip)
+{
+ return container_of(chip, struct qcom_msi, chip);
+}
+
+static int qcom_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
+ struct msi_desc *desc)
+{
+ struct qcom_msi *msi = to_qcom_msi(chip);
+ struct msi_msg msg;
+ unsigned int irq;
+ int hwirq;
+
+ hwirq = qcom_msi_alloc(msi);
+ if (hwirq < 0)
+ return hwirq;
+
+ irq = irq_create_mapping(msi->domain, hwirq);
+ if (!irq)
+ return -EINVAL;
+
+ irq_set_msi_desc(irq, desc);
+
+ msg.address_lo = MSM_PCIE_MSI_PHY;
+ /* 32 bit address only */
+ msg.address_hi = 0;
+ msg.data = hwirq;
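+	/*
+	 * The endpoint raises this MSI by writing 'data' (the hwirq number)
+	 * to MSM_PCIE_MSI_PHY; handle_msi_irq() then finds the set status
+	 * bit and maps it back to the Linux irq through the MSI domain.
+	 */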
+
+ write_msi_msg(irq, &msg);
+
+ return 0;
+}
+
+static void qcom_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
+{
+ struct qcom_msi *msi = to_qcom_msi(chip);
+ struct irq_data *d = irq_get_irq_data(irq);
+
+ qcom_msi_free(msi, d->hwirq);
+}
+
+static struct irq_chip qcom_msi_irq_chip = {
+ .name = "PCI-MSI",
+ .irq_enable = unmask_msi_irq,
+ .irq_disable = mask_msi_irq,
+ .irq_mask = mask_msi_irq,
+ .irq_unmask = unmask_msi_irq,
+};
+
+
+static int qcom_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_and_handler(irq, &qcom_msi_irq_chip, handle_simple_irq);
+ irq_set_chip_data(irq, domain->host_data);
+
+ return 0;
+}
+
+
+static const struct irq_domain_ops msi_domain_ops = {
+ .map = qcom_pcie_msi_map,
+};
+static int msm_pcie_msi_init(struct qcom_pcie *pcie, struct platform_device *pdev)
+{
+ int i, rc;
+ struct qcom_msi *msi = &pcie->msi;
+ int err;
+
+ mutex_init(&msi->lock);
+
+ msi->chip.dev = pcie->dev;
+ msi->chip.setup_irq = qcom_msi_setup_irq;
+ msi->chip.teardown_irq = qcom_msi_teardown_irq;
+ msi->domain = irq_domain_add_linear(pdev->dev.of_node, INT_PCI_MSI_NR,
+ &msi_domain_ops, &msi->chip);
+ if (!msi->domain) {
+ dev_err(&pdev->dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
+
+ err = platform_get_irq_byname(pdev, "msi");
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
+ return err;
+ }
+
+ msi->irq = err;
+
+ /* program MSI controller and enable all interrupts */
+ writel_relaxed(MSM_PCIE_MSI_PHY, pcie->dwc_base + PCIE20_MSI_CTRL_ADDR);
+ writel_relaxed(0, pcie->dwc_base + PCIE20_MSI_CTRL_UPPER_ADDR);
+
+ for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++)
+ writel_relaxed(~0, pcie->dwc_base +
+ PCIE20_MSI_CTRL_INTR_EN + (i * 12));
+
+ /* ensure that hardware is configured before proceeding */
+ wmb();
+
+ /* register handler for physical MSI interrupt line */
+ rc = request_irq(msi->irq, handle_msi_irq, IRQF_TRIGGER_RISING,
+ "msm_pcie_msi", pcie);
+ if (rc) {
+ pr_err("Unable to allocate msi interrupt\n");
+ return rc;
+ }
+
+ return rc;
+}
+
+static int qcom_pcie_vreg_on(struct qcom_pcie *qcom_pcie)
+{
+ int err;
+ /* enable regulators */
+ err = regulator_enable(qcom_pcie->vdd_supply);
+ if (err < 0) {
+ dev_err(qcom_pcie->dev, "failed to enable VDD regulator\n");
+ return err;
+ }
+
+ err = regulator_enable(qcom_pcie->pcie_clk_supply);
+ if (err < 0) {
+ dev_err(qcom_pcie->dev, "failed to enable pcie-clk regulator\n");
+ return err;
+ }
+
+ err = regulator_enable(qcom_pcie->avdd_supply);
+ if (err < 0) {
+ dev_err(qcom_pcie->dev, "failed to enable AVDD regulator\n");
+ return err;
+ }
+
+ err = regulator_enable(qcom_pcie->pcie_ext3p3v_supply);
+ if (err < 0) {
+ dev_err(qcom_pcie->dev, "failed to enable pcie_ext3p3v regulator\n");
+ return err;
+ }
+
+	return err;
+}
+
+static int qcom_pcie_parse_dt(struct qcom_pcie *qcom_pcie,
+ struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *elbi_base, *parf_base, *dwc_base;
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
+ int ret, i;
+
+ qcom_pcie->ext_phy_ref_clk = of_property_read_bool(np,
+ "qcom,external-phy-refclk");
+
+ elbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
+ qcom_pcie->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base);
+ if (IS_ERR(qcom_pcie->elbi_base)) {
+ dev_err(&pdev->dev, "Failed to ioremap elbi space\n");
+ return PTR_ERR(qcom_pcie->elbi_base);
+ }
+
+ parf_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
+ qcom_pcie->parf_base = devm_ioremap_resource(&pdev->dev, parf_base);
+ if (IS_ERR(qcom_pcie->parf_base)) {
+ dev_err(&pdev->dev, "Failed to ioremap parf space\n");
+ return PTR_ERR(qcom_pcie->parf_base);
+ }
+
+ dwc_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
+ qcom_pcie->dwc_base = devm_ioremap_resource(&pdev->dev, dwc_base);
+ if (IS_ERR(qcom_pcie->dwc_base)) {
+ dev_err(&pdev->dev, "Failed to ioremap dwc_base space\n");
+ return PTR_ERR(qcom_pcie->dwc_base);
+ }
+
+ if (of_pci_range_parser_init(&parser, np)) {
+ dev_err(&pdev->dev, "missing ranges property\n");
+ return -EINVAL;
+ }
+
+ /* Get the I/O and memory ranges from DT */
+ for_each_of_pci_range(&parser, &range) {
+ switch (range.pci_space & 0x3) {
+ case 0: /* cfg */
+ of_pci_range_to_resource(&range, np, &qcom_pcie->conf);
+ qcom_pcie->conf.flags = IORESOURCE_MEM;
+ break;
+ case 1: /* io */
+ of_pci_range_to_resource(&range, np, &qcom_pcie->io);
+ break;
+ default: /* mem */
+ of_pci_range_to_resource(&range, np, &qcom_pcie->mem);
+ break;
+ }
+ }
+
+ qcom_pcie->vdd_supply = devm_regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR(qcom_pcie->vdd_supply)) {
+ dev_err(&pdev->dev, "Failed to get vdd supply\n");
+ return PTR_ERR(qcom_pcie->vdd_supply);
+ }
+
+ qcom_pcie->pcie_clk_supply = devm_regulator_get(&pdev->dev, "pcie-clk");
+ if (IS_ERR(qcom_pcie->pcie_clk_supply)) {
+ dev_err(&pdev->dev, "Failed to get pcie clk supply\n");
+ return PTR_ERR(qcom_pcie->pcie_clk_supply);
+ }
+ qcom_pcie->avdd_supply = devm_regulator_get(&pdev->dev, "avdd");
+ if (IS_ERR(qcom_pcie->avdd_supply)) {
+ dev_err(&pdev->dev, "Failed to get avdd supply\n");
+ return PTR_ERR(qcom_pcie->avdd_supply);
+ }
+
+ qcom_pcie->pcie_ext3p3v_supply = devm_regulator_get(&pdev->dev,
+ "ext-3p3v");
+ if (IS_ERR(qcom_pcie->pcie_ext3p3v_supply)) {
+ dev_err(&pdev->dev, "Failed to get pcie_ext3p3v supply\n");
+ return PTR_ERR(qcom_pcie->pcie_ext3p3v_supply);
+ }
+
+ qcom_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+ if (!gpio_is_valid(qcom_pcie->reset_gpio)) {
+ dev_err(&pdev->dev, "pcie reset gpio is not valid\n");
+ return -EINVAL;
+ }
+
+ ret = devm_gpio_request_one(&pdev->dev, qcom_pcie->reset_gpio,
+ GPIOF_DIR_OUT, "pcie_reset");
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request pcie reset gpio\n");
+ return ret;
+ }
+
+ qcom_pcie->iface_clk = devm_clk_get(&pdev->dev, "iface");
+ if (IS_ERR(qcom_pcie->iface_clk)) {
+ dev_err(&pdev->dev, "Failed to get pcie iface clock\n");
+ return PTR_ERR(qcom_pcie->iface_clk);
+ }
+
+ qcom_pcie->phy_clk = devm_clk_get(&pdev->dev, "phy");
+ if (IS_ERR(qcom_pcie->phy_clk)) {
+ dev_err(&pdev->dev, "Failed to get pcie phy clock\n");
+ return PTR_ERR(qcom_pcie->phy_clk);
+ }
+
+ qcom_pcie->bus_clk = devm_clk_get(&pdev->dev, "core");
+ if (IS_ERR(qcom_pcie->bus_clk)) {
+ dev_err(&pdev->dev, "Failed to get pcie core clock\n");
+ return PTR_ERR(qcom_pcie->bus_clk);
+ }
+
+ qcom_pcie->axi_reset = devm_reset_control_get(&pdev->dev, "axi");
+ if (IS_ERR(qcom_pcie->axi_reset)) {
+ dev_err(&pdev->dev, "Failed to get axi reset\n");
+ return PTR_ERR(qcom_pcie->axi_reset);
+ }
+
+ qcom_pcie->ahb_reset = devm_reset_control_get(&pdev->dev, "ahb");
+ if (IS_ERR(qcom_pcie->ahb_reset)) {
+ dev_err(&pdev->dev, "Failed to get ahb reset\n");
+ return PTR_ERR(qcom_pcie->ahb_reset);
+ }
+
+ qcom_pcie->por_reset = devm_reset_control_get(&pdev->dev, "por");
+ if (IS_ERR(qcom_pcie->por_reset)) {
+ dev_err(&pdev->dev, "Failed to get por reset\n");
+ return PTR_ERR(qcom_pcie->por_reset);
+ }
+
+ qcom_pcie->pci_reset = devm_reset_control_get(&pdev->dev, "pci");
+ if (IS_ERR(qcom_pcie->pci_reset)) {
+ dev_err(&pdev->dev, "Failed to get pci reset\n");
+ return PTR_ERR(qcom_pcie->pci_reset);
+ }
+
+ qcom_pcie->phy_reset = devm_reset_control_get(&pdev->dev, "phy");
+ if (IS_ERR(qcom_pcie->phy_reset)) {
+ dev_err(&pdev->dev, "Failed to get phy reset\n");
+ return PTR_ERR(qcom_pcie->phy_reset);
+ }
+
+ for (i = 0; i < 4; i++) {
+ qcom_pcie->irq_int[i] = platform_get_irq(pdev, i+1);
+ if (qcom_pcie->irq_int[i] < 0) {
+ dev_err(&pdev->dev, "failed to get irq resource\n");
+ return qcom_pcie->irq_int[i];
+ }
+ }
+
+ return 0;
+}
+
+static int qcom_pcie_probe(struct platform_device *pdev)
+{
+ unsigned long flags;
+ struct qcom_pcie *qcom_pcie;
+ struct hw_pci *hw;
+ int ret;
+ u32 val;
+
+ qcom_pcie = devm_kzalloc(&pdev->dev, sizeof(*qcom_pcie), GFP_KERNEL);
+ if (!qcom_pcie) {
+ dev_err(&pdev->dev, "no memory for qcom_pcie\n");
+ return -ENOMEM;
+ }
+ qcom_pcie->dev = &pdev->dev;
+
+ ret = qcom_pcie_parse_dt(qcom_pcie, pdev);
+	if (ret)
+ return ret;
+
+ qcom_pcie->cfg_base = devm_ioremap_resource(&pdev->dev,
+ &qcom_pcie->conf);
+ if (IS_ERR(qcom_pcie->cfg_base)) {
+ dev_err(&pdev->dev, "Failed to ioremap PCIe cfg space\n");
+ return PTR_ERR(qcom_pcie->cfg_base);
+ }
+
+ gpio_set_value(qcom_pcie->reset_gpio, 0);
+ usleep_range(10000, 15000);
+
+	/* enable power */
+	ret = qcom_pcie_vreg_on(qcom_pcie);
+	if (ret)
+		return ret;
+ /* assert PCIe PARF reset while powering the core */
+ reset_control_assert(qcom_pcie->ahb_reset);
+
+ /* enable clocks */
+ ret = clk_prepare_enable(qcom_pcie->iface_clk);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(qcom_pcie->phy_clk);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(qcom_pcie->bus_clk);
+ if (ret)
+ return ret;
+
+ /*
+ * de-assert PCIe PARF reset;
+ * wait 1us before accessing PARF registers
+ */
+ reset_control_deassert(qcom_pcie->ahb_reset);
+ udelay(1);
+
+ /* enable PCIe clocks and resets */
+ msm_pcie_write_mask(qcom_pcie->parf_base + PCIE20_PARF_PHY_CTRL,
+ BIT(0), 0);
+
+ /* Set Tx Termination Offset */
+ val = qcom_parf_readl_relaxed(qcom_pcie, PCIE20_PARF_PHY_CTRL);
+ val |= PCIE20_PARF_PHY_CTRL_PHY_TX0_TERM_OFFST(7);
+ qcom_parf_writel_relaxed(qcom_pcie, val, PCIE20_PARF_PHY_CTRL);
+
+ /* PARF programming */
+ qcom_parf_writel_relaxed(qcom_pcie,
+ PCIE20_PARF_PCS_DEEMPH_TX_DEEMPH_GEN1(0x18) |
+ PCIE20_PARF_PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(0x18) |
+ PCIE20_PARF_PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(0x22),
+ PCIE20_PARF_PCS_DEEMPH);
+ qcom_parf_writel_relaxed(qcom_pcie,
+ PCIE20_PARF_PCS_SWING_TX_SWING_FULL(0x78) |
+ PCIE20_PARF_PCS_SWING_TX_SWING_LOW(0x78),
+ PCIE20_PARF_PCS_SWING);
+ qcom_parf_writel_relaxed(qcom_pcie, (4<<24), PCIE20_PARF_CONFIG_BITS);
+ /* ensure that hardware registers the PARF configuration */
+ wmb();
+
+ /* enable reference clock */
+ msm_pcie_write_mask(qcom_pcie->parf_base + PCIE20_PARF_PHY_REFCLK,
+ qcom_pcie->ext_phy_ref_clk ? 0 : BIT(12),
+ BIT(16));
+
+ /* ensure that access is enabled before proceeding */
+ wmb();
+
+	/* de-assert PCIe PHY, Core, POR and AXI clk domain resets */
+ reset_control_deassert(qcom_pcie->phy_reset);
+ reset_control_deassert(qcom_pcie->pci_reset);
+ reset_control_deassert(qcom_pcie->por_reset);
+ reset_control_deassert(qcom_pcie->axi_reset);
+
+	/* wait for clock acquisition */
+ usleep_range(10000, 15000);
+
+ /* de-assert PCIe reset link to bring EP out of reset */
+ gpio_set_value(qcom_pcie->reset_gpio, 1);
+ usleep_range(10000, 15000);
+
+ /* enable link training */
+ val = qcom_elbi_readl_relaxed(qcom_pcie, PCIE20_ELBI_SYS_CTRL);
+ val |= PCIE20_ELBI_SYS_CTRL_LTSSM_EN;
+ qcom_elbi_writel_relaxed(qcom_pcie, val, PCIE20_ELBI_SYS_CTRL);
+ wmb();
+
+	/* poll for the link to come up, for up to 100 ms */
+ ret = readl_poll_timeout(
+ (qcom_pcie->dwc_base + PCIE20_CAP_LINKCTRLSTATUS),
+ val, (val & BIT(29)), 10000, 100000);
+
+ dev_info(&pdev->dev, "link initialized %d\n", ret);
+
+ qcom_pcie_config_controller(qcom_pcie);
+
+ platform_set_drvdata(pdev, qcom_pcie);
+
+ spin_lock_irqsave(&qcom_hw_pci_lock, flags);
+ qcom_hw_pci[nr_controllers].private_data = (void **)&qcom_pcie;
+ hw = &qcom_hw_pci[nr_controllers];
+
+#ifdef CONFIG_PCI_MSI
+ hw->msi_ctrl = &qcom_pcie->msi.chip;
+#endif
+ nr_controllers++;
+ spin_unlock_irqrestore(&qcom_hw_pci_lock, flags);
+
+ pci_common_init_dev(&pdev->dev, hw);
+
+ msm_pcie_msi_init(qcom_pcie, pdev);
+ return 0;
+}
+
+static int qcom_pcie_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id qcom_pcie_match[] = {
+ { .compatible = "qcom,pcie-ipq8064", },
+ {}
+};
+
+static struct platform_driver qcom_pcie_driver = {
+ .probe = qcom_pcie_probe,
+ .remove = qcom_pcie_remove,
+ .driver = {
+ .name = "qcom_pcie",
+ .owner = THIS_MODULE,
+ .of_match_table = qcom_pcie_match,
+ },
+};
+
+static int qcom_pcie_init(void)
+{
+ return platform_driver_register(&qcom_pcie_driver);
+}
+subsys_initcall_sync(qcom_pcie_init);
+
+/* The RC does not report the correct class; set it to PCI_CLASS_BRIDGE_PCI */
+static void msm_pcie_fixup_early(struct pci_dev *dev)
+{
+ if (dev->hdr_type == 1)
+ dev->class = (dev->class & 0xff) | (PCI_CLASS_BRIDGE_PCI << 8);
+}
+DECLARE_PCI_FIXUP_EARLY(PCIE_VENDOR_ID_RCP, PCIE_DEVICE_ID_RCP, msm_pcie_fixup_early);
diff --git a/drivers/power/avs/Kconfig b/drivers/power/avs/Kconfig
index a67eeace6a89..67e77e4e86a9 100644
--- a/drivers/power/avs/Kconfig
+++ b/drivers/power/avs/Kconfig
@@ -11,6 +11,20 @@ menuconfig POWER_AVS
Say Y here to enable Adaptive Voltage Scaling class support.
+config QCOM_CPR
+ tristate "QCOM Core Power Reduction (CPR) support"
+ select PM_OPP
+ help
+ Say Y here to enable support for the CPR hardware found on Qualcomm
+ SoCs like MSM8916.
+
+	  This driver populates the CPU OPP tables and adjusts them based on
+	  feedback from the CPR hardware. If you want to do CPU frequency
+	  scaling, say Y here.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called qcom-cpr.
+
config ROCKCHIP_IODOMAIN
tristate "Rockchip IO domain support"
depends on POWER_AVS && ARCH_ROCKCHIP && OF
diff --git a/drivers/power/avs/Makefile b/drivers/power/avs/Makefile
index ba4c7bc69225..88f4d5d49cba 100644
--- a/drivers/power/avs/Makefile
+++ b/drivers/power/avs/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_POWER_AVS_OMAP) += smartreflex.o
obj-$(CONFIG_ROCKCHIP_IODOMAIN) += rockchip-io-domain.o
+obj-$(CONFIG_QCOM_CPR) += qcom-cpr.o
diff --git a/drivers/power/avs/qcom-cpr.c b/drivers/power/avs/qcom-cpr.c
new file mode 100644
index 000000000000..3daa2153d742
--- /dev/null
+++ b/drivers/power/avs/qcom-cpr.c
@@ -0,0 +1,1988 @@
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/interrupt.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/cpufreq.h>
+#include <linux/regulator/qcom_smd-regulator.h>
+
+/* Register Offsets for RB-CPR and Bit Definitions */
+
+/* RBCPR Version Register */
+#define REG_RBCPR_VERSION 0
+#define RBCPR_VER_2 0x02
+
+/* RBCPR Gate Count and Target Registers */
+#define REG_RBCPR_GCNT_TARGET(n)	(0x60 + 4 * (n))
+
+#define RBCPR_GCNT_TARGET_TARGET_SHIFT 0
+#define RBCPR_GCNT_TARGET_TARGET_MASK GENMASK(11, 0)
+#define RBCPR_GCNT_TARGET_GCNT_SHIFT 12
+#define RBCPR_GCNT_TARGET_GCNT_MASK GENMASK(9, 0)
+
+/* RBCPR Timer Control */
+#define REG_RBCPR_TIMER_INTERVAL 0x44
+#define REG_RBIF_TIMER_ADJUST 0x4c
+
+#define RBIF_TIMER_ADJ_CONS_UP_MASK GENMASK(3, 0)
+#define RBIF_TIMER_ADJ_CONS_UP_SHIFT 0
+#define RBIF_TIMER_ADJ_CONS_DOWN_MASK GENMASK(3, 0)
+#define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT 4
+#define RBIF_TIMER_ADJ_CLAMP_INT_MASK GENMASK(7, 0)
+#define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT 8
+
+/* RBCPR Config Register */
+#define REG_RBIF_LIMIT 0x48
+#define RBIF_LIMIT_CEILING_MASK GENMASK(5, 0)
+#define RBIF_LIMIT_CEILING_SHIFT 6
+#define RBIF_LIMIT_FLOOR_BITS 6
+#define RBIF_LIMIT_FLOOR_MASK GENMASK(5, 0)
+
+#define RBIF_LIMIT_CEILING_DEFAULT RBIF_LIMIT_CEILING_MASK
+#define RBIF_LIMIT_FLOOR_DEFAULT 0
+
+#define REG_RBIF_SW_VLEVEL 0x94
+#define RBIF_SW_VLEVEL_DEFAULT 0x20
+
+#define REG_RBCPR_STEP_QUOT 0x80
+#define RBCPR_STEP_QUOT_STEPQUOT_MASK GENMASK(7, 0)
+#define RBCPR_STEP_QUOT_IDLE_CLK_MASK GENMASK(3, 0)
+#define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT 8
+
+/* RBCPR Control Register */
+#define REG_RBCPR_CTL 0x90
+
+#define RBCPR_CTL_LOOP_EN BIT(0)
+#define RBCPR_CTL_TIMER_EN BIT(3)
+#define RBCPR_CTL_SW_AUTO_CONT_ACK_EN BIT(5)
+#define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN BIT(6)
+#define RBCPR_CTL_COUNT_MODE BIT(10)
+#define RBCPR_CTL_UP_THRESHOLD_MASK GENMASK(3, 0)
+#define RBCPR_CTL_UP_THRESHOLD_SHIFT 24
+#define RBCPR_CTL_DN_THRESHOLD_MASK GENMASK(3, 0)
+#define RBCPR_CTL_DN_THRESHOLD_SHIFT 28
+
+/* RBCPR Ack/Nack Response */
+#define REG_RBIF_CONT_ACK_CMD 0x98
+#define REG_RBIF_CONT_NACK_CMD 0x9c
+
+/* RBCPR Result status Register */
+#define REG_RBCPR_RESULT_0 0xa0
+
+#define RBCPR_RESULT0_BUSY_SHIFT 19
+#define RBCPR_RESULT0_BUSY_MASK BIT(RBCPR_RESULT0_BUSY_SHIFT)
+#define RBCPR_RESULT0_ERROR_LT0_SHIFT 18
+#define RBCPR_RESULT0_ERROR_SHIFT 6
+#define RBCPR_RESULT0_ERROR_MASK GENMASK(11, 0)
+#define RBCPR_RESULT0_ERROR_STEPS_SHIFT 2
+#define RBCPR_RESULT0_ERROR_STEPS_MASK GENMASK(3, 0)
+#define RBCPR_RESULT0_STEP_UP_SHIFT 1
+
+/* RBCPR Interrupt Control Register */
+#define REG_RBIF_IRQ_EN(n)		(0x100 + 4 * (n))
+#define REG_RBIF_IRQ_CLEAR 0x110
+#define REG_RBIF_IRQ_STATUS 0x114
+
+#define CPR_INT_DONE BIT(0)
+#define CPR_INT_MIN BIT(1)
+#define CPR_INT_DOWN BIT(2)
+#define CPR_INT_MID BIT(3)
+#define CPR_INT_UP BIT(4)
+#define CPR_INT_MAX BIT(5)
+#define CPR_INT_CLAMP BIT(6)
+#define CPR_INT_ALL (CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \
+ CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP)
+#define CPR_INT_DEFAULT (CPR_INT_UP | CPR_INT_DOWN)
+
+#define CPR_NUM_RING_OSC 8
+
+/* RBCPR Clock Control Register */
+#define RBCPR_CLK_SEL_MASK BIT(-1)
+#define RBCPR_CLK_SEL_19P2_MHZ 0
+#define RBCPR_CLK_SEL_AHB_CLK BIT(0)
+
+/* CPR eFuse parameters */
+#define CPR_FUSE_TARGET_QUOT_BITS_MASK GENMASK(11, 0)
+
+#define CPR_FUSE_MIN_QUOT_DIFF 50
+
+#define SPEED_BIN_NONE UINT_MAX
+
+#define FUSE_REVISION_UNKNOWN (-1)
+#define FUSE_MAP_NO_MATCH (-1)
+#define FUSE_PARAM_MATCH_ANY 0xffffffff
+
+/**
+ * enum vdd_mx_vmin_method - Method to determine vmin for vdd-mx
+ * @VDD_MX_VMIN_APC: Use APC voltage
+ * @VDD_MX_VMIN_APC_CORNER_CEILING: Use PVS corner ceiling voltage
+ * @VDD_MX_VMIN_APC_SLOW_CORNER_CEILING: Use slow speed corner ceiling
+ * @VDD_MX_VMIN_MX_VMAX: Use specified vdd-mx-vmax voltage
+ * @VDD_MX_VMIN_APC_CORNER_MAP: Use APC corner mapped MX voltage
+ */
+enum vdd_mx_vmin_method {
+ VDD_MX_VMIN_APC,
+ VDD_MX_VMIN_APC_CORNER_CEILING,
+ VDD_MX_VMIN_APC_SLOW_CORNER_CEILING,
+ VDD_MX_VMIN_MX_VMAX,
+ VDD_MX_VMIN_APC_CORNER_MAP,
+};
+/* TODO: Trim these above to used values */
+
+enum voltage_change_dir {
+ NO_CHANGE,
+ DOWN,
+ UP,
+};
+
+struct qfprom_offset {
+ u16 offset;
+ u8 width;
+ u8 shift;
+};
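
Each qfprom_offset locates one fuse field: an offset into the fuse block, the
field width in bits, and the shift within the word it is read from. The read
path itself is outside this excerpt; a hedged sketch of how such a descriptor
would typically be decoded, assuming the offset is a byte offset into a
memory-mapped fuse region:

static u32 qfprom_read_field(void __iomem *qfprom,
			     const struct qfprom_offset *f)
{
	u32 word = readl_relaxed(qfprom + f->offset);

	return (word >> f->shift) & GENMASK(f->width - 1, 0);
}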
+
+struct cpr_fuse {
+ struct qfprom_offset ring_osc;
+ struct qfprom_offset init_voltage;
+ struct qfprom_offset quotient;
+ struct qfprom_offset quotient_offset;
+};
+
+struct fuse_corner_data {
+ int ref_uV;
+ int max_uV;
+ int min_uV;
+ int max_quot_scale;
+ int quot_offset;
+ int quot_scale;
+ int max_volt_scale;
+ int vdd_mx_req;
+};
+
+struct cpr_fuses {
+ struct qfprom_offset redundant;
+ u8 redundant_value;
+ int init_voltage_step;
+ struct fuse_corner_data *fuse_corner_data;
+ struct cpr_fuse *cpr_fuse;
+ struct qfprom_offset *disable;
+};
+
+struct pvs_bin {
+ int *uV;
+};
+
+struct pvs_fuses {
+ struct qfprom_offset redundant;
+ u8 redundant_value;
+ struct qfprom_offset *pvs_fuse;
+ struct pvs_bin *pvs_bins;
+};
+
+struct corner_data {
+ unsigned int fuse_corner;
+ unsigned long freq;
+};
+
+struct freq_plan {
+ u32 speed_bin;
+ u32 pvs_version;
+ const struct corner_data **plan;
+};
+
+struct fuse_conditional_min_volt {
+ struct qfprom_offset redundant;
+ u8 expected;
+ int min_uV;
+};
+
+struct fuse_uplift_wa {
+ struct qfprom_offset redundant;
+ u8 expected;
+ int uV;
+ int *quot;
+ int max_uV;
+ int speed_bin;
+};
+
+struct corner_override {
+ u32 speed_bin;
+ u32 pvs_version;
+ int *max_uV;
+ int *min_uV;
+};
+
+struct corner_adjustment {
+ u32 speed_bin;
+ u32 pvs_version;
+ u32 cpr_rev;
+ u8 *ring_osc_idx;
+ int *fuse_quot;
+ int *fuse_quot_diff;
+ int *fuse_quot_min;
+ int *fuse_quot_offset;
+ int *fuse_init_uV;
+ int *quot;
+ int *init_uV;
+ bool disable_closed_loop;
+};
+
+struct cpr_desc {
+ unsigned int num_fuse_corners;
+ unsigned int num_corners;
+ enum vdd_mx_vmin_method vdd_mx_vmin_method;
+ int vdd_mx_vmax;
+ int min_diff_quot;
+ int *step_quot;
+ struct cpr_fuses cpr_fuses;
+ struct qfprom_offset fuse_revision;
+ struct qfprom_offset speed_bin;
+ struct qfprom_offset pvs_version;
+ struct corner_data *corner_data;
+ struct freq_plan *freq_plans;
+ size_t num_freq_plans;
+ struct pvs_fuses *pvs_fuses;
+ struct fuse_conditional_min_volt *min_volt_fuse;
+ struct fuse_uplift_wa *uplift_wa;
+ struct corner_override *corner_overrides;
+ size_t num_corner_overrides;
+ struct corner_adjustment *adjustments;
+ size_t num_adjustments;
+ bool reduce_to_fuse_uV;
+ bool reduce_to_corner_uV;
+};
+
+struct acc_desc {
+ unsigned int enable_reg;
+ u32 enable_mask;
+
+ struct reg_sequence *settings;
+ struct reg_sequence *override_settings;
+ int num_regs_per_fuse;
+
+ struct qfprom_offset override;
+ u8 override_value;
+};
+
+struct fuse_corner {
+ int min_uV;
+ int max_uV;
+ int uV;
+ int quot;
+ int step_quot;
+ const struct reg_sequence *accs;
+ int num_accs;
+ int vdd_mx_req;
+ unsigned long max_freq;
+ u8 ring_osc_idx;
+};
+
+struct corner {
+ int min_uV;
+ int max_uV;
+ int uV;
+ int last_uV;
+ int quot_adjust;
+ u32 save_ctl;
+ u32 save_irq;
+ unsigned long freq;
+ struct fuse_corner *fuse_corner;
+};
+
+struct cpr_drv {
+ unsigned int num_fuse_corners;
+ unsigned int num_corners;
+
+ unsigned int nb_count;
+ struct notifier_block cpufreq_nb;
+ bool switching_opp;
+ struct notifier_block reg_nb;
+
+ unsigned int ref_clk_khz;
+ unsigned int timer_delay_us;
+ unsigned int timer_cons_up;
+ unsigned int timer_cons_down;
+ unsigned int up_threshold;
+ unsigned int down_threshold;
+ unsigned int idle_clocks;
+ unsigned int gcnt_us;
+ unsigned int vdd_apc_step_up_limit;
+ unsigned int vdd_apc_step_down_limit;
+ unsigned int clamp_timer_interval;
+ int ceiling_max;
+ enum vdd_mx_vmin_method vdd_mx_vmin_method;
+ int vdd_mx_vmax;
+
+ struct mutex lock;
+ void __iomem *base;
+ struct corner *corner;
+ struct regulator *vdd_apc;
+ struct regulator *vdd_mx;
+ struct clk *cpu_clk;
+ struct device *cpu_dev;
+ struct regmap *tcsr;
+ bool loop_disabled;
+ bool suspended;
+ u32 gcnt;
+ unsigned long flags;
+#define FLAGS_IGNORE_1ST_IRQ_STATUS BIT(0)
+
+ struct fuse_corner *fuse_corners;
+ struct corner *corners;
+};
+
+static bool cpr_is_allowed(struct cpr_drv *drv)
+{
+ if (drv->loop_disabled) /* || disabled in software */
+ return false;
+ else
+ return true;
+}
+
+static void cpr_write(struct cpr_drv *drv, u32 offset, u32 value)
+{
+ writel_relaxed(value, drv->base + offset);
+}
+
+static u32 cpr_read(struct cpr_drv *drv, u32 offset)
+{
+ return readl_relaxed(drv->base + offset);
+}
+
+static void
+cpr_masked_write(struct cpr_drv *drv, u32 offset, u32 mask, u32 value)
+{
+ u32 val;
+
+ val = readl_relaxed(drv->base + offset);
+ val &= ~mask;
+ val |= value & mask;
+ writel_relaxed(val, drv->base + offset);
+}
+
+static void cpr_irq_clr(struct cpr_drv *drv)
+{
+ cpr_write(drv, REG_RBIF_IRQ_CLEAR, CPR_INT_ALL);
+}
+
+static void cpr_irq_clr_nack(struct cpr_drv *drv)
+{
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
+}
+
+static void cpr_irq_clr_ack(struct cpr_drv *drv)
+{
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
+}
+
+static void cpr_irq_set(struct cpr_drv *drv, u32 int_bits)
+{
+ cpr_write(drv, REG_RBIF_IRQ_EN(0), int_bits);
+}
+
+static void cpr_ctl_modify(struct cpr_drv *drv, u32 mask, u32 value)
+{
+ cpr_masked_write(drv, REG_RBCPR_CTL, mask, value);
+}
+
+static void cpr_ctl_enable(struct cpr_drv *drv, struct corner *corner)
+{
+ u32 val, mask;
+
+ if (drv->suspended)
+ return;
+
+ /* Program Consecutive Up & Down */
+ val = drv->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
+ val |= drv->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
+ mask = RBIF_TIMER_ADJ_CONS_UP_MASK | RBIF_TIMER_ADJ_CONS_DOWN_MASK;
+ cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST, mask, val);
+ cpr_masked_write(drv, REG_RBCPR_CTL,
+ RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+ RBCPR_CTL_SW_AUTO_CONT_ACK_EN,
+ corner->save_ctl);
+ cpr_irq_set(drv, corner->save_irq);
+
+ if (cpr_is_allowed(drv) /*&& drv->vreg_enabled */ &&
+ corner->max_uV > corner->min_uV)
+ val = RBCPR_CTL_LOOP_EN;
+ else
+ val = 0;
+ cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, val);
+}
+
+static void cpr_ctl_disable(struct cpr_drv *drv)
+{
+ if (drv->suspended)
+ return;
+
+ cpr_irq_set(drv, 0);
+ cpr_ctl_modify(drv, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN |
+ RBCPR_CTL_SW_AUTO_CONT_ACK_EN, 0);
+ cpr_masked_write(drv, REG_RBIF_TIMER_ADJUST,
+ RBIF_TIMER_ADJ_CONS_UP_MASK |
+ RBIF_TIMER_ADJ_CONS_DOWN_MASK, 0);
+ cpr_irq_clr(drv);
+ cpr_write(drv, REG_RBIF_CONT_ACK_CMD, 1);
+ cpr_write(drv, REG_RBIF_CONT_NACK_CMD, 1);
+ cpr_ctl_modify(drv, RBCPR_CTL_LOOP_EN, 0);
+}
+
+static bool cpr_ctl_is_enabled(struct cpr_drv *drv)
+{
+ u32 reg_val;
+
+ reg_val = cpr_read(drv, REG_RBCPR_CTL);
+ return reg_val & RBCPR_CTL_LOOP_EN;
+}
+
+static bool cpr_ctl_is_busy(struct cpr_drv *drv)
+{
+ u32 reg_val;
+
+ reg_val = cpr_read(drv, REG_RBCPR_RESULT_0);
+ return reg_val & RBCPR_RESULT0_BUSY_MASK;
+}
+
+static void cpr_corner_save(struct cpr_drv *drv, struct corner *corner)
+{
+ corner->save_ctl = cpr_read(drv, REG_RBCPR_CTL);
+ corner->save_irq = cpr_read(drv, REG_RBIF_IRQ_EN(0));
+}
+
+static void cpr_corner_restore(struct cpr_drv *drv, struct corner *corner)
+{
+ u32 gcnt, ctl, irq, ro_sel, step_quot;
+ struct fuse_corner *fuse = corner->fuse_corner;
+ int i;
+
+ ro_sel = fuse->ring_osc_idx;
+ gcnt = drv->gcnt;
+ gcnt |= fuse->quot - corner->quot_adjust;
+
+ /* Program the step quotient and idle clocks */
+ step_quot = drv->idle_clocks << RBCPR_STEP_QUOT_IDLE_CLK_SHIFT;
+ step_quot |= fuse->step_quot;
+ cpr_write(drv, REG_RBCPR_STEP_QUOT, step_quot);
+
+ /* Clear the target quotient value and gate count of all ROs */
+ for (i = 0; i < CPR_NUM_RING_OSC; i++)
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);
+
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(ro_sel), gcnt);
+ ctl = corner->save_ctl;
+ cpr_write(drv, REG_RBCPR_CTL, ctl);
+ irq = corner->save_irq;
+ cpr_irq_set(drv, irq);
+ pr_debug("gcnt = 0x%08x, ctl = 0x%08x, irq = 0x%08x\n", gcnt, ctl, irq);
+}
+
+static void cpr_corner_switch(struct cpr_drv *drv, struct corner *corner)
+{
+ if (drv->corner == corner)
+ return;
+
+ cpr_corner_restore(drv, corner);
+}
+
+static int
+cpr_mx_get(struct cpr_drv *drv, struct fuse_corner *fuse, int apc_volt)
+{
+ int vdd_mx;
+ struct fuse_corner *highest_fuse;
+
+ highest_fuse = &drv->fuse_corners[drv->num_fuse_corners - 1];
+
+ switch (drv->vdd_mx_vmin_method) {
+ case VDD_MX_VMIN_APC:
+ vdd_mx = apc_volt;
+ break;
+ case VDD_MX_VMIN_APC_CORNER_CEILING:
+ vdd_mx = fuse->max_uV;
+ break;
+ case VDD_MX_VMIN_APC_SLOW_CORNER_CEILING:
+ vdd_mx = highest_fuse->max_uV;
+ break;
+ case VDD_MX_VMIN_MX_VMAX:
+ vdd_mx = drv->vdd_mx_vmax;
+ break;
+ case VDD_MX_VMIN_APC_CORNER_MAP:
+ vdd_mx = fuse->vdd_mx_req;
+ break;
+ default:
+ BUG();
+ }
+
+ return vdd_mx;
+}
+
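+/*
+ * Walk the memory accelerator (ACC) settings for every fuse corner between
+ * the previous corner and the new one, in the direction of the switch, and
+ * program each corner's register sequence into the TCSR block.
+ */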
+static void cpr_set_acc(struct regmap *tcsr, struct fuse_corner *f,
+ struct fuse_corner *end)
+{
+ if (f < end) {
+ for (f += 1; f <= end; f++)
+ regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
+ } else {
+ for (f -= 1; f >= end; f--)
+ regmap_multi_reg_write(tcsr, f->accs, f->num_accs);
+ }
+}
+
+static int cpr_pre_voltage(struct cpr_drv *drv,
+ struct fuse_corner *fuse_corner,
+ enum voltage_change_dir dir, int vdd_mx_vmin)
+{
+ int ret = 0;
+ struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;
+
+ if (drv->tcsr && dir == DOWN)
+ cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);
+
+ if (vdd_mx_vmin && dir == UP)
+ ret = qcom_rpm_set_corner(drv->vdd_mx, vdd_mx_vmin);
+
+ return ret;
+}
+
+static int cpr_post_voltage(struct cpr_drv *drv,
+ struct fuse_corner *fuse_corner,
+ enum voltage_change_dir dir, int vdd_mx_vmin)
+{
+ int ret = 0;
+ struct fuse_corner *prev_fuse_corner = drv->corner->fuse_corner;
+
+ if (drv->tcsr && dir == UP)
+ cpr_set_acc(drv->tcsr, prev_fuse_corner, fuse_corner);
+
+ if (vdd_mx_vmin && dir == DOWN)
+ ret = qcom_rpm_set_corner(drv->vdd_mx, vdd_mx_vmin);
+
+ return ret;
+}
+
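+/*
+ * Regulator notifier: once the CPU supply has actually moved to a new
+ * voltage, re-arm the controller for that voltage, restore the default
+ * interrupts and record the value as the corner's last known voltage.
+ */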
+static int cpr_regulator_notifier(struct notifier_block *nb,
+ unsigned long event, void *d)
+{
+ struct cpr_drv *drv = container_of(nb, struct cpr_drv, reg_nb);
+ u32 val, mask;
+ int last_uV, new_uV;
+
+ switch (event) {
+ case REGULATOR_EVENT_VOLTAGE_CHANGE:
+ new_uV = (int)(uintptr_t)d;
+ break;
+ default:
+ return 0;
+ }
+
+ mutex_lock(&drv->lock);
+
+ last_uV = drv->corner->last_uV;
+
+ if (drv->switching_opp) {
+ goto unlock;
+ } else if (last_uV < new_uV) {
+ /* Disable auto nack down */
+ mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+ val = 0;
+ } else if (last_uV > new_uV) {
+ /* Restore default threshold for UP */
+ mask = RBCPR_CTL_UP_THRESHOLD_MASK;
+ mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val = drv->up_threshold;
+ val <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ } else { /* Somehow it's the same? */
+ goto unlock;
+ }
+
+ cpr_ctl_modify(drv, mask, val);
+
+ /* Re-enable default interrupts */
+ cpr_irq_set(drv, CPR_INT_DEFAULT);
+
+ /* Ack */
+ cpr_irq_clr_ack(drv);
+
+ /* Save register values for the corner */
+ cpr_corner_save(drv, drv->corner);
+ drv->corner->last_uV = new_uV;
+unlock:
+ mutex_unlock(&drv->lock);
+
+ return 0;
+}
+
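+/*
+ * Work out the next vdd_apc voltage requested by the hardware: read the
+ * measured error steps from RBCPR_RESULT_0 and move the last voltage up or
+ * down by error_steps * step_uV, clamped to the corner's floor and ceiling.
+ * Returns 0 when the voltage should be left alone.
+ */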
+static int cpr_scale(struct cpr_drv *drv, enum voltage_change_dir dir)
+{
+ u32 val, error_steps, reg_mask;
+ int last_uV, new_uV, step_uV;
+ struct corner *corner;
+ struct fuse_corner *fuse;
+
+ //step_uV = regulator_get_linear_step(drv->vdd_apc);
+ step_uV = 12500; /*TODO: Get step volt here */
+ corner = drv->corner;
+ fuse = corner->fuse_corner;
+
+ val = cpr_read(drv, REG_RBCPR_RESULT_0);
+
+ error_steps = val >> RBCPR_RESULT0_ERROR_STEPS_SHIFT;
+ error_steps &= RBCPR_RESULT0_ERROR_STEPS_MASK;
+ last_uV = corner->last_uV;
+
+ if (dir == UP) {
+ if (drv->clamp_timer_interval &&
+ error_steps < drv->up_threshold) {
+ /*
+ * Handle the case where another measurement started
+ * after the interrupt was triggered due to a core
+ * exiting from power collapse.
+ */
+ error_steps = max(drv->up_threshold,
+ drv->vdd_apc_step_up_limit);
+ }
+
+ if (last_uV >= corner->max_uV) {
+ cpr_irq_clr_nack(drv);
+
+ /* Maximize the UP threshold */
+ reg_mask = RBCPR_CTL_UP_THRESHOLD_MASK;
+ reg_mask <<= RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val = reg_mask;
+ cpr_ctl_modify(drv, reg_mask, val);
+
+ /* Disable UP interrupt */
+ cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_UP);
+
+ return 0;
+ }
+
+ if (error_steps > drv->vdd_apc_step_up_limit)
+ error_steps = drv->vdd_apc_step_up_limit;
+
+ /* Calculate new voltage */
+ new_uV = last_uV + error_steps * step_uV;
+ if (new_uV > corner->max_uV)
+ new_uV = corner->max_uV;
+ } else if (dir == DOWN) {
+ if (drv->clamp_timer_interval
+ && error_steps < drv->down_threshold) {
+ /*
+ * Handle the case where another measurement started
+ * after the interrupt was triggered due to a core
+ * exiting from power collapse.
+ */
+ error_steps = max(drv->down_threshold,
+ drv->vdd_apc_step_down_limit);
+ }
+
+ if (last_uV <= corner->min_uV) {
+ cpr_irq_clr_nack(drv);
+
+ /* Enable auto nack down */
+ reg_mask = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+ val = RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN;
+
+ cpr_ctl_modify(drv, reg_mask, val);
+
+ /* Disable DOWN interrupt */
+ cpr_irq_set(drv, CPR_INT_DEFAULT & ~CPR_INT_DOWN);
+
+ return 0;
+ }
+
+ if (error_steps > drv->vdd_apc_step_down_limit)
+ error_steps = drv->vdd_apc_step_down_limit;
+
+ /* Calculate new voltage */
+ new_uV = last_uV - error_steps * step_uV;
+ if (new_uV < corner->min_uV)
+ new_uV = corner->min_uV;
+ }
+
+ return new_uV;
+}
+
+static irqreturn_t cpr_irq_handler(int irq, void *dev)
+{
+ struct cpr_drv *drv = dev;
+ u32 val;
+ int new_uV = 0;
+ struct corner *corner;
+
+ mutex_lock(&drv->lock);
+
+ val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+ if (drv->flags & FLAGS_IGNORE_1ST_IRQ_STATUS)
+ val = cpr_read(drv, REG_RBIF_IRQ_STATUS);
+
+ pr_debug("IRQ_STATUS = %#02x\n", val);
+
+ if (!cpr_ctl_is_enabled(drv)) {
+ pr_debug("CPR is disabled\n");
+ goto unlock;
+ } else if (cpr_ctl_is_busy(drv) && !drv->clamp_timer_interval) {
+ pr_debug("CPR measurement is not ready\n");
+ goto unlock;
+ } else if (!cpr_is_allowed(drv)) {
+ val = cpr_read(drv, REG_RBCPR_CTL);
+ pr_err_ratelimited("Interrupt broken? RBCPR_CTL = %#02x\n",
+ val);
+ goto unlock;
+ }
+
+ /* Following sequence of handling is as per each IRQ's priority */
+ if (val & CPR_INT_UP) {
+ new_uV = cpr_scale(drv, UP);
+ } else if (val & CPR_INT_DOWN) {
+ new_uV = cpr_scale(drv, DOWN);
+ } else if (val & CPR_INT_MIN) {
+ cpr_irq_clr_nack(drv);
+ } else if (val & CPR_INT_MAX) {
+ cpr_irq_clr_nack(drv);
+ } else if (val & CPR_INT_MID) {
+ /* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */
+ pr_debug("IRQ occurred for Mid Flag\n");
+ } else {
+ pr_debug("IRQ occurred for unknown flag (%#08x)\n", val);
+ }
+
+ /* Save register values for the corner */
+ corner = drv->corner;
+ cpr_corner_save(drv, corner);
+unlock:
+ mutex_unlock(&drv->lock);
+
+ if (new_uV)
+ dev_pm_opp_adjust_voltage(drv->cpu_dev, corner->freq, new_uV);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * TODO: Register for hotplug notifier and turn on/off CPR when CPUs are offline
+ */
+static int cpr_enable(struct cpr_drv *drv)
+{
+ int ret;
+
+ /* Enable dependency power before vdd_apc */
+ if (drv->vdd_mx) {
+ ret = regulator_enable(drv->vdd_mx);
+ if (ret)
+ return ret;
+ }
+
+ ret = regulator_enable(drv->vdd_apc);
+ if (ret)
+ return ret;
+
+ mutex_lock(&drv->lock);
+ //drv->vreg_enabled = true;
+ if (cpr_is_allowed(drv) && drv->corner) {
+ cpr_irq_clr(drv);
+ cpr_corner_restore(drv, drv->corner);
+ cpr_ctl_enable(drv, drv->corner);
+ }
+ mutex_unlock(&drv->lock);
+ pr_info("CPR is enabled!\n");
+
+ return 0;
+}
+
+static int cpr_disable(struct cpr_drv *drv)
+{
+ int ret;
+
+ ret = regulator_disable(drv->vdd_apc);
+ if (ret)
+ return ret;
+
+ if (drv->vdd_mx) {
+ ret = regulator_disable(drv->vdd_mx);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&drv->lock);
+ //drv->vreg_enabled = false;
+ if (cpr_is_allowed(drv))
+ cpr_ctl_disable(drv);
+ mutex_unlock(&drv->lock);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cpr_suspend(struct device *dev)
+{
+ struct cpr_drv *drv = platform_get_drvdata(to_platform_device(dev));
+
+ if (cpr_is_allowed(drv)) {
+ mutex_lock(&drv->lock);
+ cpr_ctl_disable(drv);
+ cpr_irq_clr(drv);
+ drv->suspended = true;
+ mutex_unlock(&drv->lock);
+ }
+
+ return 0;
+}
+
+static int cpr_resume(struct device *dev)
+{
+ struct cpr_drv *drv = platform_get_drvdata(to_platform_device(dev));
+
+ if (cpr_is_allowed(drv)) {
+ mutex_lock(&drv->lock);
+ drv->suspended = false;
+ cpr_irq_clr(drv);
+ cpr_ctl_enable(drv, drv->corner);
+ mutex_unlock(&drv->lock);
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cpr_pm_ops, cpr_suspend, cpr_resume);
+
+static int cpr_config(struct cpr_drv *drv)
+{
+ int i;
+ u32 val, gcnt;
+ struct corner *corner;
+
+ /* Disable interrupt and CPR */
+ cpr_write(drv, REG_RBIF_IRQ_EN(0), 0);
+ cpr_write(drv, REG_RBCPR_CTL, 0);
+
+ /* Program the default HW Ceiling, Floor and vlevel */
+ val = RBIF_LIMIT_CEILING_DEFAULT << RBIF_LIMIT_CEILING_SHIFT;
+ val |= RBIF_LIMIT_FLOOR_DEFAULT;
+ cpr_write(drv, REG_RBIF_LIMIT, val);
+ cpr_write(drv, REG_RBIF_SW_VLEVEL, RBIF_SW_VLEVEL_DEFAULT);
+
+ /* Clear the target quotient value and gate count of all ROs */
+ for (i = 0; i < CPR_NUM_RING_OSC; i++)
+ cpr_write(drv, REG_RBCPR_GCNT_TARGET(i), 0);
+
+ /* Init and save gcnt */
+ gcnt = (drv->ref_clk_khz * drv->gcnt_us) / 1000;
+ gcnt = gcnt & RBCPR_GCNT_TARGET_GCNT_MASK;
+ gcnt <<= RBCPR_GCNT_TARGET_GCNT_SHIFT;
+ drv->gcnt = gcnt;
+
+ /* Program the delay count for the timer */
+ val = (drv->ref_clk_khz * drv->timer_delay_us) / 1000;
+ cpr_write(drv, REG_RBCPR_TIMER_INTERVAL, val);
+ pr_debug("Timer count: 0x%0x (for %d us)\n", val, drv->timer_delay_us);
+
+ /* Program Consecutive Up & Down */
+ val = drv->timer_cons_down << RBIF_TIMER_ADJ_CONS_DOWN_SHIFT;
+ val |= drv->timer_cons_up << RBIF_TIMER_ADJ_CONS_UP_SHIFT;
+ val |= drv->clamp_timer_interval << RBIF_TIMER_ADJ_CLAMP_INT_SHIFT;
+ cpr_write(drv, REG_RBIF_TIMER_ADJUST, val);
+
+ /* Program the control register */
+ val = drv->up_threshold << RBCPR_CTL_UP_THRESHOLD_SHIFT;
+ val |= drv->down_threshold << RBCPR_CTL_DN_THRESHOLD_SHIFT;
+ val |= RBCPR_CTL_TIMER_EN | RBCPR_CTL_COUNT_MODE;
+ val |= RBCPR_CTL_SW_AUTO_CONT_ACK_EN;
+ cpr_write(drv, REG_RBCPR_CTL, val);
+
+ for (i = 0; i < drv->num_corners; i++) {
+ corner = &drv->corners[i];
+ corner->save_ctl = val;
+ corner->save_irq = CPR_INT_DEFAULT;
+ }
+
+ cpr_irq_set(drv, CPR_INT_DEFAULT);
+
+ val = cpr_read(drv, REG_RBCPR_VERSION);
+ if (val <= RBCPR_VER_2)
+ drv->flags |= FLAGS_IGNORE_1ST_IRQ_STATUS;
+
+ return 0;
+}
+
+/* Called twice for each CPU in policy, one pre and one post event */
+static int
+cpr_cpufreq_notifier(struct notifier_block *nb, unsigned long event, void *f)
+{
+ struct cpr_drv *drv = container_of(nb, struct cpr_drv, cpufreq_nb);
+ struct cpufreq_freqs *freqs = f;
+ unsigned long old = freqs->old * 1000;
+ unsigned long new = freqs->new * 1000;
+ struct corner *corner, *end;
+ enum voltage_change_dir dir;
+ int ret = 0, new_uV;
+ int vdd_mx_vmin = 0;
+ struct fuse_corner *fuse_corner;
+
+ /* Determine direction */
+ if (old > new)
+ dir = DOWN;
+ else if (old < new)
+ dir = UP;
+ else
+ dir = NO_CHANGE;
+
+ /* Determine new corner we're going to */
+ corner = drv->corners;
+ end = &corner[drv->num_corners - 1];
+ for (; corner <= end; corner++)
+ if (corner->freq == new)
+ break;
+
+ if (corner > end)
+ return -EINVAL;
+
+ fuse_corner = corner->fuse_corner;
+
+ if (cpr_is_allowed(drv)) {
+ new_uV = corner->last_uV;
+ } else {
+ new_uV = corner->uV;
+ }
+
+ if (dir != NO_CHANGE && drv->vdd_mx)
+ vdd_mx_vmin = cpr_mx_get(drv, fuse_corner, new_uV);
+
+ mutex_lock(&drv->lock);
+ if (event == CPUFREQ_PRECHANGE) {
+ if (drv->nb_count++)
+ goto unlock;
+
+ pr_debug("Pre change [%ld] %p @ %lu?\n", corner - drv->corners,
+ corner, corner->freq);
+ if (cpr_is_allowed(drv))
+ cpr_ctl_disable(drv);
+
+ ret = cpr_pre_voltage(drv, fuse_corner, dir, vdd_mx_vmin);
+ if (ret)
+ goto unlock;
+
+ drv->switching_opp = true;
+ }
+
+ if (event == CPUFREQ_POSTCHANGE) {
+ if (--drv->nb_count)
+ goto unlock;
+
+ pr_debug("Post change [%ld] %p @ %lu?\n", corner - drv->corners,
+ corner, corner->freq);
+
+ ret = cpr_post_voltage(drv, fuse_corner, dir, vdd_mx_vmin);
+ if (ret)
+ goto unlock;
+
+ if (cpr_is_allowed(drv) /* && drv->vreg_enabled */) {
+ cpr_irq_clr(drv);
+ cpr_corner_switch(drv, corner);
+ cpr_ctl_enable(drv, corner);
+ }
+
+ drv->corner = corner;
+ drv->switching_opp = false;
+ }
+unlock:
+ mutex_unlock(&drv->lock);
+
+ return ret;
+}
+
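+/*
+ * Read a fuse field that may span byte boundaries. For example, the msm8916
+ * quotient fuse described as { 221, 12, 2 } below is read as two bytes
+ * starting at offset 221, shifted right by 2 and masked down to 12 bits
+ * (illustrative walk-through of the qfprom_offset encoding).
+ */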
+static u32 cpr_read_efuse(void __iomem *prom, const struct qfprom_offset *efuse)
+{
+ u64 buffer = 0;
+ u8 val;
+ int i, num_bytes;
+
+ num_bytes = DIV_ROUND_UP(efuse->width + efuse->shift, BITS_PER_BYTE);
+
+ for (i = 0; i < num_bytes; i++) {
+ val = readb_relaxed(prom + efuse->offset + i);
+ buffer |= val << (i * BITS_PER_BYTE);
+ }
+
+ buffer >>= efuse->shift;
+ buffer &= BIT(efuse->width) - 1;
+
+ return buffer;
+}
+
+static void
+cpr_populate_ring_osc_idx(const struct cpr_fuse *fuses, struct cpr_drv *drv,
+ void __iomem *prom)
+{
+ struct fuse_corner *fuse = drv->fuse_corners;
+ struct fuse_corner *end = fuse + drv->num_fuse_corners;
+
+ for (; fuse < end; fuse++, fuses++)
+ fuse->ring_osc_idx = cpr_read_efuse(prom, &fuses->ring_osc);
+}
+
+
+static const struct corner_adjustment *cpr_find_adjustment(u32 speed_bin,
+ u32 pvs_version, u32 cpr_rev, const struct cpr_desc *desc,
+ const struct cpr_drv *drv)
+{
+ int i, j;
+ u32 val, ro;
+ struct corner_adjustment *a;
+
+ for (i = 0; i < desc->num_adjustments; i++) {
+ a = &desc->adjustments[i];
+
+ if (a->speed_bin != speed_bin &&
+ a->speed_bin != FUSE_PARAM_MATCH_ANY)
+ continue;
+ if (a->pvs_version != pvs_version &&
+ a->pvs_version != FUSE_PARAM_MATCH_ANY)
+ continue;
+ if (a->cpr_rev != cpr_rev &&
+ a->cpr_rev != FUSE_PARAM_MATCH_ANY)
+ continue;
+ for (j = 0; j < drv->num_fuse_corners; j++) {
+ val = a->ring_osc_idx[j];
+ ro = drv->fuse_corners[j].ring_osc_idx;
+ if (val != ro && val != FUSE_PARAM_MATCH_ANY)
+ break;
+ }
+ if (j == drv->num_fuse_corners)
+ return a;
+ }
+
+ return NULL;
+}
+
+static void cpr_fuse_corner_init(struct cpr_drv *drv,
+ const struct cpr_desc *desc,
+ void __iomem *qfprom,
+ const struct cpr_fuse *fuses, u32 speed,
+ const struct corner_adjustment *adjustments,
+ const struct acc_desc *acc_desc)
+{
+ int i;
+ unsigned int idx = 0;
+ unsigned int step_volt;
+ int steps, step_size_uv;
+ const struct fuse_corner_data *fdata;
+ struct fuse_corner *fuse, *end, *prev;
+ const struct qfprom_offset *pvs_efuse;
+ const struct qfprom_offset *init_v_efuse;
+ const struct qfprom_offset *redun;
+ const struct fuse_conditional_min_volt *min_v;
+ const struct fuse_uplift_wa *up;
+ bool do_min_v = false, do_uplift = false;
+ const int *pvs_uV = NULL;
+ const int *adj_uV, *adj_quot, *adj_min, *min_diff_quot;
+ const int *step_quot;
+ int uV, diff;
+ u32 bits, bin;
+ u32 min_uV;
+ u8 expected;
+ const struct reg_sequence *accs;
+
+ redun = &acc_desc->override;
+ expected = acc_desc->override_value;
+ if (redun->width && cpr_read_efuse(qfprom, redun) == expected)
+ accs = acc_desc->override_settings;
+ else
+ accs = acc_desc->settings;
+
+ /* Figure out if we should apply workarounds */
+ min_v = desc->min_volt_fuse;
+ do_min_v = min_v &&
+ cpr_read_efuse(qfprom, &min_v->redundant) == min_v->expected;
+ if (do_min_v)
+ min_uV = min_v->min_uV;
+
+ up = desc->uplift_wa;
+ if (!do_min_v && up)
+ if (cpr_read_efuse(qfprom, &up->redundant) == up->expected)
+ do_uplift = up->speed_bin == speed;
+
+ adj_uV = adjustments ? adjustments->fuse_init_uV : NULL;
+ adj_quot = adjustments ? adjustments->fuse_quot : NULL;
+ adj_min = adjustments ? adjustments->fuse_quot_min : NULL;
+ min_diff_quot = adjustments ? adjustments->fuse_quot_diff : NULL;
+ fuse = drv->fuse_corners;
+ end = &fuse[drv->num_fuse_corners - 1];
+ fdata = desc->cpr_fuses.fuse_corner_data;
+ step_quot = desc->step_quot;
+
+ /*
+ * The initial voltage for each fuse corner is determined in one of two
+ * ways: either initial voltages are encoded in a dedicated fuse per
+ * fuse corner (fuses::init_voltage), or the PVS bin fuse is used to
+ * index a table of initial voltages (pvs_uV).
+ */
+ if (fuses->init_voltage.width) {
+ //step_volt = regulator_get_linear_step(drv->vdd_apc);
+ step_volt = 12500; /* TODO: Replace with ^ when apc_reg ready */
+ step_size_uv = desc->cpr_fuses.init_voltage_step;
+ } else {
+ redun = &desc->pvs_fuses->redundant;
+ expected = desc->pvs_fuses->redundant_value;
+ if (redun->width)
+ idx = !!(cpr_read_efuse(qfprom, redun) == expected);
+
+ pvs_efuse = &desc->pvs_fuses->pvs_fuse[idx];
+ bin = cpr_read_efuse(qfprom, pvs_efuse);
+ pvs_uV = desc->pvs_fuses->pvs_bins[bin].uV;
+ }
+
+ /* Populate fuse_corner voltage and ring_osc_idx members */
+ prev = NULL;
+ for (i = 0; fuse <= end; fuse++, fuses++, i++) {
+ if (pvs_uV) {
+ uV = pvs_uV[i];
+ } else {
+ init_v_efuse = &fuses->init_voltage;
+ bits = cpr_read_efuse(qfprom, init_v_efuse);
+ /* Not two's complement; the highest bit is the sign bit */
+ steps = bits & BIT(init_v_efuse->width - 1) ? -1 : 1;
+ steps *= bits & ~BIT(init_v_efuse->width - 1);
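+ /*
+ * e.g. (illustrative) width 6 and bits 0b100011: the sign bit is
+ * set and the magnitude is 3, so steps is -3 and the initial
+ * voltage works out to ref_uV - 3 * step_size_uv.
+ */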
+
+ uV = fdata[i].ref_uV + steps * step_size_uv;
+ uV = DIV_ROUND_UP(uV, step_volt) * step_volt;
+ }
+
+ if (adj_uV)
+ uV += adj_uV[i];
+
+ fuse->min_uV = fdata[i].min_uV;
+ fuse->max_uV = fdata[i].max_uV;
+
+ if (do_min_v) {
+ if (fuse->max_uV < min_uV) {
+ fuse->max_uV = min_uV;
+ fuse->min_uV = min_uV;
+ } else if (fuse->min_uV < min_uV) {
+ fuse->min_uV = min_uV;
+ }
+ }
+
+ fuse->uV = clamp(uV, fuse->min_uV, fuse->max_uV);
+
+ if (fuse == end) {
+ if (do_uplift) {
+ end->uV += up->uV;
+ end->uV = clamp(end->uV, 0, up->max_uV);
+ }
+ /*
+ * Allow the highest fuse corner's PVS voltage to
+ * define the ceiling voltage for that corner in order
+ * to support SoCs in which variable ceiling values
+ * are required.
+ */
+ end->max_uV = max(end->max_uV, end->uV);
+ }
+
+ /* Unpack the target quotient by scaling. */
+ fuse->quot = cpr_read_efuse(qfprom, &fuses->quotient);
+ fuse->quot *= fdata[i].quot_scale;
+ fuse->quot += fdata[i].quot_offset;
+
+ if (adj_quot) {
+ fuse->quot += adj_quot[i];
+
+ if (prev && min_diff_quot) {
+ diff = min_diff_quot[i];
+ if (fuse->quot - prev->quot <= diff)
+ fuse->quot = prev->quot + adj_min[i];
+ }
+ prev = fuse;
+ }
+
+ if (do_uplift)
+ fuse->quot += up->quot[i];
+
+ fuse->step_quot = step_quot[fuse->ring_osc_idx];
+
+ fuse->accs = accs;
+ fuse->num_accs = acc_desc->num_regs_per_fuse;
+ accs += acc_desc->num_regs_per_fuse;
+
+ fuse->vdd_mx_req = fdata[i].vdd_mx_req;
+ }
+
+ /*
+ * Restrict all fuse corner PVS voltages based upon per corner
+ * ceiling and floor voltages.
+ */
+ for (fuse = drv->fuse_corners, i = 0; fuse <= end; fuse++, i++) {
+ if (fuse->uV > fuse->max_uV)
+ fuse->uV = fuse->max_uV;
+ else if (fuse->uV < fuse->min_uV)
+ fuse->uV = fuse->min_uV;
+
+ pr_debug("fuse corner %d: [%d %d %d] RO%d quot %d squot %d\n", i,
+ fuse->min_uV, fuse->uV, fuse->max_uV,
+ fuse->ring_osc_idx, fuse->quot,
+ fuse->step_quot);
+ }
+
+ drv->ceiling_max = end->max_uV;
+}
+
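+/*
+ * Register an OPP for every entry of the chosen frequency plan on each CPU
+ * listed in qcom,cpr-cpus, and keep the first CPU's device, regulator and
+ * clock so the notifiers can adjust that CPU's OPP voltages later on.
+ */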
+static int cpr_populate_opps(struct device_node *of_node, struct cpr_drv *drv,
+ const struct corner_data **plan)
+{
+ int i, j, ret, cpu;
+ struct device *cpu_dev;
+ struct device_node *np;
+ struct corner *corner;
+ const struct corner_data *p;
+
+ for (i = 0; (np = of_parse_phandle(of_node, "qcom,cpr-cpus", i)); i++) {
+ for_each_possible_cpu(cpu)
+ if (arch_find_n_match_cpu_physical_id(np, cpu, NULL))
+ break;
+
+ if (cpu >= nr_cpu_ids) {
+ pr_err("Failed to find logical CPU for %s\n", np->name);
+ of_node_put(np);
+ return -EINVAL;
+ }
+ of_node_put(np);
+
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ return -EINVAL;
+
+ /*
+ * Keep cpu_dev and its regulator and clock for monitoring
+ * voltage changes and updating OPPs later.
+ */
+ if (i == 0) {
+ drv->cpu_dev = cpu_dev;
+ drv->vdd_apc = devm_regulator_get(cpu_dev, "cpu");
+ if (IS_ERR(drv->vdd_apc))
+ return PTR_ERR(drv->vdd_apc);
+ drv->cpu_clk = devm_clk_get(cpu_dev, NULL);
+ if (IS_ERR(drv->cpu_clk))
+ return PTR_ERR(drv->cpu_clk);
+ }
+
+ for (j = 0, corner = drv->corners; plan[j]; j++, corner++) {
+ p = plan[j];
+ ret = dev_pm_opp_add(cpu_dev, p->freq, corner->uV);
+ if (ret)
+ return ret;
+ corner->freq = p->freq;
+ }
+ }
+
+ return 0;
+}
+
+static const struct corner_data **
+find_freq_plan(const struct cpr_desc *desc, u32 speed_bin, u32 pvs_version)
+{
+ int i;
+ const struct freq_plan *p;
+
+ for (i = 0; i < desc->num_freq_plans; i++) {
+ p = &desc->freq_plans[i];
+
+ if (p->speed_bin != speed_bin &&
+ p->speed_bin != FUSE_PARAM_MATCH_ANY)
+ continue;
+ if (p->pvs_version != pvs_version &&
+ p->pvs_version != FUSE_PARAM_MATCH_ANY)
+ continue;
+
+ return p->plan;
+ }
+
+ return NULL;
+
+}
+
+static struct corner_override *find_corner_override(const struct cpr_desc *desc,
+ u32 speed_bin, u32 pvs_version)
+{
+ int i;
+ struct corner_override *o;
+
+ for (i = 0; i < desc->num_corner_overrides; i++) {
+ o = &desc->corner_overrides[i];
+
+ if (o->speed_bin != speed_bin &&
+ o->speed_bin != FUSE_PARAM_MATCH_ANY)
+ continue;
+ if (o->pvs_version != pvs_version &&
+ o->pvs_version != FUSE_PARAM_MATCH_ANY)
+ continue;
+
+ return o;
+ }
+
+ return NULL;
+
+}
+
+static void cpr_corner_init(struct cpr_drv *drv, const struct cpr_desc *desc,
+ const struct cpr_fuse *fuses, u32 speed_bin,
+ u32 pvs_version, void __iomem *qfprom,
+ const struct corner_adjustment *adjustments,
+ const struct corner_data **plan)
+{
+ int i, fnum, quot_diff, scaling;
+ struct fuse_corner *fuse, *prev_fuse;
+ struct corner *corner, *end;
+ const struct corner_data *cdata, *p;
+ const struct fuse_corner_data *fdata;
+ bool apply_scaling = false;
+ const int *adj_quot, *adj_volt, *adj_quot_offset;
+ const struct qfprom_offset *quot_offset;
+ unsigned long freq_corner, freq_diff, freq_diff_mhz;
+ unsigned long freq_high, freq_low;
+ int volt_high;
+ u64 temp, temp_limit;
+ int step_volt = 12500; /* TODO: Get from regulator APIs */
+ const struct corner_override *override;
+
+ corner = drv->corners;
+ end = &corner[drv->num_corners - 1];
+ cdata = desc->corner_data;
+ fdata = desc->cpr_fuses.fuse_corner_data;
+ adj_quot = adjustments ? adjustments->quot : NULL;
+ adj_volt = adjustments ? adjustments->init_uV : NULL;
+ adj_quot_offset = adjustments ? adjustments->fuse_quot_offset : NULL;
+
+ override = find_corner_override(desc, speed_bin, pvs_version);
+
+ /*
+ * Store maximum frequency for each fuse corner based on the frequency
+ * plan
+ */
+ for (i = 0; plan[i]; i++) {
+ p = plan[i];
+ freq_corner = p->freq;
+ fnum = p->fuse_corner;
+ fuse = &drv->fuse_corners[fnum];
+ if (freq_corner > fuse->max_freq)
+ fuse->max_freq = freq_corner;
+
+ }
+
+ /*
+ * Get the quotient adjustment scaling factor, according to:
+ *
+ * scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1))
+ * / (freq(corner_N) - freq(corner_N-1)), max_factor)
+ *
+ * QUOT(corner_N): quotient read from fuse for fuse corner N
+ * QUOT(corner_N-1): quotient read from fuse for fuse corner (N - 1)
+ * freq(corner_N): max frequency in MHz supported by fuse corner N
+ * freq(corner_N-1): max frequency in MHz supported by fuse corner
+ * (N - 1)
+ *
+ * Then walk through the corners mapped to each fuse corner
+ * and calculate the quotient adjustment for each one using the
+ * following formula:
+ *
+ * quot_adjust = (freq_max - freq_corner) * scaling / 1000
+ *
+ * freq_max: max frequency in MHz supported by the fuse corner
+ * freq_corner: frequency in MHz corresponding to the corner
+ * scaling: calculated from above equation
+ *
+ *
+ * + +
+ * | v |
+ * q | f c o | f c
+ * u | c l | c
+ * o | f t | f
+ * t | c a | c
+ * | c f g | c f
+ * | e |
+ * +--------------- +----------------
+ * 0 1 2 3 4 5 6 0 1 2 3 4 5 6
+ * corner corner
+ *
+ * c = corner
+ * f = fuse corner
+ *
+ */
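+ /*
+ * Worked example (illustrative numbers only): if QUOT(corner_N) = 1500,
+ * QUOT(corner_N-1) = 1000, freq(corner_N) = 1000 MHz and
+ * freq(corner_N-1) = 800 MHz, then
+ * scaling = min(1000 * 500 / 200, max_factor) = min(2500, max_factor).
+ * A corner running at 900 MHz under fuse corner N is then adjusted by
+ * quot_adjust = (1000 - 900) * 2500 / 1000 = 250.
+ */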
+ for (apply_scaling = false, i = 0; corner <= end; corner++, i++) {
+ freq_corner = cdata[i].freq;
+ fnum = cdata[i].fuse_corner;
+ fuse = &drv->fuse_corners[fnum];
+ if (fnum)
+ prev_fuse = &drv->fuse_corners[fnum - 1];
+ else
+ prev_fuse = NULL;
+
+ corner->fuse_corner = fuse;
+ corner->uV = fuse->uV;
+ if (prev_fuse && cdata[i - 1].freq == prev_fuse->max_freq) {
+ quot_offset = &fuses[fnum].quotient_offset;
+ if (quot_offset->width) {
+ quot_diff = cpr_read_efuse(qfprom, quot_offset);
+ quot_diff *= fdata->quot_scale;
+ if (adj_quot_offset)
+ quot_diff += adj_quot_offset[fnum];
+ } else {
+ quot_diff = fuse->quot - prev_fuse->quot;
+ }
+
+ freq_diff = fuse->max_freq - prev_fuse->max_freq;
+ freq_diff /= 1000000; /* Convert to MHz */
+ scaling = 1000 * quot_diff / freq_diff;
+ scaling = min(scaling, fdata[fnum].max_quot_scale);
+
+ apply_scaling = true;
+ } else if (freq_corner == fuse->max_freq) {
+ /* This is a fuse corner; don't scale anything */
+ apply_scaling = false;
+ }
+
+ if (apply_scaling) {
+ freq_diff = fuse->max_freq - freq_corner;
+ freq_diff_mhz = freq_diff / 1000000;
+ corner->quot_adjust = scaling * freq_diff_mhz / 1000;
+
+ freq_high = fuse->max_freq;
+ freq_low = prev_fuse->max_freq;
+ volt_high = fuse->uV;
+
+ /*
+ if (freq_high > freq_low && volt_high > volt_low &&
+ freq_high > freq_corner)
+ */
+
+ temp = freq_diff * (fuse->uV - prev_fuse->uV);
+ do_div(temp, freq_high - freq_low);
+
+ /*
+ * max_volt_scale has units of uV/MHz while freq values
+ * have units of Hz. Divide by 1000000 to convert the frequency
+ * difference to MHz, leaving the limit in uV.
+ */
+ temp_limit = freq_diff * fdata[fnum].max_volt_scale;
+ do_div(temp_limit, 1000000);
+
+ corner->uV = volt_high - min(temp, temp_limit);
+ corner->uV = roundup(corner->uV, step_volt);
+ }
+
+ if (adj_quot)
+ corner->quot_adjust -= adj_quot[i];
+
+ if (adj_volt)
+ corner->uV += adj_volt[i];
+
+ /* Load per corner ceiling and floor voltages if they exist. */
+ if (override) {
+ corner->max_uV = override->max_uV[i];
+ corner->min_uV = override->min_uV[i];
+ } else {
+ corner->max_uV = fuse->max_uV;
+ corner->min_uV = fuse->min_uV;
+ }
+
+ if (drv->ceiling_max < corner->max_uV)
+ drv->ceiling_max = corner->max_uV;
+
+ corner->uV = clamp(corner->uV, corner->min_uV, corner->max_uV);
+ corner->last_uV = corner->uV;
+
+ /* Reduce the ceiling voltage if needed */
+ if (desc->reduce_to_corner_uV && corner->uV < corner->max_uV)
+ corner->max_uV = corner->uV;
+ else if (desc->reduce_to_fuse_uV && fuse->uV < corner->max_uV)
+ corner->max_uV = max(corner->min_uV, fuse->uV);
+
+ pr_debug("corner %d: [%d %d %d] quot %d\n", i,
+ corner->min_uV, corner->uV, corner->max_uV,
+ fuse->quot - corner->quot_adjust);
+ }
+}
+
+static const struct cpr_fuse *
+cpr_get_fuses(const struct cpr_desc *desc, void __iomem *qfprom)
+{
+ u32 expected = desc->cpr_fuses.redundant_value;
+ const struct qfprom_offset *fuse = &desc->cpr_fuses.redundant;
+ unsigned int idx;
+
+ idx = !!(fuse->width && cpr_read_efuse(qfprom, fuse) == expected);
+
+ return &desc->cpr_fuses.cpr_fuse[idx * desc->num_fuse_corners];
+}
+
+static bool cpr_is_close_loop_disabled(struct cpr_drv *drv,
+ const struct cpr_desc *desc, void __iomem *qfprom,
+ const struct cpr_fuse *fuses,
+ const struct corner_adjustment *adj)
+{
+ const struct qfprom_offset *disable;
+ unsigned int idx;
+ struct fuse_corner *highest_fuse, *second_highest_fuse;
+ int min_diff_quot, diff_quot;
+
+ if (adj && adj->disable_closed_loop)
+ return true;
+
+ if (!desc->cpr_fuses.disable)
+ return false;
+
+ /*
+ * Are the fuses the redundant ones? This avoids reading the fuse
+ * redundant bit again
+ */
+ idx = !!(fuses == desc->cpr_fuses.cpr_fuse);
+ disable = &desc->cpr_fuses.disable[idx];
+
+ if (cpr_read_efuse(qfprom, disable))
+ return true;
+
+ if (!fuses->quotient_offset.width) {
+ /*
+ * Check if the target quotients for the highest two fuse
+ * corners are too close together.
+ */
+ highest_fuse = &drv->fuse_corners[drv->num_fuse_corners - 1];
+ second_highest_fuse = highest_fuse - 1;
+
+ min_diff_quot = desc->min_diff_quot;
+ diff_quot = highest_fuse->quot - second_highest_fuse->quot;
+
+ return diff_quot < min_diff_quot;
+ }
+
+ return false;
+}
+
+static int cpr_init_parameters(struct platform_device *pdev,
+ struct cpr_drv *drv)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ int ret;
+
+ ret = of_property_read_u32(of_node, "qcom,cpr-ref-clk",
+ &drv->ref_clk_khz);
+ if (ret)
+ return ret;
+ ret = of_property_read_u32(of_node, "qcom,cpr-timer-delay-us",
+ &drv->timer_delay_us);
+ if (ret)
+ return ret;
+ ret = of_property_read_u32(of_node, "qcom,cpr-timer-cons-up",
+ &drv->timer_cons_up);
+ if (ret)
+ return ret;
+ ret = of_property_read_u32(of_node, "qcom,cpr-timer-cons-down",
+ &drv->timer_cons_down);
+ if (ret)
+ return ret;
+ drv->timer_cons_down &= RBIF_TIMER_ADJ_CONS_DOWN_MASK;
+
+ ret = of_property_read_u32(of_node, "qcom,cpr-up-threshold",
+ &drv->up_threshold);
+ drv->up_threshold &= RBCPR_CTL_UP_THRESHOLD_MASK;
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(of_node, "qcom,cpr-down-threshold",
+ &drv->down_threshold);
+ drv->down_threshold &= RBCPR_CTL_DN_THRESHOLD_MASK;
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(of_node, "qcom,cpr-idle-clocks",
+ &drv->idle_clocks);
+ drv->idle_clocks &= RBCPR_STEP_QUOT_IDLE_CLK_MASK;
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(of_node, "qcom,cpr-gcnt-us", &drv->gcnt_us);
+ if (ret)
+ return ret;
+ ret = of_property_read_u32(of_node, "qcom,vdd-apc-step-up-limit",
+ &drv->vdd_apc_step_up_limit);
+ if (ret)
+ return ret;
+ ret = of_property_read_u32(of_node, "qcom,vdd-apc-step-down-limit",
+ &drv->vdd_apc_step_down_limit);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(of_node, "qcom,cpr-clamp-timer-interval",
+ &drv->clamp_timer_interval);
+ if (ret && ret != -EINVAL)
+ return ret;
+
+ drv->clamp_timer_interval = min_t(unsigned int,
+ drv->clamp_timer_interval,
+ RBIF_TIMER_ADJ_CLAMP_INT_MASK);
+
+ pr_debug("up threshold = %u, down threshold = %u\n",
+ drv->up_threshold, drv->down_threshold);
+
+ return 0;
+}
+
+static int cpr_init_and_enable_corner(struct cpr_drv *drv)
+{
+ unsigned long rate;
+ struct corner *end;
+
+ end = &drv->corners[drv->num_corners - 1];
+ rate = clk_get_rate(drv->cpu_clk);
+
+ for (drv->corner = drv->corners; drv->corner <= end; drv->corner++)
+ if (drv->corner->freq == rate)
+ break;
+
+ if (drv->corner > end)
+ return -EINVAL;
+
+ return cpr_enable(drv);
+}
+
+static struct corner_data msm8916_corner_data[] = {
+ /* [corner] -> { fuse corner, freq } */
+ { 0, 200000000 },
+ { 0, 400000000 },
+ { 1, 533330000 },
+ { 1, 800000000 },
+ { 2, 998400000 },
+ { 2, 1094400000 },
+ { 2, 1152000000 },
+ { 2, 1209600000 },
+ { 2, 1363200000 },
+};
+
+static const struct cpr_desc msm8916_desc = {
+ .num_fuse_corners = 3,
+ .vdd_mx_vmin_method = VDD_MX_VMIN_APC_CORNER_MAP,
+ .min_diff_quot = CPR_FUSE_MIN_QUOT_DIFF,
+ .step_quot = (int []){ 26, 26, 26, 26, 26, 26, 26, 26 },
+ .cpr_fuses = {
+ .init_voltage_step = 10000,
+ .fuse_corner_data = (struct fuse_corner_data[]){
+ /* ref_uV max_uV min_uV max_q q_off q_scl v_scl mx */
+ { 1050000, 1050000, 1050000, 0, 0, 1, 0, 3 },
+ { 1150000, 1150000, 1050000, 0, 0, 1, 0, 4 },
+ { 1350000, 1350000, 1162500, 650, 0, 1, 0, 6 },
+ },
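+ /* Each triplet below is a qfprom { offset, width, shift } */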
+ .cpr_fuse = (struct cpr_fuse[]){
+ {
+ .ring_osc = { 222, 3, 6},
+ .init_voltage = { 220, 6, 2 },
+ .quotient = { 221, 12, 2 },
+ },
+ {
+ .ring_osc = { 222, 3, 6},
+ .init_voltage = { 218, 6, 2 },
+ .quotient = { 219, 12, 0 },
+ },
+ {
+ .ring_osc = { 222, 3, 6},
+ .init_voltage = { 216, 6, 0 },
+ .quotient = { 216, 12, 6 },
+ },
+ },
+ .disable = &(struct qfprom_offset){ 223, 1, 1 },
+ },
+ .speed_bin = { 12, 3, 2 },
+ .pvs_version = { 6, 2, 7 },
+ .corner_data = msm8916_corner_data,
+ .num_corners = ARRAY_SIZE(msm8916_corner_data),
+ .num_freq_plans = 3,
+ .freq_plans = (struct freq_plan[]){
+ {
+ .speed_bin = 0,
+ .pvs_version = 0,
+ .plan = (const struct corner_data* []){
+ msm8916_corner_data + 0,
+ msm8916_corner_data + 1,
+ msm8916_corner_data + 2,
+ msm8916_corner_data + 3,
+ msm8916_corner_data + 4,
+ msm8916_corner_data + 5,
+ msm8916_corner_data + 6,
+ msm8916_corner_data + 7,
+ NULL
+ },
+ },
+ {
+ .speed_bin = 0,
+ .pvs_version = 1,
+ .plan = (const struct corner_data* []){
+ msm8916_corner_data + 0,
+ msm8916_corner_data + 1,
+ msm8916_corner_data + 2,
+ msm8916_corner_data + 3,
+ msm8916_corner_data + 4,
+ msm8916_corner_data + 5,
+ msm8916_corner_data + 6,
+ msm8916_corner_data + 7,
+ NULL
+ },
+ },
+ {
+ .speed_bin = 2,
+ .pvs_version = 0,
+ .plan = (const struct corner_data* []){
+ msm8916_corner_data + 0,
+ msm8916_corner_data + 1,
+ msm8916_corner_data + 2,
+ msm8916_corner_data + 3,
+ msm8916_corner_data + 4,
+ msm8916_corner_data + 5,
+ msm8916_corner_data + 6,
+ msm8916_corner_data + 7,
+ msm8916_corner_data + 8,
+ NULL
+ },
+ },
+ },
+};
+
+static const struct acc_desc msm8916_acc_desc = {
+ .settings = (struct reg_sequence[]){
+ { 0xf000, 0 },
+ { 0xf000, 0x100 },
+ { 0xf000, 0x101 }
+ },
+ .override_settings = (struct reg_sequence[]){
+ { 0xf000, 0 },
+ { 0xf000, 0x100 },
+ { 0xf000, 0x100 }
+ },
+ .num_regs_per_fuse = 1,
+ .override = { 6, 1, 4 },
+ .override_value = 1,
+};
+
+static const struct of_device_id cpr_descs[] = {
+ { .compatible = "qcom,qfprom-msm8916", .data = &msm8916_desc },
+ { }
+};
+
+static const struct of_device_id acc_descs[] = {
+ { .compatible = "qcom,tcsr-msm8916", .data = &msm8916_acc_desc },
+ { }
+};
+
+static int cpr_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct cpr_drv *drv;
+ const struct cpr_fuse *cpr_fuses;
+ const struct corner_adjustment *adj;
+ const struct corner_data **plan;
+ size_t len;
+ int irq, ret;
+ const struct cpr_desc *desc;
+ const struct acc_desc *acc_desc;
+ const struct of_device_id *match;
+ struct device_node *np;
+ void __iomem *qfprom;
+ u32 cpr_rev = FUSE_REVISION_UNKNOWN;
+ u32 speed_bin = SPEED_BIN_NONE;
+ u32 pvs_version = 0;
+ struct platform_device_info devinfo = { .name = "cpufreq-dt", };
+
+ np = of_parse_phandle(dev->of_node, "eeprom", 0);
+ if (!np)
+ return -ENODEV;
+
+ match = of_match_node(cpr_descs, np);
+ if (!match)
+ return -EINVAL;
+ desc = match->data;
+
+ /* TODO: Get from eeprom API */
+ qfprom = devm_ioremap(dev, 0x58000, 0x7000);
+ if (!qfprom)
+ return -ENOMEM;
+
+ len = sizeof(*drv) +
+ sizeof(*drv->fuse_corners) * desc->num_fuse_corners +
+ sizeof(*drv->corners) * desc->num_corners;
+
+ drv = devm_kzalloc(dev, len, GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ np = of_parse_phandle(dev->of_node, "acc-syscon", 0);
+ if (!np)
+ return -ENODEV;
+
+ match = of_match_node(acc_descs, np);
+ if (!match)
+ return -EINVAL;
+
+ acc_desc = match->data;
+ drv->tcsr = syscon_node_to_regmap(np);
+ if (IS_ERR(drv->tcsr))
+ return PTR_ERR(drv->tcsr);
+
+ drv->num_fuse_corners = desc->num_fuse_corners;
+ drv->num_corners = desc->num_corners;
+ drv->fuse_corners = (struct fuse_corner *)(drv + 1);
+ drv->corners = (struct corner *)(drv->fuse_corners +
+ drv->num_fuse_corners);
+ mutex_init(&drv->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ drv->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(drv->base))
+ return PTR_ERR(drv->base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -EINVAL;
+
+ drv->vdd_mx = devm_regulator_get(dev, "vdd-mx");
+ if (IS_ERR(drv->vdd_mx))
+ return PTR_ERR(drv->vdd_mx);
+
+ drv->vdd_mx_vmin_method = desc->vdd_mx_vmin_method;
+ drv->vdd_mx_vmax = desc->vdd_mx_vmax;
+
+ if (desc->fuse_revision.width)
+ cpr_rev = cpr_read_efuse(qfprom, &desc->fuse_revision);
+ if (desc->speed_bin.width)
+ speed_bin = cpr_read_efuse(qfprom, &desc->speed_bin);
+ if (desc->pvs_version.width)
+ pvs_version = cpr_read_efuse(qfprom, &desc->pvs_version);
+
+ plan = find_freq_plan(desc, speed_bin, pvs_version);
+ if (!plan)
+ return -EINVAL;
+
+ cpr_fuses = cpr_get_fuses(desc, qfprom);
+ cpr_populate_ring_osc_idx(cpr_fuses, drv, qfprom);
+
+ adj = cpr_find_adjustment(speed_bin, pvs_version, cpr_rev, desc, drv);
+
+ cpr_fuse_corner_init(drv, desc, qfprom, cpr_fuses, speed_bin, adj,
+ acc_desc);
+ cpr_corner_init(drv, desc, cpr_fuses, speed_bin, pvs_version, qfprom,
+ adj, plan);
+
+ ret = cpr_populate_opps(dev->of_node, drv, plan);
+ if (ret)
+ return ret;
+
+ drv->loop_disabled = cpr_is_close_loop_disabled(drv, desc, qfprom,
+ cpr_fuses, adj);
+ pr_info("CPR closed loop is %sabled\n",
+ drv->loop_disabled ? "dis" : "en");
+
+ ret = cpr_init_parameters(pdev, drv);
+ if (ret)
+ return ret;
+
+ /* Configure CPR HW but keep it disabled */
+ ret = cpr_config(drv);
+ if (ret)
+ return ret;
+
+ /* Enable ACC if required */
+ if (acc_desc->enable_mask)
+ regmap_update_bits(drv->tcsr, acc_desc->enable_reg,
+ acc_desc->enable_mask,
+ acc_desc->enable_mask);
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ cpr_irq_handler, IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+ "cpr", drv);
+ if (ret)
+ return ret;
+
+ ret = cpr_init_and_enable_corner(drv);
+ if (ret)
+ return ret;
+
+ drv->reg_nb.notifier_call = cpr_regulator_notifier;
+ ret = regulator_register_notifier(drv->vdd_apc, &drv->reg_nb);
+ if (ret)
+ return ret;
+
+ drv->cpufreq_nb.notifier_call = cpr_cpufreq_notifier;
+ ret = cpufreq_register_notifier(&drv->cpufreq_nb,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (ret) {
+ regulator_unregister_notifier(drv->vdd_apc, &drv->reg_nb);
+ return ret;
+ }
+
+ /*
+ * Ensure that enable state accurately reflects the case in which CPR
+ * is permanently disabled.
+ */
+ //cpr_vreg->enable &= !cpr_vreg->loop_disabled;
+
+ platform_set_drvdata(pdev, drv);
+
+ return PTR_ERR_OR_ZERO(platform_device_register_full(&devinfo));
+}
+
+static int cpr_remove(struct platform_device *pdev)
+{
+ struct cpr_drv *drv = platform_get_drvdata(pdev);
+
+ if (cpr_is_allowed(drv)) {
+ cpr_ctl_disable(drv);
+ cpr_irq_set(drv, 0);
+ }
+
+ return 0;
+}
+
+static const struct of_device_id cpr_match_table[] = {
+ { .compatible = "qcom,cpr" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cpr_match_table);
+
+static struct platform_driver cpr_driver = {
+ .probe = cpr_probe,
+ .remove = cpr_remove,
+ .driver = {
+ .name = "qcom-cpr",
+ .of_match_table = cpr_match_table,
+ .pm = &cpr_pm_ops,
+ },
+};
+module_platform_driver(cpr_driver);
+
+MODULE_DESCRIPTION("Core Power Reduction (CPR) driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qcom-cpr");
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 64bccff557be..2d675c1b982d 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -546,6 +546,18 @@ config REGULATOR_QCOM_SPMI
Qualcomm SPMI PMICs as a module. The module will be named
"qcom_spmi-regulator".
+config REGULATOR_QCOM_SMD_RPM
+ tristate "Qualcomm SMD based RPM regulator driver"
+ depends on MFD_QCOM_SMD_RPM
+ help
+ If you say yes to this option, support will be included for the
+ regulators exposed by the Resource Power Manager found in Qualcomm
+ 8974 based devices.
+
+ Say M here if you want to include support for the regulators on the
+ Qualcomm RPM as a module. The module will be named
+ "qcom_smd-regulator".
+
config REGULATOR_RC5T583
tristate "RICOH RC5T583 Power regulators"
depends on MFD_RC5T583
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 0f8174913c17..be39fe4b5cdc 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -64,6 +64,7 @@ obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_SMD_RPM) += qcom_smd-regulator.o
obj-$(CONFIG_REGULATOR_QCOM_SPMI) += qcom_spmi-regulator.o
+obj-$(CONFIG_REGULATOR_QCOM_SMD_RPM) += qcom_smd-regulator.o
obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o
obj-$(CONFIG_REGULATOR_PWM) += pwm-regulator.o
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 9c6167dd2c8b..86b3cfcd0106 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -20,6 +20,9 @@
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/soc/qcom/smd-rpm.h>
+#include <linux/regulator/qcom_smd-regulator.h>
+
+#include "internal.h"
struct qcom_rpm_reg {
struct device *dev;
@@ -44,6 +47,11 @@ struct rpm_regulator_req {
#define RPM_KEY_SWEN 0x6e657773 /* "swen" */
#define RPM_KEY_UV 0x00007675 /* "uv" */
#define RPM_KEY_MA 0x0000616d /* "ma" */
+#define RPM_KEY_FLOOR 0x00636676 /* "vfc" */
+#define RPM_KEY_CORNER 0x6e726f63 /* "corn" */
+
+#define RPM_MIN_FLOOR_CORNER 0
+#define RPM_MAX_FLOOR_CORNER 6
static int rpm_reg_write_active(struct qcom_rpm_reg *vreg,
struct rpm_regulator_req *req,
@@ -56,6 +64,50 @@ static int rpm_reg_write_active(struct qcom_rpm_reg *vreg,
req, size);
}
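+
+/*
+ * Vote a minimum ("floor") voltage corner for this regulator with the RPM.
+ * Valid corners are bounded by RPM_MIN_FLOOR_CORNER and RPM_MAX_FLOOR_CORNER.
+ */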
+int qcom_rpm_set_floor(struct regulator *regulator, int floor)
+{
+ struct regulator_dev *rdev = regulator->rdev;
+ struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+ struct rpm_regulator_req req;
+ int ret;
+
+ req.key = RPM_KEY_FLOOR;
+ req.nbytes = sizeof(u32);
+ req.value = floor;
+
+ if (floor < RPM_MIN_FLOOR_CORNER || floor > RPM_MAX_FLOOR_CORNER)
+ return -EINVAL;
+
+ ret = rpm_reg_write_active(vreg, &req, sizeof(req));
+ if (ret)
+ dev_err(rdev_get_dev(rdev), "Failed to set floor %d\n", floor);
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_rpm_set_floor);
+
+int qcom_rpm_set_corner(struct regulator *regulator, int corner)
+{
+ struct regulator_dev *rdev = regulator->rdev;
+ struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
+ struct rpm_regulator_req req;
+ int ret;
+
+ req.key = RPM_KEY_CORNER;
+ req.nbytes = sizeof(u32);
+ req.value = corner;
+
+ if (corner < RPM_MIN_FLOOR_CORNER || corner > RPM_MAX_FLOOR_CORNER)
+ return -EINVAL;
+
+ ret = rpm_reg_write_active(vreg, &req, sizeof(req));
+ if (ret)
+ dev_err(rdev_get_dev(rdev), "Failed to set corner %d\n", corner);
+
+ return ret;
+}
+EXPORT_SYMBOL(qcom_rpm_set_corner);
+
static int rpm_reg_enable(struct regulator_dev *rdev)
{
struct qcom_rpm_reg *vreg = rdev_get_drvdata(rdev);
@@ -211,6 +263,43 @@ static const struct regulator_desc pm8941_switch = {
.ops = &rpm_switch_ops,
};
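+
+/*
+ * PM8916 regulator ranges derived from the tables below: the p-type LDOs
+ * span 750 mV to 3.35 V in 12.5 mV steps, the n-type LDOs 375 mV to
+ * 1.5375 V, and the high-voltage buck 1.55 V to 2.325 V in 25 mV steps.
+ */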
+static const struct regulator_desc pm8916_pldo = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(750000, 0, 208, 12500),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 209,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8916_nldo = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(375000, 0, 93, 12500),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 94,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8916_buck_lvo_smps = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(375000, 0, 95, 12500),
+ REGULATOR_LINEAR_RANGE(750000, 96, 127, 25000),
+ },
+ .n_linear_ranges = 2,
+ .n_voltages = 128,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm8916_buck_hvo_smps = {
+ .linear_ranges = (struct regulator_linear_range[]) {
+ REGULATOR_LINEAR_RANGE(1550000, 0, 31, 25000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 32,
+ .ops = &rpm_smps_ldo_ops,
+};
+
struct rpm_regulator_data {
const char *name;
u32 type;
@@ -272,9 +361,36 @@ static const struct rpm_regulator_data rpm_pm8941_regulators[] = {
{}
};
+static const struct rpm_regulator_data rpm_pm8916_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pm8916_buck_lvo_smps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pm8916_buck_lvo_smps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pm8916_buck_lvo_smps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPA, 4, &pm8916_buck_hvo_smps, "vdd_s4" },
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pm8916_nldo, "vdd_l1_l2_l3" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pm8916_nldo, "vdd_l1_l2_l3" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pm8916_nldo, "vdd_l1_l2_l3" },
+ { "l4", QCOM_SMD_RPM_LDOA, 4, &pm8916_pldo, "vdd_l4_l5_l6" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm8916_pldo, "vdd_l4_l5_l6" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm8916_pldo, "vdd_l4_l5_l6" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm8916_pldo, "vdd_l7" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pm8916_pldo, "vdd_l8_l9_l10_l11_l12_l13_l14_l15_l16_l17_l18"},
+ {}
+};
+
static const struct of_device_id rpm_of_match[] = {
{ .compatible = "qcom,rpm-pm8841-regulators", .data = &rpm_pm8841_regulators },
{ .compatible = "qcom,rpm-pm8941-regulators", .data = &rpm_pm8941_regulators },
+ { .compatible = "qcom,rpm-pm8916-regulators", .data = &rpm_pm8916_regulators },
{}
};
MODULE_DEVICE_TABLE(of, rpm_of_match);
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 28c711f0ac6b..54d5b637d14c 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -77,4 +77,20 @@ config DA8XX_REMOTEPROC
It's safe to say n here if you're not interested in multimedia
offloading.
+config QCOM_Q6V5_PIL
+ tristate "Qualcomm Hexagon V5 Peripherial Image Loader"
+ depends on OF && ARCH_QCOM
+ select REMOTEPROC
+ help
+ Say y here to support the Qualcomm Peripheral Image Loader for the
+ Hexagon V5 based remote processors.
+
+config QCOM_TZ_PIL
+ tristate "Qualcomm TrustZone based Peripherial Image Loader"
+ depends on OF && ARCH_QCOM
+ select REMOTEPROC
+ help
+ Say y here to support the TrustZone based Qualcomm Peripheral Image
+ Loader.
+
endmenu
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 81b04d1e2e58..4351f2e462ad 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -11,3 +11,5 @@ obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o
obj-$(CONFIG_STE_MODEM_RPROC) += ste_modem_rproc.o
obj-$(CONFIG_WKUP_M3_RPROC) += wkup_m3_rproc.o
obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o
+obj-$(CONFIG_QCOM_Q6V5_PIL) += qcom_q6v5_pil.o
+obj-$(CONFIG_QCOM_TZ_PIL) += qcom_tz_pil.o
diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c
new file mode 100644
index 000000000000..4d63a84db588
--- /dev/null
+++ b/drivers/remoteproc/qcom_q6v5_pil.c
@@ -0,0 +1,1075 @@
+/*
+ * Qualcomm Peripheral Image Loader
+ *
+ * Copyright (C) 2014 Sony Mobile Communications AB
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/remoteproc.h>
+#include <linux/interrupt.h>
+#include <linux/memblock.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/elf.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/reset.h>
+
+#include "remoteproc_internal.h"
+
+#include <linux/qcom_scm.h>
+
+#define SCM_SVC_PIL 0x2
+
+struct qproc {
+ struct device *dev;
+ struct rproc *rproc;
+
+ void __iomem *reg_base;
+ void __iomem *halt_base;
+ void __iomem *rmb_base;
+
+ struct reset_control *mss_restart;
+
+ int wdog_irq;
+ int fatal_irq;
+ int ready_irq;
+ int handover_irq;
+ int stop_ack_irq;
+
+ struct gpio_desc *stop_gpio;
+
+ struct regulator *vdd;
+ struct regulator *cx;
+ struct regulator *mx;
+ struct regulator *pll;
+
+ struct clk *ahb_clk;
+ struct clk *axi_clk;
+ struct clk *rom_clk;
+
+ struct completion start_done;
+
+ void *mba_va;
+ dma_addr_t mba_da;
+ size_t mba_size;
+ struct dma_attrs mba_attrs;
+};
+
+#define VDD_MSS_UV 1000000
+#define VDD_MSS_UV_MAX 1150000
+#define VDD_MSS_UA 100000
+
+/* Q6 Register Offsets */
+#define QDSP6SS_RST_EVB 0x010
+
+/* AXI Halting Registers */
+#define MSS_Q6_HALT_BASE 0x180
+#define MSS_MODEM_HALT_BASE 0x200
+#define MSS_NC_HALT_BASE 0x280
+
+/* RMB Status Register Values */
+#define STATUS_PBL_SUCCESS 0x1
+#define STATUS_XPU_UNLOCKED 0x1
+#define STATUS_XPU_UNLOCKED_SCRIBBLED 0x2
+
+/* PBL/MBA interface registers */
+#define RMB_MBA_IMAGE 0x00
+#define RMB_PBL_STATUS 0x04
+#define RMB_MBA_COMMAND 0x08
+#define RMB_MBA_STATUS 0x0C
+#define RMB_PMI_META_DATA 0x10
+#define RMB_PMI_CODE_START 0x14
+#define RMB_PMI_CODE_LENGTH 0x18
+
+#define POLL_INTERVAL_US 50
+
+#define CMD_META_DATA_READY 0x1
+#define CMD_LOAD_READY 0x2
+
+#define STATUS_META_DATA_AUTH_SUCCESS 0x3
+#define STATUS_AUTH_COMPLETE 0x4
+
+/* External BHS */
+#define EXTERNAL_BHS_ON BIT(0)
+#define EXTERNAL_BHS_STATUS BIT(4)
+#define BHS_TIMEOUT_US 50
+
+#define MSS_RESTART_ID 0xA
+
+/* QDSP6SS Register Offsets */
+#define QDSP6SS_RESET 0x014
+#define QDSP6SS_GFMUX_CTL 0x020
+#define QDSP6SS_PWR_CTL 0x030
+#define QDSP6SS_STRAP_ACC 0x110
+
+/* AXI Halt Register Offsets */
+#define AXI_HALTREQ 0x0
+#define AXI_HALTACK 0x4
+#define AXI_IDLE 0x8
+
+#define HALT_ACK_TIMEOUT_US 100000
+
+/* QDSP6SS_RESET */
+#define Q6SS_STOP_CORE BIT(0)
+#define Q6SS_CORE_ARES BIT(1)
+#define Q6SS_BUS_ARES_ENA BIT(2)
+
+/* QDSP6SS_GFMUX_CTL */
+#define Q6SS_CLK_ENA BIT(1)
+#define Q6SS_CLK_SRC_SEL_C BIT(3)
+#define Q6SS_CLK_SRC_SEL_FIELD 0xC
+#define Q6SS_CLK_SRC_SWITCH_CLK_OVR BIT(8)
+
+/* QDSP6SS_PWR_CTL */
+#define Q6SS_L2DATA_SLP_NRET_N_0 BIT(0)
+#define Q6SS_L2DATA_SLP_NRET_N_1 BIT(1)
+#define Q6SS_L2DATA_SLP_NRET_N_2 BIT(2)
+#define Q6SS_L2TAG_SLP_NRET_N BIT(16)
+#define Q6SS_ETB_SLP_NRET_N BIT(17)
+#define Q6SS_L2DATA_STBY_N BIT(18)
+#define Q6SS_SLP_RET_N BIT(19)
+#define Q6SS_CLAMP_IO BIT(20)
+#define QDSS_BHS_ON BIT(21)
+#define QDSS_LDO_BYP BIT(22)
+
+/* QDSP6v55 parameters */
+#define QDSP6v55_LDO_ON BIT(26)
+#define QDSP6v55_LDO_BYP BIT(25)
+#define QDSP6v55_BHS_ON BIT(24)
+#define QDSP6v55_CLAMP_WL BIT(21)
+#define L1IU_SLP_NRET_N BIT(15)
+#define L1DU_SLP_NRET_N BIT(14)
+#define L2PLRU_SLP_NRET_N BIT(13)
+
+#define HALT_CHECK_MAX_LOOPS (200)
+#define QDSP6SS_XO_CBCR (0x0038)
+
+#define QDSP6SS_ACC_OVERRIDE_VAL 0x20
+
+static int qproc_sanity_check(struct rproc *rproc,
+ const struct firmware *fw)
+{
+ if (!fw) {
+ dev_err(&rproc->dev, "failed to load %s\n", rproc->name);
+ return -EINVAL;
+ }
+
+ /* XXX: ??? */
+
+ return 0;
+}
+
+static struct resource_table * qproc_find_rsc_table(struct rproc *rproc,
+ const struct firmware *fw,
+ int *tablesz)
+{
+ static struct resource_table table = { .ver = 1, };
+
+ *tablesz = sizeof(table);
+ return &table;
+}
+
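+/*
+ * Copy the MBA firmware into a physically contiguous DMA buffer and record
+ * its address, size and DMA attributes for the later boot hand-off.
+ */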
+static int qproc_load(struct rproc *rproc, const struct firmware *fw)
+{
+ struct qproc *qproc = rproc->priv;
+ DEFINE_DMA_ATTRS(attrs);
+ dma_addr_t phys;
+ dma_addr_t end;
+ void *ptr;
+
+ dma_set_mask(qproc->dev, DMA_BIT_MASK(32));
+ dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);
+
+ ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, &attrs);
+ if (!ptr) {
+ dev_err(qproc->dev, "failed to allocate mba metadata buffer\n");
+ return -ENOMEM;
+ }
+
+ end = phys + fw->size;
+ dev_info(qproc->dev, "loading MBA from %pa to %pa\n", &phys, &end);
+
+ memcpy(ptr, fw->data, fw->size);
+
+ qproc->mba_va = ptr;
+ qproc->mba_da = phys;
+ qproc->mba_size = fw->size;
+ qproc->mba_attrs = attrs;
+
+ return 0;
+}
+
+static const struct rproc_fw_ops qproc_fw_ops = {
+ .find_rsc_table = qproc_find_rsc_table,
+ .load = qproc_load,
+ .sanity_check = qproc_sanity_check,
+};
+
+static void q6v5proc_reset(struct qproc *qproc)
+{
+ u32 val;
+
+ /* Assert resets, stop core */
+ val = readl_relaxed(qproc->reg_base + QDSP6SS_RESET);
+ val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA | Q6SS_STOP_CORE);
+ writel_relaxed(val, qproc->reg_base + QDSP6SS_RESET);
+
+ /* Enable power block headswitch, and wait for it to stabilize */
+ val = readl_relaxed(qproc->reg_base + QDSP6SS_PWR_CTL);
+ val |= QDSS_BHS_ON | QDSS_LDO_BYP;
+ writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);
+ mb();
+ udelay(1);
+
+ /*
+ * Turn on memories. L2 banks should be done individually
+ * to minimize inrush current.
+ */
+ val = readl_relaxed(qproc->reg_base + QDSP6SS_PWR_CTL);
+ val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
+ Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
+ writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);
+ val |= Q6SS_L2DATA_SLP_NRET_N_2;
+ writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);
+ val |= Q6SS_L2DATA_SLP_NRET_N_1;
+ writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);
+ val |= Q6SS_L2DATA_SLP_NRET_N_0;
+ writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);
+
+ /* Remove IO clamp */
+ val &= ~Q6SS_CLAMP_IO;
+ writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);
+
+ /* Bring core out of reset */
+ val = readl_relaxed(qproc->reg_base + QDSP6SS_RESET);
+ val &= ~Q6SS_CORE_ARES;
+ writel_relaxed(val, qproc->reg_base + QDSP6SS_RESET);
+
+ /* Turn on core clock */
+ val = readl_relaxed(qproc->reg_base + QDSP6SS_GFMUX_CTL);
+ val |= Q6SS_CLK_ENA;
+
+#if 0
+ /* Need a different clock source for v5.2.0 */
+ if (qproc->qdsp6v5_2_0) {
+ val &= ~Q6SS_CLK_SRC_SEL_FIELD;
+ val |= Q6SS_CLK_SRC_SEL_C;
+ }
+
+ /* force clock on during source switch */
+ if (qproc->qdsp6v56)
+ val |= Q6SS_CLK_SRC_SWITCH_CLK_OVR;
+#endif
+
+ writel_relaxed(val, qproc->reg_base + QDSP6SS_GFMUX_CTL);
+
+ /* Start core execution */
+ val = readl_relaxed(qproc->reg_base + QDSP6SS_RESET);
+ val &= ~Q6SS_STOP_CORE;
+ writel_relaxed(val, qproc->reg_base + QDSP6SS_RESET);
+}
+
+static void q6v5proc_halt_axi_port(struct qproc *qproc, void __iomem *halt)
+{
+ unsigned long timeout;
+
+ if (readl_relaxed(halt + AXI_IDLE))
+ return;
+
+ /* Assert halt request */
+ writel_relaxed(1, halt + AXI_HALTREQ);
+
+ /* Wait for halt */
+ timeout = jiffies + 10 * HZ;
+ for (;;) {
+ if (readl(halt + AXI_HALTACK) || time_after(jiffies, timeout))
+ break;
+
+ msleep(1);
+ }
+
+ if (!readl_relaxed(halt + AXI_IDLE))
+ dev_err(qproc->dev, "port %pa failed halt\n", &halt);
+
+ /* Clear halt request (port will remain halted until reset) */
+ writel_relaxed(0, halt + AXI_HALTREQ);
+}
+
+static int qproc_mba_load_mdt(struct qproc *qproc, const struct firmware *fw)
+{
+ DEFINE_DMA_ATTRS(attrs);
+ unsigned long timeout;
+ dma_addr_t phys;
+ dma_addr_t end;
+ void *ptr;
+ int ret;
+ s32 val;
+
+ dma_set_mask(qproc->dev, DMA_BIT_MASK(32));
+ dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);
+
+ ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, &attrs);
+ if (!ptr) {
+ dev_err(qproc->dev, "failed to allocate mba metadata buffer\n");
+ return -ENOMEM;
+ }
+
+ end = phys + fw->size;
+ dev_info(qproc->dev, "loading mdt header from %pa to %pa\n", &phys, &end);
+
+ memcpy(ptr, fw->data, fw->size);
+
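+ /*
+ * Hand the metadata to the MBA and poll RMB_MBA_STATUS for the result of
+ * the header authentication.
+ */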
+ writel_relaxed(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+
+ writel_relaxed(phys, qproc->rmb_base + RMB_PMI_META_DATA);
+ writel(CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND);
+
+ timeout = jiffies + HZ;
+ for (;;) {
+ msleep(1);
+
+ val = readl(qproc->rmb_base + RMB_MBA_STATUS);
+ if (val == STATUS_META_DATA_AUTH_SUCCESS || val < 0)
+ break;
+
+ if (time_after(jiffies, timeout))
+ break;
+ }
+ if (val == 0) {
+ dev_err(qproc->dev, "MBA authentication of headers timed out\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ } else if (val < 0) {
+ dev_err(qproc->dev, "MBA returned error %d for headers\n", val);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ dev_err(qproc->dev, "mdt authenticated\n");
+
+ ret = 0;
+out:
+ dma_free_attrs(qproc->dev, fw->size, ptr, phys, &attrs);
+
+ return ret;
+}
+
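+/*
+ * Bits 26:24 of p_flags encode the segment type; type 2 marks a hash table
+ * segment, which is used for authentication and is not loaded into memory.
+ */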
+#define segment_is_hash(flag) (((flag) & (0x7 << 24)) == (0x2 << 24))
+
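+/*
+ * Load each non-hash PT_LOAD segment from its matching modem.bNN blob into
+ * the physical region described by the program header.
+ */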
+static int
+qproc_load_segments(struct qproc *qproc, const struct firmware *fw)
+{
+ struct device *dev = qproc->dev;
+ struct elf32_hdr *ehdr;
+ struct elf32_phdr *phdr;
+ int i, ret = 0;
+ const u8 *elf_data = fw->data;
+ const struct firmware *seg_fw;
+ char fw_name[20];
+
+ ehdr = (struct elf32_hdr *)elf_data;
+ phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
+
+ /* go through the available ELF segments */
+ for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
+ u32 da = phdr->p_paddr;
+ u32 memsz = phdr->p_memsz;
+ u32 filesz = phdr->p_filesz;
+ void *ptr;
+
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ if (segment_is_hash(phdr->p_flags))
+ continue;
+
+ if (filesz == 0)
+ continue;
+
+ dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
+ phdr->p_type, da, memsz, filesz);
+
+ if (filesz > memsz) {
+ dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
+ filesz, memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ ptr = ioremap(da, memsz);
+ if (!ptr) {
+ dev_err(qproc->dev, "failed to allocate mba metadata buffer\n");
+ ret = -ENOMEM;
+ break;
+ }
+
+ if (filesz) {
+ snprintf(fw_name, sizeof(fw_name), "modem.b%02d", i);
+ ret = request_firmware(&seg_fw, fw_name, qproc->dev);
+ if (ret) {
+ iounmap(ptr);
+ break;
+ }
+
+ memcpy(ptr, seg_fw->data, filesz);
+
+ release_firmware(seg_fw);
+ }
+
+ if (memsz > filesz)
+ memset(ptr + filesz, 0, memsz - filesz);
+
+ wmb();
+ iounmap(ptr);
+ }
+
+ return ret;
+}
+
+static int qproc_verify_segments(struct qproc *qproc, const struct firmware *fw)
+{
+ struct elf32_hdr *ehdr;
+ struct elf32_phdr *phdr;
+ const u8 *elf_data = fw->data;
+ unsigned long timeout;
+ phys_addr_t min_addr = (phys_addr_t)ULLONG_MAX;
+ u32 size = 0;
+ s32 val;
+ int ret;
+ int i;
+ u32 v;
+
+ ehdr = (struct elf32_hdr *)elf_data;
+ phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
+
+ v = readl_relaxed(qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+ dev_err(qproc->dev, "RMB_PMI_CODE_LENGTH: %pa\n", &v);
+
+ msleep(1);
+
+ v = readl_relaxed(qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+ dev_err(qproc->dev, "RMB_PMI_CODE_LENGTH: %pa\n", &v);
+
+#if 1
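+ /*
+ * Find the lowest load address and the total size of the loadable,
+ * non-hash segments so the MBA knows which region to authenticate.
+ */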
+ for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
+ phys_addr_t da = phdr->p_paddr;
+ u32 memsz = phdr->p_memsz;
+
+ if (phdr->p_type != PT_LOAD)
+ continue;
+
+ dev_err(qproc->dev, "0x%x %d %d\n", phdr->p_paddr, segment_is_hash(phdr->p_flags), !!(phdr->p_flags & BIT(27)));
+
+ if (segment_is_hash(phdr->p_flags))
+ continue;
+
+ if (memsz == 0)
+ continue;
+
+ if (da < min_addr)
+ min_addr = da;
+
+ size += memsz;
+ }
+
+ dev_err(qproc->dev, "verify: %pa:%pa\n", &min_addr, &size);
+
+ writel_relaxed(min_addr, qproc->rmb_base + RMB_PMI_CODE_START);
+ writel(CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND);
+ writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+#endif
+
+ v = readl_relaxed(qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+ dev_err(qproc->dev, "RMB_PMI_CODE_LENGTH: %pa\n", &v);
+
+#if 0
+ writel_relaxed(0x08400000, qproc->rmb_base + RMB_PMI_CODE_START);
+ writel_relaxed(CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND);
+
+ size = 0;
+ size += 0x162f4;
+ dev_err(qproc->dev, "RMB_PMI_CODE_LENGTH: %x\n", readl_relaxed(qproc->rmb_base + RMB_PMI_CODE_LENGTH));
+ dev_err(qproc->dev, "RMB_MBA_STATUS: 0x%x\n", readl(qproc->rmb_base + RMB_MBA_STATUS));
+ writel_relaxed(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+ size += 0x5f7620;
+ dev_err(qproc->dev, "RMB_PMI_CODE_LENGTH: %x\n", readl_relaxed(qproc->rmb_base + RMB_PMI_CODE_LENGTH));
+ dev_err(qproc->dev, "RMB_MBA_STATUS: 0x%x\n", readl(qproc->rmb_base + RMB_MBA_STATUS));
+ writel_relaxed(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+ size += 0x719e1c;
+ dev_err(qproc->dev, "RMB_PMI_CODE_LENGTH: %x\n", readl_relaxed(qproc->rmb_base + RMB_PMI_CODE_LENGTH));
+ dev_err(qproc->dev, "RMB_MBA_STATUS: 0x%x\n", readl(qproc->rmb_base + RMB_MBA_STATUS));
+ writel_relaxed(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+ size += 0x14000;
+ dev_err(qproc->dev, "RMB_PMI_CODE_LENGTH: %x\n", readl_relaxed(qproc->rmb_base + RMB_PMI_CODE_LENGTH));
+ dev_err(qproc->dev, "RMB_MBA_STATUS: 0x%x\n", readl(qproc->rmb_base + RMB_MBA_STATUS));
+ writel_relaxed(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+ size += 0x2b929;
+ dev_err(qproc->dev, "RMB_PMI_CODE_LENGTH: %x\n", readl_relaxed(qproc->rmb_base + RMB_PMI_CODE_LENGTH));
+ dev_err(qproc->dev, "RMB_MBA_STATUS: 0x%x\n", readl(qproc->rmb_base + RMB_MBA_STATUS));
+ writel_relaxed(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+ size += 0x0d500;
+ dev_err(qproc->dev, "RMB_PMI_CODE_LENGTH: %x\n", readl_relaxed(qproc->rmb_base + RMB_PMI_CODE_LENGTH));
+ dev_err(qproc->dev, "RMB_MBA_STATUS: 0x%x\n", readl(qproc->rmb_base + RMB_MBA_STATUS));
+ writel_relaxed(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+ size += 0x19ab8;
+ dev_err(qproc->dev, "RMB_PMI_CODE_LENGTH: %x\n", readl_relaxed(qproc->rmb_base + RMB_PMI_CODE_LENGTH));
+ dev_err(qproc->dev, "RMB_MBA_STATUS: 0x%x\n", readl(qproc->rmb_base + RMB_MBA_STATUS));
+ writel_relaxed(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+ size += 0x16d68;
+ dev_err(qproc->dev, "RMB_PMI_CODE_LENGTH: %x\n", readl_relaxed(qproc->rmb_base + RMB_PMI_CODE_LENGTH));
+ dev_err(qproc->dev, "RMB_MBA_STATUS: 0x%x\n", readl(qproc->rmb_base + RMB_MBA_STATUS));
+ writel_relaxed(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+ size += 0x124a98;
+ dev_err(qproc->dev, "RMB_PMI_CODE_LENGTH: %x\n", readl_relaxed(qproc->rmb_base + RMB_PMI_CODE_LENGTH));
+ dev_err(qproc->dev, "RMB_MBA_STATUS: 0x%x\n", readl(qproc->rmb_base + RMB_MBA_STATUS));
+ writel_relaxed(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+ size += 0x103588;
+ dev_err(qproc->dev, "RMB_PMI_CODE_LENGTH: %x\n", readl_relaxed(qproc->rmb_base + RMB_PMI_CODE_LENGTH));
+ dev_err(qproc->dev, "RMB_MBA_STATUS: 0x%x\n", readl(qproc->rmb_base + RMB_MBA_STATUS));
+ writel_relaxed(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+ size += 0xbf99b0;
+ dev_err(qproc->dev, "RMB_PMI_CODE_LENGTH: %x\n", readl_relaxed(qproc->rmb_base + RMB_PMI_CODE_LENGTH));
+ dev_err(qproc->dev, "RMB_MBA_STATUS: 0x%x\n", readl(qproc->rmb_base + RMB_MBA_STATUS));
+ writel_relaxed(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+ size += 0xa07a0;
+ dev_err(qproc->dev, "RMB_PMI_CODE_LENGTH: %x\n", readl_relaxed(qproc->rmb_base + RMB_PMI_CODE_LENGTH));
+ dev_err(qproc->dev, "RMB_MBA_STATUS: 0x%x\n", readl(qproc->rmb_base + RMB_MBA_STATUS));
+ writel_relaxed(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+ size += 0x12000;
+ dev_err(qproc->dev, "RMB_PMI_CODE_LENGTH: %x\n", readl_relaxed(qproc->rmb_base + RMB_PMI_CODE_LENGTH));
+ dev_err(qproc->dev, "RMB_MBA_STATUS: 0x%x\n", readl(qproc->rmb_base + RMB_MBA_STATUS));
+ writel_relaxed(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+ size += 0x01500;
+ dev_err(qproc->dev, "RMB_PMI_CODE_LENGTH: %x\n", readl_relaxed(qproc->rmb_base + RMB_PMI_CODE_LENGTH));
+ dev_err(qproc->dev, "RMB_MBA_STATUS: 0x%x\n", readl(qproc->rmb_base + RMB_MBA_STATUS));
+ writel_relaxed(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+ size += 0x792878;
+ dev_err(qproc->dev, "RMB_PMI_CODE_LENGTH: %x\n", readl_relaxed(qproc->rmb_base + RMB_PMI_CODE_LENGTH));
+ dev_err(qproc->dev, "RMB_MBA_STATUS: 0x%x\n", readl(qproc->rmb_base + RMB_MBA_STATUS));
+ writel_relaxed(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+ size += 0x256c44;
+ dev_err(qproc->dev, "RMB_PMI_CODE_LENGTH: %x\n", readl_relaxed(qproc->rmb_base + RMB_PMI_CODE_LENGTH));
+ dev_err(qproc->dev, "RMB_MBA_STATUS: 0x%x\n", readl(qproc->rmb_base + RMB_MBA_STATUS));
+ writel_relaxed(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+ size += 0x14fee4;
+ dev_err(qproc->dev, "RMB_PMI_CODE_LENGTH: %x\n", readl_relaxed(qproc->rmb_base + RMB_PMI_CODE_LENGTH));
+ dev_err(qproc->dev, "RMB_MBA_STATUS: 0x%x\n", readl(qproc->rmb_base + RMB_MBA_STATUS));
+ writel_relaxed(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+ size += 0x20d13c0;
+ dev_err(qproc->dev, "RMB_PMI_CODE_LENGTH: %x\n", readl_relaxed(qproc->rmb_base + RMB_PMI_CODE_LENGTH));
+ dev_err(qproc->dev, "RMB_MBA_STATUS: 0x%x\n", readl(qproc->rmb_base + RMB_MBA_STATUS));
+ writel_relaxed(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+ size += 0x2c4f0;
+ dev_err(qproc->dev, "RMB_PMI_CODE_LENGTH: %x\n", readl_relaxed(qproc->rmb_base + RMB_PMI_CODE_LENGTH));
+ dev_err(qproc->dev, "RMB_MBA_STATUS: 0x%x\n", readl(qproc->rmb_base + RMB_MBA_STATUS));
+ writel_relaxed(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+ size += 0x3a2a8;
+ dev_err(qproc->dev, "RMB_PMI_CODE_LENGTH: %x\n", readl_relaxed(qproc->rmb_base + RMB_PMI_CODE_LENGTH));
+ dev_err(qproc->dev, "RMB_MBA_STATUS: 0x%x\n", readl(qproc->rmb_base + RMB_MBA_STATUS));
+ writel_relaxed(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+ size += 0x3ca000;
+ dev_err(qproc->dev, "RMB_PMI_CODE_LENGTH: %x\n", readl_relaxed(qproc->rmb_base + RMB_PMI_CODE_LENGTH));
+ dev_err(qproc->dev, "RMB_MBA_STATUS: 0x%x\n", readl(qproc->rmb_base + RMB_MBA_STATUS));
+ writel_relaxed(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH);
+ dev_err(qproc->dev, "RMB_PMI_CODE_LENGTH: %x\n", readl_relaxed(qproc->rmb_base + RMB_PMI_CODE_LENGTH));
+ dev_err(qproc->dev, "RMB_MBA_STATUS: 0x%x\n", readl(qproc->rmb_base + RMB_MBA_STATUS));
+#endif
+
+ timeout = jiffies + 10 * HZ;
+ for (;;) {
+ msleep(1);
+
+ val = readl(qproc->rmb_base + RMB_MBA_STATUS);
+ if (val == STATUS_AUTH_COMPLETE || val < 0)
+ break;
+
+ if (time_after(jiffies, timeout))
+ break;
+ }
+ if (val == 0) {
+ dev_err(qproc->dev, "MBA authentication of headers timed out\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ } else if (val < 0) {
+ dev_err(qproc->dev, "MBA returned error %d for segments\n", val);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = 0;
+out:
+ return ret;
+}
+
+static int qproc_load_modem(struct qproc *qproc)
+{
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, "modem.mdt", qproc->dev);
+ if (ret < 0) {
+ dev_err(qproc->dev, "unable to load modem.mdt\n");
+ return ret;
+ }
+
+ ret = qproc_mba_load_mdt(qproc, fw);
+ if (ret)
+ goto out;
+
+ ret = qproc_load_segments(qproc, fw);
+ if (ret)
+ goto out;
+
+ ret = qproc_verify_segments(qproc, fw);
+ if (ret)
+ goto out;
+
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int qproc_start(struct rproc *rproc)
+{
+ struct qproc *qproc = (struct qproc *)rproc->priv;
+ unsigned long timeout;
+ int ret;
+ u32 val;
+
+ ret = regulator_enable(qproc->vdd);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable mss vdd\n");
+ return ret;
+ }
+
+ ret = reset_control_deassert(qproc->mss_restart);
+ if (ret) {
+ dev_err(qproc->dev, "failed to deassert mss restart\n");
+ goto disable_vdd;
+ }
+
+ ret = clk_prepare_enable(qproc->ahb_clk);
+ if (ret)
+ goto assert_reset;
+
+ ret = clk_prepare_enable(qproc->axi_clk);
+ if (ret)
+ goto disable_ahb_clk;
+
+ ret = clk_prepare_enable(qproc->rom_clk);
+ if (ret)
+ goto disable_axi_clk;
+
+ writel_relaxed(qproc->mba_da, qproc->rmb_base + RMB_MBA_IMAGE);
+
+ /* Ensure order of data/entry point and the following reset release */
+ wmb();
+
+ q6v5proc_reset(qproc);
+
+ timeout = jiffies + HZ;
+ for (;;) {
+ msleep(1);
+
+ val = readl(qproc->rmb_base + RMB_PBL_STATUS);
+ if (val || time_after(jiffies, timeout))
+ break;
+ }
+ if (val == 0) {
+ dev_err(qproc->dev, "PBL boot timed out\n");
+ ret = -ETIMEDOUT;
+ goto halt_axi_ports;
+ } else if (val != STATUS_PBL_SUCCESS) {
+ dev_err(qproc->dev, "PBL returned unexpected status %d\n", val);
+ ret = -EINVAL;
+ goto halt_axi_ports;
+ }
+
+ timeout = jiffies + HZ;
+ for (;;) {
+ msleep(1);
+
+ val = readl(qproc->rmb_base + RMB_MBA_STATUS);
+ if (val || time_after(jiffies, timeout))
+ break;
+ }
+ if (val == 0) {
+ dev_err(qproc->dev, "MBA boot timed out\n");
+ ret = -ETIMEDOUT;
+ goto halt_axi_ports;
+ } else if (val != STATUS_XPU_UNLOCKED && val != STATUS_XPU_UNLOCKED_SCRIBBLED) {
+ dev_err(qproc->dev, "MBA returned unexpected status %d\n", val);
+ ret = -EINVAL;
+ goto halt_axi_ports;
+ }
+
+ dev_info(qproc->dev, "MBA boot done\n");
+
+ ret = qproc_load_modem(qproc);
+ if (ret)
+ goto halt_axi_ports;
+
+ return 0;
+
+halt_axi_ports:
+ q6v5proc_halt_axi_port(qproc, qproc->halt_base + MSS_Q6_HALT_BASE);
+ q6v5proc_halt_axi_port(qproc, qproc->halt_base + MSS_MODEM_HALT_BASE);
+ q6v5proc_halt_axi_port(qproc, qproc->halt_base + MSS_NC_HALT_BASE);
+ clk_disable_unprepare(qproc->rom_clk);
+disable_axi_clk:
+ clk_disable_unprepare(qproc->axi_clk);
+disable_ahb_clk:
+ clk_disable_unprepare(qproc->ahb_clk);
+assert_reset:
+ reset_control_assert(qproc->mss_restart);
+disable_vdd:
+ regulator_disable(qproc->vdd);
+
+ dma_free_attrs(qproc->dev, qproc->mba_size, qproc->mba_va, qproc->mba_da, &qproc->mba_attrs);
+ return ret;
+}
+
+static int qproc_stop(struct rproc *rproc)
+{
+ struct qproc *qproc = (struct qproc *)rproc->priv;
+
+ q6v5proc_halt_axi_port(qproc, qproc->halt_base + MSS_Q6_HALT_BASE);
+ q6v5proc_halt_axi_port(qproc, qproc->halt_base + MSS_MODEM_HALT_BASE);
+ q6v5proc_halt_axi_port(qproc, qproc->halt_base + MSS_NC_HALT_BASE);
+
+ reset_control_assert(qproc->mss_restart);
+ clk_disable_unprepare(qproc->rom_clk);
+ clk_disable_unprepare(qproc->axi_clk);
+ clk_disable_unprepare(qproc->ahb_clk);
+ regulator_disable(qproc->vdd);
+
+ dma_free_attrs(qproc->dev, qproc->mba_size, qproc->mba_va, qproc->mba_da, &qproc->mba_attrs);
+
+ return 0;
+}
+
+static const struct rproc_ops qproc_ops = {
+ .start = qproc_start,
+ .stop = qproc_stop,
+};
+
+static irqreturn_t qproc_wdog_interrupt(int irq, void *dev)
+{
+ struct qproc *qproc = dev;
+
+ dev_err(qproc->dev, " WATCHDOG\n");
+
+ rproc_report_crash(qproc->rproc, RPROC_WATCHDOG);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qproc_fatal_interrupt(int irq, void *dev)
+{
+ struct qproc *qproc = dev;
+
+ dev_err(qproc->dev, " FATAL\n");
+
+ rproc_report_crash(qproc->rproc, RPROC_FATAL_ERROR);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qproc_ready_interrupt(int irq, void *dev)
+{
+ struct qproc *qproc = dev;
+
+ dev_err(qproc->dev, " READY\n");
+
+ complete(&qproc->start_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qproc_handover_interrupt(int irq, void *dev)
+{
+ struct qproc *qproc = dev;
+
+ dev_err(qproc->dev, " HANDOVER\n");
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qproc_stop_ack_interrupt(int irq, void *dev)
+{
+ struct qproc *qproc = dev;
+
+ dev_err(qproc->dev, " STOP-ACK\n");
+
+ return IRQ_HANDLED;
+}
+
+static ssize_t qproc_boot_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct qproc *qproc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = rproc_boot(qproc->rproc);
+ return ret ? : size;
+}
+
+static ssize_t qproc_shutdown_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct qproc *qproc = dev_get_drvdata(dev);
+
+ rproc_shutdown(qproc->rproc);
+ return size;
+}
+
+static const struct device_attribute qproc_attrs[] = {
+ __ATTR(boot, S_IWUSR, 0, qproc_boot_store),
+ __ATTR(shutdown, S_IWUSR, 0, qproc_shutdown_store),
+};
+
+static int qproc_init_mem(struct qproc *qproc, struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6_base");
+ qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(qproc->reg_base))
+ return PTR_ERR(qproc->reg_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "halt_base");
+ qproc->halt_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(qproc->halt_base))
+ return PTR_ERR(qproc->halt_base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb_base");
+ qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(qproc->rmb_base))
+ return PTR_ERR(qproc->rmb_base);
+
+ return 0;
+}
+
+static int qproc_init_clocks(struct qproc *qproc)
+{
+ qproc->ahb_clk = devm_clk_get(qproc->dev, "iface");
+ if (IS_ERR(qproc->ahb_clk))
+ return PTR_ERR(qproc->ahb_clk);
+
+ qproc->axi_clk = devm_clk_get(qproc->dev, "bus");
+ if (IS_ERR(qproc->axi_clk))
+ return PTR_ERR(qproc->axi_clk);
+
+ qproc->rom_clk = devm_clk_get(qproc->dev, "mem");
+ if (IS_ERR(qproc->rom_clk))
+ return PTR_ERR(qproc->rom_clk);
+
+ return 0;
+}
+
+static int qproc_init_regulators(struct qproc *qproc)
+{
+ int ret;
+ u32 uV;
+
+ qproc->vdd = devm_regulator_get(qproc->dev, "qcom,vdd");
+ if (IS_ERR(qproc->vdd))
+ return PTR_ERR(qproc->vdd);
+
+ regulator_set_voltage(qproc->vdd, VDD_MSS_UV, VDD_MSS_UV_MAX);
+ regulator_set_load(qproc->vdd, VDD_MSS_UA);
+
+ qproc->cx = devm_regulator_get(qproc->dev, "qcom,cx");
+ if (IS_ERR(qproc->cx))
+ return PTR_ERR(qproc->cx);
+
+ qproc->mx = devm_regulator_get(qproc->dev, "qcom,mx");
+ if (IS_ERR(qproc->mx))
+ return PTR_ERR(qproc->mx);
+
+ ret = of_property_read_u32(qproc->dev->of_node, "qcom,mx-uV", &uV);
+ if (!ret)
+ regulator_set_voltage(qproc->mx, uV, uV);
+
+ qproc->pll = devm_regulator_get(qproc->dev, "qcom,pll");
+ if (IS_ERR(qproc->pll))
+ return PTR_ERR(qproc->pll);
+
+ ret = of_property_read_u32(qproc->dev->of_node, "qcom,pll-uV", &uV);
+ if (!ret)
+ regulator_set_voltage(qproc->pll, uV, uV);
+
+ return 0;
+}
+
+static int qproc_init_reset(struct qproc *qproc)
+{
+ qproc->mss_restart = devm_reset_control_get(qproc->dev, NULL);
+ if (IS_ERR(qproc->mss_restart)) {
+ dev_err(qproc->dev, "failed to acquire mss restart\n");
+ return PTR_ERR(qproc->mss_restart);
+ }
+
+ return 0;
+}
+
+static int qproc_request_irq(struct qproc *qproc,
+ struct platform_device *pdev,
+ const char *name,
+ irq_handler_t thread_fn)
+{
+ int ret;
+ int irq;
+
+ irq = platform_get_irq_byname(pdev, name);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no %s IRQ defined\n", name);
+ return irq;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ NULL, thread_fn,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "qproc", qproc);
+ if (ret) {
+ dev_err(&pdev->dev, "request %s IRQ failed\n", name);
+ return ret;
+ }
+
+ /* Return the Linux IRQ number so callers can stash it */
+ return irq;
+}
+
+static int qproc_probe(struct platform_device *pdev)
+{
+ struct qproc *qproc;
+ struct rproc *rproc;
+ int ret;
+ int i;
+
+ rproc = rproc_alloc(&pdev->dev, pdev->name, &qproc_ops,
+ "mba.b00", sizeof(*qproc));
+ if (!rproc)
+ return -ENOMEM;
+
+ rproc->fw_ops = &qproc_fw_ops;
+
+ qproc = (struct qproc *)rproc->priv;
+ qproc->dev = &pdev->dev;
+ qproc->rproc = rproc;
+ platform_set_drvdata(pdev, qproc);
+
+ init_completion(&qproc->start_done);
+
+ ret = qproc_init_mem(qproc, pdev);
+ if (ret)
+ goto free_rproc;
+
+ ret = qproc_init_clocks(qproc);
+ if (ret)
+ goto free_rproc;
+
+ ret = qproc_init_regulators(qproc);
+ if (ret)
+ goto free_rproc;
+
+ ret = qproc_init_reset(qproc);
+ if (ret)
+ goto free_rproc;
+
+ ret = qproc_request_irq(qproc, pdev, "wdog", qproc_wdog_interrupt);
+ if (ret < 0)
+ goto free_rproc;
+ qproc->wdog_irq = ret;
+
+ ret = qproc_request_irq(qproc, pdev, "fatal", qproc_fatal_interrupt);
+ if (ret < 0)
+ goto free_rproc;
+ qproc->fatal_irq = ret;
+
+ ret = qproc_request_irq(qproc, pdev, "ready", qproc_ready_interrupt);
+ if (ret < 0)
+ goto free_rproc;
+ qproc->ready_irq = ret;
+
+ ret = qproc_request_irq(qproc, pdev, "handover", qproc_handover_interrupt);
+ if (ret < 0)
+ goto free_rproc;
+ qproc->handover_irq = ret;
+
+ ret = qproc_request_irq(qproc, pdev, "stop-ack", qproc_stop_ack_interrupt);
+ if (ret < 0)
+ goto free_rproc;
+ qproc->stop_ack_irq = ret;
+
+ qproc->stop_gpio = devm_gpiod_get(&pdev->dev, "qcom,stop", GPIOD_OUT_LOW);
+ if (IS_ERR(qproc->stop_gpio)) {
+ dev_err(&pdev->dev, "failed to acquire stop gpio\n");
+ ret = PTR_ERR(qproc->stop_gpio);
+ goto free_rproc;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(qproc_attrs); i++) {
+ ret = device_create_file(&pdev->dev, &qproc_attrs[i]);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to create sysfs file\n");
+ goto remove_device_files;
+ }
+ }
+
+ ret = rproc_add(rproc);
+ if (ret)
+ goto remove_device_files;
+
+ return 0;
+
+remove_device_files:
+ for (i--; i >= 0; i--)
+ device_remove_file(&pdev->dev, &qproc_attrs[i]);
+
+free_rproc:
+ rproc_put(rproc);
+
+ return ret;
+}
+
+static int qproc_remove(struct platform_device *pdev)
+{
+ struct qproc *qproc = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qproc_attrs); i++)
+ device_remove_file(&pdev->dev, &qproc_attrs[i]);
+
+ rproc_del(qproc->rproc);
+ rproc_put(qproc->rproc);
+
+ return 0;
+}
+
+static const struct of_device_id qproc_of_match[] = {
+ { .compatible = "qcom,q6v5-pil", },
+ { },
+};
+
+static struct platform_driver qproc_driver = {
+ .probe = qproc_probe,
+ .remove = qproc_remove,
+ .driver = {
+ .name = "qcom-q6v5-pil",
+ .of_match_table = qproc_of_match,
+ },
+};
+
+module_platform_driver(qproc_driver);
diff --git a/drivers/remoteproc/qcom_tz_pil.c b/drivers/remoteproc/qcom_tz_pil.c
new file mode 100644
index 000000000000..2f43b00fee13
--- /dev/null
+++ b/drivers/remoteproc/qcom_tz_pil.c
@@ -0,0 +1,719 @@
+/*
+ * Qualcomm Peripheral Image Loader
+ *
+ * Copyright (C) 2014 Sony Mobile Communications AB
+ * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/remoteproc.h>
+#include <linux/interrupt.h>
+#include <linux/memblock.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/elf.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/qcom_scm.h>
+#include <linux/soc/qcom/smem.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/soc/qcom/smd.h>
+
+#include "remoteproc_internal.h"
+
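+/* Peripheral Authentication Service (PAS) command IDs passed via SCM */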
+#define PAS_INIT_IMAGE_CMD 1
+#define PAS_MEM_SETUP_CMD 2
+#define PAS_AUTH_AND_RESET_CMD 5
+#define PAS_SHUTDOWN_CMD 6
+#define PAS_IS_SUPPORTED_CMD 7
+
+struct qproc {
+ struct device *dev;
+ struct rproc *rproc;
+
+ int pas_id;
+
+ int wdog_irq;
+ int fatal_irq;
+ int ready_irq;
+ int handover_irq;
+ int stop_ack_irq;
+
+ struct gpio_desc *stop_gpio;
+
+ const char *name;
+ struct regulator *pll;
+
+ unsigned proxy_clk_count;
+ struct clk *scm_core_clk;
+ struct clk *scm_iface_clk;
+ struct clk *scm_bus_clk;
+ struct clk *scm_src_clk;
+
+ struct clk **proxy_clks;
+
+ struct completion start_done;
+ struct completion stop_done;
+
+ unsigned crash_reason;
+ struct device_node *smd_edge_node;
+
+ phys_addr_t reloc_phys;
+ size_t reloc_size;
+};
+
+static int qproc_scm_clk_enable(struct qproc *qproc)
+{
+ int ret;
+
+ ret = clk_prepare_enable(qproc->scm_core_clk);
+ if (ret)
+ goto bail;
+ ret = clk_prepare_enable(qproc->scm_iface_clk);
+ if (ret)
+ goto disable_core;
+ ret = clk_prepare_enable(qproc->scm_bus_clk);
+ if (ret)
+ goto disable_iface;
+
+ ret = clk_prepare_enable(qproc->scm_src_clk);
+ if (ret)
+ goto disable_bus;
+
+ return 0;
+
+disable_bus:
+ clk_disable_unprepare(qproc->scm_bus_clk);
+disable_iface:
+ clk_disable_unprepare(qproc->scm_iface_clk);
+disable_core:
+ clk_disable_unprepare(qproc->scm_core_clk);
+bail:
+ return ret;
+}
+
+static void qproc_scm_clk_disable(struct qproc *qproc)
+{
+ clk_disable_unprepare(qproc->scm_core_clk);
+ clk_disable_unprepare(qproc->scm_iface_clk);
+ clk_disable_unprepare(qproc->scm_bus_clk);
+ clk_disable_unprepare(qproc->scm_src_clk);
+}
+
+/**
+ * struct mdt_hdr - Representation of <name>.mdt file in memory
+ * @hdr: ELF32 header
+ * @phdr: ELF32 program headers
+ */
+struct mdt_hdr {
+ struct elf32_hdr hdr;
+ struct elf32_phdr phdr[];
+};
+
+#define segment_is_hash(flag) (((flag) & (0x7 << 24)) == (0x2 << 24))
+
+static int segment_is_loadable(const struct elf32_phdr *p)
+{
+ return (p->p_type == PT_LOAD) &&
+ !segment_is_hash(p->p_flags) &&
+ p->p_memsz;
+}
+
+static bool segment_is_relocatable(const struct elf32_phdr *p)
+{
+ return !!(p->p_flags & BIT(27));
+}
+
+/**
+ * qproc_sanity_check() - sanity check mdt firmware header
+ * @rproc: the remote processor handle
+ * @fw: the mdt header firmware image
+ */
+static int qproc_sanity_check(struct rproc *rproc,
+ const struct firmware *fw)
+{
+ struct elf32_hdr *ehdr;
+ struct mdt_hdr *mdt;
+
+ if (!fw) {
+ dev_err(&rproc->dev, "failed to load %s\n", rproc->name);
+ return -EINVAL;
+ }
+
+ if (fw->size < sizeof(struct elf32_hdr)) {
+ dev_err(&rproc->dev, "image is too small\n");
+ return -EINVAL;
+ }
+
+ mdt = (struct mdt_hdr *)fw->data;
+ ehdr = &mdt->hdr;
+
+ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
+ dev_err(&rproc->dev, "image is corrupted (bad magic)\n");
+ return -EINVAL;
+ }
+
+ if (ehdr->e_phnum == 0) {
+ dev_err(&rproc->dev, "no loadable segments\n");
+ return -EINVAL;
+ }
+
+ if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
+ sizeof(struct elf32_hdr) > fw->size) {
+ dev_err(&rproc->dev, "firmware size is too small\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct resource_table * qproc_find_rsc_table(struct rproc *rproc,
+ const struct firmware *fw,
+ int *tablesz)
+{
+ static struct resource_table table = { .ver = 1, };
+
+ *tablesz = sizeof(table);
+ return &table;
+}
+
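+/*
+ * Map the segment's physical destination, copy in the firmware blob (if the
+ * segment has file data) and zero-fill any remaining memory.
+ */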
+static int qproc_load_segment(struct rproc *rproc, const char *fw_name,
+ const struct elf32_phdr *phdr, phys_addr_t paddr)
+{
+ const struct firmware *fw;
+ void *ptr;
+ int ret = 0;
+
+ ptr = ioremap_nocache(paddr, phdr->p_memsz);
+ if (!ptr) {
+ dev_err(&rproc->dev, "failed to ioremap segment area (%pa+0x%x)\n", &paddr, phdr->p_memsz);
+ return -EBUSY;
+ }
+
+ if (phdr->p_filesz) {
+ ret = request_firmware(&fw, fw_name, &rproc->dev);
+ if (ret) {
+ dev_err(&rproc->dev, "failed to load %s\n", fw_name);
+ goto out;
+ }
+
+ memcpy_toio(ptr, fw->data, fw->size);
+
+ release_firmware(fw);
+ }
+
+ if (phdr->p_memsz > phdr->p_filesz)
+ memset_io(ptr + phdr->p_filesz, 0,
+ phdr->p_memsz - phdr->p_filesz);
+
+out:
+ iounmap(ptr);
+ return ret;
+}
+
+static int qproc_load(struct rproc *rproc, const struct firmware *fw)
+{
+ const struct elf32_phdr *phdr;
+ const struct elf32_hdr *ehdr;
+ const struct mdt_hdr *mdt;
+ phys_addr_t min_addr = (phys_addr_t)ULLONG_MAX;
+ phys_addr_t max_addr = 0;
+ phys_addr_t diff_addr;
+ struct qproc *qproc = rproc->priv;
+ char *fw_name;
+ int ret;
+ int i;
+ size_t align = 0;
+ bool relocatable = false;
+ phys_addr_t paddr;
+
+ ret = qproc_scm_clk_enable(qproc);
+ if (ret)
+ return ret;
+
+ mdt = (struct mdt_hdr *)fw->data;
+ ehdr = &mdt->hdr;
+
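+ /*
+ * Compute the span (lowest load address up to the rounded-up end) covered
+ * by the loadable segments; this region is handed to TZ through
+ * qcom_scm_pas_mem_setup().
+ */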
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &mdt->phdr[i];
+
+ if (!segment_is_loadable(phdr))
+ continue;
+
+ if (phdr->p_paddr < min_addr) {
+ min_addr = phdr->p_paddr;
+
+ if (segment_is_relocatable(phdr)) {
+ align = phdr->p_align;
+ relocatable = true;
+ }
+ }
+
+ if (phdr->p_paddr + phdr->p_memsz > max_addr)
+ max_addr = round_up(phdr->p_paddr + phdr->p_memsz, SZ_4K);
+ }
+
+ ret = qcom_scm_pas_init_image(qproc->dev,
+ qproc->pas_id, fw->data, fw->size);
+ if (ret) {
+ dev_err(qproc->dev, "Invalid firmware metadata\n");
+ qproc_scm_clk_disable(qproc);
+ return -EINVAL;
+ }
+
+ diff_addr = max_addr - min_addr;
+ dev_dbg(qproc->dev, "pas_mem_setup %pa, %pa\n", &min_addr, &diff_addr);
+
+ ret = qcom_scm_pas_mem_setup(qproc->pas_id,
+ relocatable ? qproc->reloc_phys : min_addr, max_addr - min_addr);
+ if (ret) {
+ dev_err(qproc->dev, "unable to setup memory for image\n");
+ qproc_scm_clk_disable(qproc);
+ return -EINVAL;
+ }
+
+ fw_name = kzalloc(strlen(qproc->name) + 5, GFP_KERNEL);
+ if (!fw_name) {
+ qproc_scm_clk_disable(qproc);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < ehdr->e_phnum; i++) {
+ phdr = &mdt->phdr[i];
+
+ if (!segment_is_loadable(phdr))
+ continue;
+
+ paddr = relocatable ?
+ (phdr->p_paddr - min_addr + qproc->reloc_phys) :
+ phdr->p_paddr;
+ sprintf(fw_name, "%s.b%02d", qproc->name, i);
+ ret = qproc_load_segment(rproc, fw_name, phdr, paddr);
+ if (ret)
+ break;
+ }
+
+ kfree(fw_name);
+
+ qproc_scm_clk_disable(qproc);
+
+ return ret;
+}
+
+static const struct rproc_fw_ops qproc_fw_ops = {
+ .find_rsc_table = qproc_find_rsc_table,
+ .load = qproc_load,
+ .sanity_check = qproc_sanity_check,
+};
+
+static int qproc_start(struct rproc *rproc)
+{
+ struct qproc *qproc = (struct qproc *)rproc->priv;
+ int ret;
+
+ ret = regulator_enable(qproc->pll);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable pll supply\n");
+ return ret;
+ }
+
+ ret = qproc_scm_clk_enable(qproc);
+ if (ret)
+ goto disable_regulator;
+
+ ret = qcom_scm_pas_auth_and_reset(qproc->pas_id);
+ if (ret) {
+ dev_err(qproc->dev,
+ "failed to authenticate image and release reset\n");
+ goto unroll_clocks;
+ }
+
+ /* if ready irq not provided skip waiting */
+ if (qproc->ready_irq < 0)
+ goto done;
+
+ ret = wait_for_completion_timeout(&qproc->start_done, msecs_to_jiffies(10000));
+ if (ret == 0) {
+ dev_err(qproc->dev, "start timed out\n");
+
+ qcom_scm_pas_shutdown(qproc->pas_id);
+ goto unroll_clocks;
+ }
+
+done:
+ dev_info(qproc->dev, "start successful\n");
+
+ return 0;
+
+unroll_clocks:
+ qproc_scm_clk_disable(qproc);
+
+disable_regulator:
+ regulator_disable(qproc->pll);
+
+ return ret;
+}
+
+static int qproc_stop(struct rproc *rproc)
+{
+ return 0;
+}
+
+static const struct rproc_ops qproc_ops = {
+ .start = qproc_start,
+ .stop = qproc_stop,
+};
+
+static irqreturn_t qproc_wdog_interrupt(int irq, void *dev)
+{
+ struct qproc *qproc = dev;
+
+ rproc_report_crash(qproc->rproc, RPROC_WATCHDOG);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qproc_fatal_interrupt(int irq, void *dev)
+{
+ struct qproc *qproc = dev;
+ size_t len;
+ char *msg;
+ int ret;
+
+ ret = qcom_smem_get(-1, qproc->crash_reason, (void**)&msg, &len);
+ if (!ret && len > 0 && msg[0])
+ dev_err(qproc->dev, "fatal error received: %s\n", msg);
+
+ rproc_report_crash(qproc->rproc, RPROC_FATAL_ERROR);
+
+ if (!ret)
+ msg[0] = '\0';
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qproc_ready_interrupt(int irq, void *dev)
+{
+ struct qproc *qproc = dev;
+
+ complete(&qproc->start_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qproc_handover_interrupt(int irq, void *dev)
+{
+ struct qproc *qproc = dev;
+
+ qproc_scm_clk_disable(qproc);
+ regulator_disable(qproc->pll);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qproc_stop_ack_interrupt(int irq, void *dev)
+{
+ struct qproc *qproc = dev;
+
+ complete(&qproc->stop_done);
+ return IRQ_HANDLED;
+}
+
+static ssize_t qproc_boot_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct qproc *qproc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = rproc_boot(qproc->rproc);
+ return ret ? : size;
+}
+
+static ssize_t qproc_shutdown_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct qproc *qproc = dev_get_drvdata(dev);
+
+ rproc_shutdown(qproc->rproc);
+ return size;
+}
+
+static const struct device_attribute qproc_attrs[] = {
+ __ATTR(boot, S_IWUSR, 0, qproc_boot_store),
+ __ATTR(shutdown, S_IWUSR, 0, qproc_shutdown_store),
+};
+
+static int qproc_init_pas(struct qproc *qproc)
+{
+ char *key;
+ int ret;
+
+ key = "qcom,pas-id";
+ ret = of_property_read_u32(qproc->dev->of_node, key, &qproc->pas_id);
+ if (ret) {
+ dev_err(qproc->dev, "Missing or incorrect %s\n", key);
+ return -EINVAL;
+ }
+
+ if (!qcom_scm_pas_supported(qproc->pas_id)) {
+ dev_err(qproc->dev, "PAS is not available for %d\n", qproc->pas_id);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int qproc_init_clocks(struct qproc *qproc)
+{
+ long rate;
+ int ret;
+
+ qproc->scm_core_clk = devm_clk_get(qproc->dev, "scm_core_clk");
+ if (IS_ERR(qproc->scm_core_clk)) {
+ if (PTR_ERR(qproc->scm_core_clk) != -EPROBE_DEFER)
+ dev_err(qproc->dev, "failed to acquire scm_core_clk\n");
+ return PTR_ERR(qproc->scm_core_clk);
+ }
+
+ qproc->scm_iface_clk = devm_clk_get(qproc->dev, "scm_iface_clk");
+ if (IS_ERR(qproc->scm_iface_clk)) {
+ if (PTR_ERR(qproc->scm_iface_clk) != -EPROBE_DEFER)
+ dev_err(qproc->dev, "failed to acquire scm_iface_clk\n");
+ return PTR_ERR(qproc->scm_iface_clk);
+ }
+
+ qproc->scm_bus_clk = devm_clk_get(qproc->dev, "scm_bus_clk");
+ if (IS_ERR(qproc->scm_bus_clk)) {
+ if (PTR_ERR(qproc->scm_bus_clk) != -EPROBE_DEFER)
+ dev_err(qproc->dev, "failed to acquire scm_bus_clk\n");
+ return PTR_ERR(qproc->scm_bus_clk);
+ }
+
+ qproc->scm_src_clk = devm_clk_get(qproc->dev, "scm_src_clk");
+ if (IS_ERR(qproc->scm_src_clk)) {
+ if (PTR_ERR(qproc->scm_src_clk) != -EPROBE_DEFER)
+ dev_err(qproc->dev, "failed to acquire scm_src_clk\n");
+ return PTR_ERR(qproc->scm_src_clk);
+ }
+
+ ret = clk_set_rate(qproc->scm_core_clk,
+clk_round_rate(qproc->scm_core_clk, 19200000));
+ ret = clk_set_rate(qproc->scm_bus_clk,
+clk_round_rate(qproc->scm_bus_clk, 19200000));
+ ret = clk_set_rate(qproc->scm_iface_clk,
+clk_round_rate(qproc->scm_iface_clk, 19200000));
+ rate = clk_round_rate(qproc->scm_core_clk, 80000000);
+ ret = clk_set_rate(qproc->scm_src_clk, rate);
+ if (ret) {
+ dev_err(qproc->dev, "failed to set rate of scm_core_clk\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qproc_init_regulators(struct qproc *qproc)
+{
+ int ret;
+ u32 uA;
+ u32 uV;
+
+ qproc->pll = devm_regulator_get(qproc->dev, "qcom,pll");
+ if (IS_ERR(qproc->pll)) {
+ if (PTR_ERR(qproc->pll) != -EPROBE_DEFER)
+ dev_err(qproc->dev, "failed to aquire regulator\n");
+ return PTR_ERR(qproc->pll);
+ }
+
+ ret = of_property_read_u32(qproc->dev->of_node, "qcom,pll-uV", &uV);
+ if (ret)
+ dev_warn(qproc->dev, "failed to read qcom,pll_uV, skipping\n");
+ else
+ regulator_set_voltage(qproc->pll, uV, uV);
+
+ ret = of_property_read_u32(qproc->dev->of_node, "qcom,pll-uA", &uA);
+ if (ret)
+ dev_warn(qproc->dev, "failed to read qcom,pll_uA, skipping\n");
+ else
+ regulator_set_load(qproc->pll, uA);
+
+ return 0;
+}
+
+static int qproc_request_irq(struct qproc *qproc,
+ struct platform_device *pdev,
+ const char *name,
+ irq_handler_t thread_fn)
+{
+ int ret;
+ int irq;
+
+ irq = platform_get_irq_byname(pdev, name);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no %s IRQ defined\n", name);
+ return irq;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ NULL, thread_fn,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "qproc", qproc);
+ if (ret) {
+ dev_err(&pdev->dev, "request %s IRQ failed\n", name);
+ return ret;
+ }
+
+ /* Return the Linux IRQ number so callers can stash it */
+ return irq;
+}
+
+static int qproc_probe(struct platform_device *pdev)
+{
+ struct qproc *qproc;
+ struct rproc *rproc;
+ char *fw_name;
+ const char *name;
+ const char *key;
+ int ret;
+ int i;
+ struct device_node *np;
+ struct resource r;
+
+
+ key = "qcom,firmware-name";
+ ret = of_property_read_string(pdev->dev.of_node, key, &name);
+ if (ret) {
+ dev_err(&pdev->dev, "missing or incorrect %s\n", key);
+ return -EINVAL;
+ }
+
+ fw_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s.mdt", name);
+ if (!fw_name)
+ return -ENOMEM;
+
+ rproc = rproc_alloc(&pdev->dev, pdev->name, &qproc_ops,
+ fw_name, sizeof(*qproc));
+ if (!rproc) {
+ dev_err(&pdev->dev, "unable to allocate remoteproc\n");
+ return -ENOMEM;
+ }
+
+ rproc->fw_ops = &qproc_fw_ops;
+
+ qproc = (struct qproc *)rproc->priv;
+ qproc->dev = &pdev->dev;
+ qproc->rproc = rproc;
+ qproc->name = name;
+ platform_set_drvdata(pdev, qproc);
+
+ init_completion(&qproc->start_done);
+ init_completion(&qproc->stop_done);
+
+ ret = of_property_read_u32(pdev->dev.of_node, "qcom,crash-reason",
+ &qproc->crash_reason);
+ if (ret)
+ dev_info(&pdev->dev, "no crash reason id\n");
+
+ qproc->smd_edge_node = of_parse_phandle(pdev->dev.of_node,
+ "qcom,smd-edges", 0);
+
+ ret = qproc_init_pas(qproc);
+ if (ret)
+ goto free_rproc;
+
+ ret = qproc_init_clocks(qproc);
+ if (ret)
+ goto free_rproc;
+
+ ret = qproc_init_regulators(qproc);
+ if (ret)
+ goto free_rproc;
+
+ ret = qproc_request_irq(qproc, pdev, "wdog", qproc_wdog_interrupt);
+ qproc->wdog_irq = ret;
+
+ ret = qproc_request_irq(qproc, pdev, "fatal", qproc_fatal_interrupt);
+ qproc->fatal_irq = ret;
+
+ ret = qproc_request_irq(qproc, pdev, "ready", qproc_ready_interrupt);
+ qproc->ready_irq = ret;
+
+ ret = qproc_request_irq(qproc, pdev, "handover", qproc_handover_interrupt);
+ qproc->handover_irq = ret;
+
+ ret = qproc_request_irq(qproc, pdev, "stop-ack", qproc_stop_ack_interrupt);
+ qproc->stop_ack_irq = ret;
+
+ for (i = 0; i < ARRAY_SIZE(qproc_attrs); i++) {
+ ret = device_create_file(&pdev->dev, &qproc_attrs[i]);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to create sysfs file\n");
+ goto remove_device_files;
+ }
+ }
+
+ np = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
+ if (!np) {
+ dev_err(&pdev->dev, "No memory region specified\n");
+ } else {
+
+ ret = of_address_to_resource(np, 0, &r);
+ of_node_put(np);
+ if (ret)
+ goto free_rproc;
+
+ qproc->reloc_phys = r.start;
+ qproc->reloc_size = resource_size(&r);
+
+ dev_info(&pdev->dev, "Found relocation area %lu@%pad\n",
+ qproc->reloc_size, &qproc->reloc_phys);
+ }
+
+ ret = rproc_add(rproc);
+ if (ret)
+ goto remove_device_files;
+
+ return 0;
+
+remove_device_files:
+ for (i--; i >= 0; i--)
+ device_remove_file(&pdev->dev, &qproc_attrs[i]);
+
+free_rproc:
+ rproc_put(rproc);
+
+ return ret;
+}
+
+static int qproc_remove(struct platform_device *pdev)
+{
+ struct qproc *qproc = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qproc_attrs); i++)
+ device_remove_file(&pdev->dev, &qproc_attrs[i]);
+
+ rproc_del(qproc->rproc);
+ rproc_put(qproc->rproc);
+
+ return 0;
+}
+
+static const struct of_device_id qproc_of_match[] = {
+ { .compatible = "qcom,tz-pil", },
+ { },
+};
+
+static struct platform_driver qproc_driver = {
+ .probe = qproc_probe,
+ .remove = qproc_remove,
+ .driver = {
+ .name = "qcom-tz-pil",
+ .of_match_table = qproc_of_match,
+ },
+};
+
+module_platform_driver(qproc_driver);
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 8b3130f22b42..39656aa723b6 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -57,6 +57,8 @@ static DEFINE_IDA(rproc_dev_index);
static const char * const rproc_crash_names[] = {
[RPROC_MMUFAULT] = "mmufault",
+ [RPROC_WATCHDOG] = "watchdog",
+ [RPROC_FATAL_ERROR] = "fatal error",
};
/* translate rproc_crash_type to string */
@@ -789,6 +791,8 @@ static void rproc_resource_cleanup(struct rproc *rproc)
}
}
+static int __rproc_fw_config_virtio(struct rproc *rproc, const struct firmware *fw);
+
/*
* take a firmware and boot a remote processor with it.
*/
@@ -799,13 +803,16 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
struct resource_table *table, *loaded_table;
int ret, tablesz;
- if (!rproc->table_ptr)
- return -ENOMEM;
-
ret = rproc_fw_sanity_check(rproc, fw);
if (ret)
return ret;
+ if (!rproc->table_ptr) {
+ ret = __rproc_fw_config_virtio(rproc, fw);
+ if (ret)
+ return ret;
+ }
+
dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);
/*
@@ -854,12 +861,8 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
* copy this information to device memory.
*/
loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
- if (!loaded_table) {
- ret = -EINVAL;
- goto clean_up;
- }
-
- memcpy(loaded_table, rproc->cached_table, tablesz);
+ if (loaded_table)
+ memcpy(loaded_table, rproc->cached_table, tablesz);
/* power up the remote processor */
ret = rproc->ops->start(rproc);
@@ -895,19 +898,15 @@ clean_up:
* to unregister the device. one other option is just to use kref here,
* that might be cleaner).
*/
-static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
+static int __rproc_fw_config_virtio(struct rproc *rproc, const struct firmware *fw)
{
- struct rproc *rproc = context;
struct resource_table *table;
int ret, tablesz;
- if (rproc_fw_sanity_check(rproc, fw) < 0)
- goto out;
-
/* look for the resource table */
table = rproc_find_rsc_table(rproc, fw, &tablesz);
if (!table)
- goto out;
+ return -EINVAL;
rproc->table_csum = crc32(0, table, tablesz);
@@ -919,7 +918,7 @@ static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
*/
rproc->cached_table = kmemdup(table, tablesz, GFP_KERNEL);
if (!rproc->cached_table)
- goto out;
+ return -ENOMEM;
rproc->table_ptr = rproc->cached_table;
@@ -928,12 +927,21 @@ static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
ret = rproc_handle_resources(rproc, tablesz,
rproc_count_vrings_handler);
if (ret)
- goto out;
+ return ret;
/* look for virtio devices and register them */
ret = rproc_handle_resources(rproc, tablesz, rproc_vdev_handler);
-out:
+ return ret;
+}
+
+static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
+{
+ struct rproc *rproc = context;
+
+ if (rproc_fw_sanity_check(rproc, fw) >= 0)
+ __rproc_fw_config_virtio(rproc, fw);
+
release_firmware(fw);
/* allow rproc_del() contexts, if any, to proceed */
complete_all(&rproc->firmware_loading_complete);
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 10a93d168e0e..41b668b0d836 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1,3 +1,4 @@
+obj-$(CONFIG_ARM64) += cpu_ops.o
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_PM) += spm.o
obj-$(CONFIG_QCOM_SMD) += smd.o
diff --git a/drivers/soc/qcom/cpu_ops.c b/drivers/soc/qcom/cpu_ops.c
new file mode 100644
index 000000000000..d831cb071d3d
--- /dev/null
+++ b/drivers/soc/qcom/cpu_ops.c
@@ -0,0 +1,343 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/* MSM ARMv8 CPU Operations
+ * Based on arch/arm64/kernel/smp_spin_table.c
+ */
+
+#include <linux/bitops.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/smp.h>
+#include <linux/qcom_scm.h>
+
+#include <asm/barrier.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu_ops.h>
+#include <asm/cputype.h>
+#include <asm/smp_plat.h>
+
+static DEFINE_RAW_SPINLOCK(boot_lock);
+
+DEFINE_PER_CPU(int, cold_boot_done);
+
+#if 0
+static int cold_boot_flags[] = {
+ 0,
+ QCOM_SCM_FLAG_COLDBOOT_CPU1,
+ QCOM_SCM_FLAG_COLDBOOT_CPU2,
+ QCOM_SCM_FLAG_COLDBOOT_CPU3,
+};
+#endif
+
+/* CPU power domain register offsets */
+#define CPU_PWR_CTL 0x4
+#define CPU_PWR_GATE_CTL 0x14
+#define LDO_BHS_PWR_CTL 0x28
+
+/* L2 power domain register offsets */
+#define L2_PWR_CTL_OVERRIDE 0xc
+#define L2_PWR_CTL 0x14
+#define L2_PWR_STATUS 0x18
+#define L2_CORE_CBCR 0x58
+#define L1_RST_DIS 0x284
+
+#define L2_SPM_STS 0xc
+#define L2_VREG_CTL 0x1c
+
+#define SCM_IO_READ 1
+#define SCM_IO_WRITE 2
+
+/*
+ * struct msm_l2ccc_of_info - data for an L2 cache clock controller
+ * @compat: compatible string for the L2 cache clock controller
+ * @l2_power_on: L2 cache power-on routine
+ * @l2_power_on_mask: power-on status bits checked against L2_PWR_STATUS
+ */
+struct msm_l2ccc_of_info {
+ const char *compat;
+ int (*l2_power_on) (struct device_node *dn, u32 l2_mask, int cpu);
+ u32 l2_power_on_mask;
+};
+
+
+static int power_on_l2_msm8916(struct device_node *l2ccc_node, u32 pon_mask,
+ int cpu)
+{
+ u32 pon_status;
+ void __iomem *l2_base;
+
+ l2_base = of_iomap(l2ccc_node, 0);
+ if (!l2_base)
+ return -ENOMEM;
+
+ /* Skip power-on sequence if l2 cache is already powered up */
+ pon_status = (__raw_readl(l2_base + L2_PWR_STATUS) & pon_mask)
+ == pon_mask;
+ if (pon_status) {
+ iounmap(l2_base);
+ return 0;
+ }
+
+ /* Close L2/SCU Logic GDHS and power up the cache */
+ writel_relaxed(0x10D700, l2_base + L2_PWR_CTL);
+
+ /* Assert PRESETDBGn */
+ writel_relaxed(0x400000, l2_base + L2_PWR_CTL_OVERRIDE);
+ mb();
+ udelay(2);
+
+ /* De-assert L2/SCU memory Clamp */
+ writel_relaxed(0x101700, l2_base + L2_PWR_CTL);
+
+ /* Wakeup L2/SCU RAMs by deasserting sleep signals */
+ writel_relaxed(0x101703, l2_base + L2_PWR_CTL);
+ mb();
+ udelay(2);
+
+ /* Enable clocks via SW_CLK_EN */
+ writel_relaxed(0x01, l2_base + L2_CORE_CBCR);
+
+ /* De-assert L2/SCU logic clamp */
+ writel_relaxed(0x101603, l2_base + L2_PWR_CTL);
+ mb();
+ udelay(2);
+
+ /* De-assert PRESETDBGn */
+ writel_relaxed(0x0, l2_base + L2_PWR_CTL_OVERRIDE);
+
+ /* De-assert L2/SCU Logic reset */
+ writel_relaxed(0x100203, l2_base + L2_PWR_CTL);
+ mb();
+ udelay(54);
+
+ /* Turn on the PMIC_APC */
+ writel_relaxed(0x10100203, l2_base + L2_PWR_CTL);
+
+ /* Set H/W clock control for the cpu CBC block */
+ writel_relaxed(0x03, l2_base + L2_CORE_CBCR);
+ mb();
+ iounmap(l2_base);
+
+ return 0;
+}
+
+static const struct msm_l2ccc_of_info l2ccc_info[] = {
+ {
+ .compat = "qcom,8916-l2ccc",
+ .l2_power_on = power_on_l2_msm8916,
+ .l2_power_on_mask = BIT(9),
+ },
+};
+
+static int power_on_l2_cache(struct device_node *l2ccc_node, int cpu)
+{
+ int ret, i;
+ const char *compat;
+
+ ret = of_property_read_string(l2ccc_node, "compatible", &compat);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(l2ccc_info); i++) {
+ const struct msm_l2ccc_of_info *ptr = &l2ccc_info[i];
+
+ if (!of_compat_cmp(ptr->compat, compat, strlen(compat)))
+ return ptr->l2_power_on(l2ccc_node,
+ ptr->l2_power_on_mask, cpu);
+ }
+ pr_err("Compat string not found for L2CCC node\n");
+ return -EIO;
+}
+
+static int msm_unclamp_secondary_arm_cpu(unsigned int cpu)
+{
+
+ int ret = 0;
+ struct device_node *cpu_node, *acc_node, *l2_node, *l2ccc_node;
+ void __iomem *reg;
+
+ cpu_node = of_get_cpu_node(cpu, NULL);
+ if (!cpu_node)
+ return -ENODEV;
+
+ acc_node = of_parse_phandle(cpu_node, "qcom,acc", 0);
+ if (!acc_node) {
+ ret = -ENODEV;
+ goto out_acc;
+ }
+
+ l2_node = of_parse_phandle(cpu_node, "next-level-cache", 0);
+ if (!l2_node) {
+ ret = -ENODEV;
+ goto out_l2;
+ }
+
+ l2ccc_node = of_parse_phandle(l2_node, "power-domain", 0);
+ if (!l2ccc_node) {
+ ret = -ENODEV;
+ goto out_l2;
+ }
+
+ /* Ensure L2-cache of the CPU is powered on before
+ * unclamping cpu power rails.
+ */
+ ret = power_on_l2_cache(l2ccc_node, cpu);
+ if (ret) {
+ pr_err("L2 cache power up failed for CPU%d\n", cpu);
+ goto out_l2ccc;
+ }
+
+ reg = of_iomap(acc_node, 0);
+ if (!reg) {
+ ret = -ENOMEM;
+ goto out_acc_reg;
+ }
+
+ /* Assert Reset on cpu-n */
+ writel_relaxed(0x00000033, reg + CPU_PWR_CTL);
+ mb();
+
+ /* Program skew to 16 XO clock cycles */
+ writel_relaxed(0x10000001, reg + CPU_PWR_GATE_CTL);
+ mb();
+ udelay(2);
+
+ /* De-assert coremem clamp */
+ writel_relaxed(0x00000031, reg + CPU_PWR_CTL);
+ mb();
+
+ /* Close coremem array gdhs */
+ writel_relaxed(0x00000039, reg + CPU_PWR_CTL);
+ mb();
+ udelay(2);
+
+ /* De-assert cpu-n clamp */
+ writel_relaxed(0x00020038, reg + CPU_PWR_CTL);
+ mb();
+ udelay(2);
+
+ /* De-assert cpu-n reset */
+ writel_relaxed(0x00020008, reg + CPU_PWR_CTL);
+ mb();
+
+ /* Assert PWRDUP signal on core-n */
+ writel_relaxed(0x00020088, reg + CPU_PWR_CTL);
+ mb();
+
+ /* Secondary CPU-N is now alive */
+ iounmap(reg);
+out_acc_reg:
+ of_node_put(l2ccc_node);
+out_l2ccc:
+ of_node_put(l2_node);
+out_l2:
+ of_node_put(acc_node);
+out_acc:
+ of_node_put(cpu_node);
+
+ return ret;
+}
+
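+/*
+ * Update the pen release value and flush it to memory so that the secondary
+ * CPU, which may still have its caches off, observes the new value.
+ */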
+static void write_pen_release(u64 val)
+{
+ void *start = (void *)&secondary_holding_pen_release;
+ unsigned long size = sizeof(secondary_holding_pen_release);
+
+ secondary_holding_pen_release = val;
+ smp_wmb();
+ __flush_dcache_area(start, size);
+}
+
+static int secondary_pen_release(unsigned int cpu)
+{
+ unsigned long timeout;
+
+ /*
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+ raw_spin_lock(&boot_lock);
+ write_pen_release(cpu_logical_map(cpu));
+
+ timeout = jiffies + (1 * HZ);
+ while (time_before(jiffies, timeout)) {
+ if (secondary_holding_pen_release == INVALID_HWID)
+ break;
+ udelay(10);
+ }
+ raw_spin_unlock(&boot_lock);
+
+ return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
+}
+
+static int __init msm_cpu_init(struct device_node *dn, unsigned int cpu)
+{
+ /* Mark CPU0 cold boot flag as done */
+ if (!cpu && !per_cpu(cold_boot_done, cpu))
+ per_cpu(cold_boot_done, cpu) = true;
+
+ return 0;
+}
+
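+/* Point the secondary CPU's cold boot entry at the holding pen via SCM. */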
+static int __init msm_cpu_prepare(unsigned int cpu)
+{
+ const cpumask_t *mask = cpumask_of(cpu);
+
+ if (qcom_scm_set_cold_boot_addr(secondary_holding_pen, mask)) {
+ pr_warn("CPU%d:Failed to set boot address\n", cpu);
+ return -ENOSYS;
+ }
+
+ return 0;
+}
+
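+/*
+ * Power up the CPU (and its L2) on the first cold boot, then release the
+ * CPU from the holding pen.
+ */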
+static int msm_cpu_boot(unsigned int cpu)
+{
+ int ret = 0;
+
+ if (per_cpu(cold_boot_done, cpu) == false) {
+ ret = msm_unclamp_secondary_arm_cpu(cpu);
+ if (ret)
+ return ret;
+ per_cpu(cold_boot_done, cpu) = true;
+ }
+ return secondary_pen_release(cpu);
+}
+
+void msm_cpu_postboot(void)
+{
+ /*
+ * Let the primary processor know we're out of the pen.
+ */
+ write_pen_release(INVALID_HWID);
+
+ /*
+ * Synchronise with the boot thread.
+ */
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
+}
+
+static const struct cpu_operations msm_cortex_a_ops = {
+ .name = "qcom,arm-cortex-acc",
+ .cpu_init = msm_cpu_init,
+ .cpu_prepare = msm_cpu_prepare,
+ .cpu_boot = msm_cpu_boot,
+ .cpu_postboot = msm_cpu_postboot,
+};
+CPU_METHOD_OF_DECLARE(msm_cortex_a_ops, &msm_cortex_a_ops);
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
index 1392ccf14a20..f58d02e51bb8 100644
--- a/drivers/soc/qcom/smd-rpm.c
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -121,7 +121,7 @@ int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
pkt.hdr.length = sizeof(struct qcom_rpm_request) + count;
pkt.req.msg_id = msg_id++;
- pkt.req.flags = BIT(state);
+ pkt.req.flags = state;
pkt.req.type = type;
pkt.req.id = id;
pkt.req.data_len = count;
@@ -212,6 +212,7 @@ static void qcom_smd_rpm_remove(struct qcom_smd_device *sdev)
static const struct of_device_id qcom_smd_rpm_of_match[] = {
{ .compatible = "qcom,rpm-msm8974" },
+ { .compatible = "qcom,rpm-msm8916" },
{}
};
MODULE_DEVICE_TABLE(of, qcom_smd_rpm_of_match);
diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c
index a6155c917d52..b51332e0c54f 100644
--- a/drivers/soc/qcom/smd.c
+++ b/drivers/soc/qcom/smd.c
@@ -104,9 +104,9 @@ static const struct {
* @channels: list of all channels detected on this edge
* @channels_lock: guard for modifications of @channels
* @allocated: array of bitmaps representing already allocated channels
- * @need_rescan: flag that the @work needs to scan smem for new channels
* @smem_available: last available amount of smem triggering a channel scan
- * @work: work item for edge house keeping
+ * @scan_work: work item for discovering new channels
+ * @state_work: work item for edge state changes
*/
struct qcom_smd_edge {
struct qcom_smd *smd;
@@ -121,29 +121,19 @@ struct qcom_smd_edge {
int ipc_bit;
struct list_head channels;
- spinlock_t channels_lock;
+ rwlock_t channels_lock;
DECLARE_BITMAP(allocated[SMD_ALLOC_TBL_COUNT], SMD_ALLOC_TBL_SIZE);
- bool need_rescan;
unsigned smem_available;
- struct work_struct work;
-};
+ wait_queue_head_t new_channel_event;
-/*
- * SMD channel states.
- */
-enum smd_channel_state {
- SMD_CHANNEL_CLOSED,
- SMD_CHANNEL_OPENING,
- SMD_CHANNEL_OPENED,
- SMD_CHANNEL_FLUSHING,
- SMD_CHANNEL_CLOSING,
- SMD_CHANNEL_RESET,
- SMD_CHANNEL_RESET_OPENING
+ struct work_struct scan_work;
+ struct work_struct state_work;
};
+
/**
* struct qcom_smd_channel - smd channel struct
* @edge: qcom_smd_edge this channel is living on
@@ -166,38 +156,6 @@ enum smd_channel_state {
* @pkt_size: size of the currently handled packet
* @list: lite entry for @channels in qcom_smd_edge
*/
-struct qcom_smd_channel {
- struct qcom_smd_edge *edge;
-
- struct qcom_smd_device *qsdev;
-
- char *name;
- enum smd_channel_state state;
- enum smd_channel_state remote_state;
-
- struct smd_channel_info *tx_info;
- struct smd_channel_info *rx_info;
-
- struct smd_channel_info_word *tx_info_word;
- struct smd_channel_info_word *rx_info_word;
-
- struct mutex tx_lock;
- wait_queue_head_t fblockread_event;
-
- void *tx_fifo;
- void *rx_fifo;
- int fifo_size;
-
- void *bounce_buffer;
- int (*cb)(struct qcom_smd_device *, const void *, size_t);
-
- spinlock_t recv_lock;
-
- int pkt_size;
-
- struct list_head list;
-};
-
/**
* struct qcom_smd - smd struct
* @dev: device struct
@@ -323,6 +281,19 @@ static void qcom_smd_channel_reset(struct qcom_smd_channel *channel)
}
/*
+ * Set the callback for a channel, with appropriate locking
+ */
+static void qcom_smd_channel_set_callback(struct qcom_smd_channel *channel,
+ qcom_smd_cb_t cb)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&channel->recv_lock, flags);
+ channel->cb = cb;
+ spin_unlock_irqrestore(&channel->recv_lock, flags);
+}
+
+/*
* Calculate the amount of data available in the rx fifo
*/
static size_t qcom_smd_channel_get_rx_avail(struct qcom_smd_channel *channel)
@@ -362,43 +333,37 @@ static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel,
}
/*
- * Copy count bytes of data using 32bit accesses, if that's required.
+ * Copy count bytes of data from memory to device memory using 32-bit accesses
*/
-static void smd_copy_to_fifo(void __iomem *_dst,
- const void *_src,
- size_t count,
- bool word_aligned)
+static void smd_copy_to_fifo(void __iomem *_dst, const void *_src, size_t count, bool word_aligned)
{
- u32 *dst = (u32 *)_dst;
- u32 *src = (u32 *)_src;
-
- if (word_aligned) {
- count /= sizeof(u32);
- while (count--)
- writel_relaxed(*src++, dst++);
- } else {
- memcpy_toio(_dst, _src, count);
- }
+ u32 *dst = (u32 *)_dst;
+ u32 *src = (u32 *)_src;
+
+ if (word_aligned) {
+ count /= sizeof(u32);
+ while (count--)
+ writel_relaxed(*src++, dst++);
+ } else {
+ memcpy_toio(_dst, _src, count);
+ }
}
/*
- * Copy count bytes of data using 32bit accesses, if that is required.
+ * Copy count bytes of data from device memory to memory using 32-bit accesses
*/
-static void smd_copy_from_fifo(void *_dst,
- const void __iomem *_src,
- size_t count,
- bool word_aligned)
+static void smd_copy_from_fifo(void *_dst, const void __iomem *_src, size_t count, bool word_aligned)
{
- u32 *dst = (u32 *)_dst;
- u32 *src = (u32 *)_src;
-
- if (word_aligned) {
- count /= sizeof(u32);
- while (count--)
- *dst++ = readl_relaxed(src++);
- } else {
- memcpy_fromio(_dst, _src, count);
- }
+ u32 *dst = (u32 *)_dst;
+ u32 *src = (u32 *)_src;
+
+ if (word_aligned) {
+ count /= sizeof(u32);
+ while (count--)
+ *dst++ = readl_relaxed(src++);
+ } else {
+ memcpy_fromio(_dst, _src, count);
+ }
}
/*
@@ -416,19 +381,11 @@ static size_t qcom_smd_channel_peek(struct qcom_smd_channel *channel,
tail = GET_RX_CHANNEL_INFO(channel, tail);
len = min_t(size_t, count, channel->fifo_size - tail);
- if (len) {
- smd_copy_from_fifo(buf,
- channel->rx_fifo + tail,
- len,
- word_aligned);
- }
+ if (len)
+ smd_copy_from_fifo(buf, channel->rx_fifo + tail, len, word_aligned);
- if (len != count) {
- smd_copy_from_fifo(buf + len,
- channel->rx_fifo,
- count - len,
- word_aligned);
- }
+ if (len != count)
+ smd_copy_from_fifo(buf + len, channel->rx_fifo, count - len, word_aligned);
return count;
}
@@ -561,13 +518,13 @@ static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
/*
* Handle state changes or data on each of the channels on this edge
*/
- spin_lock(&edge->channels_lock);
+ read_lock(&edge->channels_lock);
list_for_each_entry(channel, &edge->channels, list) {
spin_lock(&channel->recv_lock);
kick_worker |= qcom_smd_channel_intr(channel);
spin_unlock(&channel->recv_lock);
}
- spin_unlock(&edge->channels_lock);
+ read_unlock(&edge->channels_lock);
/*
* Creating a new channel requires allocating an smem entry, so we only
@@ -577,12 +534,11 @@ static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
available = qcom_smem_get_free_space(edge->remote_pid);
if (available != edge->smem_available) {
edge->smem_available = available;
- edge->need_rescan = true;
kick_worker = true;
}
if (kick_worker)
- schedule_work(&edge->work);
+ schedule_work(&edge->scan_work);
return IRQ_HANDLED;
}
@@ -631,19 +587,11 @@ static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
head = GET_TX_CHANNEL_INFO(channel, head);
len = min_t(size_t, count, channel->fifo_size - head);
- if (len) {
- smd_copy_to_fifo(channel->tx_fifo + head,
- data,
- len,
- word_aligned);
- }
+ if (len)
+ smd_copy_to_fifo(channel->tx_fifo + head, data, len, word_aligned);
- if (len != count) {
- smd_copy_to_fifo(channel->tx_fifo,
- data + len,
- count - len,
- word_aligned);
- }
+ if (len != count)
+ smd_copy_to_fifo(channel->tx_fifo, data + len, count - len, word_aligned);
head += count;
head &= (channel->fifo_size - 1);
@@ -667,16 +615,19 @@ int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len)
{
u32 hdr[5] = {len,};
int tlen = sizeof(hdr) + len;
- int ret;
+ int ret, length;
/* Word aligned channels only accept word size aligned data */
if (channel->rx_info_word != NULL && len % 4)
return -EINVAL;
+ length = qcom_smd_get_tx_avail(channel);
+
ret = mutex_lock_interruptible(&channel->tx_lock);
if (ret)
return ret;
+ length = qcom_smd_get_tx_avail(channel);
while (qcom_smd_get_tx_avail(channel) < tlen) {
if (channel->state != SMD_CHANNEL_OPENED) {
ret = -EPIPE;
@@ -696,9 +647,12 @@ int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len)
SET_TX_CHANNEL_INFO(channel, fTAIL, 0);
+ length = qcom_smd_get_tx_avail(channel);
qcom_smd_write_fifo(channel, hdr, sizeof(hdr));
qcom_smd_write_fifo(channel, data, len);
+ length = qcom_smd_get_tx_avail(channel);
+
SET_TX_CHANNEL_INFO(channel, fHEAD, 1);
/* Ensure ordering of channel info updates */
@@ -727,22 +681,29 @@ static struct qcom_smd_driver *to_smd_driver(struct device *dev)
static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv)
{
+ struct qcom_smd_device *qsdev = to_smd_device(dev);
+ struct qcom_smd_driver *qsdrv = container_of(drv, struct qcom_smd_driver, driver);
+ const struct qcom_smd_id *match = qsdrv->smd_match_table;
+ const char *name = qsdev->channel->name;
+
+ if (match) {
+ while (match->name[0]) {
+ if (!strcmp(match->name, name))
+ return 1;
+ match++;
+ }
+ }
+
return of_driver_match_device(dev, drv);
}
/*
- * Probe the smd client.
- *
- * The remote side have indicated that it want the channel to be opened, so
- * complete the state handshake and probe our client driver.
+ * Helper for opening a channel
*/
-static int qcom_smd_dev_probe(struct device *dev)
+static int qcom_smd_channel_open(struct qcom_smd_channel *channel,
+ qcom_smd_cb_t cb)
{
- struct qcom_smd_device *qsdev = to_smd_device(dev);
- struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
- struct qcom_smd_channel *channel = qsdev->channel;
size_t bb_size;
- int ret;
/*
* Packets are maximum 4k, but reduce if the fifo is smaller
@@ -752,12 +713,44 @@ static int qcom_smd_dev_probe(struct device *dev)
if (!channel->bounce_buffer)
return -ENOMEM;
- channel->cb = qsdrv->callback;
-
+ qcom_smd_channel_set_callback(channel, cb);
qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING);
-
qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED);
+ return 0;
+}
+
+/*
+ * Helper for closing and resetting a channel
+ */
+static void qcom_smd_channel_close(struct qcom_smd_channel *channel)
+{
+ qcom_smd_channel_set_callback(channel, NULL);
+
+ kfree(channel->bounce_buffer);
+ channel->bounce_buffer = NULL;
+
+ qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
+ qcom_smd_channel_reset(channel);
+}
+
+/*
+ * Probe the smd client.
+ *
+ * The remote side has indicated that it wants the channel to be opened, so
+ * complete the state handshake and probe our client driver.
+ */
+static int qcom_smd_dev_probe(struct device *dev)
+{
+ struct qcom_smd_device *qsdev = to_smd_device(dev);
+ struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
+ struct qcom_smd_channel *channel = qsdev->channel;
+ int ret;
+
+ ret = qcom_smd_channel_open(channel, qsdrv->callback);
+ if (ret)
+ return ret;
+
ret = qsdrv->probe(qsdev);
if (ret)
goto err;
@@ -769,11 +762,7 @@ static int qcom_smd_dev_probe(struct device *dev)
err:
dev_err(&qsdev->dev, "probe failed\n");
- channel->cb = NULL;
- kfree(channel->bounce_buffer);
- channel->bounce_buffer = NULL;
-
- qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
+ qcom_smd_channel_close(channel);
return ret;
}
@@ -788,16 +777,15 @@ static int qcom_smd_dev_remove(struct device *dev)
struct qcom_smd_device *qsdev = to_smd_device(dev);
struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
struct qcom_smd_channel *channel = qsdev->channel;
- unsigned long flags;
+ struct qcom_smd_channel *tmp;
+ struct qcom_smd_channel *ch;
qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSING);
/*
* Make sure we don't race with the code receiving data.
*/
- spin_lock_irqsave(&channel->recv_lock, flags);
- channel->cb = NULL;
- spin_unlock_irqrestore(&channel->recv_lock, flags);
+ qcom_smd_channel_set_callback(channel, NULL);
/* Wake up any sleepers in qcom_smd_send() */
wake_up_interruptible(&channel->fblockread_event);
@@ -810,15 +798,14 @@ static int qcom_smd_dev_remove(struct device *dev)
qsdrv->remove(qsdev);
/*
- * The client is now gone, cleanup and reset the channel state.
+ * The client is now gone, close and release all channels associated
+ * with this sdev
*/
- channel->qsdev = NULL;
- kfree(channel->bounce_buffer);
- channel->bounce_buffer = NULL;
-
- qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
-
- qcom_smd_channel_reset(channel);
+ list_for_each_entry_safe(ch, tmp, &channel->dev_list, dev_list) {
+ qcom_smd_channel_close(ch);
+ list_del(&ch->dev_list);
+ ch->qsdev = NULL;
+ }
return 0;
}
@@ -880,19 +867,17 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel)
if (channel->qsdev)
return -EEXIST;
- node = qcom_smd_match_channel(edge->of_node, channel->name);
- if (!node) {
- dev_dbg(smd->dev, "no match for '%s'\n", channel->name);
- return -ENXIO;
- }
-
dev_dbg(smd->dev, "registering '%s'\n", channel->name);
qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL);
if (!qsdev)
return -ENOMEM;
- dev_set_name(&qsdev->dev, "%s.%s", edge->of_node->name, node->name);
+ node = qcom_smd_match_channel(edge->of_node, channel->name);
+ dev_set_name(&qsdev->dev, "%s.%s",
+ edge->of_node->name,
+ node ? node->name : channel->name);
+
qsdev->dev.parent = smd->dev;
qsdev->dev.bus = &qcom_smd_bus;
qsdev->dev.release = qcom_smd_release_device;
@@ -948,6 +933,76 @@ void qcom_smd_driver_unregister(struct qcom_smd_driver *qsdrv)
}
EXPORT_SYMBOL(qcom_smd_driver_unregister);
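+/*
+ * Find a channel by name on the given edge, but only if the remote side has
+ * already moved it into the opening or opened state.
+ */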
+static struct qcom_smd_channel *
+qcom_smd_find_channel(struct qcom_smd_edge *edge, const char *name)
+{
+ struct qcom_smd_channel *channel;
+ struct qcom_smd_channel *ret = NULL;
+ unsigned state;
+
+ read_lock(&edge->channels_lock);
+ list_for_each_entry(channel, &edge->channels, list) {
+ if (strcmp(channel->name, name))
+ continue;
+
+ state = GET_RX_CHANNEL_INFO(channel, state);
+ if (state != SMD_CHANNEL_OPENING &&
+ state != SMD_CHANNEL_OPENED)
+ continue;
+
+ ret = channel;
+ break;
+ }
+ read_unlock(&edge->channels_lock);
+
+ return ret;
+}
+
+/**
+ * qcom_smd_open_channel() - claim additional channels on the same edge
+ * @sdev: smd_device handle
+ * @name: channel name
+ * @cb: callback method to use for incoming data
+ *
+ * Returns a channel handle on success, or an ERR_PTR(): -ETIMEDOUT if the
+ * channel doesn't appear in time, or -EBUSY if it is already open.
+ */
+struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_device *sdev,
+ const char *name,
+ qcom_smd_cb_t cb)
+{
+ struct qcom_smd_channel *channel;
+ struct qcom_smd_edge *edge = sdev->channel->edge;
+ int ret;
+
+ /* Wait up to HZ for the channel to appear */
+ ret = wait_event_interruptible_timeout(edge->new_channel_event,
+ (channel = qcom_smd_find_channel(edge, name)) != NULL,
+ HZ);
+ if (!ret)
+ return ERR_PTR(-ETIMEDOUT);
+
+ if (channel->state != SMD_CHANNEL_CLOSED) {
+ dev_err(&sdev->dev, "channel %s is busy\n", channel->name);
+ return ERR_PTR(-EBUSY);
+ }
+
+ channel->qsdev = sdev;
+ ret = qcom_smd_channel_open(channel, cb);
+ if (ret) {
+ channel->qsdev = NULL;
+ return ERR_PTR(ret);
+ }
+
+ /*
+ * Append this channel to the list of channels associated with the sdev
+ */
+ list_add_tail(&channel->dev_list, &sdev->channel->dev_list);
+
+ return channel;
+}
+EXPORT_SYMBOL(qcom_smd_open_channel);
+
/*
* Allocate the qcom_smd_channel object for a newly found smd channel,
* retrieving and validating the smem items involved.
@@ -969,6 +1024,7 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
if (!channel)
return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&channel->dev_list);
channel->edge = edge;
channel->name = devm_kstrdup(smd->dev, name, GFP_KERNEL);
if (!channel->name)
@@ -1008,7 +1064,7 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
/* The channel consist of a rx and tx fifo of equal size */
fifo_size /= 2;
- dev_dbg(smd->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n",
+ dev_err(smd->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n",
name, info_size, fifo_size);
channel->tx_fifo = fifo_base;
@@ -1031,8 +1087,9 @@ free_name_and_channel:
* qcom_smd_create_channel() to create representations of these and add
* them to the edge's list of channels.
*/
-static void qcom_discover_channels(struct qcom_smd_edge *edge)
+static void qcom_channel_scan_worker(struct work_struct *work)
{
+ struct qcom_smd_edge *edge = container_of(work, struct qcom_smd_edge, scan_work);
struct qcom_smd_alloc_entry *alloc_tbl;
struct qcom_smd_alloc_entry *entry;
struct qcom_smd_channel *channel;
@@ -1076,16 +1133,18 @@ static void qcom_discover_channels(struct qcom_smd_edge *edge)
if (IS_ERR(channel))
continue;
- spin_lock_irqsave(&edge->channels_lock, flags);
+ write_lock_irqsave(&edge->channels_lock, flags);
list_add(&channel->list, &edge->channels);
- spin_unlock_irqrestore(&edge->channels_lock, flags);
+ write_unlock_irqrestore(&edge->channels_lock, flags);
dev_dbg(smd->dev, "new channel found: '%s'\n", channel->name);
set_bit(i, edge->allocated[tbl]);
+
+ wake_up_interruptible(&edge->new_channel_event);
}
}
- schedule_work(&edge->work);
+ schedule_work(&edge->state_work);
}
/*
@@ -1093,29 +1152,22 @@ static void qcom_discover_channels(struct qcom_smd_edge *edge)
* then scans all registered channels for state changes that should be handled
* by creating or destroying smd client devices for the registered channels.
*
- * LOCKING: edge->channels_lock is not needed to be held during the traversal
- * of the channels list as it's done synchronously with the only writer.
+ * LOCKING: edge->channels_lock only needs to cover the list operations, as the
+ * worker is killed before any channels are deallocated
*/
static void qcom_channel_state_worker(struct work_struct *work)
{
struct qcom_smd_channel *channel;
struct qcom_smd_edge *edge = container_of(work,
struct qcom_smd_edge,
- work);
+ state_work);
unsigned remote_state;
/*
- * Rescan smem if we have reason to belive that there are new channels.
- */
- if (edge->need_rescan) {
- edge->need_rescan = false;
- qcom_discover_channels(edge);
- }
-
- /*
* Register a device for any closed channel where the remote processor
* is showing interest in opening the channel.
*/
+ read_lock(&edge->channels_lock);
list_for_each_entry(channel, &edge->channels, list) {
if (channel->state != SMD_CHANNEL_CLOSED)
continue;
@@ -1125,7 +1177,9 @@ static void qcom_channel_state_worker(struct work_struct *work)
remote_state != SMD_CHANNEL_OPENED)
continue;
+ read_unlock(&edge->channels_lock);
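+ /*
+ * Device registration may sleep; channels are never removed from
+ * the list, so it is safe to drop the lock around it.
+ */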
qcom_smd_create_device(channel);
+ read_lock(&edge->channels_lock);
}
/*
@@ -1142,8 +1196,11 @@ static void qcom_channel_state_worker(struct work_struct *work)
remote_state == SMD_CHANNEL_OPENED)
continue;
+ read_unlock(&edge->channels_lock);
qcom_smd_destroy_device(channel);
+ read_lock(&edge->channels_lock);
}
+ read_unlock(&edge->channels_lock);
}
/*
@@ -1159,9 +1216,10 @@ static int qcom_smd_parse_edge(struct device *dev,
int ret;
INIT_LIST_HEAD(&edge->channels);
- spin_lock_init(&edge->channels_lock);
+ rwlock_init(&edge->channels_lock);
- INIT_WORK(&edge->work, qcom_channel_state_worker);
+ INIT_WORK(&edge->scan_work, qcom_channel_scan_worker);
+ INIT_WORK(&edge->state_work, qcom_channel_state_worker);
edge->of_node = of_node_get(node);
@@ -1190,7 +1248,11 @@ static int qcom_smd_parse_edge(struct device *dev,
edge->remote_pid = QCOM_SMEM_HOST_ANY;
key = "qcom,remote-pid";
- of_property_read_u32(node, key, &edge->remote_pid);
+ ret = of_property_read_u32(node, key, &edge->remote_pid);
+ if (ret) {
+ dev_err(dev, "edge missing %s property\n", key);
+ return -EINVAL;
+ }
syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
if (!syscon_np) {
@@ -1244,13 +1306,13 @@ static int qcom_smd_probe(struct platform_device *pdev)
for_each_available_child_of_node(pdev->dev.of_node, node) {
edge = &smd->edges[i++];
edge->smd = smd;
+ init_waitqueue_head(&edge->new_channel_event);
ret = qcom_smd_parse_edge(&pdev->dev, node, edge);
if (ret)
continue;
- edge->need_rescan = true;
- schedule_work(&edge->work);
+ schedule_work(&edge->scan_work);
}
platform_set_drvdata(pdev, smd);
@@ -1273,8 +1335,10 @@ static int qcom_smd_remove(struct platform_device *pdev)
edge = &smd->edges[i];
disable_irq(edge->irq);
- cancel_work_sync(&edge->work);
+ cancel_work_sync(&edge->scan_work);
+ cancel_work_sync(&edge->state_work);
+ /* No need to lock here, because the writer is gone */
list_for_each_entry(channel, &edge->channels, list) {
if (!channel->qsdev)
continue;
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 52365188a1c2..7fddf3b491b6 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -20,6 +20,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>
+#include <linux/debugfs.h>
/*
* The Qualcomm shared memory system is a allocate only heap structure that
@@ -85,6 +86,8 @@
/* Max number of processors/hosts in a system */
#define SMEM_HOST_COUNT 9
+#define SMEM_HEAP_INFO 1
+
/**
* struct smem_proc_comm - proc_comm communication struct (legacy)
* @command: current command to be executed
@@ -238,6 +241,9 @@ struct qcom_smem {
struct smem_partition_header *partitions[SMEM_HOST_COUNT];
+ struct dentry *dent;
+ u32 version;
+
unsigned num_regions;
struct smem_region regions[0];
};
@@ -642,6 +648,104 @@ static int qcom_smem_count_mem_regions(struct platform_device *pdev)
return num_regions;
}
+static void smem_debug_read_mem(struct seq_file *s)
+{
+ u32 *info;
+ size_t size;
+ int ret, i;
+ long flags;
+
+ ret = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_HEAP_INFO,
+ (void **)&info, &size);
+
+ if (ret < 0)
+ seq_printf(s, "Can't get global heap information pool\n");
+ else {
+ seq_printf(s, "global heap\n");
+ seq_printf(s, " initialized: %d offset: %08x avail: %08x\n",
+ info[0], info[1], info[2]);
+
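+ /* Walk all possible global item ids and dump the ones that are allocated */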
+ for (i = 0; i < 512; i++) {
+ ret = qcom_smem_get(QCOM_SMEM_HOST_ANY, i,
+ (void **)&info, &size);
+ if (ret < 0)
+ continue;
+
+ seq_printf(s, " [%d]: p: %p s: %li\n", i, info,
+ size);
+ }
+ }
+
+ seq_printf(s, "\nSecure partitions accessible from APPS:\n");
+
+ ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
+ HWSPINLOCK_TIMEOUT,
+ &flags);
+
+ for (i = 0; i < SMEM_HOST_COUNT; i++) {
+ struct smem_partition_header *part_hdr = __smem->partitions[i];
+ void *p;
+
+ if (!part_hdr)
+ continue;
+
+ if (part_hdr->magic != SMEM_PART_MAGIC) {
+ seq_printf(s, " part[%d]: incorrect magic\n", i);
+ continue;
+ }
+
+ seq_printf(s, " part[%d]: (%d <-> %d) size: %d off: %08x\n",
+ i, part_hdr->host0, part_hdr->host1, part_hdr->size,
+ part_hdr->offset_free_uncached);
+
+ p = (void *)part_hdr + sizeof(*part_hdr);
+ while (p < (void *)part_hdr + part_hdr->offset_free_uncached) {
+ struct smem_private_entry *entry = p;
+
+ seq_printf(s,
+ " [%d]: %s size: %d pd: %d\n",
+ entry->item,
+ (entry->canary == SMEM_PRIVATE_CANARY) ?
+ "valid" : "invalid",
+ entry->size,
+ entry->padding_data);
+
+ p += sizeof(*entry) + entry->padding_hdr + entry->size;
+ }
+ }
+
+ hwspin_unlock_irqrestore(__smem->hwlock, &flags);
+}
+
+static void smem_debug_read_version(struct seq_file *s)
+{
+ seq_printf(s, "SBL version: %08x\n", __smem->version >> 16);
+}
+
+static int debugfs_show(struct seq_file *s, void *data)
+{
+ void (*show)(struct seq_file *) = s->private;
+
+ show(s);
+
+ return 0;
+}
+
+static int smem_debug_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debugfs_show, inode->i_private);
+}
+
+
+static const struct file_operations smem_debug_ops = {
+ .open = smem_debug_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+};
+
+
+
static int qcom_smem_probe(struct platform_device *pdev)
{
struct smem_header *header;
@@ -652,7 +756,6 @@ static int qcom_smem_probe(struct platform_device *pdev)
size_t array_size;
int num_regions = 0;
int hwlock_id;
- u32 version;
int ret;
int i;
@@ -703,9 +806,9 @@ static int qcom_smem_probe(struct platform_device *pdev)
return -EINVAL;
}
- version = qcom_smem_get_sbl_version(smem);
- if (version >> 16 != SMEM_EXPECTED_VERSION) {
- dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
+ smem->version = qcom_smem_get_sbl_version(smem);
+ if (smem->version >> 16 != SMEM_EXPECTED_VERSION) {
+ dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", smem->version);
return -EINVAL;
}
@@ -725,6 +828,17 @@ static int qcom_smem_probe(struct platform_device *pdev)
__smem = smem;
+ /* setup debugfs information */
+ __smem->dent = debugfs_create_dir("smem", 0);
+ if (IS_ERR(__smem->dent))
+ dev_info(smem->dev, "unable to create debugfs\n");
+
+ if (!debugfs_create_file("mem", 0444, __smem->dent, smem_debug_read_mem, &smem_debug_ops))
+ dev_err(smem->dev, "couldn't create mem file\n");
+ if (!debugfs_create_file("version", 0444, __smem->dent, smem_debug_read_version,
+ &smem_debug_ops))
+ dev_err(smem->dev, "couldn't create version file\n");
+
return 0;
}
diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c
index b04b05a0904e..7f751c5a4c94 100644
--- a/drivers/soc/qcom/spm.c
+++ b/drivers/soc/qcom/spm.c
@@ -20,11 +20,14 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/qcom_scm.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
#include <asm/cpuidle.h>
#include <asm/proc-fns.h>
@@ -36,6 +39,9 @@
#define SPM_CTL_INDEX_SHIFT 4
#define SPM_CTL_EN BIT(0)
+/* Specifies the PMIC internal slew rate in uV/us. */
+#define REGULATOR_SLEW_RATE 1250
+
enum pm_sleep_mode {
PM_SLEEP_MODE_STBY,
PM_SLEEP_MODE_RET,
@@ -51,6 +57,8 @@ enum spm_reg {
SPM_REG_PMIC_DLY,
SPM_REG_PMIC_DATA_0,
SPM_REG_PMIC_DATA_1,
+ SPM_REG_RST,
+ SPM_REG_STS_1,
SPM_REG_VCTL,
SPM_REG_SEQ_ENTRY,
SPM_REG_SPM_STS,
@@ -68,9 +76,23 @@ struct spm_reg_data {
u8 start_index[PM_SLEEP_MODE_NR];
};
+struct spm_vlevel_data {
+ struct spm_driver_data *drv;
+ unsigned selector;
+};
+
+struct saw2_vreg {
+ struct regulator_desc rdesc;
+ struct regulator_dev *rdev;
+ unsigned int uV;
+ u32 vlevel;
+ struct spm_driver_data *drv;
+};
+
struct spm_driver_data {
void __iomem *reg_base;
const struct spm_reg_data *reg_data;
+ struct saw2_vreg *vreg;
};
static const u8 spm_reg_offset_v2_1[SPM_REG_NR] = {
@@ -94,10 +116,13 @@ static const struct spm_reg_data spm_reg_8974_8084_cpu = {
static const u8 spm_reg_offset_v1_1[SPM_REG_NR] = {
[SPM_REG_CFG] = 0x08,
+ [SPM_REG_STS_1] = 0x10,
+ [SPM_REG_VCTL] = 0x14,
[SPM_REG_SPM_CTL] = 0x20,
[SPM_REG_PMIC_DLY] = 0x24,
[SPM_REG_PMIC_DATA_0] = 0x28,
[SPM_REG_PMIC_DATA_1] = 0x2C,
+ [SPM_REG_RST] = 0x30,
[SPM_REG_SEQ_ENTRY] = 0x80,
};
@@ -282,6 +307,146 @@ static struct cpuidle_ops qcom_cpuidle_ops __initdata = {
CPUIDLE_METHOD_OF_DECLARE(qcom_idle_v1, "qcom,kpss-acc-v1", &qcom_cpuidle_ops);
CPUIDLE_METHOD_OF_DECLARE(qcom_idle_v2, "qcom,kpss-acc-v2", &qcom_cpuidle_ops);
+static const unsigned int saw2_volt_table[] = {
+ 850000, 862500, 875000, 887500, 900000, 912500,
+ 925000, 937500, 950000, 962500, 975000, 987500,
+ 1000000, 1012500, 1025000, 1037500, 1050000, 1062500,
+ 1075000, 1087500, 1100000, 1112500, 1125000, 1137000,
+ 1137500, 1150000, 1162500, 1175000, 1187500, 1200000,
+ 1212500, 1225000, 1237500, 1250000, 1287500
+};
+
+static const u32 vlevels[] = {
+ 0x94, 0x94, 0x94, 0x94, 0x94, 0x94,
+ 0x94, 0x96, 0x96, 0x96, 0x98, 0x98,
+ 0x98, 0x9a, 0x9a, 0x9e, 0xa0, 0xa0,
+ 0xa2, 0xa6, 0xa8, 0xa8, 0xaa, 0xaa,
+ 0xac, 0xac, 0xac, 0xac, 0xac, 0xac,
+ 0xac, 0xac, 0xac, 0xac, 0xac
+};
+
+static int saw2_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct spm_driver_data *drv = rdev_get_drvdata(rdev);
+
+ return drv->vreg->uV;
+}
+
+static void spm_smp_set_vdd(void *data)
+{
+ struct spm_vlevel_data *vdata = (struct spm_vlevel_data *)data;
+ struct spm_driver_data *drv = vdata->drv;
+ struct saw2_vreg *vreg = drv->vreg;
+ unsigned long sel = vdata->selector;
+ u32 new_vlevel;
+ u32 vctl, data0, data1;
+ int timeout_us = 50;
+
+ if (vreg->vlevel == vlevels[sel])
+ return;
+
+ vctl = spm_register_read(drv, SPM_REG_VCTL);
+ data0 = spm_register_read(drv, SPM_REG_PMIC_DATA_0);
+ data1 = spm_register_read(drv, SPM_REG_PMIC_DATA_1);
+
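+ /* Merge the requested voltage level into VCTL and both PMIC data words */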
+ vctl &= ~0xff;
+ vctl |= vlevels[sel];
+
+ data0 &= ~0xff;
+ data0 |= vlevels[sel];
+
+ data1 &= ~0x3f;
+ data1 |= (vlevels[sel] & 0x3f);
+ data1 &= ~0x3F0000;
+ data1 |= ((vlevels[sel] & 0x3f) << 16);
+
+ spm_register_write(drv, SPM_REG_RST, 1);
+ spm_register_write(drv, SPM_REG_VCTL, vctl);
+ spm_register_write(drv, SPM_REG_PMIC_DATA_0, data0);
+ spm_register_write(drv, SPM_REG_PMIC_DATA_1, data1);
+
+ do {
+ new_vlevel = spm_register_read(drv, SPM_REG_STS_1) & 0xff;
+ if (new_vlevel == vlevels[sel])
+ break;
+ udelay(1);
+ } while (--timeout_us);
+
+ if (!timeout_us) {
+ pr_info("%s: Voltage not changed %#x\n", __func__, new_vlevel);
+ return;
+ }
+
+ if (saw2_volt_table[sel] > vreg->uV) {
+ /* Wait for the voltage to stabilize. */
+ udelay((saw2_volt_table[sel] - vreg->uV) / REGULATOR_SLEW_RATE);
+ }
+
+ vreg->uV = saw2_volt_table[sel];
+ vreg->vlevel = vlevels[sel];
+}
+
+static int saw2_regulator_set_voltage_sel(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ struct spm_driver_data *drv = rdev_get_drvdata(rdev);
+ struct spm_vlevel_data data;
+ int ret;
+ int cpu = rdev_get_id(rdev);
+
+ data.drv = drv;
+ data.selector = selector;
+
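+ /* Apply the voltage change on the CPU that owns this SAW instance */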
+ ret = smp_call_function_single(cpu, spm_smp_set_vdd, &data, true);
+
+ return ret;
+}
+
+static struct regulator_ops saw2_regulator_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .get_voltage = saw2_regulator_get_voltage,
+ .set_voltage_sel = saw2_regulator_set_voltage_sel,
+};
+
+static struct regulator_desc saw2_regulator = {
+ .owner = THIS_MODULE,
+ .type = REGULATOR_VOLTAGE,
+ .ops = &saw2_regulator_ops,
+ .volt_table = saw2_volt_table,
+ .n_voltages = ARRAY_SIZE(saw2_volt_table),
+};
+
+static int register_saw2_regulator(struct spm_driver_data *drv,
+ struct platform_device *pdev, int cpu)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct saw2_vreg *vreg;
+ struct regulator_config config = { };
+
+ vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL);
+ if (!vreg)
+ return -ENOMEM;
+
+ drv->vreg = vreg;
+ config.driver_data = drv;
+ config.dev = &pdev->dev;
+ config.of_node = np;
+
+ vreg->rdesc = saw2_regulator;
+ vreg->rdesc.id = cpu;
+ vreg->rdesc.name = of_get_property(np, "regulator-name", NULL);
+ config.init_data = of_get_regulator_init_data(&pdev->dev,
+ pdev->dev.of_node,
+ &vreg->rdesc);
+
+ vreg->rdev = devm_regulator_register(&pdev->dev, &vreg->rdesc, &config);
+ if (IS_ERR(vreg->rdev))
+ return PTR_ERR(vreg->rdev);
+
+ return 0;
+}
+
static struct spm_driver_data *spm_get_drv(struct platform_device *pdev,
int *spm_cpu)
{
@@ -327,7 +492,7 @@ static int spm_dev_probe(struct platform_device *pdev)
struct resource *res;
const struct of_device_id *match_id;
void __iomem *addr;
- int cpu;
+ int cpu, ret;
drv = spm_get_drv(pdev, &cpu);
if (!drv)
@@ -356,6 +521,7 @@ static int spm_dev_probe(struct platform_device *pdev)
* machine, before the sequences are completely written.
*/
spm_register_write(drv, SPM_REG_CFG, drv->reg_data->spm_cfg);
+
spm_register_write(drv, SPM_REG_DLY, drv->reg_data->spm_dly);
spm_register_write(drv, SPM_REG_PMIC_DLY, drv->reg_data->pmic_dly);
spm_register_write(drv, SPM_REG_PMIC_DATA_0,
@@ -368,6 +534,10 @@ static int spm_dev_probe(struct platform_device *pdev)
per_cpu(cpu_spm_drv, cpu) = drv;
+ ret = register_saw2_regulator(drv, pdev, cpu);
+ if (ret)
+ dev_err(&pdev->dev, "error registering SAW2 regulator\n");
+
return 0;
}
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 5aabc4bc0d75..d49f2bd3d4d7 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -374,4 +374,9 @@ config QCOM_SPMI_TEMP_ALARM
real time die temperature if an ADC is present or an estimate of the
temperature based upon the over temperature stage value.
+menu "Qualcomm thermal drivers"
+depends on ARCH_QCOM && OF
+source "drivers/thermal/qcom/Kconfig"
+endmenu
+
endif
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 26f160809959..cdaa55f9d4eb 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -43,5 +43,6 @@ obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/
obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/
obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o
obj-$(CONFIG_ST_THERMAL) += st/
+obj-$(CONFIG_QCOM_TSENS) += qcom/
obj-$(CONFIG_TEGRA_SOCTHERM) += tegra_soctherm.o
obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
new file mode 100644
index 000000000000..f7e8e4074d80
--- /dev/null
+++ b/drivers/thermal/qcom/Kconfig
@@ -0,0 +1,10 @@
+config QCOM_TSENS
+ tristate "Qualcomm TSENS Temperature Alarm"
+ depends on THERMAL
+ depends on QCOM_QFPROM
+ help
+ This enables the thermal sysfs driver for the TSENS device. It shows
+ up in sysfs as a thermal zone with multiple trip points. Disabling the
+ thermal zone device via the mode file results in disabling the sensor.
+ It can also set threshold temperatures for both hot and cold trips and
+ sends an update when a threshold is reached.
diff --git a/drivers/thermal/qcom/Makefile b/drivers/thermal/qcom/Makefile
new file mode 100644
index 000000000000..f3cefd19e898
--- /dev/null
+++ b/drivers/thermal/qcom/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_QCOM_TSENS) += qcom_tsens.o
+qcom_tsens-y += tsens.o tsens-common.o tsens-8916.o tsens-8974.o tsens-8960.o
diff --git a/drivers/thermal/qcom/tsens-8916.c b/drivers/thermal/qcom/tsens-8916.c
new file mode 100644
index 000000000000..a69aea35bf81
--- /dev/null
+++ b/drivers/thermal/qcom/tsens-8916.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include "tsens.h"
+
+/* eeprom layout data for 8916 */
+#define BASE0_MASK 0x0000007f
+#define BASE1_MASK 0xfe000000
+#define BASE0_SHIFT 0
+#define BASE1_SHIFT 25
+
+#define S0_P1_MASK 0x00000f80
+#define S1_P1_MASK 0x003e0000
+#define S2_P1_MASK 0xf8000000
+#define S3_P1_MASK 0x000003e0
+#define S4_P1_MASK 0x000f8000
+
+#define S0_P2_MASK 0x0001f000
+#define S1_P2_MASK 0x07c00000
+#define S2_P2_MASK 0x0000001f
+#define S3_P2_MASK 0x00007c00
+#define S4_P2_MASK 0x01f00000
+
+#define S0_P1_SHIFT 7
+#define S1_P1_SHIFT 17
+#define S2_P1_SHIFT 27
+#define S3_P1_SHIFT 5
+#define S4_P1_SHIFT 15
+
+#define S0_P2_SHIFT 12
+#define S1_P2_SHIFT 22
+#define S2_P2_SHIFT 0
+#define S3_P2_SHIFT 10
+#define S4_P2_SHIFT 20
+
+#define CAL_SEL_MASK 0xe0000000
+#define CAL_SEL_SHIFT 29
+
+static int calibrate_8916(struct tsens_device *tmdev)
+{
+ int base0 = 0, base1 = 0, i;
+ u32 p1[5], p2[5];
+ int mode = 0;
+ u32 *qfprom_cdata, *qfprom_csel;
+
+ qfprom_cdata = (u32 *)qfprom_read(tmdev->dev, "calib");
+ if (IS_ERR(qfprom_cdata))
+ return PTR_ERR(qfprom_cdata);
+
+ qfprom_csel = (u32 *)qfprom_read(tmdev->dev, "calib_sel");
+ if (IS_ERR(qfprom_csel))
+ return PTR_ERR(qfprom_csel);
+
+ mode = (qfprom_csel[0] & CAL_SEL_MASK) >> CAL_SEL_SHIFT;
+ dev_dbg(tmdev->dev, "calibration mode is %d\n", mode);
+
+ switch (mode) {
+ case TWO_PT_CALIB:
+ base1 = (qfprom_cdata[1] & BASE1_MASK) >> BASE1_SHIFT;
+ p2[0] = (qfprom_cdata[0] & S0_P2_MASK) >> S0_P2_SHIFT;
+ p2[1] = (qfprom_cdata[0] & S1_P2_MASK) >> S1_P2_SHIFT;
+ p2[2] = (qfprom_cdata[1] & S2_P2_MASK) >> S2_P2_SHIFT;
+ p2[3] = (qfprom_cdata[1] & S3_P2_MASK) >> S3_P2_SHIFT;
+ p2[4] = (qfprom_cdata[1] & S4_P2_MASK) >> S4_P2_SHIFT;
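+ /* Each point is the shared base plus a per-sensor delta, in units of 8 ADC codes */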
+ for (i = 0; i < tmdev->num_sensors; i++)
+ p2[i] = ((base1 + p2[i]) << 3);
+ /* Fall through */
+ case ONE_PT_CALIB2:
+ base0 = (qfprom_cdata[0] & BASE0_MASK);
+ p1[0] = (qfprom_cdata[0] & S0_P1_MASK) >> S0_P1_SHIFT;
+ p1[1] = (qfprom_cdata[0] & S1_P1_MASK) >> S1_P1_SHIFT;
+ p1[2] = (qfprom_cdata[0] & S2_P1_MASK) >> S2_P1_SHIFT;
+ p1[3] = (qfprom_cdata[1] & S3_P1_MASK) >> S3_P1_SHIFT;
+ p1[4] = (qfprom_cdata[1] & S4_P1_MASK) >> S4_P1_SHIFT;
+ for (i = 0; i < tmdev->num_sensors; i++)
+ p1[i] = (((base0) + p1[i]) << 3);
+ break;
+ default:
+ for (i = 0; i < tmdev->num_sensors; i++) {
+ p1[i] = 500;
+ p2[i] = 780;
+ }
+ break;
+ }
+
+ compute_intercept_slope(tmdev, p1, p2, mode);
+
+ return 0;
+}
+
+const struct tsens_ops ops_8916 = {
+ .init = init_common,
+ .calibrate = calibrate_8916,
+ .get_temp = get_temp_common,
+};
diff --git a/drivers/thermal/qcom/tsens-8960.c b/drivers/thermal/qcom/tsens-8960.c
new file mode 100644
index 000000000000..00f45e7f872f
--- /dev/null
+++ b/drivers/thermal/qcom/tsens-8960.c
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include "tsens.h"
+
+#define CAL_MDEGC 30000
+
+#define CONFIG_ADDR 0x3640
+#define CONFIG_ADDR_8660 0x3620
+/* CONFIG_ADDR bitmasks */
+#define CONFIG 0x9b
+#define CONFIG_MASK 0xf
+#define CONFIG_8660 1
+#define CONFIG_SHIFT_8660 28
+#define CONFIG_MASK_8660 (3 << CONFIG_SHIFT_8660)
+
+#define STATUS_CNTL_ADDR_8064 0x3660
+#define CNTL_ADDR 0x3620
+/* CNTL_ADDR bitmasks */
+#define EN BIT(0)
+#define SW_RST BIT(1)
+#define SENSOR0_EN BIT(3)
+#define SLP_CLK_ENA BIT(26)
+#define SLP_CLK_ENA_8660 BIT(24)
+#define MEASURE_PERIOD 1
+#define SENSOR0_SHIFT 3
+
+/* INT_STATUS_ADDR bitmasks */
+#define MIN_STATUS_MASK BIT(0)
+#define LOWER_STATUS_CLR BIT(1)
+#define UPPER_STATUS_CLR BIT(2)
+#define MAX_STATUS_MASK BIT(3)
+
+#define THRESHOLD_ADDR 0x3624
+/* THRESHOLD_ADDR bitmasks */
+#define THRESHOLD_MAX_LIMIT_SHIFT 24
+#define THRESHOLD_MIN_LIMIT_SHIFT 16
+#define THRESHOLD_UPPER_LIMIT_SHIFT 8
+#define THRESHOLD_LOWER_LIMIT_SHIFT 0
+
+/* Initial temperature threshold values */
+#define LOWER_LIMIT_TH 0x50
+#define UPPER_LIMIT_TH 0xdf
+#define MIN_LIMIT_TH 0x0
+#define MAX_LIMIT_TH 0xff
+
+#define S0_STATUS_ADDR 0x3628
+#define INT_STATUS_ADDR 0x363c
+#define TRDY_MASK BIT(7)
+
+static int suspend_8960(struct tsens_device *tmdev)
+{
+ int ret;
+ unsigned int mask;
+ struct regmap *map = tmdev->map;
+
+ ret = regmap_read(map, THRESHOLD_ADDR, &tmdev->ctx.threshold);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(map, CNTL_ADDR, &tmdev->ctx.control);
+ if (ret)
+ return ret;
+
+ if (tmdev->num_sensors > 1)
+ mask = SLP_CLK_ENA | EN;
+ else
+ mask = SLP_CLK_ENA_8660 | EN;
+
+ ret = regmap_update_bits(map, CNTL_ADDR, mask, 0);
+ if (ret)
+ return ret;
+
+ tmdev->trdy = false;
+
+ return 0;
+}
+
+static int resume_8960(struct tsens_device *tmdev)
+{
+ int ret;
+ unsigned long reg_cntl;
+ struct regmap *map = tmdev->map;
+
+ ret = regmap_update_bits(map, CNTL_ADDR, SW_RST, SW_RST);
+ if (ret)
+ return ret;
+
+ /*
+ * A separate CONFIG restore is only unnecessary on 8660, where the
+ * config is part of the CNTL register and is restored along with it
+ */
+ if (tmdev->num_sensors > 1) {
+ ret = regmap_update_bits(map, CONFIG_ADDR, CONFIG_MASK, CONFIG);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_write(map, THRESHOLD_ADDR, tmdev->ctx.threshold);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(map, CNTL_ADDR, tmdev->ctx.control);
+ if (ret)
+ return ret;
+
+ reg_cntl = tmdev->ctx.control;
+ reg_cntl >>= SENSOR0_SHIFT;
+
+ return 0;
+}
+
+static int enable_8960(struct tsens_device *tmdev, int id)
+{
+ int ret;
+ u32 reg, mask;
+
+ ret = regmap_read(tmdev->map, CNTL_ADDR, &reg);
+ if (ret)
+ return ret;
+
+ mask = BIT(id + SENSOR0_SHIFT);
+ ret = regmap_write(tmdev->map, CNTL_ADDR, reg | SW_RST);
+ if (ret)
+ return ret;
+
+ if (tmdev->num_sensors > 1)
+ reg |= mask | SLP_CLK_ENA | EN;
+ else
+ reg |= mask | SLP_CLK_ENA_8660 | EN;
+
+ tmdev->trdy = false;
+ ret = regmap_write(tmdev->map, CNTL_ADDR, reg);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void disable_8960(struct tsens_device *tmdev)
+{
+ int ret;
+ u32 reg_cntl;
+ u32 mask;
+
+ mask = GENMASK(tmdev->num_sensors - 1, 0);
+ mask <<= SENSOR0_SHIFT;
+ mask |= EN;
+
+ ret = regmap_read(tmdev->map, CNTL_ADDR, &reg_cntl);
+ if (ret)
+ return;
+
+ reg_cntl &= ~mask;
+
+ if (tmdev->num_sensors > 1)
+ reg_cntl &= ~SLP_CLK_ENA;
+ else
+ reg_cntl &= ~SLP_CLK_ENA_8660;
+
+ regmap_write(tmdev->map, CNTL_ADDR, reg_cntl);
+}
+
+static int init_8960(struct tsens_device *tmdev)
+{
+ int ret, i;
+ u32 reg_cntl;
+
+ tmdev->map = dev_get_regmap(tmdev->dev, NULL);
+ if (!tmdev->map)
+ return -ENODEV;
+
+ /*
+ * The status registers for each sensor are discontiguous
+ * because some SoCs have 5 sensors while others have more,
+ * but the control registers stay in the same place, i.e.
+ * directly after the first 5 status registers.
+ */
+ for (i = 0; i < tmdev->num_sensors; i++) {
+ if (i >= 5)
+ tmdev->sensor[i].status = S0_STATUS_ADDR + 40;
+ tmdev->sensor[i].status += i * 4;
+ }
+
+ reg_cntl = SW_RST;
+ ret = regmap_update_bits(tmdev->map, CNTL_ADDR, SW_RST, reg_cntl);
+ if (ret)
+ return ret;
+
+ if (tmdev->num_sensors > 1) {
+ reg_cntl |= SLP_CLK_ENA | (MEASURE_PERIOD << 18);
+ reg_cntl &= ~SW_RST;
+ ret = regmap_update_bits(tmdev->map, CONFIG_ADDR,
+ CONFIG_MASK, CONFIG);
+ } else {
+ reg_cntl |= SLP_CLK_ENA_8660 | (MEASURE_PERIOD << 16);
+ reg_cntl &= ~CONFIG_MASK_8660;
+ reg_cntl |= CONFIG_8660 << CONFIG_SHIFT_8660;
+ }
+
+ reg_cntl |= GENMASK(tmdev->num_sensors - 1, 0) << SENSOR0_SHIFT;
+ ret = regmap_write(tmdev->map, CNTL_ADDR, reg_cntl);
+ if (ret)
+ return ret;
+
+ reg_cntl |= EN;
+ ret = regmap_write(tmdev->map, CNTL_ADDR, reg_cntl);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int calibrate_8960(struct tsens_device *tmdev)
+{
+ int i;
+ char *data;
+
+ ssize_t num_read = tmdev->num_sensors;
+ struct tsens_sensor *s = tmdev->sensor;
+
+ data = qfprom_read(tmdev->dev, "calib");
+ if (IS_ERR(data))
+ data = qfprom_read(tmdev->dev, "calib_backup");
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ for (i = 0; i < num_read; i++, s++)
+ s->offset = CAL_MDEGC - s->slope * data[i];
+
+ return 0;
+}
+
+/* Temperature on y axis and ADC-code on x-axis */
+static inline int code_to_mdegC(u32 adc_code, const struct tsens_sensor *s)
+{
+ return adc_code * s->slope + s->offset;
+}
+
+static int get_temp_8960(struct tsens_device *tmdev, int id, int *temp)
+{
+ int ret;
+ u32 code, trdy;
+ const struct tsens_sensor *s = &tmdev->sensor[id];
+
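+ /* Wait for the first conversion to complete after an enable or resume */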
+ if (!tmdev->trdy) {
+ ret = regmap_read(tmdev->map, INT_STATUS_ADDR, &trdy);
+ if (ret)
+ return ret;
+ while (!(trdy & TRDY_MASK)) {
+ usleep_range(1000, 1100);
+ regmap_read(tmdev->map, INT_STATUS_ADDR, &trdy);
+ }
+ tmdev->trdy = true;
+ }
+
+ ret = regmap_read(tmdev->map, s->status, &code);
+ if (ret)
+ return ret;
+
+ *temp = code_to_mdegC(code, s);
+
+ dev_dbg(tmdev->dev, "Sensor%d temp is: %d", id, *temp);
+
+ return 0;
+}
+
+const struct tsens_ops ops_8960 = {
+ .init = init_8960,
+ .calibrate = calibrate_8960,
+ .get_temp = get_temp_8960,
+ .enable = enable_8960,
+ .disable = disable_8960,
+ .suspend = suspend_8960,
+ .resume = resume_8960,
+};
diff --git a/drivers/thermal/qcom/tsens-8974.c b/drivers/thermal/qcom/tsens-8974.c
new file mode 100644
index 000000000000..19d9258a439f
--- /dev/null
+++ b/drivers/thermal/qcom/tsens-8974.c
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include "tsens.h"
+
+/* eeprom layout data for 8974 */
+#define BASE1_MASK 0xff
+#define S0_P1_MASK 0x3f00
+#define S1_P1_MASK 0xfc000
+#define S2_P1_MASK 0x3f00000
+#define S3_P1_MASK 0xfc000000
+#define S4_P1_MASK 0x3f
+#define S5_P1_MASK 0xfc0
+#define S6_P1_MASK 0x3f000
+#define S7_P1_MASK 0xfc0000
+#define S8_P1_MASK 0x3f000000
+#define S8_P1_MASK_BKP 0x3f
+#define S9_P1_MASK 0x3f
+#define S9_P1_MASK_BKP 0xfc0
+#define S10_P1_MASK 0xfc0
+#define S10_P1_MASK_BKP 0x3f000
+#define CAL_SEL_0_1 0xc0000000
+#define CAL_SEL_2 0x40000000
+#define CAL_SEL_SHIFT 30
+#define CAL_SEL_SHIFT_2 28
+
+#define S0_P1_SHIFT 8
+#define S1_P1_SHIFT 14
+#define S2_P1_SHIFT 20
+#define S3_P1_SHIFT 26
+#define S5_P1_SHIFT 6
+#define S6_P1_SHIFT 12
+#define S7_P1_SHIFT 18
+#define S8_P1_SHIFT 24
+#define S9_P1_BKP_SHIFT 6
+#define S10_P1_SHIFT 6
+#define S10_P1_BKP_SHIFT 12
+
+#define BASE2_SHIFT 12
+#define BASE2_BKP_SHIFT 18
+#define S0_P2_SHIFT 20
+#define S0_P2_BKP_SHIFT 26
+#define S1_P2_SHIFT 26
+#define S2_P2_BKP_SHIFT 6
+#define S3_P2_SHIFT 6
+#define S3_P2_BKP_SHIFT 12
+#define S4_P2_SHIFT 12
+#define S4_P2_BKP_SHIFT 18
+#define S5_P2_SHIFT 18
+#define S5_P2_BKP_SHIFT 24
+#define S6_P2_SHIFT 24
+#define S7_P2_BKP_SHIFT 6
+#define S8_P2_SHIFT 6
+#define S8_P2_BKP_SHIFT 12
+#define S9_P2_SHIFT 12
+#define S9_P2_BKP_SHIFT 18
+#define S10_P2_SHIFT 18
+#define S10_P2_BKP_SHIFT 24
+
+#define BASE2_MASK 0xff000
+#define BASE2_BKP_MASK 0xfc0000
+#define S0_P2_MASK 0x3f00000
+#define S0_P2_BKP_MASK 0xfc000000
+#define S1_P2_MASK 0xfc000000
+#define S1_P2_BKP_MASK 0x3f
+#define S2_P2_MASK 0x3f
+#define S2_P2_BKP_MASK 0xfc0
+#define S3_P2_MASK 0xfc0
+#define S3_P2_BKP_MASK 0x3f000
+#define S4_P2_MASK 0x3f000
+#define S4_P2_BKP_MASK 0xfc0000
+#define S5_P2_MASK 0xfc0000
+#define S5_P2_BKP_MASK 0x3f000000
+#define S6_P2_MASK 0x3f000000
+#define S6_P2_BKP_MASK 0x3f
+#define S7_P2_MASK 0x3f
+#define S7_P2_BKP_MASK 0xfc0
+#define S8_P2_MASK 0xfc0
+#define S8_P2_BKP_MASK 0x3f000
+#define S9_P2_MASK 0x3f000
+#define S9_P2_BKP_MASK 0xfc0000
+#define S10_P2_MASK 0xfc0000
+#define S10_P2_BKP_MASK 0x3f000000
+
+#define BKP_SEL 0x3
+#define BKP_REDUN_SEL 0xe0000000
+#define BKP_REDUN_SHIFT 29
+
+#define BIT_APPEND 0x3
+
+static int calibrate_8974(struct tsens_device *tmdev)
+{
+ int base1 = 0, base2 = 0, i;
+ u32 p1[11], p2[11];
+ int mode = 0;
+ u32 *calib, *bkp;
+ u32 calib_redun_sel;
+
+ calib = (u32 *)qfprom_read(tmdev->dev, "calib");
+ if (IS_ERR(calib))
+ return PTR_ERR(calib);
+
+ bkp = (u32 *)qfprom_read(tmdev->dev, "calib_backup");
+ if (IS_ERR(bkp))
+ return PTR_ERR(bkp);
+
+ calib_redun_sel = bkp[1] & BKP_REDUN_SEL;
+ calib_redun_sel >>= BKP_REDUN_SHIFT;
+
+ if (calib_redun_sel == BKP_SEL) {
+ mode = (calib[4] & CAL_SEL_0_1) >> CAL_SEL_SHIFT;
+ mode |= (calib[5] & CAL_SEL_2) >> CAL_SEL_SHIFT_2;
+
+ switch (mode) {
+ case TWO_PT_CALIB:
+ base2 = (bkp[2] & BASE2_BKP_MASK) >> BASE2_BKP_SHIFT;
+ p2[0] = (bkp[2] & S0_P2_BKP_MASK) >> S0_P2_BKP_SHIFT;
+ p2[1] = (bkp[3] & S1_P2_BKP_MASK);
+ p2[2] = (bkp[3] & S2_P2_BKP_MASK) >> S2_P2_BKP_SHIFT;
+ p2[3] = (bkp[3] & S3_P2_BKP_MASK) >> S3_P2_BKP_SHIFT;
+ p2[4] = (bkp[3] & S4_P2_BKP_MASK) >> S4_P2_BKP_SHIFT;
+ p2[5] = (calib[4] & S5_P2_BKP_MASK) >> S5_P2_BKP_SHIFT;
+ p2[6] = (calib[5] & S6_P2_BKP_MASK);
+ p2[7] = (calib[5] & S7_P2_BKP_MASK) >> S7_P2_BKP_SHIFT;
+ p2[8] = (calib[5] & S8_P2_BKP_MASK) >> S8_P2_BKP_SHIFT;
+ p2[9] = (calib[5] & S9_P2_BKP_MASK) >> S9_P2_BKP_SHIFT;
+ p2[10] = (calib[5] & S10_P2_BKP_MASK) >> S10_P2_BKP_SHIFT;
+ /* Fall through */
+ case ONE_PT_CALIB:
+ case ONE_PT_CALIB2:
+ base1 = bkp[0] & BASE1_MASK;
+ p1[0] = (bkp[0] & S0_P1_MASK) >> S0_P1_SHIFT;
+ p1[1] = (bkp[0] & S1_P1_MASK) >> S1_P1_SHIFT;
+ p1[2] = (bkp[0] & S2_P1_MASK) >> S2_P1_SHIFT;
+ p1[3] = (bkp[0] & S3_P1_MASK) >> S3_P1_SHIFT;
+ p1[4] = (bkp[1] & S4_P1_MASK);
+ p1[5] = (bkp[1] & S5_P1_MASK) >> S5_P1_SHIFT;
+ p1[6] = (bkp[1] & S6_P1_MASK) >> S6_P1_SHIFT;
+ p1[7] = (bkp[1] & S7_P1_MASK) >> S7_P1_SHIFT;
+ p1[8] = (bkp[2] & S8_P1_MASK_BKP) >> S8_P1_SHIFT;
+ p1[9] = (bkp[2] & S9_P1_MASK_BKP) >> S9_P1_BKP_SHIFT;
+ p1[10] = (bkp[2] & S10_P1_MASK_BKP) >> S10_P1_BKP_SHIFT;
+ break;
+ }
+ } else {
+ mode = (calib[1] & CAL_SEL_0_1) >> CAL_SEL_SHIFT;
+ mode |= (calib[3] & CAL_SEL_2) >> CAL_SEL_SHIFT_2;
+
+ switch (mode) {
+ case TWO_PT_CALIB:
+ base2 = (calib[2] & BASE2_MASK) >> BASE2_SHIFT;
+ p2[0] = (calib[2] & S0_P2_MASK) >> S0_P2_SHIFT;
+ p2[1] = (calib[2] & S1_P2_MASK) >> S1_P2_SHIFT;
+ p2[2] = (calib[3] & S2_P2_MASK);
+ p2[3] = (calib[3] & S3_P2_MASK) >> S3_P2_SHIFT;
+ p2[4] = (calib[3] & S4_P2_MASK) >> S4_P2_SHIFT;
+ p2[5] = (calib[3] & S5_P2_MASK) >> S5_P2_SHIFT;
+ p2[6] = (calib[3] & S6_P2_MASK) >> S6_P2_SHIFT;
+ p2[7] = (calib[4] & S7_P2_MASK);
+ p2[8] = (calib[4] & S8_P2_MASK) >> S8_P2_SHIFT;
+ p2[9] = (calib[4] & S9_P2_MASK) >> S9_P2_SHIFT;
+ p2[10] = (calib[4] & S10_P2_MASK) >> S10_P2_SHIFT;
+ /* Fall through */
+ case ONE_PT_CALIB:
+ case ONE_PT_CALIB2:
+ base1 = calib[0] & BASE1_MASK;
+ p1[0] = (calib[0] & S0_P1_MASK) >> S0_P1_SHIFT;
+ p1[1] = (calib[0] & S1_P1_MASK) >> S1_P1_SHIFT;
+ p1[2] = (calib[0] & S2_P1_MASK) >> S2_P1_SHIFT;
+ p1[3] = (calib[0] & S3_P1_MASK) >> S3_P1_SHIFT;
+ p1[4] = (calib[1] & S4_P1_MASK);
+ p1[5] = (calib[1] & S5_P1_MASK) >> S5_P1_SHIFT;
+ p1[6] = (calib[1] & S6_P1_MASK) >> S6_P1_SHIFT;
+ p1[7] = (calib[1] & S7_P1_MASK) >> S7_P1_SHIFT;
+ p1[8] = (calib[1] & S8_P1_MASK) >> S8_P1_SHIFT;
+ p1[9] = (calib[2] & S9_P1_MASK);
+ p1[10] = (calib[2] & S10_P1_MASK) >> S10_P1_SHIFT;
+ break;
+ }
+ }
+
+ switch (mode) {
+ case ONE_PT_CALIB:
+ for (i = 0; i < tmdev->num_sensors; i++)
+ p1[i] += (base1 << 2) | BIT_APPEND;
+ break;
+ case TWO_PT_CALIB:
+ for (i = 0; i < tmdev->num_sensors; i++) {
+ p2[i] += base2;
+ p2[i] <<= 2;
+ p2[i] |= BIT_APPEND;
+ }
+ /* Fall through */
+ case ONE_PT_CALIB2:
+ for (i = 0; i < tmdev->num_sensors; i++) {
+ p1[i] += base1;
+ p1[i] <<= 2;
+ p1[i] |= BIT_APPEND;
+ }
+ break;
+ default:
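+ /* No valid fused calibration data; fall back to nominal defaults */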
+ for (i = 0; i < tmdev->num_sensors; i++)
+ p2[i] = 780;
+ p1[0] = 502;
+ p1[1] = 509;
+ p1[2] = 503;
+ p1[3] = 509;
+ p1[4] = 505;
+ p1[5] = 509;
+ p1[6] = 507;
+ p1[7] = 510;
+ p1[8] = 508;
+ p1[9] = 509;
+ p1[10] = 508;
+ break;
+ }
+
+ compute_intercept_slope(tmdev, p1, p2, mode);
+
+ return 0;
+}
+
+const struct tsens_ops ops_8974 = {
+ .init = init_common,
+ .calibrate = calibrate_8974,
+ .get_temp = get_temp_common,
+};
diff --git a/drivers/thermal/qcom/tsens-common.c b/drivers/thermal/qcom/tsens-common.c
new file mode 100644
index 000000000000..4acf52c3533e
--- /dev/null
+++ b/drivers/thermal/qcom/tsens-common.c
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include "tsens.h"
+
+#define S0_ST_ADDR 0x1030
+#define SN_ADDR_OFFSET 0x4
+#define SN_ST_TEMP_MASK 0x3ff
+#define CAL_DEGC_PT1 30
+#define CAL_DEGC_PT2 120
+#define SLOPE_FACTOR 1000
+
+char *qfprom_read(struct device *dev, const char *cname)
+{
+ struct nvmem_cell *cell;
+ ssize_t data;
+ char *ret;
+
+ cell = nvmem_cell_get(dev, cname);
+ if (IS_ERR(cell))
+ return ERR_CAST(cell);
+
+ ret = nvmem_cell_read(cell, &data);
+ nvmem_cell_put(cell);
+ return ret;
+}
+
+void compute_intercept_slope(struct tsens_device *tmdev, u32 *p1,
+ u32 *p2, u32 mode)
+{
+ int i;
+ int num, den;
+
+ for (i = 0; i < tmdev->num_sensors; i++) {
+ dev_dbg(tmdev->dev,
+ "sensor%d - data_point1:%#x data_point2:%#x\n",
+ i, p1[i], p2[i]);
+
+ if (mode == TWO_PT_CALIB) {
+ /*
+ * slope (m) = adc_code2 - adc_code1 (y2 - y1)/
+ * temp_120_degc - temp_30_degc (x2 - x1)
+ */
+ num = p2[i] - p1[i];
+ num *= SLOPE_FACTOR;
+ den = CAL_DEGC_PT2 - CAL_DEGC_PT1;
+ tmdev->sensor[i].slope = num / den;
+ }
+
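+ /* offset (c) = adc_code1 - slope * temp_30_degc (y1 - m * x1) */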
+ tmdev->sensor[i].offset = (p1[i] * SLOPE_FACTOR) -
+ (CAL_DEGC_PT1 *
+ tmdev->sensor[i].slope);
+ dev_dbg(tmdev->dev, "offset:%d\n", tmdev->sensor[i].offset);
+ }
+}
+
+static inline int code_to_degc(u32 adc_code, const struct tsens_sensor *s)
+{
+ int degc, num, den;
+
+ num = (adc_code * SLOPE_FACTOR) - s->offset;
+ den = s->slope;
+
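+ /* Round to the nearest degree by adding/subtracting half the slope before dividing */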
+ if (num > 0)
+ degc = num + (den / 2);
+ else if (num < 0)
+ degc = num - (den / 2);
+ else
+ degc = num;
+
+ degc /= den;
+
+ return degc;
+}
+
+int get_temp_common(struct tsens_device *tmdev, int id, int *temp)
+{
+ struct tsens_sensor *s = &tmdev->sensor[id];
+ u32 code;
+ unsigned int sensor_addr;
+ int last_temp = 0, ret;
+
+ sensor_addr = S0_ST_ADDR + s->hw_id * SN_ADDR_OFFSET;
+ ret = regmap_read(tmdev->map, sensor_addr, &code);
+ if (ret)
+ return ret;
+ last_temp = code & SN_ST_TEMP_MASK;
+
+ *temp = code_to_degc(last_temp, s) * 1000;
+
+ return 0;
+}
+
+static const struct regmap_config tsens_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
+int init_common(struct tsens_device *tmdev)
+{
+ void __iomem *base;
+
+ base = of_iomap(tmdev->dev->of_node, 0);
+ if (!base)
+ return -EINVAL;
+
+ tmdev->map = devm_regmap_init_mmio(tmdev->dev, base, &tsens_config);
+ if (IS_ERR(tmdev->map))
+ return PTR_ERR(tmdev->map);
+
+ return 0;
+}
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
new file mode 100644
index 000000000000..d640e8f329f6
--- /dev/null
+++ b/drivers/thermal/qcom/tsens.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+#include "tsens.h"
+
+static int tsens_get_temp(void *data, int *temp)
+{
+ const struct tsens_sensor *s = data;
+ struct tsens_device *tmdev = s->tmdev;
+
+ return tmdev->ops->get_temp(tmdev, s->id, temp);
+}
+
+static int tsens_get_trend(void *data, long *temp)
+{
+ const struct tsens_sensor *s = data;
+ struct tsens_device *tmdev = s->tmdev;
+
+ if (tmdev->ops->get_trend)
+ return tmdev->ops->get_trend(tmdev, s->id, temp);
+
+ return -ENOSYS;
+}
+
+static int tsens_suspend(struct device *dev)
+{
+ struct tsens_device *tmdev = dev_get_drvdata(dev);
+
+ if (tmdev->ops && tmdev->ops->suspend)
+ return tmdev->ops->suspend(tmdev);
+
+ return 0;
+}
+
+static int tsens_resume(struct device *dev)
+{
+ struct tsens_device *tmdev = dev_get_drvdata(dev);
+
+ if (tmdev->ops && tmdev->ops->resume)
+ return tmdev->ops->resume(tmdev);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tsens_pm_ops, tsens_suspend, tsens_resume);
+
+static const struct of_device_id tsens_table[] = {
+ {
+ .compatible = "qcom,msm8916-tsens",
+ .data = &ops_8916,
+ }, {
+ .compatible = "qcom,msm8974-tsens",
+ .data = &ops_8974,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tsens_table);
+
+static const struct thermal_zone_of_device_ops tsens_of_ops = {
+ .get_temp = tsens_get_temp,
+ .get_trend = tsens_get_trend,
+};
+
+static int tsens_register(struct tsens_device *tmdev)
+{
+ int i, ret;
+ struct thermal_zone_device *tzd;
+ u32 *hw_id, n = tmdev->num_sensors;
+ struct device_node *np = tmdev->dev->of_node;
+
+ hw_id = devm_kcalloc(tmdev->dev, n, sizeof(u32), GFP_KERNEL);
+ if (!hw_id)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(np, "qcom,sensor-id", hw_id, n);
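+ /* Without a "qcom,sensor-id" property, fall back to an identity hw_id mapping */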
+ for (i = 0; i < tmdev->num_sensors; i++) {
+ if (ret)
+ tmdev->sensor[i].hw_id = i;
+ else
+ tmdev->sensor[i].hw_id = hw_id[i];
+ tmdev->sensor[i].tmdev = tmdev;
+ tmdev->sensor[i].id = i;
+ tzd = thermal_zone_of_sensor_register(tmdev->dev, i,
+ &tmdev->sensor[i],
+ &tsens_of_ops);
+ if (IS_ERR(tzd))
+ continue;
+ tmdev->sensor[i].tzd = tzd;
+ if (tmdev->ops->enable)
+ tmdev->ops->enable(tmdev, i);
+ }
+ return 0;
+}
+
+static int tsens_probe(struct platform_device *pdev)
+{
+ int ret, i, num;
+ struct device *dev;
+ struct device_node *np;
+ struct tsens_sensor *s;
+ struct tsens_device *tmdev;
+ const struct of_device_id *id;
+
+ if (pdev->dev.of_node)
+ dev = &pdev->dev;
+ else
+ dev = pdev->dev.parent;
+
+ np = dev->of_node;
+
+ num = of_property_count_u32_elems(np, "qcom,tsens-slopes");
+ if (num <= 0) {
+ dev_err(dev, "invalid tsens slopes\n");
+ return -EINVAL;
+ }
+
+ tmdev = devm_kzalloc(dev, sizeof(*tmdev) +
+ num * sizeof(*s), GFP_KERNEL);
+ if (!tmdev)
+ return -ENOMEM;
+
+ tmdev->dev = dev;
+ tmdev->num_sensors = num;
+
+ for (i = 0, s = tmdev->sensor; i < tmdev->num_sensors; i++, s++)
+ of_property_read_u32_index(np, "qcom,tsens-slopes", i,
+ &s->slope);
+
+ id = of_match_node(tsens_table, np);
+ if (id)
+ tmdev->ops = id->data;
+ else
+ tmdev->ops = &ops_8960;
+
+ if (!tmdev->ops || !tmdev->ops->init || !tmdev->ops->calibrate ||
+ !tmdev->ops->get_temp)
+ return -EINVAL;
+
+ ret = tmdev->ops->init(tmdev);
+ if (ret < 0) {
+ dev_err(dev, "tsens init failed\n");
+ return ret;
+ }
+
+ ret = tmdev->ops->calibrate(tmdev);
+ if (ret < 0) {
+ dev_err(dev, "tsens calibration failed\n");
+ return ret;
+ }
+
+ ret = tsens_register(tmdev);
+
+ platform_set_drvdata(pdev, tmdev);
+
+ return ret;
+}
+
+static int tsens_remove(struct platform_device *pdev)
+{
+ int i;
+ struct tsens_device *tmdev = platform_get_drvdata(pdev);
+ struct thermal_zone_device *tzd;
+
+ if (tmdev->ops->disable)
+ tmdev->ops->disable(tmdev);
+
+ for (i = 0; i < tmdev->num_sensors; i++) {
+ tzd = tmdev->sensor[i].tzd;
+ thermal_zone_of_sensor_unregister(&pdev->dev, tzd);
+ }
+
+ return 0;
+}
+
+static struct platform_driver tsens_driver = {
+ .probe = tsens_probe,
+ .remove = tsens_remove,
+ .driver = {
+ .name = "qcom-tsens",
+ .pm = &tsens_pm_ops,
+ .of_match_table = tsens_table,
+ },
+};
+module_platform_driver(tsens_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QCOM Temperature Sensor driver");
+MODULE_ALIAS("platform:qcom-tsens");
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
new file mode 100644
index 000000000000..10e2e845a151
--- /dev/null
+++ b/drivers/thermal/qcom/tsens.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __QCOM_TSENS_H__
+#define __QCOM_TSENS_H__
+
+#define ONE_PT_CALIB 0x1
+#define ONE_PT_CALIB2 0x2
+#define TWO_PT_CALIB 0x3
+
+struct tsens_device;
+
+struct tsens_sensor {
+ struct tsens_device *tmdev;
+ struct thermal_zone_device *tzd;
+ int offset;
+ int id;
+ int hw_id;
+ u32 slope;
+ u32 status;
+};
+
+struct tsens_ops {
+ /* mandatory callbacks */
+ int (*init)(struct tsens_device *);
+ int (*calibrate)(struct tsens_device *);
+ int (*get_temp)(struct tsens_device *, int, int *);
+ /* optional callbacks */
+ int (*enable)(struct tsens_device *, int);
+ void (*disable)(struct tsens_device *);
+ int (*suspend)(struct tsens_device *);
+ int (*resume)(struct tsens_device *);
+ int (*get_trend)(struct tsens_device *, int, long *);
+};
+
+/* Registers to be saved/restored across a context loss */
+struct tsens_context {
+ int threshold;
+ int control;
+};
+
+struct tsens_device {
+ struct device *dev;
+ u32 num_sensors;
+ struct regmap *map;
+ struct regmap_field *status_field;
+ struct tsens_context ctx;
+ bool trdy;
+ const struct tsens_ops *ops;
+ struct tsens_sensor sensor[0];
+};
+
+char *qfprom_read(struct device *, const char *);
+void compute_intercept_slope(struct tsens_device *, u32 *, u32 *, u32);
+int init_common(struct tsens_device *);
+int get_temp_common(struct tsens_device *, int, int *);
+
+extern const struct tsens_ops ops_8960, ops_8916, ops_8974;
+
+#endif /* __QCOM_TSENS_H__ */
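
A minimal sketch, for orientation only, of how a platform wires an ops table against the header above. The "example" names and the callback bodies are placeholders, not code from this series; only init, calibrate and get_temp are required by the probe path, the remaining callbacks may stay NULL.

    #include "tsens.h"

    /* Hypothetical ops table; bodies are placeholders. */
    static int example_init(struct tsens_device *tmdev)
    {
            return init_common(tmdev);              /* common regmap setup */
    }

    static int example_calibrate(struct tsens_device *tmdev)
    {
            /* a real implementation reads fuse data (e.g. via qfprom_read())
             * and feeds it to compute_intercept_slope() */
            return 0;
    }

    static int example_get_temp(struct tsens_device *tmdev, int id, int *temp)
    {
            return get_temp_common(tmdev, id, temp);
    }

    const struct tsens_ops ops_example = {
            .init           = example_init,
            .calibrate      = example_calibrate,
            .get_temp       = example_get_temp,
            /* enable/disable, suspend/resume, get_trend are optional */
    };
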
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index b73889c8ed4b..2d779b1f010f 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -20,6 +20,8 @@
#endif
#include <linux/atomic.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/io.h>
@@ -31,6 +33,7 @@
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
+#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
@@ -39,6 +42,11 @@
#include "msm_serial.h"
+#define UARTDM_BURST_SIZE 16 /* in bytes */
+#define UARTDM_TX_AIGN(x) ((x) & ~0x3) /* valid for > 1p3 */
+#define UARTDM_TX_MAX 256 /* in bytes, valid for <= 1p3 */
+#define UARTDM_RX_SIZE (UART_XMIT_SIZE / 4)
+
enum {
UARTDM_1P1 = 1,
UARTDM_1P2,
@@ -46,6 +54,17 @@ enum {
UARTDM_1P4,
};
+struct msm_dma {
+ struct dma_chan *chan;
+ enum dma_data_direction dir;
+ dma_addr_t phys;
+ unsigned char *virt;
+ dma_cookie_t cookie;
+ u32 enable_bit;
+ unsigned int count;
+ struct dma_async_tx_descriptor *desc;
+};
+
struct msm_port {
struct uart_port uart;
char name[16];
@@ -55,9 +74,153 @@ struct msm_port {
int is_uartdm;
unsigned int old_snap_state;
bool break_detected;
+ struct msm_dma tx_dma;
+ struct msm_dma rx_dma;
};
-static inline void wait_for_xmitr(struct uart_port *port)
+static void msm_handle_tx(struct uart_port *port);
+static void msm_start_rx_dma(struct msm_port *msm_port);
+
+void msm_stop_dma(struct uart_port *port, struct msm_dma *dma)
+{
+ struct device *dev = port->dev;
+ unsigned int mapped;
+ u32 val;
+
+ mapped = dma->count;
+ dma->count = 0;
+
+ dmaengine_terminate_all(dma->chan);
+
+ /*
+ * A DMA stall can occur if an enqueue and a flush command are issued
+ * concurrently. For example, before changing the baud rate or protocol
+ * configuration and sending a flush command to the ADM, disable the
+ * UARTDM channel first.
+ * Note: do not reset the receiver here right away, as doing
+ * disable/reset or reset/disable back to back is not recommended.
+ */
+ val = msm_read(port, UARTDM_DMEN);
+ val &= ~dma->enable_bit;
+ msm_write(port, val, UARTDM_DMEN);
+
+ if (mapped)
+ dma_unmap_single(dev, dma->phys, mapped, dma->dir);
+}
+
+static void msm_release_dma(struct msm_port *msm_port)
+{
+ struct msm_dma *dma;
+
+ dma = &msm_port->tx_dma;
+ if (dma->chan) {
+ msm_stop_dma(&msm_port->uart, dma);
+ dma_release_channel(dma->chan);
+ }
+
+ memset(dma, 0, sizeof(*dma));
+
+ dma = &msm_port->rx_dma;
+ if (dma->chan) {
+ msm_stop_dma(&msm_port->uart, dma);
+ dma_release_channel(dma->chan);
+ kfree(dma->virt);
+ }
+
+ memset(dma, 0, sizeof(*dma));
+}
+
+static void msm_request_tx_dma(struct msm_port *msm_port, resource_size_t base)
+{
+ struct device *dev = msm_port->uart.dev;
+ struct dma_slave_config conf;
+ struct msm_dma *dma;
+ u32 crci = 0;
+ int ret;
+
+ dma = &msm_port->tx_dma;
+
+ /* allocate DMA resources, if available */
+ dma->chan = dma_request_slave_channel_reason(dev, "tx");
+ if (IS_ERR(dma->chan))
+ goto no_tx;
+
+ of_property_read_u32(dev->of_node, "qcom,tx-crci", &crci);
+
+ memset(&conf, 0, sizeof(conf));
+ conf.direction = DMA_MEM_TO_DEV;
+ conf.device_fc = true;
+ conf.dst_addr = base + UARTDM_TF;
+ conf.dst_maxburst = UARTDM_BURST_SIZE;
+ conf.slave_id = crci;
+
+ ret = dmaengine_slave_config(dma->chan, &conf);
+ if (ret)
+ goto rel_tx;
+
+ dma->dir = DMA_TO_DEVICE;
+
+ if (msm_port->is_uartdm < UARTDM_1P4)
+ dma->enable_bit = UARTDM_DMEN_TX_DM_ENABLE;
+ else
+ dma->enable_bit = UARTDM_DMEN_TX_BAM_ENABLE;
+
+ return;
+
+rel_tx:
+ dma_release_channel(dma->chan);
+no_tx:
+ memset(dma, 0, sizeof(*dma));
+}
+
+static void msm_request_rx_dma(struct msm_port *msm_port, resource_size_t base)
+{
+ struct device *dev = msm_port->uart.dev;
+ struct dma_slave_config conf;
+ struct msm_dma *dma;
+ u32 crci = 0;
+ int ret;
+
+ dma = &msm_port->rx_dma;
+
+ /* allocate DMA resources, if available */
+ dma->chan = dma_request_slave_channel_reason(dev, "rx");
+ if (IS_ERR(dma->chan))
+ goto no_rx;
+
+ of_property_read_u32(dev->of_node, "qcom,rx-crci", &crci);
+
+ dma->virt = kzalloc(UARTDM_RX_SIZE, GFP_KERNEL);
+ if (!dma->virt)
+ goto rel_rx;
+
+ memset(&conf, 0, sizeof(conf));
+ conf.direction = DMA_DEV_TO_MEM;
+ conf.device_fc = true;
+ conf.src_addr = base + UARTDM_RF;
+ conf.src_maxburst = UARTDM_BURST_SIZE;
+ conf.slave_id = crci;
+
+ ret = dmaengine_slave_config(dma->chan, &conf);
+ if (ret)
+ goto err;
+
+ dma->dir = DMA_FROM_DEVICE;
+
+ if (msm_port->is_uartdm < UARTDM_1P4)
+ dma->enable_bit = UARTDM_DMEN_RX_DM_ENABLE;
+ else
+ dma->enable_bit = UARTDM_DMEN_RX_BAM_ENABLE;
+
+ return;
+err:
+ kfree(dma->virt);
+rel_rx:
+ dma_release_channel(dma->chan);
+no_rx:
+ memset(dma, 0, sizeof(*dma));
+}
+
+static inline void msm_wait_for_xmitr(struct uart_port *port)
{
while (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY)) {
if (msm_read(port, UART_ISR) & UART_ISR_TX_READY)
@@ -78,17 +241,277 @@ static void msm_stop_tx(struct uart_port *port)
static void msm_start_tx(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_dma *dma = &msm_port->tx_dma;
+
+ /* Already started in DMA mode */
+ if (dma->count)
+ return;
+
+ msm_port->imr |= UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, UART_IMR);
+}
+
+static void msm_reset_dm_count(struct uart_port *port, int count)
+{
+ msm_wait_for_xmitr(port);
+ msm_write(port, count, UARTDM_NCF_TX);
+ msm_read(port, UARTDM_NCF_TX);
+}
+
+static void msm_complete_tx_dma(void *args)
+{
+ struct msm_port *msm_port = args;
+ struct uart_port *port = &msm_port->uart;
+ struct circ_buf *xmit = &port->state->xmit;
+ struct msm_dma *dma = &msm_port->tx_dma;
+ struct dma_tx_state state;
+ enum dma_status status;
+ unsigned long flags;
+ unsigned int count;
+ u32 val;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Already stopped */
+ if (!dma->count)
+ goto done;
+
+ status = dmaengine_tx_status(dma->chan, dma->cookie, &state);
+
+ dma_unmap_single(port->dev, dma->phys, dma->count, dma->dir);
+ val = msm_read(port, UARTDM_DMEN);
+ val &= ~dma->enable_bit;
+ msm_write(port, val, UARTDM_DMEN);
+
+ if (msm_port->is_uartdm > UARTDM_1P3) {
+ msm_write(port, UART_CR_CMD_RESET_TX, UART_CR);
+ msm_write(port, UART_CR_TX_ENABLE, UART_CR);
+ }
+
+ count = dma->count - state.residue;
+ port->icount.tx += count;
+ dma->count = 0;
+
+ xmit->tail += count;
+ xmit->tail &= UART_XMIT_SIZE - 1;
+
+ /* Restore "Tx FIFO below watermark" interrupt */
msm_port->imr |= UART_IMR_TXLEV;
msm_write(port, msm_port->imr, UART_IMR);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ msm_handle_tx(port);
+done:
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static int msm_handle_tx_dma(struct msm_port *msm_port, unsigned int count)
+{
+ struct circ_buf *xmit = &msm_port->uart.state->xmit;
+ struct uart_port *port = &msm_port->uart;
+ struct msm_dma *dma = &msm_port->tx_dma;
+ void *cpu_addr;
+ int ret;
+ u32 val;
+
+ cpu_addr = &xmit->buf[xmit->tail];
+
+ dma->phys = dma_map_single(port->dev, cpu_addr, count, dma->dir);
+ ret = dma_mapping_error(port->dev, dma->phys);
+ if (ret)
+ return ret;
+
+ dma->desc = dmaengine_prep_slave_single(dma->chan, dma->phys,
+ count, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT |
+ DMA_PREP_FENCE);
+ if (!dma->desc) {
+ ret = -EIO;
+ goto unmap;
+ }
+
+ dma->desc->callback = msm_complete_tx_dma;
+ dma->desc->callback_param = msm_port;
+
+ dma->cookie = dmaengine_submit(dma->desc);
+ ret = dma_submit_error(dma->cookie);
+ if (ret)
+ goto unmap;
+
+ /*
+ * The DMA completion callback reloads the Tx FIFO, so the
+ * "Tx FIFO below watermark" interrupt is not needed; disable it
+ */
+ msm_port->imr &= ~UART_IMR_TXLEV;
+ msm_write(port, msm_port->imr, UART_IMR);
+
+ dma->count = count;
+
+ val = msm_read(port, UARTDM_DMEN);
+ val |= dma->enable_bit;
+
+ if (msm_port->is_uartdm < UARTDM_1P4)
+ msm_write(port, val, UARTDM_DMEN);
+
+ msm_reset_dm_count(port, count);
+
+ if (msm_port->is_uartdm > UARTDM_1P3)
+ msm_write(port, val, UARTDM_DMEN);
+
+ dma_async_issue_pending(dma->chan);
+ return 0;
+unmap:
+ dma_unmap_single(port->dev, dma->phys, count, dma->dir);
+ return ret;
+}
+
+static void msm_complete_rx_dma(void *args)
+{
+ struct msm_port *msm_port = args;
+ struct uart_port *port = &msm_port->uart;
+ struct tty_port *tport = &port->state->port;
+ struct msm_dma *dma = &msm_port->rx_dma;
+ int count = 0, i, sysrq;
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* Already stopped */
+ if (!dma->count)
+ goto done;
+
+ val = msm_read(port, UARTDM_DMEN);
+ val &= ~dma->enable_bit;
+ msm_write(port, val, UARTDM_DMEN);
+
+ /* Restore interrupts */
+ msm_port->imr |= UART_IMR_RXLEV | UART_IMR_RXSTALE;
+ msm_write(port, msm_port->imr, UART_IMR);
+
+ if (msm_read(port, UART_SR) & UART_SR_OVERRUN) {
+ port->icount.overrun++;
+ tty_insert_flip_char(tport, 0, TTY_OVERRUN);
+ msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ }
+
+ count = msm_read(port, UARTDM_RX_TOTAL_SNAP);
+
+ port->icount.rx += count;
+
+ dma->count = 0;
+
+ dma_unmap_single(port->dev, dma->phys, UARTDM_RX_SIZE, dma->dir);
+
+ for (i = 0; i < count; i++) {
+ char flag = TTY_NORMAL;
+
+ if (msm_port->break_detected && dma->virt[i] == 0) {
+ port->icount.brk++;
+ flag = TTY_BREAK;
+ msm_port->break_detected = false;
+ if (uart_handle_break(port))
+ continue;
+ }
+
+ if (!(port->read_status_mask & UART_SR_RX_BREAK))
+ flag = TTY_NORMAL;
+
+ spin_unlock_irqrestore(&port->lock, flags);
+ sysrq = uart_handle_sysrq_char(port, dma->virt[i]);
+ spin_lock_irqsave(&port->lock, flags);
+ if (!sysrq)
+ tty_insert_flip_char(tport, dma->virt[i], flag);
+ }
+
+ msm_start_rx_dma(msm_port);
+done:
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ if (count)
+ tty_flip_buffer_push(tport);
+}
+
+static void msm_start_rx_dma(struct msm_port *msm_port)
+{
+ struct msm_dma *dma = &msm_port->rx_dma;
+ struct uart_port *uart = &msm_port->uart;
+ u32 val;
+ int ret;
+
+ if (!dma->chan)
+ return;
+
+ dma->phys = dma_map_single(uart->dev, dma->virt,
+ UARTDM_RX_SIZE, dma->dir);
+ ret = dma_mapping_error(uart->dev, dma->phys);
+ if (ret)
+ return;
+
+ dma->desc = dmaengine_prep_slave_single(dma->chan, dma->phys,
+ UARTDM_RX_SIZE, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!dma->desc)
+ goto unmap;
+
+ dma->desc->callback = msm_complete_rx_dma;
+ dma->desc->callback_param = msm_port;
+
+ dma->cookie = dmaengine_submit(dma->desc);
+ ret = dma_submit_error(dma->cookie);
+ if (ret)
+ goto unmap;
+ /*
+ * DMA off-loads the Rx FIFO, so the "Rx FIFO over watermark"
+ * and "stale" interrupts are not needed; disable them
+ */
+ msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
+
+ /*
+ * When the DMA engine is ADM3 (implied by UARTDM <= v1.3),
+ * RXSTALE is still needed to flush the input DMA FIFO to memory
+ */
+ if (msm_port->is_uartdm < UARTDM_1P4)
+ msm_port->imr |= UART_IMR_RXSTALE;
+
+ msm_write(uart, msm_port->imr, UART_IMR);
+
+ dma->count = UARTDM_RX_SIZE;
+
+ dma_async_issue_pending(dma->chan);
+
+ msm_write(uart, UART_CR_CMD_RESET_STALE_INT, UART_CR);
+ msm_write(uart, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+
+ val = msm_read(uart, UARTDM_DMEN);
+ val |= dma->enable_bit;
+
+ if (msm_port->is_uartdm < UARTDM_1P4)
+ msm_write(uart, val, UARTDM_DMEN);
+
+ msm_write(uart, UARTDM_RX_SIZE, UARTDM_DMRX);
+
+ if (msm_port->is_uartdm > UARTDM_1P3)
+ msm_write(uart, val, UARTDM_DMEN);
+
+ return;
+unmap:
+ dma_unmap_single(uart->dev, dma->phys, UARTDM_RX_SIZE, dma->dir);
}
static void msm_stop_rx(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_dma *dma = &msm_port->rx_dma;
msm_port->imr &= ~(UART_IMR_RXLEV | UART_IMR_RXSTALE);
msm_write(port, msm_port->imr, UART_IMR);
+
+ if (dma->chan)
+ msm_stop_dma(port, dma);
}
static void msm_enable_ms(struct uart_port *port)
@@ -99,7 +522,7 @@ static void msm_enable_ms(struct uart_port *port)
msm_write(port, msm_port->imr, UART_IMR);
}
-static void handle_rx_dm(struct uart_port *port, unsigned int misr)
+static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
{
struct tty_port *tport = &port->state->port;
unsigned int sr;
@@ -169,9 +592,12 @@ static void handle_rx_dm(struct uart_port *port, unsigned int misr)
msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
msm_write(port, 0xFFFFFF, UARTDM_DMRX);
msm_write(port, UART_CR_CMD_STALE_EVENT_ENABLE, UART_CR);
+
+ /* Try to use DMA */
+ msm_start_rx_dma(msm_port);
}
-static void handle_rx(struct uart_port *port)
+static void msm_handle_rx(struct uart_port *port)
{
struct tty_port *tport = &port->state->port;
unsigned int sr;
@@ -224,18 +650,11 @@ static void handle_rx(struct uart_port *port)
spin_lock(&port->lock);
}
-static void reset_dm_count(struct uart_port *port, int count)
-{
- wait_for_xmitr(port);
- msm_write(port, count, UARTDM_NCF_TX);
- msm_read(port, UARTDM_NCF_TX);
-}
-
-static void handle_tx(struct uart_port *port)
+static void msm_handle_tx_pio(struct uart_port *port, unsigned int tx_count)
{
struct circ_buf *xmit = &port->state->xmit;
struct msm_port *msm_port = UART_TO_MSM(port);
- unsigned int tx_count, num_chars;
+ unsigned int num_chars;
unsigned int tf_pointer = 0;
void __iomem *tf;
@@ -244,20 +663,8 @@ static void handle_tx(struct uart_port *port)
else
tf = port->membase + UART_TF;
- tx_count = uart_circ_chars_pending(xmit);
- tx_count = min3(tx_count, (unsigned int)UART_XMIT_SIZE - xmit->tail,
- port->fifosize);
-
- if (port->x_char) {
- if (msm_port->is_uartdm)
- reset_dm_count(port, tx_count + 1);
-
- iowrite8_rep(tf, &port->x_char, 1);
- port->icount.tx++;
- port->x_char = 0;
- } else if (tx_count && msm_port->is_uartdm) {
- reset_dm_count(port, tx_count);
- }
+ if (tx_count && msm_port->is_uartdm)
+ msm_reset_dm_count(port, tx_count);
while (tf_pointer < tx_count) {
int i;
@@ -290,7 +697,60 @@ static void handle_tx(struct uart_port *port)
uart_write_wakeup(port);
}
-static void handle_delta_cts(struct uart_port *port)
+static void msm_handle_tx(struct uart_port *port)
+{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ struct circ_buf *xmit = &msm_port->uart.state->xmit;
+ struct msm_dma *dma = &msm_port->tx_dma;
+ unsigned int pio_count, dma_count, dma_min;
+ void __iomem *tf;
+ int err = 0;
+
+ if (port->x_char) {
+ if (msm_port->is_uartdm)
+ tf = port->membase + UARTDM_TF;
+ else
+ tf = port->membase + UART_TF;
+
+ if (msm_port->is_uartdm)
+ msm_reset_dm_count(port, 1);
+
+ iowrite8_rep(tf, &port->x_char, 1);
+ port->icount.tx++;
+ port->x_char = 0;
+ return;
+ }
+
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ msm_stop_tx(port);
+ return;
+ }
+
+ pio_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ dma_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+
+ dma_min = 1; /* Always DMA */
+ if (msm_port->is_uartdm > UARTDM_1P3) {
+ dma_count = UARTDM_TX_AIGN(dma_count);
+ dma_min = UARTDM_BURST_SIZE;
+ } else {
+ if (dma_count > UARTDM_TX_MAX)
+ dma_count = UARTDM_TX_MAX;
+ }
+
+ if (pio_count > port->fifosize)
+ pio_count = port->fifosize;
+
+ if (!dma->chan || dma_count < dma_min)
+ msm_handle_tx_pio(port, pio_count);
+ else
+ err = msm_handle_tx_dma(msm_port, dma_count);
+
+ if (err) /* fall back to PIO mode */
+ msm_handle_tx_pio(port, pio_count);
+}
+
+static void msm_handle_delta_cts(struct uart_port *port)
{
msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
port->icount.cts++;
@@ -301,9 +761,12 @@ static irqreturn_t msm_irq(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_dma *dma = &msm_port->rx_dma;
+ unsigned long flags;
unsigned int misr;
+ u32 val;
- spin_lock(&port->lock);
+ spin_lock_irqsave(&port->lock, flags);
misr = msm_read(port, UART_MISR);
msm_write(port, 0, UART_IMR); /* disable interrupt */
@@ -313,18 +776,29 @@ static irqreturn_t msm_irq(int irq, void *dev_id)
}
if (misr & (UART_IMR_RXLEV | UART_IMR_RXSTALE)) {
- if (msm_port->is_uartdm)
- handle_rx_dm(port, misr);
- else
- handle_rx(port);
+ if (dma->count) {
+ val = UART_CR_CMD_STALE_EVENT_DISABLE;
+ msm_write(port, val, UART_CR);
+ val = UART_CR_CMD_RESET_STALE_INT;
+ msm_write(port, val, UART_CR);
+ /*
+ * Flush the DMA input FIFO to memory; this also
+ * triggers the DMA RX completion
+ */
+ dmaengine_terminate_all(dma->chan);
+ } else if (msm_port->is_uartdm) {
+ msm_handle_rx_dm(port, misr);
+ } else {
+ msm_handle_rx(port);
+ }
}
if (misr & UART_IMR_TXLEV)
- handle_tx(port);
+ msm_handle_tx(port);
if (misr & UART_IMR_DELTA_CTS)
- handle_delta_cts(port);
+ msm_handle_delta_cts(port);
msm_write(port, msm_port->imr, UART_IMR); /* restore interrupt */
- spin_unlock(&port->lock);
+ spin_unlock_irqrestore(&port->lock, flags);
return IRQ_HANDLED;
}
@@ -408,6 +882,7 @@ msm_find_best_baud(struct uart_port *port, unsigned int baud)
{ 3, 0xdd, 8 },
{ 2, 0xee, 16 },
{ 1, 0xff, 31 },
+ { 0, 0xff, 31 },
};
divisor = uart_get_divisor(port, baud);
@@ -419,21 +894,41 @@ msm_find_best_baud(struct uart_port *port, unsigned int baud)
return entry; /* Default to smallest divider */
}
-static int msm_set_baud_rate(struct uart_port *port, unsigned int baud)
+static int msm_set_baud_rate(struct uart_port *port, unsigned int baud,
+ unsigned long *saved_flags)
{
- unsigned int rxstale, watermark;
+ unsigned int rxstale, watermark, mask;
struct msm_port *msm_port = UART_TO_MSM(port);
const struct msm_baud_map *entry;
+ unsigned long flags;
entry = msm_find_best_baud(port, baud);
msm_write(port, entry->code, UART_CSR);
+ if (baud > 460800)
+ port->uartclk = baud * 16;
+
+ flags = *saved_flags;
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ clk_set_rate(msm_port->clk, port->uartclk);
+
+ spin_lock_irqsave(&port->lock, flags);
+ *saved_flags = flags;
+
/* RX stale watermark */
rxstale = entry->rxstale;
watermark = UART_IPR_STALE_LSB & rxstale;
- watermark |= UART_IPR_RXSTALE_LAST;
- watermark |= UART_IPR_STALE_TIMEOUT_MSB & (rxstale << 2);
+ if (msm_port->is_uartdm) {
+ mask = UART_DM_IPR_STALE_TIMEOUT_MSB;
+ } else {
+ watermark |= UART_IPR_RXSTALE_LAST;
+ mask = UART_IPR_STALE_TIMEOUT_MSB;
+ }
+
+ watermark |= mask & (rxstale << 2);
+
msm_write(port, watermark, UART_IPR);
/* set RX watermark */
@@ -476,7 +971,7 @@ static void msm_init_clock(struct uart_port *port)
static int msm_startup(struct uart_port *port)
{
struct msm_port *msm_port = UART_TO_MSM(port);
- unsigned int data, rfr_level;
+ unsigned int data, rfr_level, mask;
int ret;
snprintf(msm_port->name, sizeof(msm_port->name),
@@ -496,11 +991,23 @@ static int msm_startup(struct uart_port *port)
/* set automatic RFR level */
data = msm_read(port, UART_MR1);
- data &= ~UART_MR1_AUTO_RFR_LEVEL1;
+
+ if (msm_port->is_uartdm)
+ mask = UART_DM_MR1_AUTO_RFR_LEVEL1;
+ else
+ mask = UART_MR1_AUTO_RFR_LEVEL1;
+
+ data &= ~mask;
data &= ~UART_MR1_AUTO_RFR_LEVEL0;
- data |= UART_MR1_AUTO_RFR_LEVEL1 & (rfr_level << 2);
+ data |= mask & (rfr_level << 2);
data |= UART_MR1_AUTO_RFR_LEVEL0 & rfr_level;
msm_write(port, data, UART_MR1);
+
+ if (msm_port->is_uartdm) {
+ msm_request_tx_dma(msm_port, msm_port->uart.mapbase);
+ msm_request_rx_dma(msm_port, msm_port->uart.mapbase);
+ }
+
return 0;
}
@@ -511,6 +1018,9 @@ static void msm_shutdown(struct uart_port *port)
msm_port->imr = 0;
msm_write(port, 0, UART_IMR); /* disable interrupts */
+ if (msm_port->is_uartdm)
+ msm_release_dma(msm_port);
+
clk_disable_unprepare(msm_port->clk);
free_irq(port->irq, port);
@@ -519,14 +1029,19 @@ static void msm_shutdown(struct uart_port *port)
static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
+ struct msm_port *msm_port = UART_TO_MSM(port);
+ struct msm_dma *dma = &msm_port->rx_dma;
unsigned long flags;
unsigned int baud, mr;
spin_lock_irqsave(&port->lock, flags);
+ if (dma->chan) /* Terminate any pending DMA */
+ msm_stop_dma(port, dma);
+
/* calculate and set baud rate */
- baud = uart_get_baud_rate(port, termios, old, 300, 115200);
- baud = msm_set_baud_rate(port, baud);
+ baud = uart_get_baud_rate(port, termios, old, 300, 4000000);
+ baud = msm_set_baud_rate(port, baud, &flags);
if (tty_termios_baud_rate(termios))
tty_termios_encode_baud_rate(termios, baud, baud);
@@ -588,6 +1103,9 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
uart_update_timeout(port, termios->c_cflag, baud);
+ /* Try to use DMA */
+ msm_start_rx_dma(msm_port);
+
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -765,7 +1283,7 @@ static void msm_poll_put_char(struct uart_port *port, unsigned char c)
msm_write(port, 0, UART_IMR);
if (msm_port->is_uartdm)
- reset_dm_count(port, 1);
+ msm_reset_dm_count(port, 1);
/* Wait until FIFO is empty */
while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
@@ -839,7 +1357,7 @@ static struct msm_port msm_uart_ports[] = {
#define UART_NR ARRAY_SIZE(msm_uart_ports)
-static inline struct uart_port *get_port_from_line(unsigned int line)
+static inline struct uart_port *msm_get_port_from_line(unsigned int line)
{
return &msm_uart_ports[line].uart;
}
@@ -866,7 +1384,7 @@ static void __msm_console_write(struct uart_port *port, const char *s,
spin_lock(&port->lock);
if (is_uartdm)
- reset_dm_count(port, count);
+ msm_reset_dm_count(port, count);
i = 0;
while (i < count) {
@@ -911,7 +1429,7 @@ static void msm_console_write(struct console *co, const char *s,
BUG_ON(co->index < 0 || co->index >= UART_NR);
- port = get_port_from_line(co->index);
+ port = msm_get_port_from_line(co->index);
msm_port = UART_TO_MSM(port);
__msm_console_write(port, s, count, msm_port->is_uartdm);
@@ -928,7 +1446,7 @@ static int __init msm_console_setup(struct console *co, char *options)
if (unlikely(co->index >= UART_NR || co->index < 0))
return -ENXIO;
- port = get_port_from_line(co->index);
+ port = msm_get_port_from_line(co->index);
if (unlikely(!port->membase))
return -ENXIO;
@@ -1043,7 +1561,7 @@ static int msm_serial_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "msm_serial: detected port #%d\n", line);
- port = get_port_from_line(line);
+ port = msm_get_port_from_line(line);
port->dev = &pdev->dev;
msm_port = UART_TO_MSM(port);
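
A side note on the Tx path added above: msm_handle_tx() splits the circular buffer into a total pending count (the PIO candidate) and a contiguous-to-end count (the DMA candidate), word-aligns the DMA chunk on UARTDM > 1.3, and falls back to PIO when the chunk is smaller than one burst. Below is a standalone sketch with made-up ring values; the CIRC_* helpers are copied from include/linux/circ_buf.h and everything here is illustration, not driver code.

    #include <stdio.h>

    #define UART_XMIT_SIZE    4096
    #define UARTDM_BURST_SIZE 16
    #define UARTDM_TX_AIGN(x) ((x) & ~0x3)          /* word-align, UARTDM > 1.3 */

    /* as defined in include/linux/circ_buf.h */
    #define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))
    #define CIRC_CNT_TO_END(head, tail, size) \
            ({ int end = (size) - (tail); \
               int n = ((head) + end) & ((size) - 1); \
               n < end ? n : end; })

    int main(void)
    {
            unsigned int head = 100, tail = 4000;   /* hypothetical ring state */
            unsigned int pio = CIRC_CNT(head, tail, UART_XMIT_SIZE);        /* 196 pending */
            unsigned int dma = CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE); /* 96 before wrap */

            dma = UARTDM_TX_AIGN(dma);              /* stays 96, already a multiple of 4 */
            printf("pio=%u dma=%u -> %s\n", pio, dma,
                   dma >= UARTDM_BURST_SIZE ? "DMA" : "PIO fallback");
            return 0;
    }
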
diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
index 737f69fe7113..bc1d7b39eba8 100644
--- a/drivers/tty/serial/msm_serial.h
+++ b/drivers/tty/serial/msm_serial.h
@@ -20,11 +20,12 @@
#define UART_MR1_AUTO_RFR_LEVEL0 0x3F
#define UART_MR1_AUTO_RFR_LEVEL1 0x3FF00
-#define UART_MR1_RX_RDY_CTL (1 << 7)
-#define UART_MR1_CTS_CTL (1 << 6)
+#define UART_DM_MR1_AUTO_RFR_LEVEL1 0xFFFFFF00
+#define UART_MR1_RX_RDY_CTL BIT(7)
+#define UART_MR1_CTS_CTL BIT(6)
#define UART_MR2 0x0004
-#define UART_MR2_ERROR_MODE (1 << 6)
+#define UART_MR2_ERROR_MODE BIT(6)
#define UART_MR2_BITS_PER_CHAR 0x30
#define UART_MR2_BITS_PER_CHAR_5 (0x0 << 4)
#define UART_MR2_BITS_PER_CHAR_6 (0x1 << 4)
@@ -58,26 +59,28 @@
#define UART_CR_CMD_SET_RFR (13 << 4)
#define UART_CR_CMD_RESET_RFR (14 << 4)
#define UART_CR_CMD_PROTECTION_EN (16 << 4)
-#define UART_CR_CMD_STALE_EVENT_ENABLE (80 << 4)
+#define UART_CR_CMD_STALE_EVENT_DISABLE (6 << 8)
+#define UART_CR_CMD_STALE_EVENT_ENABLE (5 << 8)
#define UART_CR_CMD_FORCE_STALE (4 << 8)
#define UART_CR_CMD_RESET_TX_READY (3 << 8)
-#define UART_CR_TX_DISABLE (1 << 3)
-#define UART_CR_TX_ENABLE (1 << 2)
-#define UART_CR_RX_DISABLE (1 << 1)
-#define UART_CR_RX_ENABLE (1 << 0)
+#define UART_CR_TX_DISABLE BIT(3)
+#define UART_CR_TX_ENABLE BIT(2)
+#define UART_CR_RX_DISABLE BIT(1)
+#define UART_CR_RX_ENABLE BIT(0)
#define UART_CR_CMD_RESET_RXBREAK_START ((1 << 11) | (2 << 4))
#define UART_IMR 0x0014
-#define UART_IMR_TXLEV (1 << 0)
-#define UART_IMR_RXSTALE (1 << 3)
-#define UART_IMR_RXLEV (1 << 4)
-#define UART_IMR_DELTA_CTS (1 << 5)
-#define UART_IMR_CURRENT_CTS (1 << 6)
-#define UART_IMR_RXBREAK_START (1 << 10)
+#define UART_IMR_TXLEV BIT(0)
+#define UART_IMR_RXSTALE BIT(3)
+#define UART_IMR_RXLEV BIT(4)
+#define UART_IMR_DELTA_CTS BIT(5)
+#define UART_IMR_CURRENT_CTS BIT(6)
+#define UART_IMR_RXBREAK_START BIT(10)
#define UART_IPR_RXSTALE_LAST 0x20
#define UART_IPR_STALE_LSB 0x1F
#define UART_IPR_STALE_TIMEOUT_MSB 0x3FF80
+#define UART_DM_IPR_STALE_TIMEOUT_MSB 0xFFFFFF80
#define UART_IPR 0x0018
#define UART_TFWR 0x001C
@@ -96,20 +99,20 @@
#define UART_TEST_CTRL 0x0050
#define UART_SR 0x0008
-#define UART_SR_HUNT_CHAR (1 << 7)
-#define UART_SR_RX_BREAK (1 << 6)
-#define UART_SR_PAR_FRAME_ERR (1 << 5)
-#define UART_SR_OVERRUN (1 << 4)
-#define UART_SR_TX_EMPTY (1 << 3)
-#define UART_SR_TX_READY (1 << 2)
-#define UART_SR_RX_FULL (1 << 1)
-#define UART_SR_RX_READY (1 << 0)
+#define UART_SR_HUNT_CHAR BIT(7)
+#define UART_SR_RX_BREAK BIT(6)
+#define UART_SR_PAR_FRAME_ERR BIT(5)
+#define UART_SR_OVERRUN BIT(4)
+#define UART_SR_TX_EMPTY BIT(3)
+#define UART_SR_TX_READY BIT(2)
+#define UART_SR_RX_FULL BIT(1)
+#define UART_SR_RX_READY BIT(0)
#define UART_RF 0x000C
#define UARTDM_RF 0x0070
#define UART_MISR 0x0010
#define UART_ISR 0x0014
-#define UART_ISR_TX_READY (1 << 7)
+#define UART_ISR_TX_READY BIT(7)
#define UARTDM_RXFS 0x50
#define UARTDM_RXFS_BUF_SHIFT 0x7
@@ -119,6 +122,12 @@
#define UARTDM_DMEN_RX_SC_ENABLE BIT(5)
#define UARTDM_DMEN_TX_SC_ENABLE BIT(4)
+#define UARTDM_DMEN_TX_BAM_ENABLE BIT(2) /* UARTDM_1P4 */
+#define UARTDM_DMEN_TX_DM_ENABLE BIT(0) /* < UARTDM_1P4 */
+
+#define UARTDM_DMEN_RX_BAM_ENABLE BIT(3) /* UARTDM_1P4 */
+#define UARTDM_DMEN_RX_DM_ENABLE BIT(1) /* < UARTDM_1P4 */
+
#define UARTDM_DMRX 0x34
#define UARTDM_NCF_TX 0x40
#define UARTDM_RX_TOTAL_SNAP 0x38
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index 5ce3f1d6a6ed..5619b8ca3bf3 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -1,6 +1,7 @@
config USB_CHIPIDEA
tristate "ChipIdea Highspeed Dual Role Controller"
depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA
+ select EXTCON
help
Say Y here if your system has a dual role high speed USB
controller based on ChipIdea silicon IP. Currently, only the
diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c
index d79ecc08a1be..3889809fd0c4 100644
--- a/drivers/usb/chipidea/ci_hdrc_msm.c
+++ b/drivers/usb/chipidea/ci_hdrc_msm.c
@@ -25,7 +25,8 @@ static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event)
case CI_HDRC_CONTROLLER_RESET_EVENT:
dev_dbg(dev, "CI_HDRC_CONTROLLER_RESET_EVENT received\n");
writel(0, USB_AHBBURST);
- writel(0, USB_AHBMODE);
+ /* Use the AHB transactor, allow posted data writes */
+ writel(0x8, USB_AHBMODE);
usb_phy_init(ci->usb_phy);
break;
case CI_HDRC_CONTROLLER_STOPPED_EVENT:
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 3feebf7f31f0..573c2876b263 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -47,6 +47,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/extcon.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/module.h>
@@ -602,9 +603,45 @@ static irqreturn_t ci_irq(int irq, void *data)
return ret;
}
+static int ci_vbus_notifier(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct ci_hdrc_cable *vbus = container_of(nb, struct ci_hdrc_cable, nb);
+ struct ci_hdrc *ci = vbus->ci;
+
+ if (event)
+ vbus->state = true;
+ else
+ vbus->state = false;
+
+ vbus->changed = true;
+
+ ci_irq(ci->irq, ci);
+ return NOTIFY_DONE;
+}
+
+static int ci_id_notifier(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct ci_hdrc_cable *id = container_of(nb, struct ci_hdrc_cable, nb);
+ struct ci_hdrc *ci = id->ci;
+
+ if (event)
+ id->state = false;
+ else
+ id->state = true;
+
+ id->changed = true;
+
+ ci_irq(ci->irq, ci);
+ return NOTIFY_DONE;
+}
+
static int ci_get_platdata(struct device *dev,
struct ci_hdrc_platform_data *platdata)
{
+ struct extcon_dev *ext_vbus, *ext_id;
+ struct ci_hdrc_cable *cable;
int ret;
if (!platdata->phy_mode)
@@ -695,9 +732,91 @@ static int ci_get_platdata(struct device *dev,
platdata->flags |= CI_HDRC_OVERRIDE_RX_BURST;
}
+ ext_id = ERR_PTR(-ENODEV);
+ ext_vbus = ERR_PTR(-ENODEV);
+ if (of_property_read_bool(dev->of_node, "extcon")) {
+ /* Neither of them is mandatory */
+ ext_vbus = extcon_get_edev_by_phandle(dev, 0);
+ if (IS_ERR(ext_vbus) && PTR_ERR(ext_vbus) != -ENODEV)
+ return PTR_ERR(ext_vbus);
+
+ ext_id = extcon_get_edev_by_phandle(dev, 1);
+ if (IS_ERR(ext_id) && PTR_ERR(ext_id) != -ENODEV)
+ return PTR_ERR(ext_id);
+ }
+
+ cable = &platdata->vbus_extcon;
+ cable->nb.notifier_call = ci_vbus_notifier;
+ cable->edev = ext_vbus;
+
+ if (!IS_ERR(ext_vbus)) {
+ ret = extcon_get_cable_state_(cable->edev, EXTCON_USB);
+ if (ret)
+ cable->state = true;
+ else
+ cable->state = false;
+ }
+
+ cable = &platdata->id_extcon;
+ cable->nb.notifier_call = ci_id_notifier;
+ cable->edev = ext_id;
+
+ if (!IS_ERR(ext_id)) {
+ ret = extcon_get_cable_state_(cable->edev, EXTCON_USB_HOST);
+ if (ret)
+ cable->state = false;
+ else
+ cable->state = true;
+ }
return 0;
}
+static int ci_extcon_register(struct ci_hdrc *ci)
+{
+ struct ci_hdrc_cable *id, *vbus;
+ int ret;
+
+ id = &ci->platdata->id_extcon;
+ id->ci = ci;
+ if (!IS_ERR(id->edev)) {
+ ret = extcon_register_notifier(id->edev, EXTCON_USB_HOST,
+ &id->nb);
+ if (ret < 0) {
+ dev_err(ci->dev, "register ID failed\n");
+ return ret;
+ }
+ }
+
+ vbus = &ci->platdata->vbus_extcon;
+ vbus->ci = ci;
+ if (!IS_ERR(vbus->edev)) {
+ ret = extcon_register_notifier(vbus->edev, EXTCON_USB,
+ &vbus->nb);
+ if (ret < 0) {
+ extcon_unregister_notifier(id->edev, EXTCON_USB_HOST,
+ &id->nb);
+ dev_err(ci->dev, "register VBUS failed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void ci_extcon_unregister(struct ci_hdrc *ci)
+{
+ struct ci_hdrc_cable *cable;
+
+ cable = &ci->platdata->id_extcon;
+ if (!IS_ERR(cable->edev))
+ extcon_unregister_notifier(cable->edev, EXTCON_USB_HOST,
+ &cable->nb);
+
+ cable = &ci->platdata->vbus_extcon;
+ if (!IS_ERR(cable->edev))
+ extcon_unregister_notifier(cable->edev, EXTCON_USB, &cable->nb);
+}
+
static DEFINE_IDA(ci_ida);
struct platform_device *ci_hdrc_add_device(struct device *dev,
@@ -921,6 +1040,10 @@ static int ci_hdrc_probe(struct platform_device *pdev)
if (ret)
goto stop;
+ ret = ci_extcon_register(ci);
+ if (ret)
+ goto stop;
+
if (ci->supports_runtime_pm) {
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
@@ -938,6 +1061,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
if (!ret)
return 0;
+ ci_extcon_unregister(ci);
stop:
ci_role_destroy(ci);
deinit_phy:
@@ -957,6 +1081,7 @@ static int ci_hdrc_remove(struct platform_device *pdev)
}
dbg_remove_files(ci);
+ ci_extcon_unregister(ci);
ci_role_destroy(ci);
ci_hdrc_enter_lpm(ci, true);
ci_usb_phy_exit(ci);
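
To make the notifier plumbing above easier to follow: ci_vbus_notifier() and ci_id_notifier() recover their ci_hdrc_cable from the embedded notifier_block with container_of() and only record the new cable state before re-running the IRQ handler. A tiny user-space illustration of that embedding pattern follows; all names are stand-ins, not chipidea code.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct notifier { int (*call)(struct notifier *nb, unsigned long event); };

    struct cable {
            bool state;             /* cable attached? */
            bool changed;           /* changed since last poll? */
            struct notifier nb;     /* embedded notifier block */
    };

    static int vbus_notify(struct notifier *nb, unsigned long event)
    {
            struct cable *c = container_of(nb, struct cable, nb);

            c->state = event != 0;
            c->changed = true;
            return 0;
    }

    int main(void)
    {
            struct cable vbus = { .nb.call = vbus_notify };

            vbus.nb.call(&vbus.nb, 1);      /* simulate an "attach" event */
            printf("state=%d changed=%d\n", vbus.state, vbus.changed);
            return 0;
    }
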
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index ad6c87a4653c..ab4bd0c2d4ef 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -30,7 +30,44 @@
*/
u32 hw_read_otgsc(struct ci_hdrc *ci, u32 mask)
{
- return hw_read(ci, OP_OTGSC, mask);
+ struct ci_hdrc_cable *cable;
+ u32 val = hw_read(ci, OP_OTGSC, mask);
+
+ /*
+ * If the extcon framework is used for VBUS and/or ID signal
+ * detection, overwrite the OTGSC register value accordingly
+ */
+ cable = &ci->platdata->vbus_extcon;
+ if (!IS_ERR(cable->edev)) {
+ if (cable->changed)
+ val |= OTGSC_BSVIS;
+ else
+ val &= ~OTGSC_BSVIS;
+
+ cable->changed = false;
+
+ if (cable->state)
+ val |= OTGSC_BSV;
+ else
+ val &= ~OTGSC_BSV;
+ }
+
+ cable = &ci->platdata->id_extcon;
+ if (!IS_ERR(cable->edev)) {
+ if (cable->changed)
+ val |= OTGSC_IDIS;
+ else
+ val &= ~OTGSC_IDIS;
+
+ cable->changed = false;
+
+ if (cable->state)
+ val |= OTGSC_ID;
+ else
+ val &= ~OTGSC_ID;
+ }
+
+ return val;
}
/**
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c
index 275c92e53a59..1d9cbf289406 100644
--- a/drivers/usb/host/ehci-msm.c
+++ b/drivers/usb/host/ehci-msm.c
@@ -57,8 +57,8 @@ static int ehci_msm_reset(struct usb_hcd *hcd)
/* bursts of unspecified length. */
writel(0, USB_AHBBURST);
- /* Use the AHB transactor */
- writel(0, USB_AHBMODE);
+ /* Use the AHB transactor, allow posted data writes */
+ writel(0x8, USB_AHBMODE);
/* Disable streaming mode and select host mode */
writel(0x13, USB_USBMODE);
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index c58c3c0dbe35..a51f74c40631 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -76,6 +76,9 @@ static int msm_hsusb_init_vddcx(struct msm_otg *motg, int init)
{
int ret = 0;
+ if (IS_ERR(motg->vddcx))
+ return 0;
+
if (init) {
ret = regulator_set_voltage(motg->vddcx,
motg->vdd_levels[VDD_LEVEL_MIN],
@@ -1598,6 +1601,8 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
ret = extcon_register_notifier(ext_id, EXTCON_USB_HOST,
&motg->id.nb);
if (ret < 0) {
+ extcon_unregister_notifier(motg->vbus.extcon,
+ EXTCON_USB, &motg->vbus.nb);
dev_err(&pdev->dev, "register ID notifier failed\n");
return ret;
}
@@ -1647,7 +1652,7 @@ static int msm_otg_reboot_notify(struct notifier_block *this,
static int msm_otg_probe(struct platform_device *pdev)
{
- struct regulator_bulk_data regs[3];
+ struct regulator_bulk_data regs[2];
int ret = 0;
struct device_node *np = pdev->dev.of_node;
struct msm_otg_platform_data *pdata;
@@ -1660,15 +1665,6 @@ static int msm_otg_probe(struct platform_device *pdev)
if (!motg)
return -ENOMEM;
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- if (!np)
- return -ENXIO;
- ret = msm_otg_read_dt(pdev, motg);
- if (ret)
- return ret;
- }
-
motg->phy.otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
GFP_KERNEL);
if (!motg->phy.otg)
@@ -1676,6 +1672,8 @@ static int msm_otg_probe(struct platform_device *pdev)
phy = &motg->phy;
phy->dev = &pdev->dev;
+ INIT_WORK(&motg->sm_work, msm_otg_sm_work);
+ INIT_DELAYED_WORK(&motg->chg_work, msm_chg_detect_work);
motg->clk = devm_clk_get(&pdev->dev, np ? "core" : "usb_hs_clk");
if (IS_ERR(motg->clk)) {
@@ -1731,17 +1729,29 @@ static int msm_otg_probe(struct platform_device *pdev)
return motg->irq;
}
- regs[0].supply = "vddcx";
- regs[1].supply = "v3p3";
- regs[2].supply = "v1p8";
+ regs[0].supply = "v3p3";
+ regs[1].supply = "v1p8";
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ if (!np)
+ return -ENXIO;
+ ret = msm_otg_read_dt(pdev, motg);
+ if (ret)
+ return ret;
+ }
ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(regs), regs);
- if (ret)
+ if (ret) {
+ dev_err(&pdev->dev, "no v3p3 or v1p8\n");
return ret;
+ }
+
+ motg->v3p3 = regs[0].consumer;
+ motg->v1p8 = regs[1].consumer;
- motg->vddcx = regs[0].consumer;
- motg->v3p3 = regs[1].consumer;
- motg->v1p8 = regs[2].consumer;
+ motg->vddcx = devm_regulator_get_optional(motg->phy.dev, "vddcx");
+ if (IS_ERR(motg->vddcx))
+ dev_info(&pdev->dev, "no vddcx\n");
clk_set_rate(motg->clk, 60000000);
@@ -1771,8 +1781,6 @@ static int msm_otg_probe(struct platform_device *pdev)
writel(0, USB_USBINTR);
writel(0, USB_OTGSC);
- INIT_WORK(&motg->sm_work, msm_otg_sm_work);
- INIT_DELAYED_WORK(&motg->chg_work, msm_chg_detect_work);
ret = devm_request_irq(&pdev->dev, motg->irq, msm_otg_irq, IRQF_SHARED,
"msm_otg", motg);
if (ret) {
@@ -1821,7 +1829,6 @@ static int msm_otg_probe(struct platform_device *pdev)
register_reboot_notifier(&motg->reboot);
pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
return 0;
@@ -1834,6 +1841,11 @@ disable_clks:
clk_disable_unprepare(motg->clk);
if (!IS_ERR(motg->core_clk))
clk_disable_unprepare(motg->core_clk);
+
+ extcon_unregister_notifier(motg->id.extcon,
+ EXTCON_USB_HOST, &motg->id.nb);
+ extcon_unregister_notifier(motg->vbus.extcon,
+ EXTCON_USB, &motg->vbus.nb);
return ret;
}
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index faaeff7db684..f07ee6b6cb47 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -1304,7 +1304,9 @@ extern void drm_property_destroy_user_blobs(struct drm_device *dev,
extern bool drm_probe_ddc(struct i2c_adapter *adapter);
extern struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
+extern struct edid *drm_get_edid_early(struct i2c_adapter *adapter);
extern struct edid *drm_edid_duplicate(const struct edid *edid);
+extern bool edid_vendor(struct edid *edid, char *vendor);
extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
extern void drm_mode_config_init(struct drm_device *dev);
extern void drm_mode_config_reset(struct drm_device *dev);
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index f1d8d0dbb4f1..fc1df05aeb85 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -100,10 +100,12 @@ struct mipi_dsi_host_ops {
struct mipi_dsi_host {
struct device *dev;
const struct mipi_dsi_host_ops *ops;
+ struct list_head list;
};
int mipi_dsi_host_register(struct mipi_dsi_host *host);
void mipi_dsi_host_unregister(struct mipi_dsi_host *host);
+struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node);
/* DSI mode flags */
@@ -139,10 +141,13 @@ enum mipi_dsi_pixel_format {
MIPI_DSI_FMT_RGB565,
};
+#define DSI_DEV_NAME_SIZE 20
+
/**
* struct mipi_dsi_device - DSI peripheral device
* @host: DSI host for this peripheral
* @dev: driver model device node for this peripheral
+ * @name: name of the dsi peripheral
* @channel: virtual channel assigned to the peripheral
* @format: pixel format for video mode
* @lanes: number of active data lanes
@@ -152,6 +157,8 @@ struct mipi_dsi_device {
struct mipi_dsi_host *host;
struct device dev;
+ char name[DSI_DEV_NAME_SIZE];
+
unsigned int channel;
unsigned int lanes;
enum mipi_dsi_pixel_format format;
@@ -163,6 +170,8 @@ static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev)
return container_of(dev, struct mipi_dsi_device, dev);
}
+struct mipi_dsi_device *mipi_dsi_new_dummy(struct mipi_dsi_host *host, u32 reg);
+void mipi_dsi_unregister_device(struct mipi_dsi_device *dsi);
struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np);
int mipi_dsi_attach(struct mipi_dsi_device *dsi);
int mipi_dsi_detach(struct mipi_dsi_device *dsi);
diff --git a/include/dt-bindings/arm/qcom-ids.h b/include/dt-bindings/arm/qcom-ids.h
new file mode 100644
index 000000000000..a18f34e7d965
--- /dev/null
+++ b/include/dt-bindings/arm/qcom-ids.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DT_BINDINGS_QCOM_IDS_H
+#define __DT_BINDINGS_QCOM_IDS_H
+
+/* qcom,msm-id */
+#define QCOM_ID_MSM8916 206
+#define QCOM_ID_APQ8016 247
+#define QCOM_ID_MSM8216 248
+#define QCOM_ID_MSM8116 249
+#define QCOM_ID_MSM8616 250
+
+/* qcom,board-id */
+#define QCOM_BRD_ID(a, major, minor) \
+ (((major & 0xff) << 16) | ((minor & 0xff) << 8) | QCOM_BRD_ID_##a)
+
+#define QCOM_BRD_ID_MTP 8
+#define QCOM_BRD_ID_DRAGONBRD 10
+#define QCOM_BRD_ID_SBC 24
+
+#define QCOM_BRD_SUBTYPE_DEFAULT 0
+#define QCOM_BRD_SUBTYPE_MTP8916_SMB1360 1
+
+#endif
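
A quick worked example of the board-id packing above: QCOM_BRD_ID(SBC, 1, 0), i.e. an SBC with board major revision 1 and minor 0, expands to ((1 & 0xff) << 16) | ((0 & 0xff) << 8) | 24 = 0x10018. A throwaway check (assumes the header above is reachable on the include path):

    #include <stdio.h>
    #include <dt-bindings/arm/qcom-ids.h>

    int main(void)
    {
            printf("0x%x\n", QCOM_BRD_ID(SBC, 1, 0));       /* prints 0x10018 */
            return 0;
    }
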
diff --git a/include/dt-bindings/clock/qcom,gcc-apq8084.h b/include/dt-bindings/clock/qcom,gcc-apq8084.h
index 2c0da566c46a..5aa7ebeae411 100644
--- a/include/dt-bindings/clock/qcom,gcc-apq8084.h
+++ b/include/dt-bindings/clock/qcom,gcc-apq8084.h
@@ -348,4 +348,10 @@
#define GCC_PCIE_1_PIPE_CLK 331
#define GCC_PCIE_1_SLV_AXI_CLK 332
+/* gdscs */
+#define USB_HS_HSIC_GDSC 0
+#define PCIE0_GDSC 1
+#define PCIE1_GDSC 2
+#define USB30_GDSC 3
+
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8916.h b/include/dt-bindings/clock/qcom,gcc-msm8916.h
index e430f644dd6c..257e2fbedd94 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8916.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8916.h
@@ -152,5 +152,35 @@
#define GCC_VENUS0_AHB_CLK 135
#define GCC_VENUS0_AXI_CLK 136
#define GCC_VENUS0_VCODEC0_CLK 137
+#define BIMC_DDR_CLK_SRC 138
+#define GCC_APSS_TCU_CLK 139
+#define GCC_GFX_TCU_CLK 140
+#define BIMC_GPU_CLK_SRC 141
+#define GCC_BIMC_GFX_CLK 142
+#define GCC_BIMC_GPU_CLK 143
+#define ULTAUDIO_LPAIF_PRI_I2S_CLK_SRC 144
+#define ULTAUDIO_LPAIF_SEC_I2S_CLK_SRC 145
+#define ULTAUDIO_LPAIF_AUX_I2S_CLK_SRC 146
+#define ULTAUDIO_XO_CLK_SRC 147
+#define ULTAUDIO_AHBFABRIC_CLK_SRC 148
+#define CODEC_DIGCODEC_CLK_SRC 149
+#define GCC_ULTAUDIO_PCNOC_MPORT_CLK 150
+#define GCC_ULTAUDIO_PCNOC_SWAY_CLK 151
+#define GCC_ULTAUDIO_AVSYNC_XO_CLK 152
+#define GCC_ULTAUDIO_STC_XO_CLK 153
+#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_CLK 154
+#define GCC_ULTAUDIO_AHBFABRIC_IXFABRIC_LPM_CLK 155
+#define GCC_ULTAUDIO_LPAIF_PRI_I2S_CLK 156
+#define GCC_ULTAUDIO_LPAIF_SEC_I2S_CLK 157
+#define GCC_ULTAUDIO_LPAIF_AUX_I2S_CLK 158
+#define GCC_CODEC_DIGCODEC_CLK 159
+
+/* Indexes for GDSCs */
+#define BIMC_GDSC 0
+#define VENUS_GDSC 1
+#define MDSS_GDSC 2
+#define JPEG_GDSC 3
+#define VFE_GDSC 4
+#define OXILI_GDSC 5
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8960.h b/include/dt-bindings/clock/qcom,gcc-msm8960.h
index 7d20eedfee98..e02742fc81cc 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8960.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8960.h
@@ -319,5 +319,7 @@
#define CE3_SRC 303
#define CE3_CORE_CLK 304
#define CE3_H_CLK 305
+#define PLL16 306
+#define PLL17 307
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8974.h b/include/dt-bindings/clock/qcom,gcc-msm8974.h
index 51e51c860fe6..81d32f639190 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8974.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8974.h
@@ -321,4 +321,7 @@
#define GCC_SDCC1_CDCCAL_SLEEP_CLK 304
#define GCC_SDCC1_CDCCAL_FF_CLK 305
+/* gdscs */
+#define USB_HS_HSIC_GDSC 0
+
#endif
diff --git a/include/dt-bindings/clock/qcom,mmcc-apq8084.h b/include/dt-bindings/clock/qcom,mmcc-apq8084.h
index d72b5b35f15e..21fec5dbe616 100644
--- a/include/dt-bindings/clock/qcom,mmcc-apq8084.h
+++ b/include/dt-bindings/clock/qcom,mmcc-apq8084.h
@@ -180,4 +180,12 @@
#define VPU_SLEEP_CLK 163
#define VPU_VDP_CLK 164
+/* GDSCs */
+#define VENUS0_GDSC 0
+#define MDSS_GDSC 1
+#define CAMSS_JPEG_GDSC 2
+#define CAMSS_VFE_GDSC 3
+#define OXILI_GDSC 4
+#define OXILICX_GDSC 5
+
#endif
diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8974.h b/include/dt-bindings/clock/qcom,mmcc-msm8974.h
index 032ed87ef0f3..28651e54c9ae 100644
--- a/include/dt-bindings/clock/qcom,mmcc-msm8974.h
+++ b/include/dt-bindings/clock/qcom,mmcc-msm8974.h
@@ -158,4 +158,12 @@
#define SPDM_RM_AXI 141
#define SPDM_RM_OCMEMNOC 142
+/* gdscs */
+#define VENUS0_GDSC 0
+#define MDSS_GDSC 1
+#define CAMSS_JPEG_GDSC 2
+#define CAMSS_VFE_GDSC 3
+#define OXILI_GDSC 4
+#define OXILICX_GDSC 5
+
#endif
diff --git a/include/dt-bindings/clock/qcom,rpmcc-msm8916.h b/include/dt-bindings/clock/qcom,rpmcc-msm8916.h
new file mode 100644
index 000000000000..62d63940896a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,rpmcc-msm8916.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2015 Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_RPMCC_8916_H
+#define _DT_BINDINGS_CLK_MSM_RPMCC_8916_H
+
+#define RPM_XO_CLK_SRC 0
+#define RPM_XO_A_CLK_SRC 1
+#define RPM_PCNOC_CLK 2
+#define RPM_PCNOC_A_CLK 3
+#define RPM_SNOC_CLK 4
+#define RPM_SNOC_A_CLK 6
+#define RPM_BIMC_CLK 7
+#define RPM_BIMC_A_CLK 8
+#define RPM_QDSS_CLK 9
+#define RPM_QDSS_A_CLK 10
+#define RPM_BB_CLK1 11
+#define RPM_BB_CLK1_A 12
+#define RPM_BB_CLK2 13
+#define RPM_BB_CLK2_A 14
+#define RPM_RF_CLK1 15
+#define RPM_RF_CLK1_A 16
+#define RPM_RF_CLK2 17
+#define RPM_RF_CLK2_A 18
+#define RPM_BB_CLK1_PIN 19
+#define RPM_BB_CLK1_A_PIN 20
+#define RPM_BB_CLK2_PIN 21
+#define RPM_BB_CLK2_A_PIN 22
+#define RPM_RF_CLK1_PIN 23
+#define RPM_RF_CLK1_A_PIN 24
+#define RPM_RF_CLK2_PIN 25
+#define RPM_RF_CLK2_A_PIN 26
+
+#endif
diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h
new file mode 100644
index 000000000000..1815fa0ab136
--- /dev/null
+++ b/include/dt-bindings/msm/msm-bus-ids.h
@@ -0,0 +1,629 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_BUS_IDS_H
+#define __MSM_BUS_IDS_H
+
+/* Topology related enums */
+#define MSM_BUS_FAB_DEFAULT 0
+#define MSM_BUS_FAB_APPSS 0
+#define MSM_BUS_FAB_SYSTEM 1024
+#define MSM_BUS_FAB_MMSS 2048
+#define MSM_BUS_FAB_SYSTEM_FPB 3072
+#define MSM_BUS_FAB_CPSS_FPB 4096
+
+#define MSM_BUS_FAB_BIMC 0
+#define MSM_BUS_FAB_SYS_NOC 1024
+#define MSM_BUS_FAB_MMSS_NOC 2048
+#define MSM_BUS_FAB_OCMEM_NOC 3072
+#define MSM_BUS_FAB_PERIPH_NOC 4096
+#define MSM_BUS_FAB_CONFIG_NOC 5120
+#define MSM_BUS_FAB_OCMEM_VNOC 6144
+
+#define MSM_BUS_MASTER_FIRST 1
+#define MSM_BUS_MASTER_AMPSS_M0 1
+#define MSM_BUS_MASTER_AMPSS_M1 2
+#define MSM_BUS_APPSS_MASTER_FAB_MMSS 3
+#define MSM_BUS_APPSS_MASTER_FAB_SYSTEM 4
+#define MSM_BUS_SYSTEM_MASTER_FAB_APPSS 5
+#define MSM_BUS_MASTER_SPS 6
+#define MSM_BUS_MASTER_ADM_PORT0 7
+#define MSM_BUS_MASTER_ADM_PORT1 8
+#define MSM_BUS_SYSTEM_MASTER_ADM1_PORT0 9
+#define MSM_BUS_MASTER_ADM1_PORT1 10
+#define MSM_BUS_MASTER_LPASS_PROC 11
+#define MSM_BUS_MASTER_MSS_PROCI 12
+#define MSM_BUS_MASTER_MSS_PROCD 13
+#define MSM_BUS_MASTER_MSS_MDM_PORT0 14
+#define MSM_BUS_MASTER_LPASS 15
+#define MSM_BUS_SYSTEM_MASTER_CPSS_FPB 16
+#define MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB 17
+#define MSM_BUS_SYSTEM_MASTER_MMSS_FPB 18
+#define MSM_BUS_MASTER_ADM1_CI 19
+#define MSM_BUS_MASTER_ADM0_CI 20
+#define MSM_BUS_MASTER_MSS_MDM_PORT1 21
+#define MSM_BUS_MASTER_MDP_PORT0 22
+#define MSM_BUS_MASTER_MDP_PORT1 23
+#define MSM_BUS_MMSS_MASTER_ADM1_PORT0 24
+#define MSM_BUS_MASTER_ROTATOR 25
+#define MSM_BUS_MASTER_GRAPHICS_3D 26
+#define MSM_BUS_MASTER_JPEG_DEC 27
+#define MSM_BUS_MASTER_GRAPHICS_2D_CORE0 28
+#define MSM_BUS_MASTER_VFE 29
+#define MSM_BUS_MASTER_VPE 30
+#define MSM_BUS_MASTER_JPEG_ENC 31
+#define MSM_BUS_MASTER_GRAPHICS_2D_CORE1 32
+#define MSM_BUS_MMSS_MASTER_APPS_FAB 33
+#define MSM_BUS_MASTER_HD_CODEC_PORT0 34
+#define MSM_BUS_MASTER_HD_CODEC_PORT1 35
+#define MSM_BUS_MASTER_SPDM 36
+#define MSM_BUS_MASTER_RPM 37
+#define MSM_BUS_MASTER_MSS 38
+#define MSM_BUS_MASTER_RIVA 39
+#define MSM_BUS_SYSTEM_MASTER_UNUSED_6 40
+#define MSM_BUS_MASTER_MSS_SW_PROC 41
+#define MSM_BUS_MASTER_MSS_FW_PROC 42
+#define MSM_BUS_MMSS_MASTER_UNUSED_2 43
+#define MSM_BUS_MASTER_GSS_NAV 44
+#define MSM_BUS_MASTER_PCIE 45
+#define MSM_BUS_MASTER_SATA 46
+#define MSM_BUS_MASTER_CRYPTO 47
+#define MSM_BUS_MASTER_VIDEO_CAP 48
+#define MSM_BUS_MASTER_GRAPHICS_3D_PORT1 49
+#define MSM_BUS_MASTER_VIDEO_ENC 50
+#define MSM_BUS_MASTER_VIDEO_DEC 51
+#define MSM_BUS_MASTER_LPASS_AHB 52
+#define MSM_BUS_MASTER_QDSS_BAM 53
+#define MSM_BUS_MASTER_SNOC_CFG 54
+#define MSM_BUS_MASTER_CRYPTO_CORE0 55
+#define MSM_BUS_MASTER_CRYPTO_CORE1 56
+#define MSM_BUS_MASTER_MSS_NAV 57
+#define MSM_BUS_MASTER_OCMEM_DMA 58
+#define MSM_BUS_MASTER_WCSS 59
+#define MSM_BUS_MASTER_QDSS_ETR 60
+#define MSM_BUS_MASTER_USB3 61
+#define MSM_BUS_MASTER_JPEG 62
+#define MSM_BUS_MASTER_VIDEO_P0 63
+#define MSM_BUS_MASTER_VIDEO_P1 64
+#define MSM_BUS_MASTER_MSS_PROC 65
+#define MSM_BUS_MASTER_JPEG_OCMEM 66
+#define MSM_BUS_MASTER_MDP_OCMEM 67
+#define MSM_BUS_MASTER_VIDEO_P0_OCMEM 68
+#define MSM_BUS_MASTER_VIDEO_P1_OCMEM 69
+#define MSM_BUS_MASTER_VFE_OCMEM 70
+#define MSM_BUS_MASTER_CNOC_ONOC_CFG 71
+#define MSM_BUS_MASTER_RPM_INST 72
+#define MSM_BUS_MASTER_RPM_DATA 73
+#define MSM_BUS_MASTER_RPM_SYS 74
+#define MSM_BUS_MASTER_DEHR 75
+#define MSM_BUS_MASTER_QDSS_DAP 76
+#define MSM_BUS_MASTER_TIC 77
+#define MSM_BUS_MASTER_SDCC_1 78
+#define MSM_BUS_MASTER_SDCC_3 79
+#define MSM_BUS_MASTER_SDCC_4 80
+#define MSM_BUS_MASTER_SDCC_2 81
+#define MSM_BUS_MASTER_TSIF 82
+#define MSM_BUS_MASTER_BAM_DMA 83
+#define MSM_BUS_MASTER_BLSP_2 84
+#define MSM_BUS_MASTER_USB_HSIC 85
+#define MSM_BUS_MASTER_BLSP_1 86
+#define MSM_BUS_MASTER_USB_HS 87
+#define MSM_BUS_MASTER_PNOC_CFG 88
+#define MSM_BUS_MASTER_V_OCMEM_GFX3D 89
+#define MSM_BUS_MASTER_IPA 90
+#define MSM_BUS_MASTER_QPIC 91
+#define MSM_BUS_MASTER_MDPE 92
+#define MSM_BUS_MASTER_USB_HS2 93
+#define MSM_BUS_MASTER_VPU 94
+#define MSM_BUS_MASTER_UFS 95
+#define MSM_BUS_MASTER_BCAST 96
+#define MSM_BUS_MASTER_CRYPTO_CORE2 97
+#define MSM_BUS_MASTER_EMAC 98
+#define MSM_BUS_MASTER_VPU_1 99
+#define MSM_BUS_MASTER_PCIE_1 100
+#define MSM_BUS_MASTER_USB3_1 101
+#define MSM_BUS_MASTER_CNOC_MNOC_MMSS_CFG 102
+#define MSM_BUS_MASTER_CNOC_MNOC_CFG 103
+#define MSM_BUS_MASTER_TCU_0 104
+#define MSM_BUS_MASTER_TCU_1 105
+#define MSM_BUS_MASTER_CPP 106
+#define MSM_BUS_MASTER_LAST 107
+
+#define MSM_BUS_SYSTEM_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB
+#define MSM_BUS_CPSS_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_CPSS_FPB
+
+#define MSM_BUS_SNOC_MM_INT_0 10000
+#define MSM_BUS_SNOC_MM_INT_1 10001
+#define MSM_BUS_SNOC_MM_INT_2 10002
+#define MSM_BUS_SNOC_MM_INT_BIMC 10003
+#define MSM_BUS_SNOC_INT_0 10004
+#define MSM_BUS_SNOC_INT_1 10005
+#define MSM_BUS_SNOC_INT_BIMC 10006
+#define MSM_BUS_SNOC_BIMC_0_MAS 10007
+#define MSM_BUS_SNOC_BIMC_1_MAS 10008
+#define MSM_BUS_SNOC_QDSS_INT 10009
+#define MSM_BUS_PNOC_SNOC_MAS 10010
+#define MSM_BUS_PNOC_SNOC_SLV 10011
+#define MSM_BUS_PNOC_INT_0 10012
+#define MSM_BUS_PNOC_INT_1 10013
+#define MSM_BUS_PNOC_M_0 10014
+#define MSM_BUS_PNOC_M_1 10015
+#define MSM_BUS_BIMC_SNOC_MAS 10016
+#define MSM_BUS_BIMC_SNOC_SLV 10017
+#define MSM_BUS_PNOC_SLV_0 10018
+#define MSM_BUS_PNOC_SLV_1 10019
+#define MSM_BUS_PNOC_SLV_2 10020
+#define MSM_BUS_PNOC_SLV_3 10021
+#define MSM_BUS_PNOC_SLV_4 10022
+#define MSM_BUS_PNOC_SLV_8 10023
+#define MSM_BUS_PNOC_SLV_9 10024
+#define MSM_BUS_SNOC_BIMC_0_SLV 10025
+#define MSM_BUS_SNOC_BIMC_1_SLV 10026
+#define MSM_BUS_MNOC_BIMC_MAS 10027
+#define MSM_BUS_MNOC_BIMC_SLV 10028
+#define MSM_BUS_BIMC_MNOC_MAS 10029
+#define MSM_BUS_BIMC_MNOC_SLV 10030
+#define MSM_BUS_SNOC_BIMC_MAS 10031
+#define MSM_BUS_SNOC_BIMC_SLV 10032
+#define MSM_BUS_CNOC_SNOC_MAS 10033
+#define MSM_BUS_CNOC_SNOC_SLV 10034
+#define MSM_BUS_SNOC_CNOC_MAS 10035
+#define MSM_BUS_SNOC_CNOC_SLV 10036
+#define MSM_BUS_OVNOC_SNOC_MAS 10037
+#define MSM_BUS_OVNOC_SNOC_SLV 10038
+#define MSM_BUS_SNOC_OVNOC_MAS 10039
+#define MSM_BUS_SNOC_OVNOC_SLV 10040
+#define MSM_BUS_SNOC_PNOC_MAS 10041
+#define MSM_BUS_SNOC_PNOC_SLV 10042
+#define MSM_BUS_BIMC_INT_APPS_EBI 10043
+#define MSM_BUS_BIMC_INT_APPS_SNOC 10044
+#define MSM_BUS_SNOC_BIMC_2_MAS 10045
+#define MSM_BUS_SNOC_BIMC_2_SLV 10046
+#define MSM_BUS_INT_LAST 10047
+
+#define MSM_BUS_SLAVE_FIRST 512
+#define MSM_BUS_SLAVE_EBI_CH0 512
+#define MSM_BUS_SLAVE_EBI_CH1 513
+#define MSM_BUS_SLAVE_AMPSS_L2 514
+#define MSM_BUS_APPSS_SLAVE_FAB_MMSS 515
+#define MSM_BUS_APPSS_SLAVE_FAB_SYSTEM 516
+#define MSM_BUS_SYSTEM_SLAVE_FAB_APPS 517
+#define MSM_BUS_SLAVE_SPS 518
+#define MSM_BUS_SLAVE_SYSTEM_IMEM 519
+#define MSM_BUS_SLAVE_AMPSS 520
+#define MSM_BUS_SLAVE_MSS 521
+#define MSM_BUS_SLAVE_LPASS 522
+#define MSM_BUS_SYSTEM_SLAVE_CPSS_FPB 523
+#define MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB 524
+#define MSM_BUS_SYSTEM_SLAVE_MMSS_FPB 525
+#define MSM_BUS_SLAVE_CORESIGHT 526
+#define MSM_BUS_SLAVE_RIVA 527
+#define MSM_BUS_SLAVE_SMI 528
+#define MSM_BUS_MMSS_SLAVE_FAB_APPS 529
+#define MSM_BUS_MMSS_SLAVE_FAB_APPS_1 530
+#define MSM_BUS_SLAVE_MM_IMEM 531
+#define MSM_BUS_SLAVE_CRYPTO 532
+#define MSM_BUS_SLAVE_SPDM 533
+#define MSM_BUS_SLAVE_RPM 534
+#define MSM_BUS_SLAVE_RPM_MSG_RAM 535
+#define MSM_BUS_SLAVE_MPM 536
+#define MSM_BUS_SLAVE_PMIC1_SSBI1_A 537
+#define MSM_BUS_SLAVE_PMIC1_SSBI1_B 538
+#define MSM_BUS_SLAVE_PMIC1_SSBI1_C 539
+#define MSM_BUS_SLAVE_PMIC2_SSBI2_A 540
+#define MSM_BUS_SLAVE_PMIC2_SSBI2_B 541
+#define MSM_BUS_SLAVE_GSBI1_UART 542
+#define MSM_BUS_SLAVE_GSBI2_UART 543
+#define MSM_BUS_SLAVE_GSBI3_UART 544
+#define MSM_BUS_SLAVE_GSBI4_UART 545
+#define MSM_BUS_SLAVE_GSBI5_UART 546
+#define MSM_BUS_SLAVE_GSBI6_UART 547
+#define MSM_BUS_SLAVE_GSBI7_UART 548
+#define MSM_BUS_SLAVE_GSBI8_UART 549
+#define MSM_BUS_SLAVE_GSBI9_UART 550
+#define MSM_BUS_SLAVE_GSBI10_UART 551
+#define MSM_BUS_SLAVE_GSBI11_UART 552
+#define MSM_BUS_SLAVE_GSBI12_UART 553
+#define MSM_BUS_SLAVE_GSBI1_QUP 554
+#define MSM_BUS_SLAVE_GSBI2_QUP 555
+#define MSM_BUS_SLAVE_GSBI3_QUP 556
+#define MSM_BUS_SLAVE_GSBI4_QUP 557
+#define MSM_BUS_SLAVE_GSBI5_QUP 558
+#define MSM_BUS_SLAVE_GSBI6_QUP 559
+#define MSM_BUS_SLAVE_GSBI7_QUP 560
+#define MSM_BUS_SLAVE_GSBI8_QUP 561
+#define MSM_BUS_SLAVE_GSBI9_QUP 562
+#define MSM_BUS_SLAVE_GSBI10_QUP 563
+#define MSM_BUS_SLAVE_GSBI11_QUP 564
+#define MSM_BUS_SLAVE_GSBI12_QUP 565
+#define MSM_BUS_SLAVE_EBI2_NAND 566
+#define MSM_BUS_SLAVE_EBI2_CS0 567
+#define MSM_BUS_SLAVE_EBI2_CS1 568
+#define MSM_BUS_SLAVE_EBI2_CS2 569
+#define MSM_BUS_SLAVE_EBI2_CS3 570
+#define MSM_BUS_SLAVE_EBI2_CS4 571
+#define MSM_BUS_SLAVE_EBI2_CS5 572
+#define MSM_BUS_SLAVE_USB_FS1 573
+#define MSM_BUS_SLAVE_USB_FS2 574
+#define MSM_BUS_SLAVE_TSIF 575
+#define MSM_BUS_SLAVE_MSM_TSSC 576
+#define MSM_BUS_SLAVE_MSM_PDM 577
+#define MSM_BUS_SLAVE_MSM_DIMEM 578
+#define MSM_BUS_SLAVE_MSM_TCSR 579
+#define MSM_BUS_SLAVE_MSM_PRNG 580
+#define MSM_BUS_SLAVE_GSS 581
+#define MSM_BUS_SLAVE_SATA 582
+#define MSM_BUS_SLAVE_USB3 583
+#define MSM_BUS_SLAVE_WCSS 584
+#define MSM_BUS_SLAVE_OCIMEM 585
+#define MSM_BUS_SLAVE_SNOC_OCMEM 586
+#define MSM_BUS_SLAVE_SERVICE_SNOC 587
+#define MSM_BUS_SLAVE_QDSS_STM 588
+#define MSM_BUS_SLAVE_CAMERA_CFG 589
+#define MSM_BUS_SLAVE_DISPLAY_CFG 590
+#define MSM_BUS_SLAVE_OCMEM_CFG 591
+#define MSM_BUS_SLAVE_CPR_CFG 592
+#define MSM_BUS_SLAVE_CPR_XPU_CFG 593
+#define MSM_BUS_SLAVE_MISC_CFG 594
+#define MSM_BUS_SLAVE_MISC_XPU_CFG 595
+#define MSM_BUS_SLAVE_VENUS_CFG 596
+#define MSM_BUS_SLAVE_MISC_VENUS_CFG 597
+#define MSM_BUS_SLAVE_GRAPHICS_3D_CFG 598
+#define MSM_BUS_SLAVE_MMSS_CLK_CFG 599
+#define MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG 600
+#define MSM_BUS_SLAVE_MNOC_MPU_CFG 601
+#define MSM_BUS_SLAVE_ONOC_MPU_CFG 602
+#define MSM_BUS_SLAVE_SERVICE_MNOC 603
+#define MSM_BUS_SLAVE_OCMEM 604
+#define MSM_BUS_SLAVE_SERVICE_ONOC 605
+#define MSM_BUS_SLAVE_SDCC_1 606
+#define MSM_BUS_SLAVE_SDCC_3 607
+#define MSM_BUS_SLAVE_SDCC_2 608
+#define MSM_BUS_SLAVE_SDCC_4 609
+#define MSM_BUS_SLAVE_BAM_DMA 610
+#define MSM_BUS_SLAVE_BLSP_2 611
+#define MSM_BUS_SLAVE_USB_HSIC 612
+#define MSM_BUS_SLAVE_BLSP_1 613
+#define MSM_BUS_SLAVE_USB_HS 614
+#define MSM_BUS_SLAVE_PDM 615
+#define MSM_BUS_SLAVE_PERIPH_APU_CFG 616
+#define MSM_BUS_SLAVE_PNOC_MPU_CFG 617
+#define MSM_BUS_SLAVE_PRNG 618
+#define MSM_BUS_SLAVE_SERVICE_PNOC 619
+#define MSM_BUS_SLAVE_CLK_CTL 620
+#define MSM_BUS_SLAVE_CNOC_MSS 621
+#define MSM_BUS_SLAVE_SECURITY 622
+#define MSM_BUS_SLAVE_TCSR 623
+#define MSM_BUS_SLAVE_TLMM 624
+#define MSM_BUS_SLAVE_CRYPTO_0_CFG 625
+#define MSM_BUS_SLAVE_CRYPTO_1_CFG 626
+#define MSM_BUS_SLAVE_IMEM_CFG 627
+#define MSM_BUS_SLAVE_MESSAGE_RAM 628
+#define MSM_BUS_SLAVE_BIMC_CFG 629
+#define MSM_BUS_SLAVE_BOOT_ROM 630
+#define MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG 631
+#define MSM_BUS_SLAVE_PMIC_ARB 632
+#define MSM_BUS_SLAVE_SPDM_WRAPPER 633
+#define MSM_BUS_SLAVE_DEHR_CFG 634
+#define MSM_BUS_SLAVE_QDSS_CFG 635
+#define MSM_BUS_SLAVE_RBCPR_CFG 636
+#define MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG 637
+#define MSM_BUS_SLAVE_SNOC_MPU_CFG 638
+#define MSM_BUS_SLAVE_CNOC_ONOC_CFG 639
+#define MSM_BUS_SLAVE_CNOC_MNOC_CFG 640
+#define MSM_BUS_SLAVE_PNOC_CFG 641
+#define MSM_BUS_SLAVE_SNOC_CFG 642
+#define MSM_BUS_SLAVE_EBI1_DLL_CFG 643
+#define MSM_BUS_SLAVE_PHY_APU_CFG 644
+#define MSM_BUS_SLAVE_EBI1_PHY_CFG 645
+#define MSM_BUS_SLAVE_SERVICE_CNOC 646
+#define MSM_BUS_SLAVE_IPS_CFG 647
+#define MSM_BUS_SLAVE_QPIC 648
+#define MSM_BUS_SLAVE_DSI_CFG 649
+#define MSM_BUS_SLAVE_UFS_CFG 650
+#define MSM_BUS_SLAVE_RBCPR_CX_CFG 651
+#define MSM_BUS_SLAVE_RBCPR_MX_CFG 652
+#define MSM_BUS_SLAVE_PCIE_CFG 653
+#define MSM_BUS_SLAVE_USB_PHYS_CFG 654
+#define MSM_BUS_SLAVE_VIDEO_CAP_CFG 655
+#define MSM_BUS_SLAVE_AVSYNC_CFG 656
+#define MSM_BUS_SLAVE_CRYPTO_2_CFG 657
+#define MSM_BUS_SLAVE_VPU_CFG 658
+#define MSM_BUS_SLAVE_BCAST_CFG 659
+#define MSM_BUS_SLAVE_KLM_CFG 660
+#define MSM_BUS_SLAVE_GENI_IR_CFG 661
+#define MSM_BUS_SLAVE_OCMEM_GFX 662
+#define MSM_BUS_SLAVE_CATS_128 663
+#define MSM_BUS_SLAVE_OCMEM_64 664
+#define MSM_BUS_SLAVE_PCIE_0 665
+#define MSM_BUS_SLAVE_PCIE_1 666
+#define MSM_BUS_SLAVE_PCIE_0_CFG 667
+#define MSM_BUS_SLAVE_PCIE_1_CFG 668
+#define MSM_BUS_SLAVE_SRVC_MNOC 669
+#define MSM_BUS_SLAVE_USB_HS2 670
+#define MSM_BUS_SLAVE_LAST 671
+
+#define MSM_BUS_SYSTEM_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB
+#define MSM_BUS_CPSS_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_CPSS_FPB
+
+/*
+ * IDs used in RPM messages
+ */
+#define ICBID_MASTER_APPSS_PROC 0
+#define ICBID_MASTER_MSS_PROC 1
+#define ICBID_MASTER_MNOC_BIMC 2
+#define ICBID_MASTER_SNOC_BIMC 3
+#define ICBID_MASTER_SNOC_BIMC_0 ICBID_MASTER_SNOC_BIMC
+#define ICBID_MASTER_CNOC_MNOC_MMSS_CFG 4
+#define ICBID_MASTER_CNOC_MNOC_CFG 5
+#define ICBID_MASTER_GFX3D 6
+#define ICBID_MASTER_JPEG 7
+#define ICBID_MASTER_MDP 8
+#define ICBID_MASTER_MDP0 ICBID_MASTER_MDP
+#define ICBID_MASTER_MDPS ICBID_MASTER_MDP
+#define ICBID_MASTER_VIDEO 9
+#define ICBID_MASTER_VIDEO_P0 ICBID_MASTER_VIDEO
+#define ICBID_MASTER_VIDEO_P1 10
+#define ICBID_MASTER_VFE 11
+#define ICBID_MASTER_CNOC_ONOC_CFG 12
+#define ICBID_MASTER_JPEG_OCMEM 13
+#define ICBID_MASTER_MDP_OCMEM 14
+#define ICBID_MASTER_VIDEO_P0_OCMEM 15
+#define ICBID_MASTER_VIDEO_P1_OCMEM 16
+#define ICBID_MASTER_VFE_OCMEM 17
+#define ICBID_MASTER_LPASS_AHB 18
+#define ICBID_MASTER_QDSS_BAM 19
+#define ICBID_MASTER_SNOC_CFG 20
+#define ICBID_MASTER_BIMC_SNOC 21
+#define ICBID_MASTER_CNOC_SNOC 22
+#define ICBID_MASTER_CRYPTO 23
+#define ICBID_MASTER_CRYPTO_CORE0 ICBID_MASTER_CRYPTO
+#define ICBID_MASTER_CRYPTO_CORE1 24
+#define ICBID_MASTER_LPASS_PROC 25
+#define ICBID_MASTER_MSS 26
+#define ICBID_MASTER_MSS_NAV 27
+#define ICBID_MASTER_OCMEM_DMA 28
+#define ICBID_MASTER_PNOC_SNOC 29
+#define ICBID_MASTER_WCSS 30
+#define ICBID_MASTER_QDSS_ETR 31
+#define ICBID_MASTER_USB3 32
+#define ICBID_MASTER_USB3_0 ICBID_MASTER_USB3
+#define ICBID_MASTER_SDCC_1 33
+#define ICBID_MASTER_SDCC_3 34
+#define ICBID_MASTER_SDCC_2 35
+#define ICBID_MASTER_SDCC_4 36
+#define ICBID_MASTER_TSIF 37
+#define ICBID_MASTER_BAM_DMA 38
+#define ICBID_MASTER_BLSP_2 39
+#define ICBID_MASTER_USB_HSIC 40
+#define ICBID_MASTER_BLSP_1 41
+#define ICBID_MASTER_USB_HS 42
+#define ICBID_MASTER_USB_HS1 ICBID_MASTER_USB_HS
+#define ICBID_MASTER_PNOC_CFG 43
+#define ICBID_MASTER_SNOC_PNOC 44
+#define ICBID_MASTER_RPM_INST 45
+#define ICBID_MASTER_RPM_DATA 46
+#define ICBID_MASTER_RPM_SYS 47
+#define ICBID_MASTER_DEHR 48
+#define ICBID_MASTER_QDSS_DAP 49
+#define ICBID_MASTER_SPDM 50
+#define ICBID_MASTER_TIC 51
+#define ICBID_MASTER_SNOC_CNOC 52
+#define ICBID_MASTER_GFX3D_OCMEM 53
+#define ICBID_MASTER_GFX3D_GMEM ICBID_MASTER_GFX3D_OCMEM
+#define ICBID_MASTER_OVIRT_SNOC 54
+#define ICBID_MASTER_SNOC_OVIRT 55
+#define ICBID_MASTER_SNOC_GVIRT ICBID_MASTER_SNOC_OVIRT
+#define ICBID_MASTER_ONOC_OVIRT 56
+#define ICBID_MASTER_USB_HS2 57
+#define ICBID_MASTER_QPIC 58
+#define ICBID_MASTER_IPA 59
+#define ICBID_MASTER_DSI 60
+#define ICBID_MASTER_MDP1 61
+#define ICBID_MASTER_MDPE ICBID_MASTER_MDP1
+#define ICBID_MASTER_VPU_PROC 62
+#define ICBID_MASTER_VPU 63
+#define ICBID_MASTER_VPU0 ICBID_MASTER_VPU
+#define ICBID_MASTER_CRYPTO_CORE2 64
+#define ICBID_MASTER_PCIE_0 65
+#define ICBID_MASTER_PCIE_1 66
+#define ICBID_MASTER_SATA 67
+#define ICBID_MASTER_UFS 68
+#define ICBID_MASTER_USB3_1 69
+#define ICBID_MASTER_VIDEO_OCMEM 70
+#define ICBID_MASTER_VPU1 71
+#define ICBID_MASTER_VCAP 72
+#define ICBID_MASTER_EMAC 73
+#define ICBID_MASTER_BCAST 74
+#define ICBID_MASTER_MMSS_PROC 75
+#define ICBID_MASTER_SNOC_BIMC_1 76
+#define ICBID_MASTER_SNOC_PCNOC 77
+#define ICBID_MASTER_AUDIO 78
+#define ICBID_MASTER_MM_INT_0 79
+#define ICBID_MASTER_MM_INT_1 80
+#define ICBID_MASTER_MM_INT_2 81
+#define ICBID_MASTER_MM_INT_BIMC 82
+#define ICBID_MASTER_MSS_INT 83
+#define ICBID_MASTER_PCNOC_CFG 84
+#define ICBID_MASTER_PCNOC_INT_0 85
+#define ICBID_MASTER_PCNOC_INT_1 86
+#define ICBID_MASTER_PCNOC_M_0 87
+#define ICBID_MASTER_PCNOC_M_1 88
+#define ICBID_MASTER_PCNOC_S_0 89
+#define ICBID_MASTER_PCNOC_S_1 90
+#define ICBID_MASTER_PCNOC_S_2 91
+#define ICBID_MASTER_PCNOC_S_3 92
+#define ICBID_MASTER_PCNOC_S_4 93
+#define ICBID_MASTER_PCNOC_S_6 94
+#define ICBID_MASTER_PCNOC_S_7 95
+#define ICBID_MASTER_PCNOC_S_8 96
+#define ICBID_MASTER_PCNOC_S_9 97
+#define ICBID_MASTER_QDSS_INT 98
+#define ICBID_MASTER_SNOC_INT_0 99
+#define ICBID_MASTER_SNOC_INT_1 100
+#define ICBID_MASTER_SNOC_INT_BIMC 101
+#define ICBID_MASTER_TCU_0 102
+#define ICBID_MASTER_TCU_1 103
+#define ICBID_MASTER_BIMC_INT_0 104
+#define ICBID_MASTER_BIMC_INT_1 105
+#define ICBID_MASTER_CAMERA 106
+#define ICBID_MASTER_RICA 107
+
+#define ICBID_SLAVE_EBI1 0
+#define ICBID_SLAVE_APPSS_L2 1
+#define ICBID_SLAVE_BIMC_SNOC 2
+#define ICBID_SLAVE_CAMERA_CFG 3
+#define ICBID_SLAVE_DISPLAY_CFG 4
+#define ICBID_SLAVE_OCMEM_CFG 5
+#define ICBID_SLAVE_CPR_CFG 6
+#define ICBID_SLAVE_CPR_XPU_CFG 7
+#define ICBID_SLAVE_MISC_CFG 8
+#define ICBID_SLAVE_MISC_XPU_CFG 9
+#define ICBID_SLAVE_VENUS_CFG 10
+#define ICBID_SLAVE_GFX3D_CFG 11
+#define ICBID_SLAVE_MMSS_CLK_CFG 12
+#define ICBID_SLAVE_MMSS_CLK_XPU_CFG 13
+#define ICBID_SLAVE_MNOC_MPU_CFG 14
+#define ICBID_SLAVE_ONOC_MPU_CFG 15
+#define ICBID_SLAVE_MNOC_BIMC 16
+#define ICBID_SLAVE_SERVICE_MNOC 17
+#define ICBID_SLAVE_OCMEM 18
+#define ICBID_SLAVE_GMEM ICBID_SLAVE_OCMEM
+#define ICBID_SLAVE_SERVICE_ONOC 19
+#define ICBID_SLAVE_APPSS 20
+#define ICBID_SLAVE_LPASS 21
+#define ICBID_SLAVE_USB3 22
+#define ICBID_SLAVE_USB3_0 ICBID_SLAVE_USB3
+#define ICBID_SLAVE_WCSS 23
+#define ICBID_SLAVE_SNOC_BIMC 24
+#define ICBID_SLAVE_SNOC_BIMC_0 ICBID_SLAVE_SNOC_BIMC
+#define ICBID_SLAVE_SNOC_CNOC 25
+#define ICBID_SLAVE_IMEM 26
+#define ICBID_SLAVE_OCIMEM ICBID_SLAVE_IMEM
+#define ICBID_SLAVE_SNOC_OVIRT 27
+#define ICBID_SLAVE_SNOC_GVIRT ICBID_SLAVE_SNOC_OVIRT
+#define ICBID_SLAVE_SNOC_PNOC 28
+#define ICBID_SLAVE_SNOC_PCNOC ICBID_SLAVE_SNOC_PNOC
+#define ICBID_SLAVE_SERVICE_SNOC 29
+#define ICBID_SLAVE_QDSS_STM 30
+#define ICBID_SLAVE_SDCC_1 31
+#define ICBID_SLAVE_SDCC_3 32
+#define ICBID_SLAVE_SDCC_2 33
+#define ICBID_SLAVE_SDCC_4 34
+#define ICBID_SLAVE_TSIF 35
+#define ICBID_SLAVE_BAM_DMA 36
+#define ICBID_SLAVE_BLSP_2 37
+#define ICBID_SLAVE_USB_HSIC 38
+#define ICBID_SLAVE_BLSP_1 39
+#define ICBID_SLAVE_USB_HS 40
+#define ICBID_SLAVE_USB_HS1 ICBID_SLAVE_USB_HS
+#define ICBID_SLAVE_PDM 41
+#define ICBID_SLAVE_PERIPH_APU_CFG 42
+#define ICBID_SLAVE_PNOC_MPU_CFG 43
+#define ICBID_SLAVE_PRNG 44
+#define ICBID_SLAVE_PNOC_SNOC 45
+#define ICBID_SLAVE_PCNOC_SNOC ICBID_SLAVE_PNOC_SNOC
+#define ICBID_SLAVE_SERVICE_PNOC 46
+#define ICBID_SLAVE_CLK_CTL 47
+#define ICBID_SLAVE_CNOC_MSS 48
+#define ICBID_SLAVE_PCNOC_MSS ICBID_SLAVE_CNOC_MSS
+#define ICBID_SLAVE_SECURITY 49
+#define ICBID_SLAVE_TCSR 50
+#define ICBID_SLAVE_TLMM 51
+#define ICBID_SLAVE_CRYPTO_0_CFG 52
+#define ICBID_SLAVE_CRYPTO_1_CFG 53
+#define ICBID_SLAVE_IMEM_CFG 54
+#define ICBID_SLAVE_MESSAGE_RAM 55
+#define ICBID_SLAVE_BIMC_CFG 56
+#define ICBID_SLAVE_BOOT_ROM 57
+#define ICBID_SLAVE_CNOC_MNOC_MMSS_CFG 58
+#define ICBID_SLAVE_PMIC_ARB 59
+#define ICBID_SLAVE_SPDM_WRAPPER 60
+#define ICBID_SLAVE_DEHR_CFG 61
+#define ICBID_SLAVE_MPM 62
+#define ICBID_SLAVE_QDSS_CFG 63
+#define ICBID_SLAVE_RBCPR_CFG 64
+#define ICBID_SLAVE_RBCPR_CX_CFG ICBID_SLAVE_RBCPR_CFG
+#define ICBID_SLAVE_RBCPR_QDSS_APU_CFG 65
+#define ICBID_SLAVE_CNOC_MNOC_CFG 66
+#define ICBID_SLAVE_SNOC_MPU_CFG 67
+#define ICBID_SLAVE_CNOC_ONOC_CFG 68
+#define ICBID_SLAVE_PNOC_CFG 69
+#define ICBID_SLAVE_SNOC_CFG 70
+#define ICBID_SLAVE_EBI1_DLL_CFG 71
+#define ICBID_SLAVE_PHY_APU_CFG 72
+#define ICBID_SLAVE_EBI1_PHY_CFG 73
+#define ICBID_SLAVE_RPM 74
+#define ICBID_SLAVE_CNOC_SNOC 75
+#define ICBID_SLAVE_SERVICE_CNOC 76
+#define ICBID_SLAVE_OVIRT_SNOC 77
+#define ICBID_SLAVE_OVIRT_OCMEM 78
+#define ICBID_SLAVE_USB_HS2 79
+#define ICBID_SLAVE_QPIC 80
+#define ICBID_SLAVE_IPS_CFG 81
+#define ICBID_SLAVE_DSI_CFG 82
+#define ICBID_SLAVE_USB3_1 83
+#define ICBID_SLAVE_PCIE_0 84
+#define ICBID_SLAVE_PCIE_1 85
+#define ICBID_SLAVE_PSS_SMMU_CFG 86
+#define ICBID_SLAVE_CRYPTO_2_CFG 87
+#define ICBID_SLAVE_PCIE_0_CFG 88
+#define ICBID_SLAVE_PCIE_1_CFG 89
+#define ICBID_SLAVE_SATA_CFG 90
+#define ICBID_SLAVE_SPSS_GENI_IR 91
+#define ICBID_SLAVE_UFS_CFG 92
+#define ICBID_SLAVE_AVSYNC_CFG 93
+#define ICBID_SLAVE_VPU_CFG 94
+#define ICBID_SLAVE_USB_PHY_CFG 95
+#define ICBID_SLAVE_RBCPR_MX_CFG 96
+#define ICBID_SLAVE_PCIE_PARF 97
+#define ICBID_SLAVE_VCAP_CFG 98
+#define ICBID_SLAVE_EMAC_CFG 99
+#define ICBID_SLAVE_BCAST_CFG 100
+#define ICBID_SLAVE_KLM_CFG 101
+#define ICBID_SLAVE_DISPLAY_PWM 102
+#define ICBID_SLAVE_GENI 103
+#define ICBID_SLAVE_SNOC_BIMC_1 104
+#define ICBID_SLAVE_AUDIO 105
+#define ICBID_SLAVE_CATS_0 106
+#define ICBID_SLAVE_CATS_1 107
+#define ICBID_SLAVE_MM_INT_0 108
+#define ICBID_SLAVE_MM_INT_1 109
+#define ICBID_SLAVE_MM_INT_2 110
+#define ICBID_SLAVE_MM_INT_BIMC 111
+#define ICBID_SLAVE_MMU_MODEM_XPU_CFG 112
+#define ICBID_SLAVE_MSS_INT 113
+#define ICBID_SLAVE_PCNOC_INT_0 114
+#define ICBID_SLAVE_PCNOC_INT_1 115
+#define ICBID_SLAVE_PCNOC_M_0 116
+#define ICBID_SLAVE_PCNOC_M_1 117
+#define ICBID_SLAVE_PCNOC_S_0 118
+#define ICBID_SLAVE_PCNOC_S_1 119
+#define ICBID_SLAVE_PCNOC_S_2 120
+#define ICBID_SLAVE_PCNOC_S_3 121
+#define ICBID_SLAVE_PCNOC_S_4 122
+#define ICBID_SLAVE_PCNOC_S_6 123
+#define ICBID_SLAVE_PCNOC_S_7 124
+#define ICBID_SLAVE_PCNOC_S_8 125
+#define ICBID_SLAVE_PCNOC_S_9 126
+#define ICBID_SLAVE_PRNG_XPU_CFG 127
+#define ICBID_SLAVE_QDSS_INT 128
+#define ICBID_SLAVE_RPM_XPU_CFG 129
+#define ICBID_SLAVE_SNOC_INT_0 130
+#define ICBID_SLAVE_SNOC_INT_1 131
+#define ICBID_SLAVE_SNOC_INT_BIMC 132
+#define ICBID_SLAVE_TCU 133
+#define ICBID_SLAVE_BIMC_INT_0 134
+#define ICBID_SLAVE_BIMC_INT_1 135
+#define ICBID_SLAVE_RICA_CFG 136
+
+#endif
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 3ecc07d0da77..4febe7b0d7c5 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -202,6 +202,8 @@ struct clk_ops {
struct clk_rate_request *req);
int (*set_parent)(struct clk_hw *hw, u8 index);
u8 (*get_parent)(struct clk_hw *hw);
+ struct clk_hw *(*get_safe_parent)(struct clk_hw *hw,
+ unsigned long *safe_freq);
int (*set_rate)(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate);
int (*set_rate_and_parent)(struct clk_hw *hw,
@@ -432,7 +434,7 @@ void clk_unregister_divider(struct clk *clk);
struct clk_mux {
struct clk_hw hw;
void __iomem *reg;
- u32 *table;
+ unsigned int *table;
u32 mask;
u8 shift;
u8 flags;
@@ -448,6 +450,11 @@ struct clk_mux {
extern const struct clk_ops clk_mux_ops;
extern const struct clk_ops clk_mux_ro_ops;
+unsigned int clk_mux_get_parent(struct clk_hw *hw, unsigned int val,
+ unsigned int *table, unsigned long flags);
+unsigned int clk_mux_reindex(u8 index, unsigned int *table,
+ unsigned long flags);
+
struct clk *clk_register_mux(struct device *dev, const char *name,
const char * const *parent_names, u8 num_parents,
unsigned long flags,
@@ -458,7 +465,7 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
const char * const *parent_names, u8 num_parents,
unsigned long flags,
void __iomem *reg, u8 shift, u32 mask,
- u8 clk_mux_flags, u32 *table, spinlock_t *lock);
+ u8 clk_mux_flags, unsigned int *table, spinlock_t *lock);
void clk_unregister_mux(struct clk *clk);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index df5561954774..978fe9f3b0fa 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -69,6 +69,8 @@ struct cpufreq_policy {
unsigned int kobj_cpu; /* cpu managing sysfs files, can be offline */
struct clk *clk;
+ struct clk *l2_clk; /* L2 clock */
+ unsigned int l2_rate[3]; /* L2 bus clock rate thresholds */
struct cpufreq_cpuinfo cpuinfo;/* see above */
unsigned int min; /* in kHz */
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h
index c8e1831d7572..eb1b9d727f39 100644
--- a/include/linux/dma-attrs.h
+++ b/include/linux/dma-attrs.h
@@ -18,6 +18,8 @@ enum dma_attr {
DMA_ATTR_NO_KERNEL_MAPPING,
DMA_ATTR_SKIP_CPU_SYNC,
DMA_ATTR_FORCE_CONTIGUOUS,
+ DMA_ATTR_STRONGLY_ORDERED,
+ DMA_ATTR_SKIP_ZEROING,
DMA_ATTR_MAX,
};
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index c518eb589260..f0586b08c9e3 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -324,6 +324,7 @@ phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
int memblock_is_memory(phys_addr_t addr);
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
+int memblock_overlaps_memory(phys_addr_t base, phys_addr_t size);
int memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
diff --git a/include/linux/mfd/qcom_rpm.h b/include/linux/mfd/qcom_rpm.h
index 742ebf1b76ca..a7af2edf0f64 100644
--- a/include/linux/mfd/qcom_rpm.h
+++ b/include/linux/mfd/qcom_rpm.h
@@ -9,5 +9,6 @@ struct qcom_rpm;
#define QCOM_RPM_SLEEP_STATE 1
int qcom_rpm_write(struct qcom_rpm *rpm, int state, int resource, u32 *buf, size_t count);
+int qcom_rpm_read(struct qcom_rpm *rpm, int resource, u32 *buf, size_t count);
#endif
diff --git a/include/linux/msm-bus-board.h b/include/linux/msm-bus-board.h
new file mode 100644
index 000000000000..c9d648d38ec4
--- /dev/null
+++ b/include/linux/msm-bus-board.h
@@ -0,0 +1,198 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ARCH_MSM_BUS_BOARD_H
+#define __ASM_ARCH_MSM_BUS_BOARD_H
+
+#include <linux/types.h>
+#include <linux/input.h>
+
+enum context {
+ DUAL_CTX,
+ ACTIVE_CTX,
+ NUM_CTX
+};
+
+struct msm_bus_fabric_registration {
+ unsigned int id;
+ const char *name;
+ struct msm_bus_node_info *info;
+ unsigned int len;
+ int ahb;
+ const char *fabclk[NUM_CTX];
+ const char *iface_clk;
+ unsigned int offset;
+ unsigned int haltid;
+ unsigned int rpm_enabled;
+ unsigned int nmasters;
+ unsigned int nslaves;
+ unsigned int ntieredslaves;
+ bool il_flag;
+ const struct msm_bus_board_algorithm *board_algo;
+ int hw_sel;
+ void *hw_data;
+ uint32_t qos_freq;
+ uint32_t qos_baseoffset;
+ u64 nr_lim_thresh;
+ uint32_t eff_fact;
+ uint32_t qos_delta;
+ bool virt;
+};
+
+struct msm_bus_device_node_registration {
+ struct msm_bus_node_device_type *info;
+ unsigned int num_devices;
+ bool virt;
+};
+
+enum msm_bus_bw_tier_type {
+ MSM_BUS_BW_TIER1 = 1,
+ MSM_BUS_BW_TIER2,
+ MSM_BUS_BW_COUNT,
+ MSM_BUS_BW_SIZE = 0x7FFFFFFF,
+};
+
+struct msm_bus_halt_vector {
+ uint32_t haltval;
+ uint32_t haltmask;
+};
+
+extern struct msm_bus_fabric_registration msm_bus_apps_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_sys_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_cpss_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_def_fab_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_8960_apps_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_sg_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_sys_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_cpss_fpb_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_8064_apps_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8064_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8064_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8064_sys_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8064_cpss_fpb_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_9615_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_9615_def_fab_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_8930_apps_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8930_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8930_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8930_sys_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8930_cpss_fpb_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_8974_sys_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_mmss_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_bimc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_ocmem_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_periph_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_config_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_ocmem_vnoc_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_9625_sys_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_9625_bimc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_9625_periph_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_9625_config_noc_pdata;
+
+extern int msm_bus_device_match_adhoc(struct device *dev, void *id);
+
+void msm_bus_rpm_set_mt_mask(void);
+int msm_bus_board_rpm_get_il_ids(uint16_t *id);
+int msm_bus_board_get_iid(int id);
+
+#define NFAB_MSM8226 6
+#define NFAB_MSM8610 5
+
+/*
+ * These macros specify the convention followed for allocating
+ * ids to fabrics, masters and slaves for 8x60.
+ *
+ * A node can be identified as a master/slave/fabric by using
+ * these ids.
+ */
+#define FABRIC_ID_KEY 1024
+#define SLAVE_ID_KEY ((FABRIC_ID_KEY) >> 1)
+#define MAX_FAB_KEY 7168 /* OR(All fabric ids) */
+#define INT_NODE_START 10000
+
+#define GET_FABID(id) ((id) & MAX_FAB_KEY)
+
+#define NODE_ID(id) ((id) & (FABRIC_ID_KEY - 1))
+#define IS_SLAVE(id) ((NODE_ID(id)) >= SLAVE_ID_KEY ? 1 : 0)
+#define CHECK_ID(iid, id) (((iid & id) != id) ? -ENXIO : iid)
+
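
As a quick hedged illustration of the 8x60 ID convention above, the arithmetic below decodes a composite node ID; the value 2565 is invented purely for the example (fabric base 2048 + SLAVE_ID_KEY + 5) and does not correspond to any real board file.

#include <linux/kernel.h>
#include <linux/msm-bus-board.h>

static void example_decode_node_id(void)
{
	int id = 2565;	/* hypothetical: fabric base 2048 + SLAVE_ID_KEY + 5 */

	/* GET_FABID() masks with MAX_FAB_KEY (7168): 2565 & 7168 == 2048 */
	/* NODE_ID() masks with FABRIC_ID_KEY - 1 (1023): 2565 & 1023 == 517 */
	/* IS_SLAVE(): 517 >= SLAVE_ID_KEY (512), so this node is a slave */
	pr_info("fab %d node %d slave %d\n",
		GET_FABID(id), NODE_ID(id), IS_SLAVE(id));
}
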
+/*
+ * The following macros are used to format the data for port halt
+ * and unhalt requests.
+ */
+#define MSM_BUS_CLK_HALT 0x1
+#define MSM_BUS_CLK_HALT_MASK 0x1
+#define MSM_BUS_CLK_HALT_FIELDSIZE 0x1
+#define MSM_BUS_CLK_UNHALT 0x0
+
+#define MSM_BUS_MASTER_SHIFT(master, fieldsize) \
+ ((master) * (fieldsize))
+
+#define MSM_BUS_SET_BITFIELD(word, fieldmask, fieldvalue) \
+ { \
+ (word) &= ~(fieldmask); \
+ (word) |= (fieldvalue); \
+ }
+
+
+#define MSM_BUS_MASTER_HALT(u32haltmask, u32haltval, master) \
+ MSM_BUS_SET_BITFIELD(u32haltmask, \
+ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE), \
+ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE))\
+ MSM_BUS_SET_BITFIELD(u32haltval, \
+ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE), \
+ MSM_BUS_CLK_HALT<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE))\
+
+#define MSM_BUS_MASTER_UNHALT(u32haltmask, u32haltval, master) \
+ MSM_BUS_SET_BITFIELD(u32haltmask, \
+ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE), \
+ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE))\
+ MSM_BUS_SET_BITFIELD(u32haltval, \
+ MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE), \
+ MSM_BUS_CLK_UNHALT<<MSM_BUS_MASTER_SHIFT((master),\
+ MSM_BUS_CLK_HALT_FIELDSIZE))\
+
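
As a minimal sketch of how the halt formatting macros above compose the mask/value word pair for a port halt request, the snippet below uses an arbitrary master port number 3; it is illustrative only and not taken from any platform data.

#include <linux/msm-bus-board.h>

static void example_format_halt_request(void)
{
	u32 haltmask = 0;
	u32 haltval = 0;

	/*
	 * With MSM_BUS_CLK_HALT_FIELDSIZE == 1, master 3 occupies bit 3:
	 * haltmask becomes 0x8 and haltval becomes 0x8 (halt requested).
	 */
	MSM_BUS_MASTER_HALT(haltmask, haltval, 3);

	/* Clearing the same bit in haltval turns the request into an unhalt. */
	MSM_BUS_MASTER_UNHALT(haltmask, haltval, 3);
}
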
+#define RPM_BUS_SLAVE_REQ 0x766c7362
+#define RPM_BUS_MASTER_REQ 0x73616d62
+
+enum msm_bus_rpm_slave_field_type {
+ RPM_SLAVE_FIELD_BW = 0x00007762,
+};
+
+enum msm_bus_rpm_mas_field_type {
+ RPM_MASTER_FIELD_BW = 0x00007762,
+ RPM_MASTER_FIELD_BW_T0 = 0x30747762,
+ RPM_MASTER_FIELD_BW_T1 = 0x31747762,
+ RPM_MASTER_FIELD_BW_T2 = 0x32747762,
+};
+
+#include <dt-bindings/msm/msm-bus-ids.h>
+
+
+#endif /*__ASM_ARCH_MSM_BUS_BOARD_H */
diff --git a/include/linux/msm-bus.h b/include/linux/msm-bus.h
new file mode 100644
index 000000000000..1f5edc964c49
--- /dev/null
+++ b/include/linux/msm-bus.h
@@ -0,0 +1,139 @@
+/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_H
+#define _ARCH_ARM_MACH_MSM_BUS_H
+
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+
+/*
+ * Macros for clients to convert their data to ib and ab
+ * Ws : Time window over which to transfer the data in SECONDS
+ * Bs : Size of the data block in bytes
+ * Per : Recurrence period
+ * Tb : Throughput bandwidth to prevent stalling
+ * R : Ratio of actual bandwidth used to Tb
+ * Ib : Instantaneous bandwidth
+ * Ab : Arbitrated bandwidth
+ *
+ * IB_RECURRBLOCK and AB_RECURRBLOCK:
+ * These are used if the requirement is to transfer a
+ * recurring block of data over a known time window.
+ *
+ * IB_THROUGHPUTBW and AB_THROUGHPUTBW:
+ * These are used for CPU style masters. Here the requirement
+ * is to have minimum throughput bandwidth available to avoid
+ * stalling.
+ */
+#define IB_RECURRBLOCK(Ws, Bs) ((Ws) == 0 ? 0 : ((Bs)/(Ws)))
+#define AB_RECURRBLOCK(Bs, Per) ((Per) == 0 ? 0 : ((Bs)/(Per)))
+#define IB_THROUGHPUTBW(Tb) (Tb)
+#define AB_THROUGHPUTBW(Tb, R) ((Tb) * (R))
+
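
A brief worked example of the helpers above, with invented numbers chosen so the integer division stays exact (a 64 MiB block in a one-second window, and a CPU-style master that needs 200 MB/s at a ratio of 1); real clients derive these figures from their own use cases.

#include <linux/kernel.h>
#include <linux/msm-bus.h>

static void example_bandwidth_values(void)
{
	/* Recurring 64 MiB block delivered within a 1 second window. */
	u64 ib = IB_RECURRBLOCK(1, 64 * 1024 * 1024);	/* 67108864 B/s */

	/* CPU-style master that must never drop below 200 MB/s, ratio 1. */
	u64 ab = AB_THROUGHPUTBW(IB_THROUGHPUTBW(200000000ULL), 1);

	pr_info("ib %llu ab %llu\n", ib, ab);
}
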
+struct msm_bus_vectors {
+ int src; /* Master */
+ int dst; /* Slave */
+ uint64_t ab; /* Arbitrated bandwidth */
+ uint64_t ib; /* Instantaneous bandwidth */
+};
+
+struct msm_bus_paths {
+ int num_paths;
+ struct msm_bus_vectors *vectors;
+};
+
+struct msm_bus_scale_pdata {
+ struct msm_bus_paths *usecase;
+ int num_usecases;
+ const char *name;
+ /*
+ * If the active_only flag is set to 1, the BW request is applied
+ * only when at least one CPU is active (powered on). If the flag
+ * is set to 0, then the BW request is always applied irrespective
+ * of the CPU state.
+ */
+ unsigned int active_only;
+};
+
+/* Scaling APIs */
+
+/*
+ * msm_bus_scale_register_client() returns a handle for the client, which
+ * should be passed to msm_bus_scale_client_update_request().
+ * The function returns 0 if the bus driver is unable to register the client.
+ */
+
+#if (defined(CONFIG_MSM_BUS_SCALING) || defined(CONFIG_BUS_TOPOLOGY_ADHOC))
+int __init msm_bus_fabric_init_driver(void);
+uint32_t msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata);
+int msm_bus_scale_client_update_request(uint32_t cl, unsigned int index);
+void msm_bus_scale_unregister_client(uint32_t cl);
+/* AXI Port configuration APIs */
+int msm_bus_axi_porthalt(int master_port);
+int msm_bus_axi_portunhalt(int master_port);
+
+#else
+static inline int __init msm_bus_fabric_init_driver(void) { return 0; }
+
+static inline uint32_t
+msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata)
+{
+ return 1;
+}
+
+static inline int
+msm_bus_scale_client_update_request(uint32_t cl, unsigned int index)
+{
+ return 0;
+}
+
+static inline void
+msm_bus_scale_unregister_client(uint32_t cl)
+{
+}
+
+static inline int msm_bus_axi_porthalt(int master_port)
+{
+ return 0;
+}
+
+static inline int msm_bus_axi_portunhalt(int master_port)
+{
+ return 0;
+}
+#endif
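
To make the registration flow described in the comment above concrete, here is a hedged client sketch; the master ID (1), the MSM_BUS_SLAVE_EBI2_CS0 endpoint, the bandwidth figures and the "example-client" name are illustrative assumptions rather than values from any real board data.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>

static struct msm_bus_vectors example_idle_vec[] = {
	{ .src = 1, .dst = MSM_BUS_SLAVE_EBI2_CS0, .ab = 0, .ib = 0 },
};

static struct msm_bus_vectors example_active_vec[] = {
	{ .src = 1, .dst = MSM_BUS_SLAVE_EBI2_CS0,
	  .ab = 100000000ULL, .ib = 200000000ULL },
};

static struct msm_bus_paths example_usecases[] = {
	{ .num_paths = 1, .vectors = example_idle_vec },
	{ .num_paths = 1, .vectors = example_active_vec },
};

static struct msm_bus_scale_pdata example_pdata = {
	.usecase = example_usecases,
	.num_usecases = ARRAY_SIZE(example_usecases),
	.name = "example-client",
	.active_only = 0,
};

static int example_vote_for_bandwidth(void)
{
	uint32_t cl = msm_bus_scale_register_client(&example_pdata);

	if (!cl)			/* 0 means registration failed */
		return -EINVAL;

	/* Switch from usecase 0 (idle) to usecase 1 (active). */
	return msm_bus_scale_client_update_request(cl, 1);
}
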
+
+#if defined(CONFIG_OF) && defined(CONFIG_MSM_BUS_SCALING)
+struct msm_bus_scale_pdata *msm_bus_pdata_from_node(
+ struct platform_device *pdev, struct device_node *of_node);
+struct msm_bus_scale_pdata *msm_bus_cl_get_pdata(struct platform_device *pdev);
+void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata);
+#else
+static inline struct msm_bus_scale_pdata
+*msm_bus_cl_get_pdata(struct platform_device *pdev)
+{
+ return NULL;
+}
+
+static inline struct msm_bus_scale_pdata *msm_bus_pdata_from_node(
+ struct platform_device *pdev, struct device_node *of_node)
+{
+ return NULL;
+}
+
+static inline void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata)
+{
+}
+#endif
+#endif /*_ARCH_ARM_MACH_MSM_BUS_H*/
diff --git a/include/linux/msm_iommu_domains.h b/include/linux/msm_iommu_domains.h
new file mode 100644
index 000000000000..811d7737ddb5
--- /dev/null
+++ b/include/linux/msm_iommu_domains.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_MSM_IOMMU_DOMAINS_H
+#define _LINUX_MSM_IOMMU_DOMAINS_H
+
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/genalloc.h>
+#include <linux/rbtree.h>
+#include <linux/dma-buf.h>
+
+#define MSM_IOMMU_DOMAIN_SECURE 0x1
+
+enum {
+ VIDEO_DOMAIN,
+ CAMERA_DOMAIN,
+ DISPLAY_READ_DOMAIN,
+ DISPLAY_WRITE_DOMAIN,
+ ROTATOR_SRC_DOMAIN,
+ ROTATOR_DST_DOMAIN,
+ MAX_DOMAINS
+};
+
+enum {
+ VIDEO_FIRMWARE_POOL,
+ VIDEO_MAIN_POOL,
+ GEN_POOL,
+};
+
+struct mem_pool {
+ struct mutex pool_mutex;
+ struct gen_pool *gpool;
+ phys_addr_t paddr;
+ unsigned long size;
+ unsigned long free;
+ unsigned int id;
+};
+
+struct msm_iommu_domain {
+ /* iommu domain to map in */
+ struct iommu_domain *domain;
+ /* total number of allocations from this domain */
+ atomic_t allocation_cnt;
+ /* number of iova pools */
+ int npools;
+ /*
+ * array of gen_pools for allocating iovas.
+ * behavior is undefined if these overlap
+ */
+ struct mem_pool *iova_pools;
+};
+
+struct msm_iommu_domain_name {
+ char *name;
+ int domain;
+};
+
+struct iommu_domains_pdata {
+ struct msm_iommu_domain *domains;
+ int ndomains;
+ struct msm_iommu_domain_name *domain_names;
+ int nnames;
+ unsigned int domain_alloc_flags;
+};
+
+struct msm_iova_partition {
+ unsigned long start;
+ unsigned long size;
+};
+
+struct msm_iova_layout {
+ struct msm_iova_partition *partitions;
+ int npartitions;
+ const char *client_name;
+ unsigned int domain_flags;
+ unsigned int is_secure;
+};
+
+#if defined(CONFIG_QCOM_IOMMU)
+extern void msm_iommu_set_client_name(struct iommu_domain *domain,
+ char const *name);
+extern struct iommu_domain *msm_get_iommu_domain(int domain_num);
+extern int msm_allocate_iova_address(unsigned int iommu_domain,
+ unsigned int partition_no,
+ unsigned long size,
+ unsigned long align,
+ unsigned long *iova);
+
+extern void msm_free_iova_address(unsigned long iova,
+ unsigned int iommu_domain,
+ unsigned int partition_no,
+ unsigned long size);
+
+extern int msm_use_iommu(void);
+
+extern int msm_iommu_map_extra(struct iommu_domain *domain,
+ unsigned long start_iova,
+ phys_addr_t phys_addr,
+ unsigned long size,
+ unsigned long page_size,
+ int cached);
+
+extern void msm_iommu_unmap_extra(struct iommu_domain *domain,
+ unsigned long start_iova,
+ unsigned long size,
+ unsigned long page_size);
+
+extern int msm_iommu_map_contig_buffer(phys_addr_t phys,
+ unsigned int domain_no,
+ unsigned int partition_no,
+ unsigned long size,
+ unsigned long align,
+ unsigned long cached,
+ dma_addr_t *iova_val);
+
+extern void msm_iommu_unmap_contig_buffer(dma_addr_t iova,
+ unsigned int domain_no,
+ unsigned int partition_no,
+ unsigned long size);
+
+extern int msm_register_domain(struct msm_iova_layout *layout);
+extern int msm_unregister_domain(struct iommu_domain *domain);
+
+int msm_map_dma_buf(struct dma_buf *dma_buf, struct sg_table *table,
+ int domain_num, int partition_num, unsigned long align,
+ unsigned long iova_length, unsigned long *iova,
+ unsigned long *buffer_size,
+ unsigned long flags, unsigned long iommu_flags);
+
+void msm_unmap_dma_buf(struct sg_table *table, int domain_num,
+ int partition_num);
+#else
+static inline void msm_iommu_set_client_name(struct iommu_domain *domain,
+ char const *name)
+{
+}
+
+static inline struct iommu_domain *msm_get_iommu_domain(int subsys_id)
+{
+ return NULL;
+}
+
+static inline int msm_allocate_iova_address(unsigned int iommu_domain,
+ unsigned int partition_no,
+ unsigned long size,
+ unsigned long align,
+ unsigned long *iova)
+{
+ return -ENOMEM;
+}
+
+static inline void msm_free_iova_address(unsigned long iova,
+ unsigned int iommu_domain,
+ unsigned int partition_no,
+ unsigned long size) { }
+
+static inline int msm_use_iommu(void)
+{
+ return 0;
+}
+
+static inline int msm_iommu_map_extra(struct iommu_domain *domain,
+ unsigned long start_iova,
+ phys_addr_t phys_addr,
+ unsigned long size,
+ unsigned long page_size,
+ int cached)
+{
+ return -ENODEV;
+}
+
+static inline void msm_iommu_unmap_extra(struct iommu_domain *domain,
+ unsigned long start_iova,
+ unsigned long size,
+ unsigned long page_size)
+{
+}
+
+static inline int msm_iommu_map_contig_buffer(phys_addr_t phys,
+ unsigned int domain_no,
+ unsigned int partition_no,
+ unsigned long size,
+ unsigned long align,
+ unsigned long cached,
+ dma_addr_t *iova_val)
+{
+ *iova_val = phys;
+ return 0;
+}
+
+static inline void msm_iommu_unmap_contig_buffer(dma_addr_t iova,
+ unsigned int domain_no,
+ unsigned int partition_no,
+ unsigned long size)
+{
+}
+
+static inline int msm_register_domain(struct msm_iova_layout *layout)
+{
+ return -ENODEV;
+}
+
+static inline int msm_unregister_domain(struct iommu_domain *domain)
+{
+ return -ENODEV;
+}
+
+static inline int msm_map_dma_buf(struct dma_buf *dma_buf,
+ struct sg_table *table,
+ int domain_num, int partition_num,
+ unsigned long align,
+ unsigned long iova_length,
+ unsigned long *iova,
+ unsigned long *buffer_size,
+ unsigned long flags,
+ unsigned long iommu_flags)
+{
+ return -ENODEV;
+}
+
+static inline void msm_unmap_dma_buf(struct sg_table *table, int domain_num,
+ int partition_num)
+{
+
+}
+
+#endif /* CONFIG_QCOM_IOMMU */
+#endif /* _LINUX_MSM_IOMMU_DOMAINS_H */
\ No newline at end of file
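
To tie the structures in this header together, here is a hedged sketch of the registration flow; the partition geometry, the "example-client" name, and the assumption that msm_register_domain() hands back a non-negative domain number usable with msm_allocate_iova_address() are all illustrative.

#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/msm_iommu_domains.h>

static int example_setup_iova_domain(void)
{
	/* One illustrative 256 MB IOVA partition starting at 128 MB. */
	struct msm_iova_partition part = {
		.start = 0x08000000,
		.size  = SZ_256M,
	};
	struct msm_iova_layout layout = {
		.partitions   = &part,
		.npartitions  = 1,
		.client_name  = "example-client",	/* hypothetical name */
		.domain_flags = 0,
		.is_secure    = 0,
	};
	int domain_num;
	unsigned long iova;
	int ret;

	domain_num = msm_register_domain(&layout);	/* assumed: domain number or -errno */
	if (domain_num < 0)
		return domain_num;

	/* Carve a 1 MB, 4 KB-aligned region out of partition 0. */
	ret = msm_allocate_iova_address(domain_num, 0, SZ_1M, SZ_4K, &iova);
	if (ret)
		return ret;

	msm_free_iova_address(iova, domain_num, 0, SZ_1M);
	return 0;
}
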
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index cc7dd687a89d..4b8cb7da6b4a 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -55,7 +55,9 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
return of_node_get(cpu_dev->of_node);
}
-void of_dma_configure(struct device *dev, struct device_node *np);
+void of_dma_configure_masks(struct device *dev, struct device_node *np);
+int of_dma_configure_ops(struct device *dev, struct device_node *np);
+void of_dma_deconfigure(struct device *dev);
#else /* CONFIG_OF */
static inline int of_driver_match_device(struct device *dev,
@@ -98,7 +100,15 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
{
return NULL;
}
-static inline void of_dma_configure(struct device *dev, struct device_node *np)
+static inline void of_dma_configure_masks(struct device *dev,
+ struct device_node *np)
+{}
+static inline int of_dma_configure_ops(struct device *dev,
+ struct device_node *np)
+{
+ return 0;
+}
+static inline void of_dma_deconfigure(struct device *dev)
{}
#endif /* CONFIG_OF */
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index e817722ee3f0..16380b1f49b5 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -22,6 +22,7 @@ struct device;
enum dev_pm_opp_event {
OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
+ OPP_EVENT_ADJUST_VOLTAGE,
};
#if defined(CONFIG_PM_OPP)
@@ -50,6 +51,9 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq,
unsigned long u_volt);
void dev_pm_opp_remove(struct device *dev, unsigned long freq);
+int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
+ unsigned long u_volt);
+
int dev_pm_opp_enable(struct device *dev, unsigned long freq);
int dev_pm_opp_disable(struct device *dev, unsigned long freq);
@@ -114,6 +118,12 @@ static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
{
}
+static inline int dev_pm_opp_adjust_voltage(struct device *dev,
+ unsigned long freq, unsigned long u_volt)
+{
+ return 0;
+}
+
static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
return 0;
diff --git a/include/linux/qcom_iommu.h b/include/linux/qcom_iommu.h
new file mode 100644
index 000000000000..54f47b378ed6
--- /dev/null
+++ b/include/linux/qcom_iommu.h
@@ -0,0 +1,388 @@
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_IOMMU_H
+#define MSM_IOMMU_H
+
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/list.h>
+#include <linux/regulator/consumer.h>
+#include <soc/qcom/socinfo.h>
+
+/* Private pgprot flag */
+#define IOMMU_PRIV 16
+
+extern pgprot_t pgprot_kernel;
+extern struct bus_type msm_iommu_sec_bus_type;
+extern struct iommu_access_ops iommu_access_ops_v0;
+extern struct iommu_access_ops iommu_access_ops_v1;
+
+/* Domain attributes */
+#define MSM_IOMMU_DOMAIN_PT_CACHEABLE 0x1
+#define MSM_IOMMU_DOMAIN_PT_SECURE 0x2
+
+/* Mask for the cache policy attribute */
+#define MSM_IOMMU_CP_MASK 0x03
+
+#define DOMAIN_ATTR_QCOM_COHERENT_HTW_DISABLE DOMAIN_ATTR_MAX
+
+/* Maximum number of Machine IDs that we are allowing to be mapped to the same
+ * context bank. The number of MIDs mapped to the same CB does not affect
+ * performance, but there is a practical limit on how many distinct MIDs may
+ * be present. These mappings are typically determined at design time and are
+ * not expected to change at run time.
+ */
+#define MAX_NUM_MIDS 32
+
+/* Maximum number of SMT entries allowed by the system */
+#define MAX_NUM_SMR 128
+
+#define MAX_NUM_BFB_REGS 32
+
+/**
+ * struct msm_iommu_dev - a single IOMMU hardware instance
+ * name Human-readable name given to this IOMMU HW instance
+ * ncb Number of context banks present on this IOMMU HW instance
+ */
+struct msm_iommu_dev {
+ const char *name;
+ int ncb;
+ int ttbr_split;
+};
+
+/**
+ * struct msm_iommu_ctx_dev - an IOMMU context bank instance
+ * name Human-readable name given to this context bank
+ * num Index of this context bank within the hardware
+ * mids List of Machine IDs that are to be mapped into this context
+ * bank, terminated by -1. The MID is a set of signals on the
+ * AXI bus that identifies the function associated with a specific
+ * memory request. (See ARM spec).
+ */
+struct msm_iommu_ctx_dev {
+ const char *name;
+ int num;
+ int mids[MAX_NUM_MIDS];
+};
+
+/**
+ * struct msm_iommu_bfb_settings - a set of IOMMU BFB tuning parameters
+ * regs An array of register offsets to configure
+ * data Values to write to corresponding registers
+ * length Number of valid entries in the offset/val arrays
+ */
+struct msm_iommu_bfb_settings {
+ unsigned int regs[MAX_NUM_BFB_REGS];
+ unsigned int data[MAX_NUM_BFB_REGS];
+ int length;
+};
+
+/**
+ * struct msm_iommu_drvdata - A single IOMMU hardware instance
+ * @base: IOMMU config port base address (VA)
+ * @glb_base: IOMMU config port base address for global register space (VA)
+ * @phys_base: IOMMU physical base address.
+ * @ncb: The number of contexts on this IOMMU
+ * @irq: Interrupt number
+ * @core: The bus clock for this IOMMU hardware instance
+ * @iface: The clock for the IOMMU bus interconnect
+ * @name: Human-readable name of this IOMMU device
+ * @bfb_settings: Optional BFB performance tuning parameters
+ * @dev: Struct device this hardware instance is tied to
+ * @list: List head to link all iommus together
+ * @halt_enabled: Set to 1 if IOMMU halt is supported in the IOMMU, 0 otherwise.
+ * @ctx_attach_count: Count of how many contexts are attached.
+ * @bus_client: Bus client needed to vote for bus bandwidth.
+ * @needs_rem_spinlock: 1 if remote spinlock is needed, 0 otherwise
+ * @powered_on: Powered status of the IOMMU. 0 means powered off.
+ *
+ * A msm_iommu_drvdata holds the global driver data about a single piece
+ * of an IOMMU hardware instance.
+ */
+struct msm_iommu_drvdata {
+ void __iomem *base;
+ phys_addr_t phys_base;
+ void __iomem *glb_base;
+ void __iomem *cb_base;
+ void __iomem *smmu_local_base;
+ int ncb;
+ int ttbr_split;
+ struct clk *core;
+ struct clk *iface;
+ const char *name;
+ struct msm_iommu_bfb_settings *bfb_settings;
+ int sec_id;
+ struct device *dev;
+ struct list_head list;
+ int halt_enabled;
+ unsigned int ctx_attach_count;
+ unsigned int bus_client;
+ int needs_rem_spinlock;
+ int powered_on;
+ unsigned int model;
+};
+
+/**
+ * struct iommu_access_ops - Callbacks for accessing IOMMU
+ * @iommu_power_on: Turn on power to unit
+ * @iommu_power_off: Turn off power to unit
+ * @iommu_bus_vote: Vote for bus bandwidth
+ * @iommu_clk_on: Turn on clks to unit
+ * @iommu_clk_off: Turn off clks to unit
+ * @iommu_lock_initialize: Initialize the remote lock
+ * @iommu_lock_acquire: Acquire any locks needed
+ * @iommu_lock_release: Release locks needed
+ */
+struct iommu_access_ops {
+ int (*iommu_power_on)(struct msm_iommu_drvdata *);
+ void (*iommu_power_off)(struct msm_iommu_drvdata *);
+ int (*iommu_bus_vote)(struct msm_iommu_drvdata *drvdata,
+ unsigned int vote);
+ int (*iommu_clk_on)(struct msm_iommu_drvdata *);
+ void (*iommu_clk_off)(struct msm_iommu_drvdata *);
+ void * (*iommu_lock_initialize)(void);
+ void (*iommu_lock_acquire)(unsigned int need_extra_lock);
+ void (*iommu_lock_release)(unsigned int need_extra_lock);
+};
+
+void msm_iommu_add_drv(struct msm_iommu_drvdata *drv);
+void msm_iommu_remove_drv(struct msm_iommu_drvdata *drv);
+void program_iommu_bfb_settings(void __iomem *base,
+ const struct msm_iommu_bfb_settings *bfb_settings);
+void iommu_halt(const struct msm_iommu_drvdata *iommu_drvdata);
+void iommu_resume(const struct msm_iommu_drvdata *iommu_drvdata);
+
+/**
+ * struct msm_iommu_ctx_drvdata - an IOMMU context bank instance
+ * @num: Hardware context number of this context
+ * @pdev: Platform device associated wit this HW instance
+ * @attached_elm: List element for domains to track which devices are
+ * attached to them
+ * @attached_domain: Domain currently attached to this context (if any)
+ * @name: Human-readable name of this context device
+ * @sids: List of Stream IDs mapped to this context
+ * @nsid: Number of Stream IDs mapped to this context
+ * @secure_context: true if this is a secure context programmed by
+ *			the secure environment, false otherwise
+ * @asid: ASID used with this context.
+ * @attach_count: Number of times this context has been attached.
+ *
+ * A msm_iommu_ctx_drvdata holds the driver data for a single context bank
+ * within each IOMMU hardware instance
+ */
+struct msm_iommu_ctx_drvdata {
+ int num;
+ struct platform_device *pdev;
+ struct list_head attached_elm;
+ struct iommu_domain *attached_domain;
+ const char *name;
+ u32 sids[MAX_NUM_SMR];
+ unsigned int nsid;
+ unsigned int secure_context;
+ int asid;
+ int attach_count;
+ u32 sid_mask[MAX_NUM_SMR];
+ unsigned int n_sid_mask;
+};
+
+enum dump_reg {
+ DUMP_REG_FIRST,
+ DUMP_REG_FAR0 = DUMP_REG_FIRST,
+ DUMP_REG_FAR1,
+ DUMP_REG_PAR0,
+ DUMP_REG_PAR1,
+ DUMP_REG_FSR,
+ DUMP_REG_FSYNR0,
+ DUMP_REG_FSYNR1,
+ DUMP_REG_TTBR0_0,
+ DUMP_REG_TTBR0_1,
+ DUMP_REG_TTBR1_0,
+ DUMP_REG_TTBR1_1,
+ DUMP_REG_SCTLR,
+ DUMP_REG_ACTLR,
+ DUMP_REG_PRRR,
+ DUMP_REG_MAIR0 = DUMP_REG_PRRR,
+ DUMP_REG_NMRR,
+ DUMP_REG_MAIR1 = DUMP_REG_NMRR,
+ DUMP_REG_CBAR_N,
+ DUMP_REG_CBFRSYNRA_N,
+ MAX_DUMP_REGS,
+};
+
+enum dump_reg_type {
+ DRT_CTX_REG,
+ DRT_GLOBAL_REG,
+ DRT_GLOBAL_REG_N,
+};
+
+enum model_id {
+ QSMMUv1 = 1,
+ QSMMUv2,
+ MMU_500 = 500,
+ MAX_MODEL,
+};
+
+struct dump_regs_tbl_entry {
+ /*
+ * To keep things context-bank-agnostic, we only store the
+ * register offset in `reg_offset'
+ */
+ unsigned int reg_offset;
+ const char *name;
+ int must_be_present;
+ enum dump_reg_type dump_reg_type;
+};
+extern struct dump_regs_tbl_entry dump_regs_tbl[MAX_DUMP_REGS];
+
+#define COMBINE_DUMP_REG(upper, lower) (((u64) upper << 32) | lower)
+
+struct msm_iommu_context_reg {
+ uint32_t val;
+ bool valid;
+};
+
+void print_ctx_regs(struct msm_iommu_context_reg regs[]);
+
+/*
+ * Interrupt handler for the IOMMU context fault interrupt. Hooking the
+ * interrupt is not supported in the API yet, but this will print an error
+ * message and dump useful IOMMU registers.
+ */
+irqreturn_t msm_iommu_global_fault_handler(int irq, void *dev_id);
+irqreturn_t msm_iommu_fault_handler(int irq, void *dev_id);
+irqreturn_t msm_iommu_fault_handler_v2(int irq, void *dev_id);
+irqreturn_t msm_iommu_secure_fault_handler_v2(int irq, void *dev_id);
+
+enum {
+ PROC_APPS,
+ PROC_GPU,
+ PROC_MAX
+};
+
+/* Expose structure to allow kgsl iommu driver to use the same structure to
+ * communicate to GPU the addresses of the flag and turn variables.
+ */
+struct remote_iommu_petersons_spinlock {
+ uint32_t flag[PROC_MAX];
+ uint32_t turn;
+};
+
+#ifdef CONFIG_QCOM_IOMMU_V1
+void *msm_iommu_lock_initialize(void);
+void msm_iommu_mutex_lock(void);
+void msm_iommu_mutex_unlock(void);
+void msm_set_iommu_access_ops(struct iommu_access_ops *ops);
+struct iommu_access_ops *msm_get_iommu_access_ops(void);
+#else
+static inline void *msm_iommu_lock_initialize(void)
+{
+ return NULL;
+}
+static inline void msm_iommu_mutex_lock(void) { }
+static inline void msm_iommu_mutex_unlock(void) { }
+static inline void msm_set_iommu_access_ops(struct iommu_access_ops *ops)
+{
+
+}
+static inline struct iommu_access_ops *msm_get_iommu_access_ops(void)
+{
+ return NULL;
+}
+#endif
+
+#ifdef CONFIG_QCOM_IOMMU_V1
+/*
+ * Look up an IOMMU context device by its context name. NULL if none found.
+ * Useful for testing and drivers that do not yet fully have IOMMU stuff in
+ * their platform devices.
+ */
+struct device *msm_iommu_get_ctx(const char *ctx_name);
+#else
+static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
+{
+ return NULL;
+}
+#endif
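
For instance, a caller might probe for a context bank by name; the "example_ctx" name below is purely hypothetical.

#include <linux/kernel.h>
#include <linux/qcom_iommu.h>

static bool example_ctx_present(void)
{
	/* NULL means no context bank of that name has been probed. */
	return msm_iommu_get_ctx("example_ctx") != NULL;
}
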
+
+/*
+ * Function to program the global registers of an IOMMU securely.
+ * This should only be called on IOMMUs for which kernel programming
+ * of global registers is not possible
+ */
+void msm_iommu_sec_set_access_ops(struct iommu_access_ops *access_ops);
+int msm_iommu_sec_program_iommu(struct msm_iommu_drvdata *drvdata,
+ struct msm_iommu_ctx_drvdata *ctx_drvdata);
+int is_vfe_secure(void);
+
+#ifdef CONFIG_MSM_IOMMU_V0
+static inline int msm_soc_version_supports_iommu_v0(void)
+{
+ static int soc_supports_v0 = -1;
+#ifdef CONFIG_OF
+ struct device_node *node;
+#endif
+
+ if (soc_supports_v0 != -1)
+ return soc_supports_v0;
+
+#ifdef CONFIG_OF
+ node = of_find_compatible_node(NULL, NULL, "qcom,msm-smmu-v0");
+ if (node) {
+ soc_supports_v0 = 1;
+ of_node_put(node);
+ return 1;
+ }
+#endif
+ if (cpu_is_msm8960() &&
+ SOCINFO_VERSION_MAJOR(socinfo_get_version()) < 2) {
+ soc_supports_v0 = 0;
+ return 0;
+ }
+
+ if (cpu_is_msm8x60() &&
+ (SOCINFO_VERSION_MAJOR(socinfo_get_version()) != 2 ||
+ SOCINFO_VERSION_MINOR(socinfo_get_version()) < 1)) {
+ soc_supports_v0 = 0;
+ return 0;
+ }
+
+ soc_supports_v0 = 1;
+ return 1;
+}
+#else
+static inline int msm_soc_version_supports_iommu_v0(void)
+{
+ return 0;
+}
+#endif
+
+int msm_iommu_get_scm_call_avail(void);
+void msm_iommu_check_scm_call_avail(void);
+
+u32 msm_iommu_get_mair0(void);
+u32 msm_iommu_get_mair1(void);
+u32 msm_iommu_get_prrr(void);
+u32 msm_iommu_get_nmrr(void);
+
+/* events for notifiers passed to msm_iommu_register_notify */
+#define TLB_SYNC_TIMEOUT 1
+
+#ifdef CONFIG_QCOM_IOMMU_V1
+void msm_iommu_register_notify(struct notifier_block *nb);
+#else
+static inline void msm_iommu_register_notify(struct notifier_block *nb)
+{
+}
+#endif
+
+#endif
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index 6e7d5ec65838..a3fcad228907 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -13,6 +13,8 @@
#ifndef __QCOM_SCM_H
#define __QCOM_SCM_H
+#include <linux/platform_device.h>
+
extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus);
@@ -27,6 +29,12 @@ extern bool qcom_scm_hdcp_available(void);
extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
u32 *resp);
+extern bool qcom_scm_pas_supported(u32 peripheral);
+extern int qcom_scm_pas_init_image(struct device *dev, u32 peripheral, const void *metadata, size_t size);
+extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size);
+extern int qcom_scm_pas_auth_and_reset(u32 peripheral);
+extern int qcom_scm_pas_shutdown(u32 peripheral);
+
#define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0
#define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1
@@ -36,4 +44,28 @@ extern void qcom_scm_cpu_power_down(u32 flags);
extern u32 qcom_scm_get_version(void);
+extern int qcom_scm_pil_init_image_cmd(u32 proc, u64 image_addr);
+extern int qcom_scm_pil_mem_setup_cmd(u32 proc, u64 start_addr, u32 len);
+extern int qcom_scm_pil_auth_and_reset_cmd(u32 proc);
+extern int qcom_scm_pil_shutdown_cmd(u32 proc);
+
+extern int qcom_scm_iommu_dump_fault_regs(u32 id, u32 context, u64 addr,
+ u32 len);
+extern int qcom_scm_iommu_set_cp_pool_size(u32 size, u32 spare);
+extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, int psize[2]);
+extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
+extern int qcom_scm_iommu_secure_map(u64 list, u32 list_size, u32 size,
+ u32 id, u32 ctx_id, u64 va,
+ u32 info_size, u32 flags);
+extern int qcom_scm_iommu_secure_unmap(u32 id, u32 ctx_id, u64 va,
+ u32 size, u32 flags);
+
+extern int qcom_scm_is_call_available(u32 svc_id, u32 cmd_id);
+extern int qcom_scm_get_feat_version(u32 feat);
+extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
+
+extern int qcom_scm_set_video_state(u32 state, u32 spare);
+extern int qcom_scm_mem_protect_video_var(u32 start, u32 size,
+ u32 nonpixel_start,
+ u32 nonpixel_size);
#endif
diff --git a/include/linux/regulator/qcom_smd-regulator.h b/include/linux/regulator/qcom_smd-regulator.h
new file mode 100644
index 000000000000..16029448d6b6
--- /dev/null
+++ b/include/linux/regulator/qcom_smd-regulator.h
@@ -0,0 +1,30 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __QCOM_SMD_REGULATOR_H_
+#define __QCOM_SMD_REGULATOR_H_
+
+#if IS_ENABLED(CONFIG_REGULATOR_QCOM_SMD_RPM)
+int qcom_rpm_set_floor(struct regulator *regulator, int floor);
+int qcom_rpm_set_corner(struct regulator *regulator, int corner);
+#else
+static inline int qcom_rpm_set_floor(struct regulator *regulator, int floor)
+{
+ return -EINVAL;
+}
+
+static inline int qcom_rpm_set_corner(struct regulator *regulator, int corner)
+{
+ return -EINVAL;
+}
+#endif
+
+#endif
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index 9c4e1384f636..1c457a8dd5a6 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -365,6 +365,8 @@ enum rproc_state {
/**
* enum rproc_crash_type - remote processor crash types
* @RPROC_MMUFAULT: iommu fault
+ * @RPROC_WATCHDOG: watchdog bite
+ * @RPROC_FATAL_ERROR: fatal error
*
* Each element of the enum is used as an array index. So that, the value of
* the elements should be always something sane.
@@ -373,6 +375,8 @@ enum rproc_state {
*/
enum rproc_crash_type {
RPROC_MMUFAULT,
+ RPROC_WATCHDOG,
+ RPROC_FATAL_ERROR,
};
/**
diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h
index d7e50aa6a4ac..8a955e00357f 100644
--- a/include/linux/soc/qcom/smd.h
+++ b/include/linux/soc/qcom/smd.h
@@ -5,8 +5,64 @@
#include <linux/mod_devicetable.h>
struct qcom_smd;
-struct qcom_smd_channel;
struct qcom_smd_lookup;
+struct qcom_smd_device;
+
+typedef int (*qcom_smd_cb_t)(struct qcom_smd_device *, const void *, size_t);
+
+/*
+ * SMD channel states.
+ */
+enum smd_channel_state {
+ SMD_CHANNEL_CLOSED,
+ SMD_CHANNEL_OPENING,
+ SMD_CHANNEL_OPENED,
+ SMD_CHANNEL_FLUSHING,
+ SMD_CHANNEL_CLOSING,
+ SMD_CHANNEL_RESET,
+ SMD_CHANNEL_RESET_OPENING
+};
+
+struct qcom_smd_channel {
+ struct qcom_smd_edge *edge;
+
+ struct qcom_smd_device *qsdev;
+
+ char *name;
+ enum smd_channel_state state;
+ enum smd_channel_state remote_state;
+
+ struct smd_channel_info *tx_info;
+ struct smd_channel_info *rx_info;
+
+ struct smd_channel_info_word *tx_info_word;
+ struct smd_channel_info_word *rx_info_word;
+
+ struct mutex tx_lock;
+ wait_queue_head_t fblockread_event;
+
+ void *tx_fifo;
+ void *rx_fifo;
+ int fifo_size;
+
+ void *bounce_buffer;
+ qcom_smd_cb_t cb;
+
+ spinlock_t recv_lock;
+
+ int pkt_size;
+
+ struct list_head list;
+ struct list_head dev_list;
+};
+
+/**
+ * struct qcom_smd_id - struct used for matching a smd device
+ * @name: name of the channel
+ */
+struct qcom_smd_id {
+ char name[20];
+};
/**
* struct qcom_smd_device - smd device struct
@@ -21,6 +77,7 @@ struct qcom_smd_device {
/**
* struct qcom_smd_driver - smd driver struct
* @driver: underlying device driver
+ * @smd_match_table: static channel match table
* @probe: invoked when the smd channel is found
* @remove: invoked when the smd channel is closed
* @callback: invoked when an inbound message is received on the channel,
@@ -29,9 +86,11 @@ struct qcom_smd_device {
*/
struct qcom_smd_driver {
struct device_driver driver;
+ const struct qcom_smd_id *smd_match_table;
+
int (*probe)(struct qcom_smd_device *dev);
void (*remove)(struct qcom_smd_device *dev);
- int (*callback)(struct qcom_smd_device *, const void *, size_t);
+ qcom_smd_cb_t callback;
};
int qcom_smd_driver_register(struct qcom_smd_driver *drv);
@@ -43,4 +102,8 @@ void qcom_smd_driver_unregister(struct qcom_smd_driver *drv);
int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len);
+struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_device *sdev,
+ const char *name,
+ qcom_smd_cb_t cb);
+
#endif
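
As a hedged sketch of how a channel driver would use the structures above, the fragment below registers a driver with qcom_smd_driver_register(); the channel name "example_channel", the driver name, and the payload handling are invented for illustration.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/soc/qcom/smd.h>

static int example_smd_callback(struct qcom_smd_device *sdev,
				const void *data, size_t count)
{
	pr_debug("example_smd: received %zu bytes\n", count);
	return 0;
}

static int example_smd_probe(struct qcom_smd_device *sdev)
{
	pr_info("example_smd: channel opened\n");
	return 0;
}

static const struct qcom_smd_id example_smd_match[] = {
	{ .name = "example_channel" },	/* hypothetical channel name */
	{}
};

static struct qcom_smd_driver example_smd_driver = {
	.driver = {
		.name = "example_smd",
	},
	.smd_match_table = example_smd_match,
	.probe = example_smd_probe,
	.callback = example_smd_callback,
};

static int __init example_smd_init(void)
{
	return qcom_smd_driver_register(&example_smd_driver);
}
module_init(example_smd_init);

static void __exit example_smd_exit(void)
{
	qcom_smd_driver_unregister(&example_smd_driver);
}
module_exit(example_smd_exit);

MODULE_LICENSE("GPL v2");
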
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index a41833cd184c..c5cddc6901d0 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -5,9 +5,28 @@
#ifndef __LINUX_USB_CHIPIDEA_H
#define __LINUX_USB_CHIPIDEA_H
+#include <linux/extcon.h>
#include <linux/usb/otg.h>
struct ci_hdrc;
+
+/**
+ * struct ci_hdrc_cable - structure for external connector cable state tracking
+ * @state: current state of the line
+ * @changed: set to true when an extcon event happens
+ * @edev: device which generates the events
+ * @ci: driver state of the chipidea device
+ * @nb: holds the event notification callback
+ * @conn: used for notification registration
+ */
+struct ci_hdrc_cable {
+ bool state;
+ bool changed;
+ struct extcon_dev *edev;
+ struct ci_hdrc *ci;
+ struct notifier_block nb;
+};
+
struct ci_hdrc_platform_data {
const char *name;
/* offset of the capability registers */
@@ -48,6 +67,10 @@ struct ci_hdrc_platform_data {
u32 ahb_burst_config;
u32 tx_burst_size;
u32 rx_burst_size;
+
+ /* VBUS and ID signal state tracking, using extcon framework */
+ struct ci_hdrc_cable vbus_extcon;
+ struct ci_hdrc_cable id_extcon;
};
/* Default offset of capability registers */
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 7ca6690355ea..ee5b2dd922f6 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -58,6 +58,7 @@
#define HCI_RS232 4
#define HCI_PCI 5
#define HCI_SDIO 6
+#define HCI_SMD 7
/* HCI controller types */
#define HCI_BREDR 0x00
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
new file mode 100644
index 000000000000..29715506ed46
--- /dev/null
+++ b/include/soc/qcom/socinfo.h
@@ -0,0 +1,610 @@
+/* Copyright (c) 2009-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_SOCINFO_H_
+#define _ARCH_ARM_MACH_MSM_SOCINFO_H_
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/of_fdt.h>
+#include <linux/of.h>
+
+#include <asm/cputype.h>
+/*
+ * SOC version type with major number in the upper 16 bits and minor
+ * number in the lower 16 bits. For example:
+ * 1.0 -> 0x00010000
+ * 2.3 -> 0x00020003
+ */
+#define SOCINFO_VERSION_MAJOR(ver) ((ver & 0xffff0000) >> 16)
+#define SOCINFO_VERSION_MINOR(ver) (ver & 0x0000ffff)
+
+#ifdef CONFIG_OF
+#define of_board_is_sim() of_machine_is_compatible("qcom,sim")
+#define of_board_is_rumi() of_machine_is_compatible("qcom,rumi")
+#define of_board_is_fluid() of_machine_is_compatible("qcom,fluid")
+#define of_board_is_liquid() of_machine_is_compatible("qcom,liquid")
+#define of_board_is_dragonboard() \
+ of_machine_is_compatible("qcom,dragonboard")
+#define of_board_is_cdp() of_machine_is_compatible("qcom,cdp")
+#define of_board_is_mtp() of_machine_is_compatible("qcom,mtp")
+#define of_board_is_qrd() of_machine_is_compatible("qcom,qrd")
+#define of_board_is_xpm() of_machine_is_compatible("qcom,xpm")
+#define of_board_is_skuf() of_machine_is_compatible("qcom,skuf")
+#define of_board_is_sbc() of_machine_is_compatible("qcom,sbc")
+
+#define machine_is_msm8974() of_machine_is_compatible("qcom,msm8974")
+#define machine_is_msm9625() of_machine_is_compatible("qcom,msm9625")
+#define machine_is_msm8610() of_machine_is_compatible("qcom,msm8610")
+#define machine_is_msm8226() of_machine_is_compatible("qcom,msm8226")
+#define machine_is_apq8074() of_machine_is_compatible("qcom,apq8074")
+#define machine_is_msm8926() of_machine_is_compatible("qcom,msm8926")
+
+#define early_machine_is_msm8610() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8610")
+#define early_machine_is_msmferrum() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmferrum")
+#define early_machine_is_msm8916() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8916")
+#define early_machine_is_msm8936() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8936")
+#define early_machine_is_msm8939() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8939")
+#define early_machine_is_apq8084() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,apq8084")
+#define early_machine_is_mdm9630() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,mdm9630")
+#define early_machine_is_msmzirc() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmzirc")
+#define early_machine_is_fsm9900() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,fsm9900")
+#define early_machine_is_msm8994() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8994")
+#define early_machine_is_msm8992() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8992")
+#define early_machine_is_fsm9010() \
+ of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,fsm9010")
+#else
+#define of_board_is_sim() 0
+#define of_board_is_rumi() 0
+#define of_board_is_fluid() 0
+#define of_board_is_liquid() 0
+#define of_board_is_dragonboard() 0
+#define of_board_is_cdp() 0
+#define of_board_is_mtp() 0
+#define of_board_is_qrd() 0
+#define of_board_is_xpm() 0
+#define of_board_is_skuf() 0
+#define of_board_is_sbc() 0
+
+#define machine_is_msm8974() 0
+#define machine_is_msm9625() 0
+#define machine_is_msm8610() 0
+#define machine_is_msm8226() 0
+#define machine_is_apq8074() 0
+#define machine_is_msm8926() 0
+
+#define early_machine_is_msm8610() 0
+#define early_machine_is_msmferrum() 0
+#define early_machine_is_msm8916() 0
+#define early_machine_is_msm8936() 0
+#define early_machine_is_msm8939() 0
+#define early_machine_is_apq8084() 0
+#define early_machine_is_mdm9630() 0
+#define early_machine_is_fsm9900() 0
+#define early_machine_is_fsm9010() 0
+#endif
+
+#define PLATFORM_SUBTYPE_MDM 1
+#define PLATFORM_SUBTYPE_INTERPOSERV3 2
+#define PLATFORM_SUBTYPE_SGLTE 6
+
+enum msm_cpu {
+ MSM_CPU_UNKNOWN = 0,
+ MSM_CPU_7X01,
+ MSM_CPU_7X25,
+ MSM_CPU_7X27,
+ MSM_CPU_8X50,
+ MSM_CPU_8X50A,
+ MSM_CPU_7X30,
+ MSM_CPU_8X55,
+ MSM_CPU_8X60,
+ MSM_CPU_8960,
+ MSM_CPU_8960AB,
+ MSM_CPU_7X27A,
+ FSM_CPU_9XXX,
+ MSM_CPU_7X25A,
+ MSM_CPU_7X25AA,
+ MSM_CPU_7X25AB,
+ MSM_CPU_8064,
+ MSM_CPU_8064AB,
+ MSM_CPU_8064AA,
+ MSM_CPU_8930,
+ MSM_CPU_8930AA,
+ MSM_CPU_8930AB,
+ MSM_CPU_7X27AA,
+ MSM_CPU_9615,
+ MSM_CPU_8974,
+ MSM_CPU_8974PRO_AA,
+ MSM_CPU_8974PRO_AB,
+ MSM_CPU_8974PRO_AC,
+ MSM_CPU_8627,
+ MSM_CPU_8625,
+ MSM_CPU_9625,
+ MSM_CPU_FERRUM,
+ MSM_CPU_8916,
+ MSM_CPU_8936,
+ MSM_CPU_8939,
+ MSM_CPU_8226,
+ MSM_CPU_8610,
+ MSM_CPU_8625Q,
+ MSM_CPU_8084,
+ MSM_CPU_9630,
+ FSM_CPU_9900,
+ MSM_CPU_ZIRC,
+ MSM_CPU_8994,
+ MSM_CPU_8992,
+ FSM_CPU_9010,
+};
+
+struct msm_soc_info {
+ enum msm_cpu generic_soc_type;
+ char *soc_id_string;
+};
+
+enum pmic_model {
+ PMIC_MODEL_PM8058 = 13,
+ PMIC_MODEL_PM8028 = 14,
+ PMIC_MODEL_PM8901 = 15,
+ PMIC_MODEL_PM8027 = 16,
+ PMIC_MODEL_ISL_9519 = 17,
+ PMIC_MODEL_PM8921 = 18,
+ PMIC_MODEL_PM8018 = 19,
+ PMIC_MODEL_PM8015 = 20,
+ PMIC_MODEL_PM8014 = 21,
+ PMIC_MODEL_PM8821 = 22,
+ PMIC_MODEL_PM8038 = 23,
+ PMIC_MODEL_PM8922 = 24,
+ PMIC_MODEL_PM8917 = 25,
+ PMIC_MODEL_UNKNOWN = 0xFFFFFFFF
+};
+
+enum msm_cpu socinfo_get_msm_cpu(void);
+uint32_t socinfo_get_id(void);
+uint32_t socinfo_get_version(void);
+uint32_t socinfo_get_raw_id(void);
+char *socinfo_get_build_id(void);
+uint32_t socinfo_get_platform_type(void);
+uint32_t socinfo_get_platform_subtype(void);
+uint32_t socinfo_get_platform_version(void);
+enum pmic_model socinfo_get_pmic_model(void);
+uint32_t socinfo_get_pmic_die_revision(void);
+int __init socinfo_init(void) __must_check;
+const int read_msm_cpu_type(void);
+const int get_core_count(void);
+const int cpu_is_krait(void);
+const int cpu_is_krait_v1(void);
+const int cpu_is_krait_v2(void);
+const int cpu_is_krait_v3(void);
+
+static inline int cpu_is_msm7x01(void)
+{
+#ifdef CONFIG_ARCH_MSM7X01A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X01;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x25(void)
+{
+#ifdef CONFIG_ARCH_MSM7X25
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X25;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x27(void)
+{
+#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A)
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X27;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x27a(void)
+{
+#ifdef CONFIG_ARCH_MSM7X27A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X27A;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x27aa(void)
+{
+#ifdef CONFIG_ARCH_MSM7X27A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X27AA;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x25a(void)
+{
+#ifdef CONFIG_ARCH_MSM7X27A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X25A;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x25aa(void)
+{
+#ifdef CONFIG_ARCH_MSM7X27A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X25AA;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x25ab(void)
+{
+#ifdef CONFIG_ARCH_MSM7X27A
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X25AB;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm7x30(void)
+{
+#ifdef CONFIG_ARCH_MSM7X30
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_7X30;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_qsd8x50(void)
+{
+#ifdef CONFIG_ARCH_QSD8X50
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8X50;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8x55(void)
+{
+#ifdef CONFIG_ARCH_MSM7X30
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8X55;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8x60(void)
+{
+#ifdef CONFIG_ARCH_MSM8X60
+ return read_msm_cpu_type() == MSM_CPU_8X60;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8960(void)
+{
+ return 0;
+}
+
+static inline int cpu_is_msm8960ab(void)
+{
+ return 0;
+}
+
+static inline int cpu_is_apq8064(void)
+{
+#ifdef CONFIG_ARCH_APQ8064
+ return read_msm_cpu_type() == MSM_CPU_8064;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_apq8064ab(void)
+{
+#ifdef CONFIG_ARCH_APQ8064
+ return read_msm_cpu_type() == MSM_CPU_8064AB;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_apq8064aa(void)
+{
+#ifdef CONFIG_ARCH_APQ8064
+ return read_msm_cpu_type() == MSM_CPU_8064AA;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8930(void)
+{
+#ifdef CONFIG_ARCH_MSM8930
+ return read_msm_cpu_type() == MSM_CPU_8930;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8930aa(void)
+{
+#ifdef CONFIG_ARCH_MSM8930
+ return read_msm_cpu_type() == MSM_CPU_8930AA;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8930ab(void)
+{
+#ifdef CONFIG_ARCH_MSM8930
+ return read_msm_cpu_type() == MSM_CPU_8930AB;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8627(void)
+{
+/* 8930 and 8627 will share the same CONFIG_ARCH type unless otherwise needed */
+#ifdef CONFIG_ARCH_MSM8930
+ return read_msm_cpu_type() == MSM_CPU_8627;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_fsm9xxx(void)
+{
+#ifdef CONFIG_ARCH_FSM9XXX
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == FSM_CPU_9XXX;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm9615(void)
+{
+#ifdef CONFIG_ARCH_MSM9615
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_9615;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8625(void)
+{
+#ifdef CONFIG_ARCH_MSM8625
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8625;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8974(void)
+{
+#ifdef CONFIG_ARCH_MSM8974
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8974;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8974pro_aa(void)
+{
+#ifdef CONFIG_ARCH_MSM8974
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8974PRO_AA;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8974pro_ab(void)
+{
+#ifdef CONFIG_ARCH_MSM8974
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8974PRO_AB;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8974pro_ac(void)
+{
+#ifdef CONFIG_ARCH_MSM8974
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8974PRO_AC;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msmferrum(void)
+{
+#ifdef CONFIG_ARCH_MSMFERRUM
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_FERRUM;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8916(void)
+{
+#ifdef CONFIG_ARCH_MSM8916
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8916;
+#else
+ return 0;
+#endif
+
+}
+
+static inline int cpu_is_msm8936(void)
+{
+#ifdef CONFIG_ARCH_MSM8916
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8936;
+#else
+ return 0;
+#endif
+
+}
+
+static inline int cpu_is_msm8939(void)
+{
+#ifdef CONFIG_ARCH_MSM8916
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8939;
+#else
+ return 0;
+#endif
+
+}
+
+static inline int cpu_is_msm8226(void)
+{
+#ifdef CONFIG_ARCH_MSM8226
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8226;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8610(void)
+{
+#ifdef CONFIG_ARCH_MSM8610
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8610;
+#else
+ return 0;
+#endif
+}
+
+static inline int cpu_is_msm8625q(void)
+{
+#ifdef CONFIG_ARCH_MSM8625
+ enum msm_cpu cpu = socinfo_get_msm_cpu();
+
+ BUG_ON(cpu == MSM_CPU_UNKNOWN);
+ return cpu == MSM_CPU_8625Q;
+#else
+ return 0;
+#endif
+}
+
+static inline int soc_class_is_msm8960(void)
+{
+ return cpu_is_msm8960() || cpu_is_msm8960ab();
+}
+
+static inline int soc_class_is_apq8064(void)
+{
+ return cpu_is_apq8064() || cpu_is_apq8064ab() || cpu_is_apq8064aa();
+}
+
+static inline int soc_class_is_msm8930(void)
+{
+ return cpu_is_msm8930() || cpu_is_msm8930aa() || cpu_is_msm8930ab() ||
+ cpu_is_msm8627();
+}
+
+static inline int soc_class_is_msm8974(void)
+{
+ return cpu_is_msm8974() || cpu_is_msm8974pro_aa() ||
+ cpu_is_msm8974pro_ab() || cpu_is_msm8974pro_ac();
+}
+
+#endif
diff --git a/kernel/configs/distro.config b/kernel/configs/distro.config
new file mode 100644
index 000000000000..dc34348884f6
--- /dev/null
+++ b/kernel/configs/distro.config
@@ -0,0 +1,230 @@
+# USB camera
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_USB_M5602=m
+CONFIG_USB_STV06XX=m
+CONFIG_USB_GL860=m
+CONFIG_USB_GSPCA_BENQ=m
+CONFIG_USB_GSPCA_CONEX=m
+CONFIG_USB_GSPCA_CPIA1=m
+CONFIG_USB_GSPCA_DTCS033=m
+CONFIG_USB_GSPCA_ETOMS=m
+CONFIG_USB_GSPCA_FINEPIX=m
+CONFIG_USB_GSPCA_JEILINJ=m
+CONFIG_USB_GSPCA_JL2005BCD=m
+CONFIG_USB_GSPCA_KINECT=m
+CONFIG_USB_GSPCA_KONICA=m
+CONFIG_USB_GSPCA_MARS=m
+CONFIG_USB_GSPCA_MR97310A=m
+CONFIG_USB_GSPCA_NW80X=m
+CONFIG_USB_GSPCA_OV519=m
+CONFIG_USB_GSPCA_OV534=m
+CONFIG_USB_GSPCA_OV534_9=m
+CONFIG_USB_GSPCA_PAC207=m
+CONFIG_USB_GSPCA_PAC7302=m
+CONFIG_USB_GSPCA_PAC7311=m
+CONFIG_USB_GSPCA_SE401=m
+CONFIG_USB_GSPCA_SN9C2028=m
+CONFIG_USB_GSPCA_SN9C20X=m
+CONFIG_USB_GSPCA_SONIXB=m
+CONFIG_USB_GSPCA_SONIXJ=m
+CONFIG_USB_GSPCA_SPCA500=m
+CONFIG_USB_GSPCA_SPCA501=m
+CONFIG_USB_GSPCA_SPCA505=m
+CONFIG_USB_GSPCA_SPCA506=m
+CONFIG_USB_GSPCA_SPCA508=m
+CONFIG_USB_GSPCA_SPCA561=m
+CONFIG_USB_GSPCA_SPCA1528=m
+CONFIG_USB_GSPCA_SQ905=m
+CONFIG_USB_GSPCA_SQ905C=m
+CONFIG_USB_GSPCA_SQ930X=m
+CONFIG_USB_GSPCA_STK014=m
+CONFIG_USB_GSPCA_STK1135=m
+CONFIG_USB_GSPCA_STV0680=m
+CONFIG_USB_GSPCA_SUNPLUS=m
+CONFIG_USB_GSPCA_T613=m
+CONFIG_USB_GSPCA_TOPRO=m
+CONFIG_USB_GSPCA_TOUPTEK=m
+CONFIG_USB_GSPCA_TV8532=m
+CONFIG_USB_GSPCA_VC032X=m
+CONFIG_USB_GSPCA_VICAM=m
+CONFIG_USB_GSPCA_XIRLINK_CIT=m
+CONFIG_USB_GSPCA_ZC3XX=m
+CONFIG_USB_PWC=m
+CONFIG_USB_ZR364XX=m
+CONFIG_USB_STKWEBCAM=m
+CONFIG_USB_S2255=m
+
+# USB serial
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_CONSOLE=y
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_SIMPLE=m
+CONFIG_USB_SERIAL_AIRCABLE=m
+CONFIG_USB_SERIAL_ARK3116=m
+CONFIG_USB_SERIAL_BELKIN=m
+CONFIG_USB_SERIAL_CH341=m
+CONFIG_USB_SERIAL_WHITEHEAT=m
+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
+CONFIG_USB_SERIAL_CP210X=m
+CONFIG_USB_SERIAL_CYPRESS_M8=m
+CONFIG_USB_SERIAL_EMPEG=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_VISOR=m
+CONFIG_USB_SERIAL_IPAQ=m
+CONFIG_USB_SERIAL_IR=m
+CONFIG_USB_SERIAL_EDGEPORT=m
+CONFIG_USB_SERIAL_EDGEPORT_TI=m
+CONFIG_USB_SERIAL_F81232=m
+CONFIG_USB_SERIAL_GARMIN=m
+CONFIG_USB_SERIAL_IPW=m
+CONFIG_USB_SERIAL_IUU=m
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
+CONFIG_USB_SERIAL_KLSI=m
+CONFIG_USB_SERIAL_KOBIL_SCT=m
+CONFIG_USB_SERIAL_MCT_U232=m
+CONFIG_USB_SERIAL_METRO=m
+CONFIG_USB_SERIAL_MOS7720=m
+CONFIG_USB_SERIAL_MOS7840=m
+CONFIG_USB_SERIAL_MXUPORT=m
+CONFIG_USB_SERIAL_NAVMAN=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_OTI6858=m
+CONFIG_USB_SERIAL_QCAUX=m
+CONFIG_USB_SERIAL_QUALCOMM=m
+CONFIG_USB_SERIAL_SPCP8X5=m
+CONFIG_USB_SERIAL_SAFE=m
+CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+CONFIG_USB_SERIAL_SYMBOL=m
+CONFIG_USB_SERIAL_TI=m
+CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_XIRCOM=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_SERIAL_OMNINET=m
+CONFIG_USB_SERIAL_OPTICON=m
+CONFIG_USB_SERIAL_XSENS_MT=m
+CONFIG_USB_SERIAL_WISHBONE=m
+CONFIG_USB_SERIAL_SSU100=m
+CONFIG_USB_SERIAL_QT2=m
+
+# USB gadget
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_DEBUG_FILES=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=m
+CONFIG_USB_ZERO=m
+CONFIG_USB_ETH=m
+CONFIG_USB_MASS_STORAGE=m
+CONFIG_USB_G_SERIAL=m
+CONFIG_NEW_LEDS=y
+
+# USB Eth
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+CONFIG_USB_NET_SR9700=m
+CONFIG_USB_NET_SR9800=m
+CONFIG_USB_NET_SMSC75XX=m
+CONFIG_USB_NET_SMSC95XX=m
+CONFIG_USB_NET_MCS7830=m
+
+# LEDs
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_ONESHOT=y
+CONFIG_LEDS_TRIGGER_GPIO=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_MAC80211_LEDS=y
+
+# systemd
+CONFIG_IPV6=y
+CONFIG_NAMESPACES=y
+CONFIG_NET_NS=y
+CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
+CONFIG_DEVTMPFS=y
+CONFIG_CGROUPS=y
+CONFIG_INOTIFY_USER=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EPOLL=y
+CONFIG_NET=y
+CONFIG_SYSFS=y
+CONFIG_PROC_FS=y
+CONFIG_FHANDLE=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+CONFIG_BLK_CGROUP=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+CONFIG_EXT3_FS_POSIX_ACL=y
+CONFIG_EXT3_FS_SECURITY=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_DEBUG=y
+
+# NFS server
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=m
+CONFIG_NFSD_V3=y
+CONFIG_NFSD_V3_ACL=y
+
+# HID
+CONFIG_HID_APPLE=y
+CONFIG_HID_LOGITECH=m
+CONFIG_HID_MAGICMOUSE=m
+CONFIG_HID_MICROSOFT=m
+CONFIG_HID_PLANTRONICS=m
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_USB_COMPOSITE=m
+CONFIG_HID_MULTITOUCH=m
+
+# misc
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_PRINTK_TIME=y
+CONFIG_STACKTRACE=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_PROVE_LOCKING=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_SND_USB=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_RFKILL=y
+CONFIG_BINFMT_MISC=m
+CONFIG_DYNAMIC_DEBUG=y
+
+# PPP
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
diff --git a/mm/memblock.c b/mm/memblock.c
index 1c7b647e5897..29cd3726374f 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1558,6 +1558,12 @@ int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size
memblock.memory.regions[idx].size) >= end;
}
+int __init_memblock memblock_overlaps_memory(phys_addr_t base, phys_addr_t size)
+{
+ memblock_cap_size(base, &size);
+ return memblock_overlaps_region(&memblock.memory, base, size) >= 0;
+}
+
/**
* memblock_is_region_reserved - check if a region intersects reserved memory
* @base: base of region to check
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 56a368b32f9f..6e88d76d047c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5436,6 +5436,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
{
+ unsigned long __maybe_unused offset = 0;
+
/* Skip empty nodes */
if (!pgdat->node_spanned_pages)
return;
@@ -5452,6 +5454,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
* for the buddy allocator to function correctly.
*/
start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
+ offset = pgdat->node_start_pfn - start;
end = pgdat_end_pfn(pgdat);
end = ALIGN(end, MAX_ORDER_NR_PAGES);
size = (end - start) * sizeof(struct page);
@@ -5459,7 +5462,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
if (!map)
map = memblock_virt_alloc_node_nopanic(size,
pgdat->node_id);
- pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
+ pgdat->node_mem_map = map + offset;
}
#ifndef CONFIG_NEED_MULTIPLE_NODES
/*
@@ -5467,9 +5470,9 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
*/
if (pgdat == NODE_DATA(0)) {
mem_map = NODE_DATA(0)->node_mem_map;
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
+#if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM)
if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
- mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
+ mem_map -= offset;
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
}
#endif
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 0c9733ecd17f..ba8be1954732 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -486,6 +486,10 @@ config SND_SOC_MAX98925
config SND_SOC_MAX9850
tristate
+config SND_SOC_MSM8x16_WCD
+ tristate "Qualcomm MSM8x16 WCD"
+ depends on SPMI && MFD_SYSCON
+
config SND_SOC_PCM1681
tristate "Texas Instruments PCM1681 CODEC"
depends on I2C
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 4a32077954ae..2c6c0e8aa178 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -72,6 +72,7 @@ snd-soc-max98925-objs := max98925.o
snd-soc-max9850-objs := max9850.o
snd-soc-mc13783-objs := mc13783.o
snd-soc-ml26124-objs := ml26124.o
+snd-soc-msm8x16-objs := msm8x16-wcd.o msm8x16-wcd-tables.o
snd-soc-hdmi-codec-objs := hdmi.o
snd-soc-pcm1681-objs := pcm1681.o
snd-soc-pcm1792a-codec-objs := pcm1792a.o
@@ -182,7 +183,6 @@ snd-soc-wm9705-objs := wm9705.o
snd-soc-wm9712-objs := wm9712.o
snd-soc-wm9713-objs := wm9713.o
snd-soc-wm-hubs-objs := wm_hubs.o
-
# Amp
snd-soc-max9877-objs := max9877.o
snd-soc-tpa6130a2-objs := tpa6130a2.o
@@ -264,6 +264,7 @@ obj-$(CONFIG_SND_SOC_MAX98925) += snd-soc-max98925.o
obj-$(CONFIG_SND_SOC_MAX9850) += snd-soc-max9850.o
obj-$(CONFIG_SND_SOC_MC13783) += snd-soc-mc13783.o
obj-$(CONFIG_SND_SOC_ML26124) += snd-soc-ml26124.o
+obj-$(CONFIG_SND_SOC_MSM8x16_WCD) += snd-soc-msm8x16.o
obj-$(CONFIG_SND_SOC_HDMI_CODEC) += snd-soc-hdmi-codec.o
obj-$(CONFIG_SND_SOC_PCM1681) += snd-soc-pcm1681.o
obj-$(CONFIG_SND_SOC_PCM1792A) += snd-soc-pcm1792a-codec.o
diff --git a/sound/soc/codecs/msm8x16-wcd-tables.c b/sound/soc/codecs/msm8x16-wcd-tables.c
new file mode 100644
index 000000000000..c6b1fd62bacc
--- /dev/null
+++ b/sound/soc/codecs/msm8x16-wcd-tables.c
@@ -0,0 +1,742 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8x16_wcd_registers.h"
+#include "msm8x16-wcd.h"
+
+const u8 msm8x16_wcd_reg_readable[MSM8X16_WCD_CACHE_SIZE] = {
+ [MSM8X16_WCD_A_DIGITAL_REVISION1] = 1,
+ [MSM8X16_WCD_A_DIGITAL_REVISION2] = 1,
+ [MSM8X16_WCD_A_DIGITAL_PERPH_TYPE] = 1,
+ [MSM8X16_WCD_A_DIGITAL_PERPH_SUBTYPE] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_RT_STS] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_SET_TYPE] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_POLARITY_HIGH] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_POLARITY_LOW] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_EN_SET] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_EN_CLR] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_LATCHED_STS] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_PENDING_STS] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_MID_SEL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_PRIORITY] = 1,
+ [MSM8X16_WCD_A_DIGITAL_GPIO_MODE] = 1,
+ [MSM8X16_WCD_A_DIGITAL_PIN_CTL_OE] = 1,
+ [MSM8X16_WCD_A_DIGITAL_PIN_CTL_DATA] = 1,
+ [MSM8X16_WCD_A_DIGITAL_PIN_STATUS] = 1,
+ [MSM8X16_WCD_A_DIGITAL_HDRIVE_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_RST_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_TOP_CLK_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_ANA_CLK_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_TX1_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_TX2_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_HPHR_DAC_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX1_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX2_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX3_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX_LB_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL1] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL2] = 1,
+ [MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL3] = 1,
+ [MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA0] = 1,
+ [MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA1] = 1,
+ [MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA2] = 1,
+ [MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA3] = 1,
+ [MSM8X16_WCD_A_DIGITAL_DIG_DEBUG_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_SPARE_0] = 1,
+ [MSM8X16_WCD_A_DIGITAL_SPARE_1] = 1,
+ [MSM8X16_WCD_A_DIGITAL_SPARE_2] = 1,
+ [MSM8X16_WCD_A_ANALOG_REVISION1] = 1,
+ [MSM8X16_WCD_A_ANALOG_REVISION2] = 1,
+ [MSM8X16_WCD_A_ANALOG_REVISION3] = 1,
+ [MSM8X16_WCD_A_ANALOG_REVISION4] = 1,
+ [MSM8X16_WCD_A_ANALOG_PERPH_TYPE] = 1,
+ [MSM8X16_WCD_A_ANALOG_PERPH_SUBTYPE] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_RT_STS] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_SET_TYPE] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_POLARITY_HIGH] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_POLARITY_LOW] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_EN_SET] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_EN_CLR] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_LATCHED_STS] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_PENDING_STS] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_MID_SEL] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_PRIORITY] = 1,
+ [MSM8X16_WCD_A_ANALOG_MICB_1_EN] = 1,
+ [MSM8X16_WCD_A_ANALOG_MICB_1_VAL] = 1,
+ [MSM8X16_WCD_A_ANALOG_MICB_1_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_MICB_1_INT_RBIAS] = 1,
+ [MSM8X16_WCD_A_ANALOG_MICB_2_EN] = 1,
+ [MSM8X16_WCD_A_ANALOG_MBHC_DET_CTL_1] = 1,
+ [MSM8X16_WCD_A_ANALOG_MBHC_DET_CTL_2] = 1,
+ [MSM8X16_WCD_A_ANALOG_MBHC_FSM_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_MBHC_DBNC_TIMER] = 1,
+ [MSM8X16_WCD_A_ANALOG_MBHC_BTN0_ZDETL_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_MBHC_BTN1_ZDETM_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_MBHC_BTN2_ZDETH_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_MBHC_BTN3_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_MBHC_BTN4_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_MBHC_BTN_RESULT] = 1,
+ [MSM8X16_WCD_A_ANALOG_MBHC_ZDET_ELECT_RESULT] = 1,
+ [MSM8X16_WCD_A_ANALOG_TX_1_EN] = 1,
+ [MSM8X16_WCD_A_ANALOG_TX_2_EN] = 1,
+ [MSM8X16_WCD_A_ANALOG_TX_1_2_TEST_CTL_1] = 1,
+ [MSM8X16_WCD_A_ANALOG_TX_1_2_TEST_CTL_2] = 1,
+ [MSM8X16_WCD_A_ANALOG_TX_1_2_ATEST_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_TX_1_2_OPAMP_BIAS] = 1,
+ [MSM8X16_WCD_A_ANALOG_TX_1_2_TXFE_CLKDIV] = 1,
+ [MSM8X16_WCD_A_ANALOG_TX_3_EN] = 1,
+ [MSM8X16_WCD_A_ANALOG_NCP_EN] = 1,
+ [MSM8X16_WCD_A_ANALOG_NCP_CLK] = 1,
+ [MSM8X16_WCD_A_ANALOG_NCP_DEGLITCH] = 1,
+ [MSM8X16_WCD_A_ANALOG_NCP_FBCTRL] = 1,
+ [MSM8X16_WCD_A_ANALOG_NCP_BIAS] = 1,
+ [MSM8X16_WCD_A_ANALOG_NCP_VCTRL] = 1,
+ [MSM8X16_WCD_A_ANALOG_NCP_TEST] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_CLOCK_DIVIDER] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_COM_OCP_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_COM_OCP_COUNT] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_COM_BIAS_DAC] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_PA] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_LDO_OCP] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_CNP] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_EN] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_WG_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_WG_TIME] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_L_TEST] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_L_PA_DAC_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_R_TEST] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_R_PA_DAC_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_EAR_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_ATEST] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_STATUS] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_EAR_STATUS] = 1,
+ [MSM8X16_WCD_A_ANALOG_SPKR_DAC_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_SPKR_DRV_CLIP_DET] = 1,
+ [MSM8X16_WCD_A_ANALOG_SPKR_DRV_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_SPKR_ANA_BIAS_SET] = 1,
+ [MSM8X16_WCD_A_ANALOG_SPKR_OCP_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_SPKR_PWRSTG_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_SPKR_DRV_MISC] = 1,
+ [MSM8X16_WCD_A_ANALOG_SPKR_DRV_DBG] = 1,
+ [MSM8X16_WCD_A_ANALOG_CURRENT_LIMIT] = 1,
+ [MSM8X16_WCD_A_ANALOG_OUTPUT_VOLTAGE] = 1,
+ [MSM8X16_WCD_A_ANALOG_BYPASS_MODE] = 1,
+ [MSM8X16_WCD_A_ANALOG_BOOST_EN_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_SLOPE_COMP_IP_ZERO] = 1,
+ [MSM8X16_WCD_A_ANALOG_RDSON_MAX_DUTY_CYCLE] = 1,
+ [MSM8X16_WCD_A_ANALOG_BOOST_TEST1_1] = 1,
+ [MSM8X16_WCD_A_ANALOG_BOOST_TEST_2] = 1,
+ [MSM8X16_WCD_A_ANALOG_SPKR_SAR_STATUS] = 1,
+ [MSM8X16_WCD_A_ANALOG_SPKR_DRV_STATUS] = 1,
+ [MSM8X16_WCD_A_ANALOG_PBUS_ADD_CSR] = 1,
+ [MSM8X16_WCD_A_ANALOG_PBUS_ADD_SEL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_RX_RESET_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_TX_RESET_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_DMIC_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_RX_I2S_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_TX_I2S_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_OTHR_RESET_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_TX_CLK_EN_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_OTHR_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_RX_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_MCLK_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_PDM_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_SD_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX1_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX2_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX3_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX1_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX2_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX3_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX1_B3_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX2_B3_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX3_B3_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX1_B4_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX2_B4_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX3_B4_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX1_B5_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX2_B5_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX3_B5_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX1_B6_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX2_B6_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX3_B6_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX1_VOL_CTL_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX2_VOL_CTL_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX3_VOL_CTL_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX1_VOL_CTL_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX2_VOL_CTL_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX3_VOL_CTL_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_TOP_GAIN_UPDATE] = 1,
+ [MSM8X16_WCD_A_CDC_TOP_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_DEBUG_DESER1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_DEBUG_DESER2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_DEBUG_B1_CTL_CFG] = 1,
+ [MSM8X16_WCD_A_CDC_DEBUG_B2_CTL_CFG] = 1,
+ [MSM8X16_WCD_A_CDC_DEBUG_B3_CTL_CFG] = 1,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B3_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B3_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B4_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B4_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B5_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B5_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B6_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B6_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B7_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B7_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B8_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B8_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_TIMER_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_TIMER_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR1_COEF_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR2_COEF_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR1_COEF_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR2_COEF_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_RX1_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_RX1_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_RX1_B3_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_RX2_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_RX2_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_RX2_B3_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_RX3_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_RX3_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_TX_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_EQ1_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_EQ1_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_EQ1_B3_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_EQ1_B4_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_EQ2_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_EQ2_B2_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_EQ2_B3_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_EQ2_B4_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CONN_TX_I2S_SD1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_TX1_VOL_CTL_TIMER] = 1,
+ [MSM8X16_WCD_A_CDC_TX2_VOL_CTL_TIMER] = 1,
+ [MSM8X16_WCD_A_CDC_TX1_VOL_CTL_GAIN] = 1,
+ [MSM8X16_WCD_A_CDC_TX2_VOL_CTL_GAIN] = 1,
+ [MSM8X16_WCD_A_CDC_TX1_VOL_CTL_CFG] = 1,
+ [MSM8X16_WCD_A_CDC_TX2_VOL_CTL_CFG] = 1,
+ [MSM8X16_WCD_A_CDC_TX1_MUX_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_TX2_MUX_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_TX1_CLK_FS_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_TX2_CLK_FS_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_TX1_DMIC_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_TX2_DMIC_CTL] = 1,
+ [MSM8X16_WCD_A_ANALOG_MASTER_BIAS_CTL] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_LATCHED_CLR] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_LATCHED_CLR] = 1,
+ [MSM8X16_WCD_A_ANALOG_NCP_CLIM_ADDR] = 1,
+ [MSM8X16_WCD_A_DIGITAL_SEC_ACCESS] = 1,
+ [MSM8X16_WCD_A_DIGITAL_PERPH_RESET_CTL3] = 1,
+ [MSM8X16_WCD_A_ANALOG_SEC_ACCESS] = 1,
+};
+
+const u8 msm8x16_wcd_reg_readonly[MSM8X16_WCD_CACHE_SIZE] = {
+ [MSM8X16_WCD_A_DIGITAL_REVISION1] = 1,
+ [MSM8X16_WCD_A_DIGITAL_REVISION2] = 1,
+ [MSM8X16_WCD_A_DIGITAL_PERPH_TYPE] = 1,
+ [MSM8X16_WCD_A_DIGITAL_PERPH_SUBTYPE] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_RT_STS] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_SET_TYPE] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_POLARITY_HIGH] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_POLARITY_LOW] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_LATCHED_STS] = 1,
+ [MSM8X16_WCD_A_DIGITAL_INT_PENDING_STS] = 1,
+ [MSM8X16_WCD_A_DIGITAL_PIN_STATUS] = 1,
+ [MSM8X16_WCD_A_ANALOG_REVISION1] = 1,
+ [MSM8X16_WCD_A_ANALOG_REVISION2] = 1,
+ [MSM8X16_WCD_A_ANALOG_REVISION3] = 1,
+ [MSM8X16_WCD_A_ANALOG_REVISION4] = 1,
+ [MSM8X16_WCD_A_ANALOG_PERPH_TYPE] = 1,
+ [MSM8X16_WCD_A_ANALOG_PERPH_SUBTYPE] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_RT_STS] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_SET_TYPE] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_POLARITY_HIGH] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_POLARITY_LOW] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_LATCHED_STS] = 1,
+ [MSM8X16_WCD_A_ANALOG_INT_PENDING_STS] = 1,
+ [MSM8X16_WCD_A_ANALOG_MBHC_BTN_RESULT] = 1,
+ [MSM8X16_WCD_A_ANALOG_MBHC_ZDET_ELECT_RESULT] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_STATUS] = 1,
+ [MSM8X16_WCD_A_ANALOG_RX_EAR_STATUS] = 1,
+ [MSM8X16_WCD_A_ANALOG_SPKR_SAR_STATUS] = 1,
+ [MSM8X16_WCD_A_ANALOG_SPKR_DRV_STATUS] = 1,
+ [MSM8X16_WCD_A_CDC_RX1_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX2_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX3_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX1_B6_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX2_B6_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_RX3_B6_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_TX1_VOL_CTL_CFG] = 1,
+ [MSM8X16_WCD_A_CDC_TX2_VOL_CTL_CFG] = 1,
+ [MSM8X16_WCD_A_CDC_IIR1_COEF_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_IIR2_COEF_B1_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_MCLK_CTL] = 1,
+ [MSM8X16_WCD_A_CDC_CLK_PDM_CTL] = 1,
+};
+
+u8 msm8x16_wcd_reset_reg_defaults[MSM8X16_WCD_CACHE_SIZE] = {
+ [MSM8X16_WCD_A_DIGITAL_REVISION1] =
+ MSM8X16_WCD_A_DIGITAL_REVISION1__POR,
+ [MSM8X16_WCD_A_DIGITAL_REVISION2] =
+ MSM8X16_WCD_A_DIGITAL_REVISION2__POR,
+ [MSM8X16_WCD_A_DIGITAL_PERPH_TYPE] =
+ MSM8X16_WCD_A_DIGITAL_PERPH_TYPE__POR,
+ [MSM8X16_WCD_A_DIGITAL_PERPH_SUBTYPE] =
+ MSM8X16_WCD_A_DIGITAL_PERPH_SUBTYPE__POR,
+ [MSM8X16_WCD_A_DIGITAL_INT_RT_STS] =
+ MSM8X16_WCD_A_DIGITAL_INT_RT_STS__POR,
+ [MSM8X16_WCD_A_DIGITAL_INT_SET_TYPE] =
+ MSM8X16_WCD_A_DIGITAL_INT_SET_TYPE__POR,
+ [MSM8X16_WCD_A_DIGITAL_INT_POLARITY_HIGH] =
+ MSM8X16_WCD_A_DIGITAL_INT_POLARITY_HIGH__POR,
+ [MSM8X16_WCD_A_DIGITAL_INT_POLARITY_LOW] =
+ MSM8X16_WCD_A_DIGITAL_INT_POLARITY_LOW__POR,
+ [MSM8X16_WCD_A_DIGITAL_INT_LATCHED_CLR] =
+ MSM8X16_WCD_A_DIGITAL_INT_LATCHED_CLR__POR,
+ [MSM8X16_WCD_A_DIGITAL_INT_EN_SET] =
+ MSM8X16_WCD_A_DIGITAL_INT_EN_SET__POR,
+ [MSM8X16_WCD_A_DIGITAL_INT_EN_CLR] =
+ MSM8X16_WCD_A_DIGITAL_INT_EN_CLR__POR,
+ [MSM8X16_WCD_A_DIGITAL_INT_LATCHED_STS] =
+ MSM8X16_WCD_A_DIGITAL_INT_LATCHED_STS__POR,
+ [MSM8X16_WCD_A_DIGITAL_INT_PENDING_STS] =
+ MSM8X16_WCD_A_DIGITAL_INT_PENDING_STS__POR,
+ [MSM8X16_WCD_A_DIGITAL_INT_MID_SEL] =
+ MSM8X16_WCD_A_DIGITAL_INT_MID_SEL__POR,
+ [MSM8X16_WCD_A_DIGITAL_INT_PRIORITY] =
+ MSM8X16_WCD_A_DIGITAL_INT_PRIORITY__POR,
+ [MSM8X16_WCD_A_DIGITAL_GPIO_MODE] =
+ MSM8X16_WCD_A_DIGITAL_GPIO_MODE__POR,
+ [MSM8X16_WCD_A_DIGITAL_PIN_CTL_OE] =
+ MSM8X16_WCD_A_DIGITAL_PIN_CTL_OE__POR,
+ [MSM8X16_WCD_A_DIGITAL_PIN_CTL_DATA] =
+ MSM8X16_WCD_A_DIGITAL_PIN_CTL_DATA__POR,
+ [MSM8X16_WCD_A_DIGITAL_PIN_STATUS] =
+ MSM8X16_WCD_A_DIGITAL_PIN_STATUS__POR,
+ [MSM8X16_WCD_A_DIGITAL_HDRIVE_CTL] =
+ MSM8X16_WCD_A_DIGITAL_HDRIVE_CTL__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_RST_CTL] =
+ MSM8X16_WCD_A_DIGITAL_CDC_RST_CTL__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_TOP_CLK_CTL] =
+ MSM8X16_WCD_A_DIGITAL_CDC_TOP_CLK_CTL__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_ANA_CLK_CTL] =
+ MSM8X16_WCD_A_DIGITAL_CDC_ANA_CLK_CTL__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL] =
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_TX1_CTL] =
+ MSM8X16_WCD_A_DIGITAL_CDC_CONN_TX1_CTL__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_TX2_CTL] =
+ MSM8X16_WCD_A_DIGITAL_CDC_CONN_TX2_CTL__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_HPHR_DAC_CTL] =
+ MSM8X16_WCD_A_DIGITAL_CDC_CONN_HPHR_DAC_CTL__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX1_CTL] =
+ MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX1_CTL__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX2_CTL] =
+ MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX2_CTL__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX3_CTL] =
+ MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX3_CTL__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX_LB_CTL] =
+ MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX_LB_CTL__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL1] =
+ MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL1__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL2] =
+ MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL2__POR,
+ [MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL3] =
+ MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL3__POR,
+ [MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA0] =
+ MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA0__POR,
+ [MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA1] =
+ MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA1__POR,
+ [MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA2] =
+ MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA2__POR,
+ [MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA3] =
+ MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA3__POR,
+ [MSM8X16_WCD_A_DIGITAL_SPARE_0] =
+ MSM8X16_WCD_A_DIGITAL_SPARE_0__POR,
+ [MSM8X16_WCD_A_DIGITAL_SPARE_1] =
+ MSM8X16_WCD_A_DIGITAL_SPARE_1__POR,
+ [MSM8X16_WCD_A_DIGITAL_SPARE_2] =
+ MSM8X16_WCD_A_DIGITAL_SPARE_2__POR,
+ [MSM8X16_WCD_A_ANALOG_REVISION1] =
+ MSM8X16_WCD_A_ANALOG_REVISION1__POR,
+ [MSM8X16_WCD_A_ANALOG_REVISION2] =
+ MSM8X16_WCD_A_ANALOG_REVISION2__POR,
+ [MSM8X16_WCD_A_ANALOG_REVISION3] =
+ MSM8X16_WCD_A_ANALOG_REVISION3__POR,
+ [MSM8X16_WCD_A_ANALOG_REVISION4] =
+ MSM8X16_WCD_A_ANALOG_REVISION4__POR,
+ [MSM8X16_WCD_A_ANALOG_PERPH_TYPE] =
+ MSM8X16_WCD_A_ANALOG_PERPH_TYPE__POR,
+ [MSM8X16_WCD_A_ANALOG_PERPH_SUBTYPE] =
+ MSM8X16_WCD_A_ANALOG_PERPH_SUBTYPE__POR,
+ [MSM8X16_WCD_A_ANALOG_INT_RT_STS] =
+ MSM8X16_WCD_A_ANALOG_INT_RT_STS__POR,
+ [MSM8X16_WCD_A_ANALOG_INT_SET_TYPE] =
+ MSM8X16_WCD_A_ANALOG_INT_SET_TYPE__POR,
+ [MSM8X16_WCD_A_ANALOG_INT_POLARITY_HIGH] =
+ MSM8X16_WCD_A_ANALOG_INT_POLARITY_HIGH__POR,
+ [MSM8X16_WCD_A_ANALOG_INT_POLARITY_LOW] =
+ MSM8X16_WCD_A_ANALOG_INT_POLARITY_LOW__POR,
+ [MSM8X16_WCD_A_ANALOG_INT_LATCHED_CLR] =
+ MSM8X16_WCD_A_ANALOG_INT_LATCHED_CLR__POR,
+ [MSM8X16_WCD_A_ANALOG_INT_EN_SET] =
+ MSM8X16_WCD_A_ANALOG_INT_EN_SET__POR,
+ [MSM8X16_WCD_A_ANALOG_INT_EN_CLR] =
+ MSM8X16_WCD_A_ANALOG_INT_EN_CLR__POR,
+ [MSM8X16_WCD_A_ANALOG_INT_LATCHED_STS] =
+ MSM8X16_WCD_A_ANALOG_INT_LATCHED_STS__POR,
+ [MSM8X16_WCD_A_ANALOG_INT_PENDING_STS] =
+ MSM8X16_WCD_A_ANALOG_INT_PENDING_STS__POR,
+ [MSM8X16_WCD_A_ANALOG_INT_MID_SEL] =
+ MSM8X16_WCD_A_ANALOG_INT_MID_SEL__POR,
+ [MSM8X16_WCD_A_ANALOG_INT_PRIORITY] =
+ MSM8X16_WCD_A_ANALOG_INT_PRIORITY__POR,
+ [MSM8X16_WCD_A_ANALOG_MICB_1_EN] =
+ MSM8X16_WCD_A_ANALOG_MICB_1_EN__POR,
+ [MSM8X16_WCD_A_ANALOG_MICB_1_VAL] =
+ MSM8X16_WCD_A_ANALOG_MICB_1_VAL__POR,
+ [MSM8X16_WCD_A_ANALOG_MICB_1_CTL] =
+ MSM8X16_WCD_A_ANALOG_MICB_1_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_MICB_1_INT_RBIAS] =
+ MSM8X16_WCD_A_ANALOG_MICB_1_INT_RBIAS__POR,
+ [MSM8X16_WCD_A_ANALOG_MICB_2_EN] =
+ MSM8X16_WCD_A_ANALOG_MICB_2_EN__POR,
+ [MSM8X16_WCD_A_ANALOG_MBHC_DET_CTL_1] =
+ MSM8X16_WCD_A_ANALOG_MBHC_DET_CTL_1__POR,
+ [MSM8X16_WCD_A_ANALOG_MBHC_DET_CTL_2] =
+ MSM8X16_WCD_A_ANALOG_MBHC_DET_CTL_2__POR,
+ [MSM8X16_WCD_A_ANALOG_MBHC_FSM_CTL] =
+ MSM8X16_WCD_A_ANALOG_MBHC_FSM_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_MBHC_DBNC_TIMER] =
+ MSM8X16_WCD_A_ANALOG_MBHC_DBNC_TIMER__POR,
+ [MSM8X16_WCD_A_ANALOG_MBHC_BTN0_ZDETL_CTL] =
+ MSM8X16_WCD_A_ANALOG_MBHC_BTN0_ZDETL_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_MBHC_BTN1_ZDETM_CTL] =
+ MSM8X16_WCD_A_ANALOG_MBHC_BTN1_ZDETM_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_MBHC_BTN2_ZDETH_CTL] =
+ MSM8X16_WCD_A_ANALOG_MBHC_BTN2_ZDETH_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_MBHC_BTN3_CTL] =
+ MSM8X16_WCD_A_ANALOG_MBHC_BTN3_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_MBHC_BTN4_CTL] =
+ MSM8X16_WCD_A_ANALOG_MBHC_BTN4_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_MBHC_BTN_RESULT] =
+ MSM8X16_WCD_A_ANALOG_MBHC_BTN_RESULT__POR,
+ [MSM8X16_WCD_A_ANALOG_MBHC_ZDET_ELECT_RESULT] =
+ MSM8X16_WCD_A_ANALOG_MBHC_ZDET_ELECT_RESULT__POR,
+ [MSM8X16_WCD_A_ANALOG_TX_1_EN] =
+ MSM8X16_WCD_A_ANALOG_TX_1_EN__POR,
+ [MSM8X16_WCD_A_ANALOG_TX_2_EN] =
+ MSM8X16_WCD_A_ANALOG_TX_2_EN__POR,
+ [MSM8X16_WCD_A_ANALOG_TX_1_2_TEST_CTL_1] =
+ MSM8X16_WCD_A_ANALOG_TX_1_2_TEST_CTL_1__POR,
+ [MSM8X16_WCD_A_ANALOG_TX_1_2_TEST_CTL_2] =
+ MSM8X16_WCD_A_ANALOG_TX_1_2_TEST_CTL_2__POR,
+ [MSM8X16_WCD_A_ANALOG_TX_1_2_ATEST_CTL] =
+ MSM8X16_WCD_A_ANALOG_TX_1_2_ATEST_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_TX_1_2_OPAMP_BIAS] =
+ MSM8X16_WCD_A_ANALOG_TX_1_2_OPAMP_BIAS__POR,
+ [MSM8X16_WCD_A_ANALOG_TX_1_2_TXFE_CLKDIV] =
+ MSM8X16_WCD_A_ANALOG_TX_1_2_TXFE_CLKDIV__POR,
+ [MSM8X16_WCD_A_ANALOG_TX_3_EN] =
+ MSM8X16_WCD_A_ANALOG_TX_3_EN__POR,
+ [MSM8X16_WCD_A_ANALOG_NCP_EN] =
+ MSM8X16_WCD_A_ANALOG_NCP_EN__POR,
+ [MSM8X16_WCD_A_ANALOG_NCP_CLK] =
+ MSM8X16_WCD_A_ANALOG_NCP_CLK__POR,
+ [MSM8X16_WCD_A_ANALOG_NCP_DEGLITCH] =
+ MSM8X16_WCD_A_ANALOG_NCP_DEGLITCH__POR,
+ [MSM8X16_WCD_A_ANALOG_NCP_FBCTRL] =
+ MSM8X16_WCD_A_ANALOG_NCP_FBCTRL__POR,
+ [MSM8X16_WCD_A_ANALOG_NCP_BIAS] =
+ MSM8X16_WCD_A_ANALOG_NCP_BIAS__POR,
+ [MSM8X16_WCD_A_ANALOG_NCP_VCTRL] =
+ MSM8X16_WCD_A_ANALOG_NCP_VCTRL__POR,
+ [MSM8X16_WCD_A_ANALOG_NCP_TEST] =
+ MSM8X16_WCD_A_ANALOG_NCP_TEST__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_CLOCK_DIVIDER] =
+ MSM8X16_WCD_A_ANALOG_RX_CLOCK_DIVIDER__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_COM_OCP_CTL] =
+ MSM8X16_WCD_A_ANALOG_RX_COM_OCP_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_COM_OCP_COUNT] =
+ MSM8X16_WCD_A_ANALOG_RX_COM_OCP_COUNT__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_COM_BIAS_DAC] =
+ MSM8X16_WCD_A_ANALOG_RX_COM_BIAS_DAC__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_PA] =
+ MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_PA__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_LDO_OCP] =
+ MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_LDO_OCP__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_CNP] =
+ MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_CNP__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_EN] =
+ MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_EN__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_WG_CTL] =
+ MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_WG_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_WG_TIME] =
+ MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_WG_TIME__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_L_TEST] =
+ MSM8X16_WCD_A_ANALOG_RX_HPH_L_TEST__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_L_PA_DAC_CTL] =
+ MSM8X16_WCD_A_ANALOG_RX_HPH_L_PA_DAC_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_R_TEST] =
+ MSM8X16_WCD_A_ANALOG_RX_HPH_R_TEST__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_R_PA_DAC_CTL] =
+ MSM8X16_WCD_A_ANALOG_RX_HPH_R_PA_DAC_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_EAR_CTL] =
+ MSM8X16_WCD_A_ANALOG_RX_EAR_CTL___POR,
+ [MSM8X16_WCD_A_ANALOG_RX_ATEST] =
+ MSM8X16_WCD_A_ANALOG_RX_ATEST__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_HPH_STATUS] =
+ MSM8X16_WCD_A_ANALOG_RX_HPH_STATUS__POR,
+ [MSM8X16_WCD_A_ANALOG_RX_EAR_STATUS] =
+ MSM8X16_WCD_A_ANALOG_RX_EAR_STATUS__POR,
+ [MSM8X16_WCD_A_ANALOG_SPKR_DAC_CTL] =
+ MSM8X16_WCD_A_ANALOG_SPKR_DAC_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_SPKR_DRV_CLIP_DET] =
+ MSM8X16_WCD_A_ANALOG_SPKR_DRV_CLIP_DET__POR,
+ [MSM8X16_WCD_A_ANALOG_SPKR_DRV_CTL] =
+ MSM8X16_WCD_A_ANALOG_SPKR_DRV_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_SPKR_ANA_BIAS_SET] =
+ MSM8X16_WCD_A_ANALOG_SPKR_ANA_BIAS_SET__POR,
+ [MSM8X16_WCD_A_ANALOG_SPKR_OCP_CTL] =
+ MSM8X16_WCD_A_ANALOG_SPKR_OCP_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_SPKR_PWRSTG_CTL] =
+ MSM8X16_WCD_A_ANALOG_SPKR_PWRSTG_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_SPKR_DRV_MISC] =
+ MSM8X16_WCD_A_ANALOG_SPKR_DRV_MISC__POR,
+ [MSM8X16_WCD_A_ANALOG_SPKR_DRV_DBG] =
+ MSM8X16_WCD_A_ANALOG_SPKR_DRV_DBG__POR,
+ [MSM8X16_WCD_A_ANALOG_CURRENT_LIMIT] =
+ MSM8X16_WCD_A_ANALOG_CURRENT_LIMIT__POR,
+ [MSM8X16_WCD_A_ANALOG_OUTPUT_VOLTAGE] =
+ MSM8X16_WCD_A_ANALOG_OUTPUT_VOLTAGE__POR,
+ [MSM8X16_WCD_A_ANALOG_BYPASS_MODE] =
+ MSM8X16_WCD_A_ANALOG_BYPASS_MODE__POR,
+ [MSM8X16_WCD_A_ANALOG_BOOST_EN_CTL] =
+ MSM8X16_WCD_A_ANALOG_BOOST_EN_CTL__POR,
+ [MSM8X16_WCD_A_ANALOG_SLOPE_COMP_IP_ZERO] =
+ MSM8X16_WCD_A_ANALOG_SLOPE_COMP_IP_ZERO__POR,
+ [MSM8X16_WCD_A_ANALOG_RDSON_MAX_DUTY_CYCLE] =
+ MSM8X16_WCD_A_ANALOG_RDSON_MAX_DUTY_CYCLE__POR,
+ [MSM8X16_WCD_A_ANALOG_BOOST_TEST1_1] =
+ MSM8X16_WCD_A_ANALOG_BOOST_TEST1_1__POR,
+ [MSM8X16_WCD_A_ANALOG_BOOST_TEST_2] =
+ MSM8X16_WCD_A_ANALOG_BOOST_TEST_2__POR,
+ [MSM8X16_WCD_A_ANALOG_SPKR_SAR_STATUS] =
+ MSM8X16_WCD_A_ANALOG_SPKR_SAR_STATUS__POR,
+ [MSM8X16_WCD_A_ANALOG_SPKR_DRV_STATUS] =
+ MSM8X16_WCD_A_ANALOG_SPKR_DRV_STATUS__POR,
+ [MSM8X16_WCD_A_ANALOG_PBUS_ADD_CSR] =
+ MSM8X16_WCD_A_ANALOG_PBUS_ADD_CSR__POR,
+ [MSM8X16_WCD_A_ANALOG_PBUS_ADD_SEL] =
+ MSM8X16_WCD_A_ANALOG_PBUS_ADD_SEL__POR,
+ [MSM8X16_WCD_A_CDC_CLK_RX_RESET_CTL] =
+ MSM8X16_WCD_A_CDC_CLK_RX_RESET_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CLK_TX_RESET_B1_CTL] =
+ MSM8X16_WCD_A_CDC_CLK_TX_RESET_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CLK_DMIC_B1_CTL] =
+ MSM8X16_WCD_A_CDC_CLK_DMIC_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CLK_RX_I2S_CTL] =
+ MSM8X16_WCD_A_CDC_CLK_RX_I2S_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CLK_TX_I2S_CTL] =
+ MSM8X16_WCD_A_CDC_CLK_TX_I2S_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CLK_OTHR_RESET_B1_CTL] =
+ MSM8X16_WCD_A_CDC_CLK_OTHR_RESET_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CLK_TX_CLK_EN_B1_CTL] =
+ MSM8X16_WCD_A_CDC_CLK_TX_CLK_EN_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CLK_OTHR_CTL] =
+ MSM8X16_WCD_A_CDC_CLK_OTHR_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CLK_RX_B1_CTL] =
+ MSM8X16_WCD_A_CDC_CLK_RX_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CLK_MCLK_CTL] =
+ MSM8X16_WCD_A_CDC_CLK_MCLK_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CLK_PDM_CTL] =
+ MSM8X16_WCD_A_CDC_CLK_PDM_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CLK_SD_CTL] =
+ MSM8X16_WCD_A_CDC_CLK_SD_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX1_B1_CTL] =
+ MSM8X16_WCD_A_CDC_RX1_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX2_B1_CTL] =
+ MSM8X16_WCD_A_CDC_RX2_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX3_B1_CTL] =
+ MSM8X16_WCD_A_CDC_RX3_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX1_B2_CTL] =
+ MSM8X16_WCD_A_CDC_RX1_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX2_B2_CTL] =
+ MSM8X16_WCD_A_CDC_RX2_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX3_B2_CTL] =
+ MSM8X16_WCD_A_CDC_RX3_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX1_B3_CTL] =
+ MSM8X16_WCD_A_CDC_RX1_B3_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX2_B3_CTL] =
+ MSM8X16_WCD_A_CDC_RX2_B3_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX3_B3_CTL] =
+ MSM8X16_WCD_A_CDC_RX3_B3_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX1_B4_CTL] =
+ MSM8X16_WCD_A_CDC_RX1_B4_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX2_B4_CTL] =
+ MSM8X16_WCD_A_CDC_RX2_B4_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX3_B4_CTL] =
+ MSM8X16_WCD_A_CDC_RX3_B4_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX1_B5_CTL] =
+ MSM8X16_WCD_A_CDC_RX1_B5_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX2_B5_CTL] =
+ MSM8X16_WCD_A_CDC_RX2_B5_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX3_B5_CTL] =
+ MSM8X16_WCD_A_CDC_RX3_B5_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX1_B6_CTL] =
+ MSM8X16_WCD_A_CDC_RX1_B6_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX2_B6_CTL] =
+ MSM8X16_WCD_A_CDC_RX2_B6_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX3_B6_CTL] =
+ MSM8X16_WCD_A_CDC_RX3_B6_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX1_VOL_CTL_B1_CTL] =
+ MSM8X16_WCD_A_CDC_RX1_VOL_CTL_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX2_VOL_CTL_B1_CTL] =
+ MSM8X16_WCD_A_CDC_RX2_VOL_CTL_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX3_VOL_CTL_B1_CTL] =
+ MSM8X16_WCD_A_CDC_RX3_VOL_CTL_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX1_VOL_CTL_B2_CTL] =
+ MSM8X16_WCD_A_CDC_RX1_VOL_CTL_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX2_VOL_CTL_B2_CTL] =
+ MSM8X16_WCD_A_CDC_RX2_VOL_CTL_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_RX3_VOL_CTL_B2_CTL] =
+ MSM8X16_WCD_A_CDC_RX3_VOL_CTL_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_TOP_GAIN_UPDATE] =
+ MSM8X16_WCD_A_CDC_TOP_GAIN_UPDATE__POR,
+ [MSM8X16_WCD_A_CDC_TOP_CTL] =
+ MSM8X16_WCD_A_CDC_TOP_CTL__POR,
+ [MSM8X16_WCD_A_CDC_DEBUG_DESER1_CTL] =
+ MSM8X16_WCD_A_CDC_DEBUG_DESER1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_DEBUG_DESER2_CTL] =
+ MSM8X16_WCD_A_CDC_DEBUG_DESER2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_DEBUG_B1_CTL_CFG] =
+ MSM8X16_WCD_A_CDC_DEBUG_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_DEBUG_B2_CTL_CFG] =
+ MSM8X16_WCD_A_CDC_DEBUG_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_DEBUG_B3_CTL_CFG] =
+ MSM8X16_WCD_A_CDC_DEBUG_B3_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B1_CTL] =
+ MSM8X16_WCD_A_CDC_IIR1_GAIN_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B1_CTL] =
+ MSM8X16_WCD_A_CDC_IIR2_GAIN_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B2_CTL] =
+ MSM8X16_WCD_A_CDC_IIR1_GAIN_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B2_CTL] =
+ MSM8X16_WCD_A_CDC_IIR2_GAIN_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B3_CTL] =
+ MSM8X16_WCD_A_CDC_IIR1_GAIN_B3_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B3_CTL] =
+ MSM8X16_WCD_A_CDC_IIR2_GAIN_B3_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B4_CTL] =
+ MSM8X16_WCD_A_CDC_IIR1_GAIN_B4_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B4_CTL] =
+ MSM8X16_WCD_A_CDC_IIR2_GAIN_B4_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B5_CTL] =
+ MSM8X16_WCD_A_CDC_IIR1_GAIN_B5_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B5_CTL] =
+ MSM8X16_WCD_A_CDC_IIR2_GAIN_B5_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B6_CTL] =
+ MSM8X16_WCD_A_CDC_IIR1_GAIN_B6_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B6_CTL] =
+ MSM8X16_WCD_A_CDC_IIR2_GAIN_B6_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B7_CTL] =
+ MSM8X16_WCD_A_CDC_IIR1_GAIN_B7_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B7_CTL] =
+ MSM8X16_WCD_A_CDC_IIR2_GAIN_B7_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_B8_CTL] =
+ MSM8X16_WCD_A_CDC_IIR1_GAIN_B8_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_B8_CTL] =
+ MSM8X16_WCD_A_CDC_IIR2_GAIN_B8_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR1_CTL] =
+ MSM8X16_WCD_A_CDC_IIR1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR2_CTL] =
+ MSM8X16_WCD_A_CDC_IIR2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR1_GAIN_TIMER_CTL] =
+ MSM8X16_WCD_A_CDC_IIR1_GAIN_TIMER_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR2_GAIN_TIMER_CTL] =
+ MSM8X16_WCD_A_CDC_IIR2_GAIN_TIMER_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR1_COEF_B1_CTL] =
+ MSM8X16_WCD_A_CDC_IIR1_COEF_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR2_COEF_B1_CTL] =
+ MSM8X16_WCD_A_CDC_IIR2_COEF_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR1_COEF_B2_CTL] =
+ MSM8X16_WCD_A_CDC_IIR1_COEF_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_IIR2_COEF_B2_CTL] =
+ MSM8X16_WCD_A_CDC_IIR2_COEF_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_RX1_B1_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_RX1_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_RX1_B2_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_RX1_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_RX1_B3_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_RX1_B3_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_RX2_B1_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_RX2_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_RX2_B2_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_RX2_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_RX2_B3_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_RX2_B3_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_RX3_B1_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_RX3_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_RX3_B2_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_RX3_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_TX_B1_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_TX_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_EQ1_B1_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_EQ1_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_EQ1_B2_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_EQ1_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_EQ1_B3_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_EQ1_B3_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_EQ1_B4_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_EQ1_B4_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_EQ2_B1_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_EQ2_B1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_EQ2_B2_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_EQ2_B2_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_EQ2_B3_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_EQ2_B3_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_EQ2_B4_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_EQ2_B4_CTL__POR,
+ [MSM8X16_WCD_A_CDC_CONN_TX_I2S_SD1_CTL] =
+ MSM8X16_WCD_A_CDC_CONN_TX_I2S_SD1_CTL__POR,
+ [MSM8X16_WCD_A_CDC_TX1_VOL_CTL_TIMER] =
+ MSM8X16_WCD_A_CDC_TX1_VOL_CTL_TIMER__POR,
+ [MSM8X16_WCD_A_CDC_TX2_VOL_CTL_TIMER] =
+ MSM8X16_WCD_A_CDC_TX2_VOL_CTL_TIMER__POR,
+ [MSM8X16_WCD_A_CDC_TX1_VOL_CTL_GAIN] =
+ MSM8X16_WCD_A_CDC_TX1_VOL_CTL_GAIN__POR,
+ [MSM8X16_WCD_A_CDC_TX2_VOL_CTL_GAIN] =
+ MSM8X16_WCD_A_CDC_TX2_VOL_CTL_GAIN__POR,
+ [MSM8X16_WCD_A_CDC_TX1_VOL_CTL_CFG] =
+ MSM8X16_WCD_A_CDC_TX1_VOL_CTL_CFG__POR,
+ [MSM8X16_WCD_A_CDC_TX2_VOL_CTL_CFG] =
+ MSM8X16_WCD_A_CDC_TX2_VOL_CTL_CFG__POR,
+ [MSM8X16_WCD_A_CDC_TX1_MUX_CTL] =
+ MSM8X16_WCD_A_CDC_TX1_MUX_CTL__POR,
+ [MSM8X16_WCD_A_CDC_TX2_MUX_CTL] =
+ MSM8X16_WCD_A_CDC_TX2_MUX_CTL__POR,
+ [MSM8X16_WCD_A_CDC_TX1_CLK_FS_CTL] =
+ MSM8X16_WCD_A_CDC_TX1_CLK_FS_CTL__POR,
+ [MSM8X16_WCD_A_CDC_TX2_CLK_FS_CTL] =
+ MSM8X16_WCD_A_CDC_TX2_CLK_FS_CTL__POR,
+ [MSM8X16_WCD_A_CDC_TX1_DMIC_CTL] =
+ MSM8X16_WCD_A_CDC_TX1_DMIC_CTL__POR,
+ [MSM8X16_WCD_A_CDC_TX2_DMIC_CTL] =
+ MSM8X16_WCD_A_CDC_TX2_DMIC_CTL__POR,
+};
diff --git a/sound/soc/codecs/msm8x16-wcd.c b/sound/soc/codecs/msm8x16-wcd.c
new file mode 100644
index 000000000000..d0df8c3e1155
--- /dev/null
+++ b/sound/soc/codecs/msm8x16-wcd.c
@@ -0,0 +1,1636 @@
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/tlv.h>
+
+#include "msm8x16-wcd.h"
+#include "msm8x16_wcd_registers.h"
+
+#define MSM8X16_WCD_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 |\
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_48000)
+#define MSM8X16_WCD_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S24_LE)
+
+
+#define TOMBAK_VERSION_1_0 0
+#define TOMBAK_IS_1_0(ver) \
+ ((ver == TOMBAK_VERSION_1_0) ? 1 : 0)
+
+#define HPHL_PA_DISABLE (0x01 << 1)
+#define HPHR_PA_DISABLE (0x01 << 2)
+#define EAR_PA_DISABLE (0x01 << 3)
+#define SPKR_PA_DISABLE (0x01 << 4)
+
+enum {
+ AIF1_PB = 0,
+ AIF1_CAP,
+ NUM_CODEC_DAIS,
+};
+
+static unsigned long rx_digital_gain_reg[] = {
+ MSM8X16_WCD_A_CDC_RX1_VOL_CTL_B2_CTL,
+ MSM8X16_WCD_A_CDC_RX2_VOL_CTL_B2_CTL,
+ MSM8X16_WCD_A_CDC_RX3_VOL_CTL_B2_CTL,
+};
+
+struct wcd_chip {
+ struct regmap *analog_map;
+ struct regmap *digital_map;
+ unsigned int analog_base;
+ u16 pmic_rev;
+ u16 codec_version;
+ bool spk_boost_set;
+ u32 mute_mask;
+ u32 rx_bias_count;
+ bool ear_pa_boost_set;
+ bool lb_mode;
+ struct clk *mclk;
+
+ struct regulator *vddio;
+ struct regulator *vdd_pa;
+ struct regulator *vdd_px;
+ struct regulator *vdd_cp;
+};
+
+
+static int msm8x16_wcd_volatile(struct snd_soc_codec *codec, unsigned int reg)
+{
+ return msm8x16_wcd_reg_readonly[reg];
+}
+
+static int msm8x16_wcd_readable(struct snd_soc_codec *ssc, unsigned int reg)
+{
+ return msm8x16_wcd_reg_readable[reg];
+}
+
+static int __msm8x16_wcd_reg_write(struct snd_soc_codec *codec,
+ unsigned short reg, u8 val)
+{
+ int ret = -EINVAL;
+ struct wcd_chip *chip = dev_get_drvdata(codec->dev);
+
+ if (MSM8X16_WCD_IS_TOMBAK_REG(reg)) {
+ ret = regmap_write(chip->analog_map,
+ chip->analog_base + reg, val);
+ } else if (MSM8X16_WCD_IS_DIGITAL_REG(reg)) {
+ u32 temp = val & 0x000000FF;
+ u16 offset = (reg ^ 0x0200) & 0x0FFF;
+
+ ret = regmap_write(chip->digital_map, offset, temp);
+ }
+
+ return ret;
+}
+
+static int msm8x16_wcd_write(struct snd_soc_codec *codec, unsigned int reg,
+ unsigned int value)
+{
+ if (reg == SND_SOC_NOPM)
+ return 0;
+
+ BUG_ON(reg > MSM8X16_WCD_MAX_REGISTER);
+ if (!msm8x16_wcd_volatile(codec, reg))
+ msm8x16_wcd_reset_reg_defaults[reg] = value;
+
+ return __msm8x16_wcd_reg_write(codec, reg, (u8)value);
+}
+
+static int __msm8x16_wcd_reg_read(struct snd_soc_codec *codec,
+ unsigned short reg)
+{
+ int ret = -EINVAL;
+ u32 temp = 0;
+ struct wcd_chip *chip = dev_get_drvdata(codec->dev);
+
+ if (MSM8X16_WCD_IS_TOMBAK_REG(reg)) {
+ ret = regmap_read(chip->analog_map,
+ chip->analog_base + reg, &temp);
+ } else if (MSM8X16_WCD_IS_DIGITAL_REG(reg)) {
+ u32 val;
+ u16 offset = (reg ^ 0x0200) & 0x0FFF;
+
+ ret = regmap_read(chip->digital_map, offset, &val);
+ temp = (u8)val;
+ }
+
+ if (ret < 0) {
+ dev_err(codec->dev,
+ "%s: codec read failed for reg 0x%x\n",
+ __func__, reg);
+ return ret;
+ }
+
+ dev_dbg(codec->dev, "Read 0x%02x from 0x%x\n", temp, reg);
+
+ return temp;
+}
+
+
+static unsigned int msm8x16_wcd_read(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ unsigned int val;
+
+ if (reg == SND_SOC_NOPM)
+ return 0;
+
+ BUG_ON(reg > MSM8X16_WCD_MAX_REGISTER);
+
+ if (!msm8x16_wcd_volatile(codec, reg) &&
+ msm8x16_wcd_readable(codec, reg) &&
+ reg < codec->driver->reg_cache_size) {
+ return msm8x16_wcd_reset_reg_defaults[reg];
+ }
+
+ val = __msm8x16_wcd_reg_read(codec, reg);
+
+ return val;
+}
+
+static const struct msm8x16_wcd_reg_mask_val msm8x16_wcd_reg_defaults[] = {
+ MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_ANALOG_SPKR_DAC_CTL, 0x03),
+ MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_ANALOG_CURRENT_LIMIT, 0x82),
+ MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_ANALOG_SPKR_OCP_CTL, 0xE1),
+};
+
+static const struct msm8x16_wcd_reg_mask_val msm8x16_wcd_reg_defaults_2_0[] = {
+ MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_DIGITAL_PERPH_RESET_CTL3, 0x0F),
+ MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_ANALOG_TX_1_2_OPAMP_BIAS, 0x4B),
+ MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_ANALOG_NCP_FBCTRL, 0x28),
+ MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_ANALOG_SPKR_DRV_CTL, 0x69),
+ MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_ANALOG_SPKR_DRV_DBG, 0x01),
+ MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_ANALOG_BOOST_EN_CTL, 0x5F),
+ MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_ANALOG_SLOPE_COMP_IP_ZERO, 0x88),
+ MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_ANALOG_PERPH_RESET_CTL3, 0x0F),
+ MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_ANALOG_CURRENT_LIMIT, 0x82),
+ MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_ANALOG_SPKR_DAC_CTL, 0x03),
+ MSM8X16_WCD_REG_VAL(MSM8X16_WCD_A_ANALOG_SPKR_OCP_CTL, 0xE1),
+};
+
+static int msm8x16_wcd_bringup(struct snd_soc_codec *codec)
+{
+ snd_soc_write(codec, MSM8X16_WCD_A_DIGITAL_PERPH_RESET_CTL4, 0x01);
+ snd_soc_write(codec, MSM8X16_WCD_A_ANALOG_PERPH_RESET_CTL4, 0x01);
+ return 0;
+}
+
+static const struct msm8x16_wcd_reg_mask_val
+ msm8x16_wcd_codec_reg_init_val[] = {
+
+ /*
+ * Initialize the current threshold to 350 mA and the number of
+ * wait and run cycles to 4096.
+ */
+ {MSM8X16_WCD_A_ANALOG_RX_COM_OCP_CTL, 0xFF, 0xD1},
+ {MSM8X16_WCD_A_ANALOG_RX_COM_OCP_COUNT, 0xFF, 0xFF},
+};
+
+static void msm8x16_wcd_codec_init_reg(struct snd_soc_codec *codec)
+{
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(msm8x16_wcd_codec_reg_init_val); i++)
+ snd_soc_update_bits(codec,
+ msm8x16_wcd_codec_reg_init_val[i].reg,
+ msm8x16_wcd_codec_reg_init_val[i].mask,
+ msm8x16_wcd_codec_reg_init_val[i].val);
+}
+
+static void msm8x16_wcd_update_reg_defaults(struct snd_soc_codec *codec)
+{
+ u32 i;
+ struct wcd_chip *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
+
+ if (TOMBAK_IS_1_0(msm8x16_wcd->pmic_rev)) {
+ for (i = 0; i < ARRAY_SIZE(msm8x16_wcd_reg_defaults); i++)
+ snd_soc_write(codec, msm8x16_wcd_reg_defaults[i].reg,
+ msm8x16_wcd_reg_defaults[i].val);
+ } else {
+ for (i = 0; i < ARRAY_SIZE(msm8x16_wcd_reg_defaults_2_0); i++)
+ snd_soc_write(codec,
+ msm8x16_wcd_reg_defaults_2_0[i].reg,
+ msm8x16_wcd_reg_defaults_2_0[i].val);
+ }
+}
+
+static int msm8x16_wcd_device_up(struct snd_soc_codec *codec)
+{
+ u32 reg;
+
+ dev_dbg(codec->dev, "%s: device up!\n", __func__);
+ msm8x16_wcd_bringup(codec);
+
+ for (reg = 0; reg < ARRAY_SIZE(msm8x16_wcd_reset_reg_defaults); reg++)
+ if (msm8x16_wcd_reg_readable[reg])
+ msm8x16_wcd_write(codec,
+ reg, msm8x16_wcd_reset_reg_defaults[reg]);
+
+ /* delay is required to make sure sound card state updated */
+ usleep_range(5000, 5100);
+
+ msm8x16_wcd_codec_init_reg(codec);
+ msm8x16_wcd_update_reg_defaults(codec);
+
+
+ return 0;
+}
+
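+/*
+ * Gate the codec core clocks on or off. Bit 0 of CDC_TOP_CTL is set for a
+ * 9.6 MHz MCLK and cleared for 12.288 MHz.
+ */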
+static int msm8x16_wcd_codec_enable_clock_block(struct snd_soc_codec *codec,
+ int enable)
+{
+ struct wcd_chip *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
+ unsigned long mclk_rate;
+
+ if (enable) {
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_CLK_MCLK_CTL, 0x01, 0x01);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_CLK_PDM_CTL, 0x03, 0x03);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_MASTER_BIAS_CTL, 0x30, 0x30);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_RST_CTL, 0x80, 0x80);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_TOP_CLK_CTL, 0x0C, 0x0C);
+
+ mclk_rate = clk_get_rate(msm8x16_wcd->mclk);
+
+ if (mclk_rate == 12288000)
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_TOP_CTL, 0x01, 0x00);
+ else if (mclk_rate == 9600000)
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_TOP_CTL, 0x01, 0x01);
+ } else {
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_TOP_CLK_CTL, 0x0C, 0x00);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_CLK_PDM_CTL, 0x03, 0x00);
+
+ }
+ return 0;
+}
+
+static int msm8x16_wcd_codec_probe(struct snd_soc_codec *codec)
+{
+ struct wcd_chip *chip = dev_get_drvdata(codec->dev);
+ int err;
+
+ snd_soc_codec_set_drvdata(codec, chip);
+ chip->pmic_rev = snd_soc_read(codec, MSM8X16_WCD_A_DIGITAL_REVISION1);
+ dev_info(codec->dev, "%s :PMIC REV: %d", __func__,
+ chip->pmic_rev);
+
+ chip->codec_version = snd_soc_read(codec,
+ MSM8X16_WCD_A_DIGITAL_PERPH_SUBTYPE);
+ dev_info(codec->dev, "%s :CODEC Version: %d", __func__,
+ chip->codec_version);
+
+ msm8x16_wcd_device_up(codec);
+
+ err = regulator_enable(chip->vddio);
+ if (err < 0) {
+ dev_err(codec->dev, "failed to enable VDD regulator\n");
+ return err;
+ }
+
+ err = regulator_enable(chip->vdd_pa);
+ if (err < 0) {
+ dev_err(codec->dev, "failed to enable VDD regulator\n");
+ return err;
+ }
+
+ msm8x16_wcd_codec_enable_clock_block(codec, 1);
+
+ return 0;
+}
+
+static int msm8x16_wcd_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ dev_dbg(dai->codec->dev, "%s(): substream = %s stream = %d\n",
+ __func__,
+ substream->name, substream->stream);
+ return 0;
+}
+
+static void msm8x16_wcd_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ dev_dbg(dai->codec->dev,
+ "%s(): substream = %s stream = %d\n", __func__,
+ substream->name, substream->stream);
+}
+
+static int msm8x16_wcd_set_interpolator_rate(struct snd_soc_dai *dai,
+ u8 rx_fs_rate_reg_val, u32 sample_rate)
+{
+ return 0;
+}
+
+static int msm8x16_wcd_set_decimator_rate(struct snd_soc_dai *dai,
+ u8 tx_fs_rate_reg_val, u32 sample_rate)
+{
+ return 0;
+}
+
+static int msm8x16_wcd_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ u8 tx_fs_rate, rx_fs_rate;
+ int ret;
+
+ dev_dbg(dai->codec->dev,
+ "%s: dai_name = %s DAI-ID %x rate %d num_ch %d format %d\n",
+ __func__, dai->name, dai->id, params_rate(params),
+ params_channels(params), params_format(params));
+
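+ /*
+ * Map the stream rate to the codec fs codes: TX (decimator) codes run
+ * 0-5, RX (interpolator) codes are the same index shifted into bits 7:5.
+ */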
+ switch (params_rate(params)) {
+ case 8000:
+ tx_fs_rate = 0x00;
+ rx_fs_rate = 0x00;
+ break;
+ case 16000:
+ tx_fs_rate = 0x01;
+ rx_fs_rate = 0x20;
+ break;
+ case 32000:
+ tx_fs_rate = 0x02;
+ rx_fs_rate = 0x40;
+ break;
+ case 48000:
+ tx_fs_rate = 0x03;
+ rx_fs_rate = 0x60;
+ break;
+ case 96000:
+ tx_fs_rate = 0x04;
+ rx_fs_rate = 0x80;
+ break;
+ case 192000:
+ tx_fs_rate = 0x05;
+ rx_fs_rate = 0xA0;
+ break;
+ default:
+ dev_err(dai->codec->dev,
+ "%s: Invalid sampling rate %d\n", __func__,
+ params_rate(params));
+ return -EINVAL;
+ }
+
+ switch (substream->stream) {
+ case SNDRV_PCM_STREAM_CAPTURE:
+ ret = msm8x16_wcd_set_decimator_rate(dai, tx_fs_rate,
+ params_rate(params));
+ if (ret < 0) {
+ dev_err(dai->codec->dev,
+ "%s: set decimator rate failed %d\n", __func__,
+ ret);
+ return ret;
+ }
+ break;
+ case SNDRV_PCM_STREAM_PLAYBACK:
+ ret = msm8x16_wcd_set_interpolator_rate(dai, rx_fs_rate,
+ params_rate(params));
+ if (ret < 0) {
+ dev_err(dai->codec->dev,
+ "%s: set interpolator rate failed %d\n", __func__,
+ ret);
+ return ret;
+ }
+ break;
+ default:
+ dev_err(dai->codec->dev,
+ "%s: Invalid stream type %d\n", __func__,
+ substream->stream);
+ return -EINVAL;
+ }
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ snd_soc_update_bits(dai->codec,
+ MSM8X16_WCD_A_CDC_CLK_RX_I2S_CTL, 0x20, 0x20);
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ snd_soc_update_bits(dai->codec,
+ MSM8X16_WCD_A_CDC_CLK_RX_I2S_CTL, 0x20, 0x00);
+ break;
+ default:
+ dev_err(dai->dev, "%s: wrong format selected\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int msm8x16_wcd_set_dai_sysclk(struct snd_soc_dai *dai,
+ int clk_id, unsigned int freq, int dir)
+{
+ dev_dbg(dai->codec->dev, "%s\n", __func__);
+ return 0;
+}
+
+static int msm8x16_wcd_set_channel_map(struct snd_soc_dai *dai,
+ unsigned int tx_num, unsigned int *tx_slot,
+ unsigned int rx_num, unsigned int *rx_slot)
+
+{
+ dev_dbg(dai->codec->dev, "%s\n", __func__);
+ return 0;
+}
+
+static int msm8x16_wcd_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ dev_dbg(dai->codec->dev, "%s\n", __func__);
+
+ return 0;
+}
+
+static struct snd_soc_dai_ops msm8x16_wcd_dai_ops = {
+ .startup = msm8x16_wcd_startup,
+ .shutdown = msm8x16_wcd_shutdown,
+ .hw_params = msm8x16_wcd_hw_params,
+ .set_sysclk = msm8x16_wcd_set_dai_sysclk,
+ .set_fmt = msm8x16_wcd_set_dai_fmt,
+ .set_channel_map = msm8x16_wcd_set_channel_map,
+};
+
+static struct snd_soc_dai_driver msm8x16_wcd_codec_dai[] = {
+ {
+ .name = "msm8x16_wcd_i2s_rx1",
+ .id = AIF1_PB,
+ .playback = {
+ .stream_name = "AIF1 Playback",
+ .rates = MSM8X16_WCD_RATES,
+ .formats = MSM8X16_WCD_FORMATS,
+ .rate_max = 192000,
+ .rate_min = 8000,
+ .channels_min = 1,
+ .channels_max = 3,
+ },
+ .ops = &msm8x16_wcd_dai_ops,
+ },
+ {
+ .name = "msm8x16_wcd_i2s_tx1",
+ .id = AIF1_CAP,
+ .capture = {
+ .stream_name = "AIF1 Capture",
+ .rates = MSM8X16_WCD_RATES,
+ .formats = MSM8X16_WCD_FORMATS,
+ .rate_max = 192000,
+ .rate_min = 8000,
+ .channels_min = 1,
+ .channels_max = 4,
+ },
+ .ops = &msm8x16_wcd_dai_ops,
+ },
+};
+
+static int msm8x16_wcd_codec_remove(struct snd_soc_codec *codec)
+{
+ /* TODO */
+ return 0;
+}
+
+
+static int msm8x16_wcd_spk_boost_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct wcd_chip *msm8x16_wcd = dev_get_drvdata(codec->dev);
+
+ if (msm8x16_wcd->spk_boost_set == false) {
+ ucontrol->value.integer.value[0] = 0;
+ } else if (msm8x16_wcd->spk_boost_set == true) {
+ ucontrol->value.integer.value[0] = 1;
+ } else {
+ dev_err(codec->dev, "%s: ERROR: Unsupported Speaker Boost = %d\n",
+ __func__, msm8x16_wcd->spk_boost_set);
+ return -EINVAL;
+ }
+
+ dev_dbg(codec->dev, "%s: msm8x16_wcd->spk_boost_set = %d\n", __func__,
+ msm8x16_wcd->spk_boost_set);
+ return 0;
+}
+
+static int msm8x16_wcd_spk_boost_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
+ struct wcd_chip *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
+
+
+ switch (ucontrol->value.integer.value[0]) {
+ case 0:
+ msm8x16_wcd->spk_boost_set = false;
+ break;
+ case 1:
+ msm8x16_wcd->spk_boost_set = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ dev_dbg(codec->dev, "%s: msm8x16_wcd->spk_boost_set = %d\n",
+ __func__, msm8x16_wcd->spk_boost_set);
+ return 0;
+}
+
+static const char * const hph_text[] = {
+ "ZERO", "Switch",
+};
+
+static const struct soc_enum hph_enum =
+ SOC_ENUM_SINGLE(0, 0, ARRAY_SIZE(hph_text), hph_text);
+
+
+static const struct snd_kcontrol_new hphl_mux[] = {
+ SOC_DAPM_ENUM("HPHL", hph_enum)
+};
+
+static const struct snd_kcontrol_new hphr_mux[] = {
+ SOC_DAPM_ENUM("HPHR", hph_enum)
+};
+
+static const struct snd_kcontrol_new spkr_switch[] = {
+ SOC_DAPM_SINGLE("Switch",
+ MSM8X16_WCD_A_ANALOG_SPKR_DAC_CTL, 7, 1, 0)
+};
+
+static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
+static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
+
+static const struct snd_soc_dapm_route audio_map[] = {
+ {"RX_I2S_CLK", NULL, "CDC_CONN"},
+ {"I2S RX1", NULL, "RX_I2S_CLK"},
+ {"I2S RX2", NULL, "RX_I2S_CLK"},
+ {"I2S RX3", NULL, "RX_I2S_CLK"},
+
+ {"I2S TX1", NULL, "TX_I2S_CLK"},
+ {"I2S TX2", NULL, "TX_I2S_CLK"},
+
+ {"I2S TX1", NULL, "DEC1 MUX"},
+ {"I2S TX2", NULL, "DEC2 MUX"},
+
+ /* RDAC Connections */
+ {"HPHR DAC", NULL, "RDAC2 MUX"},
+ {"RDAC2 MUX", "RX1", "RX1 CHAIN"},
+ {"RDAC2 MUX", "RX2", "RX2 CHAIN"},
+
+ /* Earpiece (RX MIX1) */
+ {"EAR", NULL, "EAR_S"},
+ {"EAR_S", "Switch", "EAR PA"},
+ {"EAR PA", NULL, "RX_BIAS"},
+ {"EAR PA", NULL, "HPHL DAC"},
+ {"EAR PA", NULL, "HPHR DAC"},
+ {"EAR PA", NULL, "EAR CP"},
+
+ /* Headset (RX MIX1 and RX MIX2) */
+ {"HEADPHONE", NULL, "HPHL PA"},
+ {"HEADPHONE", NULL, "HPHR PA"},
+
+
+ {"HPHL PA", NULL, "HPHL"},
+ {"HPHR PA", NULL, "HPHR"},
+ {"HPHL", "Switch", "HPHL DAC"},
+ {"HPHR", "Switch", "HPHR DAC"},
+ {"HPHL PA", NULL, "CP"},
+ {"HPHL PA", NULL, "RX_BIAS"},
+ {"HPHR PA", NULL, "CP"},
+ {"HPHR PA", NULL, "RX_BIAS"},
+ {"HPHL DAC", NULL, "RX1 CHAIN"},
+
+ {"SPK_OUT", NULL, "SPK PA"},
+ {"SPK PA", NULL, "SPK_RX_BIAS"},
+ {"SPK PA", NULL, "SPK DAC"},
+ {"SPK DAC", "Switch", "RX3 CHAIN"},
+
+ {"RX1 CHAIN", NULL, "RX1 CLK"},
+ {"RX2 CHAIN", NULL, "RX2 CLK"},
+ {"RX3 CHAIN", NULL, "RX3 CLK"},
+ {"RX1 CHAIN", NULL, "RX1 MIX2"},
+ {"RX2 CHAIN", NULL, "RX2 MIX2"},
+ {"RX1 CHAIN", NULL, "RX1 MIX1"},
+ {"RX2 CHAIN", NULL, "RX2 MIX1"},
+ {"RX3 CHAIN", NULL, "RX3 MIX1"},
+
+ {"RX1 MIX1", NULL, "RX1 MIX1 INP1"},
+ {"RX1 MIX1", NULL, "RX1 MIX1 INP2"},
+ {"RX1 MIX1", NULL, "RX1 MIX1 INP3"},
+
+ {"RX2 MIX1", NULL, "RX2 MIX1 INP1"},
+ {"RX2 MIX1", NULL, "RX2 MIX1 INP2"},
+
+ {"RX3 MIX1", NULL, "RX3 MIX1 INP1"},
+ {"RX3 MIX1", NULL, "RX3 MIX1 INP2"},
+
+ {"RX1 MIX2", NULL, "RX1 MIX2 INP1"},
+ {"RX2 MIX2", NULL, "RX2 MIX2 INP1"},
+
+ {"RX1 MIX1 INP1", "RX1", "I2S RX1"},
+ {"RX1 MIX1 INP1", "RX2", "I2S RX2"},
+ {"RX1 MIX1 INP1", "RX3", "I2S RX3"},
+ {"RX1 MIX1 INP1", "IIR1", "IIR1"},
+ {"RX1 MIX1 INP2", "RX1", "I2S RX1"},
+ {"RX1 MIX1 INP2", "RX2", "I2S RX2"},
+ {"RX1 MIX1 INP2", "RX3", "I2S RX3"},
+ {"RX1 MIX1 INP2", "IIR1", "IIR1"},
+ {"RX1 MIX1 INP3", "RX1", "I2S RX1"},
+ {"RX1 MIX1 INP3", "RX2", "I2S RX2"},
+ {"RX1 MIX1 INP3", "RX3", "I2S RX3"},
+
+ {"RX2 MIX1 INP1", "RX1", "I2S RX1"},
+ {"RX2 MIX1 INP1", "RX2", "I2S RX2"},
+ {"RX2 MIX1 INP1", "RX3", "I2S RX3"},
+ {"RX2 MIX1 INP1", "IIR1", "IIR1"},
+ {"RX2 MIX1 INP2", "RX1", "I2S RX1"},
+ {"RX2 MIX1 INP2", "RX2", "I2S RX2"},
+ {"RX2 MIX1 INP2", "RX3", "I2S RX3"},
+ {"RX2 MIX1 INP2", "IIR1", "IIR1"},
+
+ {"RX3 MIX1 INP1", "RX1", "I2S RX1"},
+ {"RX3 MIX1 INP1", "RX2", "I2S RX2"},
+ {"RX3 MIX1 INP1", "RX3", "I2S RX3"},
+ {"RX3 MIX1 INP1", "IIR1", "IIR1"},
+ {"RX3 MIX1 INP2", "RX1", "I2S RX1"},
+ {"RX3 MIX1 INP2", "RX2", "I2S RX2"},
+ {"RX3 MIX1 INP2", "RX3", "I2S RX3"},
+ {"RX3 MIX1 INP2", "IIR1", "IIR1"},
+
+ {"RX1 MIX2 INP1", "IIR1", "IIR1"},
+ {"RX2 MIX2 INP1", "IIR1", "IIR1"},
+
+ /* Decimator Inputs */
+ {"DEC1 MUX", "DMIC1", "DMIC1"},
+ {"DEC1 MUX", "DMIC2", "DMIC2"},
+ {"DEC1 MUX", "ADC1", "ADC1"},
+ {"DEC1 MUX", "ADC2", "ADC2"},
+ {"DEC1 MUX", "ADC3", "ADC3"},
+ {"DEC1 MUX", NULL, "CDC_CONN"},
+
+ {"DEC2 MUX", "DMIC1", "DMIC1"},
+ {"DEC2 MUX", "DMIC2", "DMIC2"},
+ {"DEC2 MUX", "ADC1", "ADC1"},
+ {"DEC2 MUX", "ADC2", "ADC2"},
+ {"DEC2 MUX", "ADC3", "ADC3"},
+ {"DEC2 MUX", NULL, "CDC_CONN"},
+
+ /* ADC Connections */
+ {"ADC2", NULL, "ADC2 MUX"},
+ {"ADC3", NULL, "ADC2 MUX"},
+ {"ADC2 MUX", "INP2", "ADC2_INP2"},
+ {"ADC2 MUX", "INP3", "ADC2_INP3"},
+
+ {"ADC1", NULL, "AMIC1"},
+ {"ADC2_INP2", NULL, "AMIC2"},
+ {"ADC2_INP3", NULL, "AMIC3"},
+
+ /* TODO: Fix this */
+ {"IIR1", NULL, "IIR1 INP1 MUX"},
+ {"IIR1 INP1 MUX", "DEC1", "DEC1 MUX"},
+ {"IIR1 INP1 MUX", "DEC2", "DEC2 MUX"},
+ {"MIC BIAS Internal1", NULL, "INT_LDO_H"},
+ {"MIC BIAS Internal2", NULL, "INT_LDO_H"},
+ {"MIC BIAS External", NULL, "INT_LDO_H"},
+ {"MIC BIAS External2", NULL, "INT_LDO_H"},
+ {"MIC BIAS Internal1", NULL, "MICBIAS_REGULATOR"},
+ {"MIC BIAS Internal2", NULL, "MICBIAS_REGULATOR"},
+ {"MIC BIAS External", NULL, "MICBIAS_REGULATOR"},
+ {"MIC BIAS External2", NULL, "MICBIAS_REGULATOR"},
+};
+
+static const char * const rx_mix1_text[] = {
+ "ZERO", "IIR1", "IIR2", "RX1", "RX2", "RX3"
+};
+
+static const char * const rx_mix2_text[] = {
+ "ZERO", "IIR1", "IIR2"
+};
+
+static const char * const dec_mux_text[] = {
+ "ZERO", "ADC1", "ADC2", "ADC3", "DMIC1", "DMIC2"
+};
+
+static const char * const adc2_mux_text[] = {
+ "ZERO", "INP2", "INP3"
+};
+
+static const char * const rdac2_mux_text[] = {
+ "ZERO", "RX2", "RX1"
+};
+
+static const char * const iir1_inp1_text[] = {
+ "ZERO", "DEC1", "DEC2", "RX1", "RX2", "RX3"
+};
+
+/* RX1 MIX1 */
+static const struct soc_enum rx_mix1_inp1_chain_enum =
+ SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_CONN_RX1_B1_CTL,
+ 0, 6, rx_mix1_text);
+
+static const struct soc_enum rx_mix1_inp2_chain_enum =
+ SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_CONN_RX1_B1_CTL,
+ 3, 6, rx_mix1_text);
+
+static const struct soc_enum rx_mix1_inp3_chain_enum =
+ SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_CONN_RX1_B2_CTL,
+ 0, 6, rx_mix1_text);
+/* RX1 MIX2 */
+static const struct soc_enum rx_mix2_inp1_chain_enum =
+ SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_CONN_RX1_B3_CTL,
+ 0, 3, rx_mix2_text);
+
+/* RX2 MIX1 */
+static const struct soc_enum rx2_mix1_inp1_chain_enum =
+ SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_CONN_RX2_B1_CTL,
+ 0, 6, rx_mix1_text);
+
+static const struct soc_enum rx2_mix1_inp2_chain_enum =
+ SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_CONN_RX2_B1_CTL,
+ 3, 6, rx_mix1_text);
+
+static const struct soc_enum rx2_mix1_inp3_chain_enum =
+ SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_CONN_RX2_B1_CTL,
+ 0, 6, rx_mix1_text);
+
+/* RX2 MIX2 */
+static const struct soc_enum rx2_mix2_inp1_chain_enum =
+ SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_CONN_RX2_B3_CTL,
+ 0, 3, rx_mix2_text);
+
+/* RX3 MIX1 */
+static const struct soc_enum rx3_mix1_inp1_chain_enum =
+ SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_CONN_RX3_B1_CTL,
+ 0, 6, rx_mix1_text);
+
+static const struct soc_enum rx3_mix1_inp2_chain_enum =
+ SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_CONN_RX3_B1_CTL,
+ 3, 6, rx_mix1_text);
+
+static const struct soc_enum rdac2_mux_enum =
+ SOC_ENUM_SINGLE(MSM8X16_WCD_A_DIGITAL_CDC_CONN_HPHR_DAC_CTL,
+ 0, 3, rdac2_mux_text);
+
+static const struct soc_enum rx3_mix1_inp3_chain_enum =
+ SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_CONN_RX3_B1_CTL,
+ 0, 6, rx_mix1_text);
+static const struct snd_kcontrol_new rx_mix1_inp1_mux =
+ SOC_DAPM_ENUM("RX1 MIX1 INP1 Mux", rx_mix1_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rdac2_mux =
+ SOC_DAPM_ENUM("RDAC2 MUX Mux", rdac2_mux_enum);
+
+static const struct snd_kcontrol_new rx_mix1_inp2_mux =
+ SOC_DAPM_ENUM("RX1 MIX1 INP2 Mux", rx_mix1_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx_mix1_inp3_mux =
+ SOC_DAPM_ENUM("RX1 MIX1 INP3 Mux", rx_mix1_inp3_chain_enum);
+
+static const struct snd_kcontrol_new rx2_mix1_inp1_mux =
+ SOC_DAPM_ENUM("RX2 MIX1 INP1 Mux", rx2_mix1_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx2_mix1_inp2_mux =
+ SOC_DAPM_ENUM("RX2 MIX1 INP2 Mux", rx2_mix1_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx2_mix1_inp3_mux =
+ SOC_DAPM_ENUM("RX2 MIX1 INP3 Mux", rx2_mix1_inp3_chain_enum);
+
+static const struct snd_kcontrol_new rx3_mix1_inp1_mux =
+ SOC_DAPM_ENUM("RX3 MIX1 INP1 Mux", rx3_mix1_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx3_mix1_inp2_mux =
+ SOC_DAPM_ENUM("RX3 MIX1 INP2 Mux", rx3_mix1_inp2_chain_enum);
+
+static const struct snd_kcontrol_new rx3_mix1_inp3_mux =
+ SOC_DAPM_ENUM("RX3 MIX1 INP3 Mux", rx3_mix1_inp3_chain_enum);
+
+static const struct snd_kcontrol_new rx1_mix2_inp1_mux =
+ SOC_DAPM_ENUM("RX1 MIX2 INP1 Mux", rx_mix2_inp1_chain_enum);
+
+static const struct snd_kcontrol_new rx2_mix2_inp1_mux =
+ SOC_DAPM_ENUM("RX2 MIX2 INP1 Mux", rx2_mix2_inp1_chain_enum);
+
+static const char * const msm8x16_wcd_loopback_mode_ctrl_text[] = {
+ "DISABLE", "ENABLE"};
+
+static const struct soc_enum msm8x16_wcd_loopback_mode_ctl_enum[] = {
+ SOC_ENUM_SINGLE_EXT(2, msm8x16_wcd_loopback_mode_ctrl_text),
+};
+
+static const char * const msm8x16_wcd_ear_pa_boost_ctrl_text[] = {
+ "DISABLE", "ENABLE"};
+static const struct soc_enum msm8x16_wcd_ear_pa_boost_ctl_enum[] = {
+ SOC_ENUM_SINGLE_EXT(2, msm8x16_wcd_ear_pa_boost_ctrl_text),
+};
+
+static const char * const msm8x16_wcd_ear_pa_gain_text[] = {
+ "POS_6_DB", "POS_1P5_DB"};
+static const struct soc_enum msm8x16_wcd_ear_pa_gain_enum[] = {
+ SOC_ENUM_SINGLE_EXT(2, msm8x16_wcd_ear_pa_gain_text),
+};
+
+static const char * const msm8x16_wcd_spk_boost_ctrl_text[] = {
+ "DISABLE", "ENABLE"};
+static const struct soc_enum msm8x16_wcd_spk_boost_ctl_enum[] = {
+ SOC_ENUM_SINGLE_EXT(2, msm8x16_wcd_spk_boost_ctrl_text),
+};
+
+/* cut-off frequency for the high pass filter */
+static const char * const cf_text[] = {
+ "MIN_3DB_4Hz", "MIN_3DB_75Hz", "MIN_3DB_150Hz"
+};
+
+static const struct soc_enum cf_dec1_enum =
+ SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_TX1_MUX_CTL, 4, 3, cf_text);
+
+static const struct soc_enum cf_dec2_enum =
+ SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_TX2_MUX_CTL, 4, 3, cf_text);
+
+static const struct soc_enum cf_rxmix1_enum =
+ SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_RX1_B4_CTL, 0, 3, cf_text);
+
+static const struct soc_enum cf_rxmix2_enum =
+ SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_RX2_B4_CTL, 0, 3, cf_text);
+
+static const struct soc_enum cf_rxmix3_enum =
+ SOC_ENUM_SINGLE(MSM8X16_WCD_A_CDC_RX3_B4_CTL, 0, 3, cf_text);
+
+static const struct snd_kcontrol_new msm8x16_wcd_snd_controls[] = {
+
+ SOC_ENUM_EXT("Speaker Boost", msm8x16_wcd_spk_boost_ctl_enum[0],
+ msm8x16_wcd_spk_boost_get, msm8x16_wcd_spk_boost_set),
+
+ SOC_SINGLE_TLV("ADC1 Volume", MSM8X16_WCD_A_ANALOG_TX_1_EN, 3,
+ 8, 0, analog_gain),
+ SOC_SINGLE_TLV("ADC2 Volume", MSM8X16_WCD_A_ANALOG_TX_2_EN, 3,
+ 8, 0, analog_gain),
+ SOC_SINGLE_TLV("ADC3 Volume", MSM8X16_WCD_A_ANALOG_TX_3_EN, 3,
+ 8, 0, analog_gain),
+
+ SOC_SINGLE_SX_TLV("RX1 Digital Volume",
+ MSM8X16_WCD_A_CDC_RX1_VOL_CTL_B2_CTL,
+ 0, -84, 40, digital_gain),
+ SOC_SINGLE_SX_TLV("RX2 Digital Volume",
+ MSM8X16_WCD_A_CDC_RX2_VOL_CTL_B2_CTL,
+ 0, -84, 40, digital_gain),
+ SOC_SINGLE_SX_TLV("RX3 Digital Volume",
+ MSM8X16_WCD_A_CDC_RX3_VOL_CTL_B2_CTL,
+ 0, -84, 40, digital_gain),
+
+ SOC_SINGLE("RX1 HPF Switch",
+ MSM8X16_WCD_A_CDC_RX1_B5_CTL, 2, 1, 0),
+ SOC_SINGLE("RX2 HPF Switch",
+ MSM8X16_WCD_A_CDC_RX2_B5_CTL, 2, 1, 0),
+ SOC_SINGLE("RX3 HPF Switch",
+ MSM8X16_WCD_A_CDC_RX3_B5_CTL, 2, 1, 0),
+
+ SOC_ENUM("RX1 HPF cut off", cf_rxmix1_enum),
+ SOC_ENUM("RX2 HPF cut off", cf_rxmix2_enum),
+ SOC_ENUM("RX3 HPF cut off", cf_rxmix3_enum),
+};
+
+
+static const struct snd_kcontrol_new ear_pa_switch[] = {
+ SOC_DAPM_SINGLE("Switch",
+ MSM8X16_WCD_A_ANALOG_RX_EAR_CTL, 5, 1, 0)
+};
+
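+/*
+ * Earpiece PA handler: the RX1 path is soft-muted before power-down and
+ * EAR_PA_DISABLE is flagged so the interpolator handler can unmute it later.
+ */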
+static int msm8x16_wcd_codec_enable_ear_pa(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct wcd_chip *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ dev_dbg(codec->dev,
+ "%s: selecting EAR PA\n",
+ __func__);
+ snd_soc_update_bits(codec, MSM8X16_WCD_A_ANALOG_RX_EAR_CTL,
+ 0x80, 0x80);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ dev_dbg(codec->dev,
+ "%s: Sleeping 7ms after enabling EAR PA\n",
+ __func__);
+ snd_soc_update_bits(codec, MSM8X16_WCD_A_ANALOG_RX_EAR_CTL,
+ 0x40, 0x40);
+ usleep_range(7000, 7100);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_RX1_B6_CTL, 0x01, 0x00);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_RX1_B6_CTL, 0x01, 0x01);
+ msleep(20);
+ msm8x16_wcd->mute_mask |= EAR_PA_DISABLE;
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ dev_dbg(codec->dev,
+ "%s: Sleeping 7ms after disabling EAR PA\n",
+ __func__);
+ snd_soc_update_bits(codec, MSM8X16_WCD_A_ANALOG_RX_EAR_CTL,
+ 0x40, 0x00);
+ usleep_range(7000, 7100);
+ /*
+ * Reset pa select bit from ear to hph after ear pa
+ * is disabled to reduce ear turn off pop
+ */
+ snd_soc_update_bits(codec, MSM8X16_WCD_A_ANALOG_RX_EAR_CTL,
+ 0x80, 0x00);
+ break;
+ }
+ return 0;
+}
+
+
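+/*
+ * Speaker PA handler; the power-up/down sequence differs depending on
+ * whether speaker boost is enabled.
+ */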
+static int msm8x16_wcd_codec_enable_spk_pa(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct wcd_chip *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_ANA_CLK_CTL, 0x10, 0x10);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_SPKR_PWRSTG_CTL, 0x01, 0x01);
+ if (!msm8x16_wcd->spk_boost_set)
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_SPKR_DAC_CTL, 0x10, 0x10);
+ usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_SPKR_PWRSTG_CTL, 0xE0, 0xE0);
+ if (!TOMBAK_IS_1_0(msm8x16_wcd->pmic_rev))
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_RX_EAR_CTL, 0x01, 0x01);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+ if (msm8x16_wcd->spk_boost_set)
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_SPKR_DRV_CTL, 0xEF, 0xEF);
+ else
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_SPKR_DAC_CTL, 0x10, 0x00);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_RX3_B6_CTL, 0x01, 0x00);
+ snd_soc_update_bits(codec, w->reg, 0x80, 0x80);
+ break;
+ case SND_SOC_DAPM_PRE_PMD:
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_RX3_B6_CTL, 0x01, 0x01);
+ msleep(20);
+ msm8x16_wcd->mute_mask |= SPKR_PA_DISABLE;
+ snd_soc_update_bits(codec, w->reg, 0x80, 0x00);
+ if (msm8x16_wcd->spk_boost_set)
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_SPKR_DRV_CTL, 0xEF, 0x00);
+ else
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_SPKR_DAC_CTL, 0x10, 0x00);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_SPKR_PWRSTG_CTL, 0xE0, 0x00);
+ if (!TOMBAK_IS_1_0(msm8x16_wcd->pmic_rev))
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_RX_EAR_CTL, 0x01, 0x00);
+ usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_SPKR_PWRSTG_CTL, 0x01, 0x00);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_ANA_CLK_CTL, 0x10, 0x00);
+ break;
+ }
+ return 0;
+}
+
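+/*
+ * After an interpolator powers up, re-write its gain register so the digital
+ * volume takes effect; on power-down, reset the path and clear any soft mute
+ * left pending in mute_mask by the PA handlers.
+ */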
+static int msm8x16_wcd_codec_enable_interpolator(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct wcd_chip *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ /* apply the digital gain after the interpolator is enabled*/
+ if ((w->shift) < ARRAY_SIZE(rx_digital_gain_reg))
+ snd_soc_write(codec,
+ rx_digital_gain_reg[w->shift],
+ snd_soc_read(codec,
+ rx_digital_gain_reg[w->shift])
+ );
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_CLK_RX_RESET_CTL,
+ 1 << w->shift, 1 << w->shift);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_CLK_RX_RESET_CTL,
+ 1 << w->shift, 0x0);
+ /*
+ * disable the mute enabled during the PMD of this device
+ */
+ if (msm8x16_wcd->mute_mask & HPHL_PA_DISABLE) {
+ pr_debug("disabling HPHL mute\n");
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_RX1_B6_CTL, 0x01, 0x00);
+ msm8x16_wcd->mute_mask &= ~(HPHL_PA_DISABLE);
+ }
+ if (msm8x16_wcd->mute_mask & HPHR_PA_DISABLE) {
+ pr_debug("disabling HPHR mute\n");
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_RX2_B6_CTL, 0x01, 0x00);
+ msm8x16_wcd->mute_mask &= ~(HPHR_PA_DISABLE);
+ }
+ if (msm8x16_wcd->mute_mask & SPKR_PA_DISABLE) {
+ pr_debug("disabling SPKR mute\n");
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_RX3_B6_CTL, 0x01, 0x00);
+ msm8x16_wcd->mute_mask &= ~(SPKR_PA_DISABLE);
+ }
+ if (msm8x16_wcd->mute_mask & EAR_PA_DISABLE) {
+ pr_debug("disabling EAR mute\n");
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_RX1_B6_CTL, 0x01, 0x00);
+ msm8x16_wcd->mute_mask &= ~(EAR_PA_DISABLE);
+ }
+ }
+ return 0;
+}
+
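+/*
+ * Digital clock supply handler: when speaker or ear PA boost is enabled the
+ * boost/bypass hardware is programmed here, otherwise only this widget's
+ * clock-enable bit is toggled.
+ */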
+static int msm8x16_wcd_codec_enable_dig_clk(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct wcd_chip *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (w->shift == 2)
+ snd_soc_update_bits(codec, w->reg, 0x80, 0x80);
+ if (msm8x16_wcd->spk_boost_set) {
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_SEC_ACCESS,
+ 0xA5, 0xA5);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_PERPH_RESET_CTL3,
+ 0x0F, 0x0F);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_CURRENT_LIMIT,
+ 0x82, 0x82);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL,
+ 0x20, 0x20);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_BOOST_EN_CTL,
+ 0xDF, 0xDF);
+ usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_CURRENT_LIMIT,
+ 0x83, 0x83);
+ } else if (msm8x16_wcd->ear_pa_boost_set) {
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_SEC_ACCESS,
+ 0xA5, 0xA5);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_PERPH_RESET_CTL3,
+ 0x07, 0x07);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_BYPASS_MODE,
+ 0x40, 0x40);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_BYPASS_MODE,
+ 0x80, 0x80);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_BYPASS_MODE,
+ 0x02, 0x02);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_BOOST_EN_CTL,
+ 0xDF, 0xDF);
+ } else {
+ snd_soc_update_bits(codec, w->reg, 1<<w->shift,
+ 1<<w->shift);
+ }
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ if (msm8x16_wcd->spk_boost_set) {
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_BOOST_EN_CTL,
+ 0xDF, 0x5F);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL,
+ 0x20, 0x00);
+ } else if (msm8x16_wcd->ear_pa_boost_set) {
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_BOOST_EN_CTL,
+ 0x80, 0x00);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_BYPASS_MODE,
+ 0x80, 0x00);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_BYPASS_MODE,
+ 0x02, 0x00);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_BYPASS_MODE,
+ 0x40, 0x00);
+ } else {
+ snd_soc_update_bits(codec, w->reg, 1<<w->shift, 0x00);
+ }
+ break;
+ }
+ return 0;
+}
+
+static int msm8x16_wcd_codec_enable_rx_chain(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL,
+ 0x80, 0x80);
+ dev_dbg(codec->dev,
+ "%s: PMU: enabled RX chain digital clock\n",
+ __func__);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL,
+ 0x80, 0x00);
+ dev_dbg(codec->dev,
+ "%s: PMD: sleeping 20ms after disabling the RX chain\n",
+ __func__);
+ snd_soc_update_bits(codec, w->reg,
+ 1 << w->shift, 0x00);
+ msleep(20);
+ break;
+ }
+ return 0;
+}
+
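+/* RX bias is shared by all RX paths, so it is reference counted */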
+static int msm8x16_wcd_codec_enable_rx_bias(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct wcd_chip *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
+
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ msm8x16_wcd->rx_bias_count++;
+ if (msm8x16_wcd->rx_bias_count == 1)
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_RX_COM_BIAS_DAC,
+ 0x81, 0x81);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ msm8x16_wcd->rx_bias_count--;
+ if (msm8x16_wcd->rx_bias_count == 0)
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_RX_COM_BIAS_DAC,
+ 0x81, 0x00);
+ break;
+ }
+ dev_dbg(codec->dev, "%s rx_bias_count = %d\n",
+ __func__, msm8x16_wcd->rx_bias_count);
+ return 0;
+}
+
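+/*
+ * Charge pump supply handler: the EAR path only needs the 0x80 clock bit in
+ * CDC_DIG_CLK_CTL, while the headphone path enables 0xC0 and releases the
+ * shared 0x80 bit only once no RX bias users remain.
+ */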
+static int msm8x16_wcd_codec_enable_charge_pump(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct wcd_chip *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (!(strcmp(w->name, "EAR CP")))
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL,
+ 0x80, 0x80);
+ else
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL,
+ 0xC0, 0xC0);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+ if (!(strcmp(w->name, "EAR CP")))
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL,
+ 0x80, 0x00);
+ else {
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL,
+ 0x40, 0x00);
+ if (msm8x16_wcd->rx_bias_count == 0)
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL,
+ 0x80, 0x00);
+ dev_dbg(codec->dev, "%s: rx_bias_count = %d\n",
+ __func__, msm8x16_wcd->rx_bias_count);
+ }
+ break;
+ }
+ return 0;
+}
+
+static int msm8x16_wcd_hphl_dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_RX_HPH_L_PA_DAC_CTL, 0x02, 0x02);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL, 0x01, 0x01);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_ANA_CLK_CTL, 0x02, 0x02);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_RX_HPH_L_PA_DAC_CTL, 0x02, 0x00);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_ANA_CLK_CTL, 0x02, 0x00);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL, 0x01, 0x00);
+ break;
+ }
+ return 0;
+}
+
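+/*
+ * Headphone PA handler; w->shift 5 is HPHL and 4 is HPHR, matching the
+ * HPH_CNP_EN bits used by the "HPHL PA"/"HPHR PA" widgets below.
+ */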
+static int msm8x16_wcd_hph_pa_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+ struct wcd_chip *msm8x16_wcd = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ if (w->shift == 5) {
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_RX_HPH_L_TEST, 0x04, 0x04);
+ } else if (w->shift == 4) {
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_RX_HPH_R_TEST, 0x04, 0x04);
+ }
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_NCP_FBCTRL, 0x20, 0x20);
+ break;
+
+ case SND_SOC_DAPM_POST_PMU:
+ usleep_range(4000, 4100);
+ if (w->shift == 5)
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_RX1_B6_CTL, 0x01, 0x00);
+ else if (w->shift == 4)
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_RX2_B6_CTL, 0x01, 0x00);
+ usleep_range(10000, 10100);
+ break;
+
+ case SND_SOC_DAPM_PRE_PMD:
+ if (w->shift == 5) {
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_RX1_B6_CTL, 0x01, 0x01);
+ msleep(20);
+ msm8x16_wcd->mute_mask |= HPHL_PA_DISABLE;
+ } else if (w->shift == 4) {
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_CDC_RX2_B6_CTL, 0x01, 0x01);
+ msleep(20);
+ msm8x16_wcd->mute_mask |= HPHR_PA_DISABLE;
+ }
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ if (w->shift == 5) {
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_RX_HPH_L_TEST, 0x04, 0x00);
+
+ } else if (w->shift == 4) {
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_RX_HPH_R_TEST, 0x04, 0x00);
+ }
+ usleep_range(4000, 4100);
+
+ usleep_range(CODEC_DELAY_1_MS, CODEC_DELAY_1_1_MS);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL, 0x40, 0x40);
+ dev_dbg(codec->dev,
+ "%s: sleep 10 ms after %s PA disable.\n", __func__,
+ w->name);
+ usleep_range(10000, 10100);
+ break;
+ }
+ return 0;
+}
+
+static int msm8x16_wcd_hphr_dac_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_RX_HPH_R_PA_DAC_CTL, 0x02, 0x02);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL, 0x02, 0x02);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_ANA_CLK_CTL, 0x01, 0x01);
+ break;
+ case SND_SOC_DAPM_POST_PMU:
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_ANALOG_RX_HPH_R_PA_DAC_CTL, 0x02, 0x00);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_ANA_CLK_CTL, 0x01, 0x00);
+ snd_soc_update_bits(codec,
+ MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL, 0x02, 0x00);
+ break;
+ }
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget msm8x16_wcd_dapm_widgets[] = {
+ /*RX stuff */
+ SND_SOC_DAPM_OUTPUT("EAR"),
+
+ SND_SOC_DAPM_PGA_E("EAR PA", SND_SOC_NOPM,
+ 0, 0, NULL, 0, msm8x16_wcd_codec_enable_ear_pa,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MIXER("EAR_S", SND_SOC_NOPM, 0, 0,
+ ear_pa_switch, ARRAY_SIZE(ear_pa_switch)),
+
+ SND_SOC_DAPM_AIF_IN("I2S RX1", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
+
+ SND_SOC_DAPM_AIF_IN("I2S RX2", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
+
+ SND_SOC_DAPM_AIF_IN("I2S RX3", "AIF1 Playback", 0, SND_SOC_NOPM, 0, 0),
+
+ SND_SOC_DAPM_SUPPLY("INT_LDO_H", SND_SOC_NOPM, 1, 0, NULL, 0),
+
+ SND_SOC_DAPM_OUTPUT("HEADPHONE"),
+ SND_SOC_DAPM_PGA_E("HPHL PA", MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_EN,
+ 5, 0, NULL, 0,
+ msm8x16_wcd_hph_pa_event, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
+ SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_MUX("HPHL", SND_SOC_NOPM, 0, 0, hphl_mux),
+
+ SND_SOC_DAPM_MIXER_E("HPHL DAC",
+ MSM8X16_WCD_A_ANALOG_RX_HPH_L_PA_DAC_CTL, 3, 0, NULL,
+ 0, msm8x16_wcd_hphl_dac_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_PGA_E("HPHR PA", MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_EN,
+ 4, 0, NULL, 0,
+ msm8x16_wcd_hph_pa_event, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MUX("HPHR", SND_SOC_NOPM, 0, 0, hphr_mux),
+
+ SND_SOC_DAPM_MIXER("SPK DAC", SND_SOC_NOPM, 0, 0,
+ spkr_switch, ARRAY_SIZE(spkr_switch)),
+
+ SND_SOC_DAPM_MIXER_E("HPHR DAC",
+ MSM8X16_WCD_A_ANALOG_RX_HPH_R_PA_DAC_CTL, 3, 0, NULL,
+ 0, msm8x16_wcd_hphr_dac_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ /* Speaker */
+ SND_SOC_DAPM_OUTPUT("SPK_OUT"),
+
+ SND_SOC_DAPM_PGA_E("SPK PA", MSM8X16_WCD_A_ANALOG_SPKR_DRV_CTL,
+ 6, 0, NULL, 0, msm8x16_wcd_codec_enable_spk_pa,
+ SND_SOC_DAPM_PRE_REG |
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_MIXER_E("RX1 MIX1",
+ MSM8X16_WCD_A_CDC_CLK_RX_B1_CTL, 0, 0, NULL, 0,
+ msm8x16_wcd_codec_enable_interpolator,
+ SND_SOC_DAPM_PRE_REG | SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_MIXER_E("RX2 MIX1",
+ MSM8X16_WCD_A_CDC_CLK_RX_B1_CTL, 1, 0, NULL, 0,
+ msm8x16_wcd_codec_enable_interpolator,
+ SND_SOC_DAPM_PRE_REG |
+ SND_SOC_DAPM_POST_PMU |
+ SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_MIXER_E("RX1 MIX2",
+ MSM8X16_WCD_A_CDC_CLK_RX_B1_CTL, 0, 0, NULL,
+ 0, msm8x16_wcd_codec_enable_interpolator,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MIXER_E("RX2 MIX2",
+ MSM8X16_WCD_A_CDC_CLK_RX_B1_CTL, 1, 0, NULL,
+ 0, msm8x16_wcd_codec_enable_interpolator,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MIXER_E("RX3 MIX1",
+ MSM8X16_WCD_A_CDC_CLK_RX_B1_CTL, 2, 0, NULL,
+ 0, msm8x16_wcd_codec_enable_interpolator,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_SUPPLY("RX1 CLK", MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL,
+ 0, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("RX2 CLK", MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL,
+ 1, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("RX3 CLK", MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL,
+ 2, 0, msm8x16_wcd_codec_enable_dig_clk, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MIXER_E("RX1 CHAIN", MSM8X16_WCD_A_CDC_RX1_B6_CTL, 0, 0,
+ NULL, 0,
+ msm8x16_wcd_codec_enable_rx_chain,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MIXER_E("RX2 CHAIN", MSM8X16_WCD_A_CDC_RX2_B6_CTL, 0, 0,
+ NULL, 0,
+ msm8x16_wcd_codec_enable_rx_chain,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MIXER_E("RX3 CHAIN", MSM8X16_WCD_A_CDC_RX3_B6_CTL, 0, 0,
+ NULL, 0,
+ msm8x16_wcd_codec_enable_rx_chain,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_MUX("RX1 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+ &rx_mix1_inp1_mux),
+ SND_SOC_DAPM_MUX("RX1 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+ &rx_mix1_inp2_mux),
+ SND_SOC_DAPM_MUX("RX1 MIX1 INP3", SND_SOC_NOPM, 0, 0,
+ &rx_mix1_inp3_mux),
+
+ SND_SOC_DAPM_MUX("RX2 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+ &rx2_mix1_inp1_mux),
+ SND_SOC_DAPM_MUX("RX2 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+ &rx2_mix1_inp2_mux),
+ SND_SOC_DAPM_MUX("RX2 MIX1 INP3", SND_SOC_NOPM, 0, 0,
+ &rx2_mix1_inp3_mux),
+
+ SND_SOC_DAPM_MUX("RX3 MIX1 INP1", SND_SOC_NOPM, 0, 0,
+ &rx3_mix1_inp1_mux),
+ SND_SOC_DAPM_MUX("RX3 MIX1 INP2", SND_SOC_NOPM, 0, 0,
+ &rx3_mix1_inp2_mux),
+ SND_SOC_DAPM_MUX("RX3 MIX1 INP3", SND_SOC_NOPM, 0, 0,
+ &rx3_mix1_inp3_mux),
+
+ SND_SOC_DAPM_MUX("RX1 MIX2 INP1", SND_SOC_NOPM, 0, 0,
+ &rx1_mix2_inp1_mux),
+ SND_SOC_DAPM_MUX("RX2 MIX2 INP1", SND_SOC_NOPM, 0, 0,
+ &rx2_mix2_inp1_mux),
+ SND_SOC_DAPM_SUPPLY("CP", MSM8X16_WCD_A_ANALOG_NCP_EN, 0, 0,
+ msm8x16_wcd_codec_enable_charge_pump, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_SUPPLY("EAR CP", MSM8X16_WCD_A_ANALOG_NCP_EN, 4, 0,
+ msm8x16_wcd_codec_enable_charge_pump, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_SUPPLY("RX_BIAS", SND_SOC_NOPM,
+ 0, 0, msm8x16_wcd_codec_enable_rx_bias,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+ SND_SOC_DAPM_SUPPLY("SPK_RX_BIAS",
+ SND_SOC_NOPM, 0, 0,
+ msm8x16_wcd_codec_enable_rx_bias, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ SND_SOC_DAPM_MUX("RDAC2 MUX", SND_SOC_NOPM, 0, 0, &rdac2_mux),
+ SND_SOC_DAPM_SUPPLY("RX_I2S_CLK",
+ MSM8X16_WCD_A_CDC_CLK_RX_I2S_CTL, 4, 0, NULL, 0),
+};
+
+
+static struct snd_soc_codec_driver msm8x16_wcd_codec = {
+ .probe = msm8x16_wcd_codec_probe,
+ .remove = msm8x16_wcd_codec_remove,
+ .read = msm8x16_wcd_read,
+ .write = msm8x16_wcd_write,
+ .reg_cache_size = MSM8X16_WCD_CACHE_SIZE,
+ .reg_cache_default = msm8x16_wcd_reset_reg_defaults,
+ .reg_word_size = 1,
+ .controls = msm8x16_wcd_snd_controls,
+ .num_controls = ARRAY_SIZE(msm8x16_wcd_snd_controls),
+ .dapm_widgets = msm8x16_wcd_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(msm8x16_wcd_dapm_widgets),
+ .dapm_routes = audio_map,
+ .num_dapm_routes = ARRAY_SIZE(audio_map),
+};
+
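+/*
+ * Parse the codec DT node: "reg" gives the analog register base, "digital"
+ * points to the codec core syscon, plus the vddio/vdd-pa supplies and the
+ * mclk clock.
+ */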
+static int msm8x16_wcd_codec_parse_dt(struct platform_device *pdev,
+ struct wcd_chip *chip)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+ u32 res[2];
+
+ ret = of_property_read_u32_array(np, "reg", res, 2);
+ if (ret < 0)
+ return ret;
+
+ chip->analog_base = res[0];
+
+ chip->digital_map = syscon_regmap_lookup_by_phandle(np, "digital");
+ if (IS_ERR(chip->digital_map))
+ return PTR_ERR(chip->digital_map);
+
+ chip->vddio = devm_regulator_get(dev, "vddio");
+ if (IS_ERR(chip->vddio)) {
+ dev_err(dev, "Failed to get vdd supply\n");
+ return PTR_ERR(chip->vddio);
+ }
+
+ chip->vdd_pa = devm_regulator_get(dev, "vdd-pa");
+ if (IS_ERR(chip->vdd_pa)) {
+ dev_err(dev, "Failed to get vdd supply\n");
+ return PTR_ERR(chip->vdd_pa);
+ }
+
+ chip->mclk = devm_clk_get(dev, "mclk");
+
+ return 0;
+}
+
+static int wcd_probe(struct platform_device *pdev)
+{
+ struct wcd_chip *chip;
+ struct device *dev = &pdev->dev;
+
+ chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->analog_map = dev_get_regmap(dev->parent, NULL);
+ if (!chip->analog_map)
+ return -ENXIO;
+
+ ret = msm8x16_wcd_codec_parse_dt(pdev, chip);
+ if (ret < 0)
+ return ret;
+
+ clk_set_rate(chip->mclk, 9600000);
+ clk_prepare_enable(chip->mclk);
+
+ dev_set_drvdata(dev, chip);
+
+ return snd_soc_register_codec(dev, &msm8x16_wcd_codec,
+ msm8x16_wcd_codec_dai,
+ ARRAY_SIZE(msm8x16_wcd_codec_dai));
+}
+
+static int wcd_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_codec(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id wcd_match_table[] = {
+ { .compatible = "qcom,apq8016-wcd-codec" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, wcd_match_table);
+
+static struct platform_driver wcd_driver = {
+ .driver = {
+ .name = "spmi-wcd-codec",
+ .of_match_table = wcd_match_table,
+ },
+ .probe = wcd_probe,
+ .remove = wcd_remove,
+};
+module_platform_driver(wcd_driver);
+
+MODULE_ALIAS("platform:spmi-wcd-codec");
+MODULE_DESCRIPTION("SPMI PMIC WCD codec driver");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/msm8x16-wcd.h b/sound/soc/codecs/msm8x16-wcd.h
new file mode 100644
index 000000000000..ad4c9d0c5ae6
--- /dev/null
+++ b/sound/soc/codecs/msm8x16-wcd.h
@@ -0,0 +1,234 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef MSM8X16_WCD_H
+#define MSM8X16_WCD_H
+
+#include <linux/types.h>
+
+#define MSM8X16_WCD_NUM_REGISTERS 0x6FF
+#define MSM8X16_WCD_MAX_REGISTER (MSM8X16_WCD_NUM_REGISTERS-1)
+#define MSM8X16_WCD_CACHE_SIZE MSM8X16_WCD_NUM_REGISTERS
+#define MSM8X16_WCD_NUM_IRQ_REGS 2
+#define MAX_REGULATOR 7
+#define MSM8X16_WCD_REG_VAL(reg, val) {reg, 0, val}
+#define MSM8X16_TOMBAK_LPASS_AUDIO_CORE_DIG_CODEC_CLK_SEL 0xFE03B004
+#define MSM8X16_TOMBAK_LPASS_DIGCODEC_CMD_RCGR 0x0181C09C
+#define MSM8X16_TOMBAK_LPASS_DIGCODEC_CFG_RCGR 0x0181C0A0
+#define MSM8X16_TOMBAK_LPASS_DIGCODEC_M 0x0181C0A4
+#define MSM8X16_TOMBAK_LPASS_DIGCODEC_N 0x0181C0A8
+#define MSM8X16_TOMBAK_LPASS_DIGCODEC_D 0x0181C0AC
+#define MSM8X16_TOMBAK_LPASS_DIGCODEC_CBCR 0x0181C0B0
+#define MSM8X16_TOMBAK_LPASS_DIGCODEC_AHB_CBCR 0x0181C0B4
+
+#define MSM8X16_CODEC_NAME "msm8x16_wcd_codec"
+
+#define MSM8X16_WCD_IS_DIGITAL_REG(reg) \
+ (((reg >= 0x200) && (reg <= 0x4FF)) ? 1 : 0)
+#define MSM8X16_WCD_IS_TOMBAK_REG(reg) \
+ (((reg >= 0x000) && (reg <= 0x1FF)) ? 1 : 0)
+/*
+ * MCLK activity indicators during suspend and resume call
+ */
+#define MCLK_SUS_DIS 1
+#define MCLK_SUS_RSC 2
+#define MCLK_SUS_NO_ACT 3
+
+#define NUM_DECIMATORS 2
+
+extern const u8 msm8x16_wcd_reg_readable[MSM8X16_WCD_CACHE_SIZE];
+extern const u8 msm8x16_wcd_reg_readonly[MSM8X16_WCD_CACHE_SIZE];
+extern u8 msm8x16_wcd_reset_reg_defaults[MSM8X16_WCD_CACHE_SIZE];
+
+enum msm8x16_wcd_pid_current {
+ MSM8X16_WCD_PID_MIC_2P5_UA,
+ MSM8X16_WCD_PID_MIC_5_UA,
+ MSM8X16_WCD_PID_MIC_10_UA,
+ MSM8X16_WCD_PID_MIC_20_UA,
+};
+
+struct msm8x16_wcd_reg_mask_val {
+ u16 reg;
+ u8 mask;
+ u8 val;
+};
+
+enum msm8x16_wcd_mbhc_analog_pwr_cfg {
+ MSM8X16_WCD_ANALOG_PWR_COLLAPSED = 0,
+ MSM8X16_WCD_ANALOG_PWR_ON,
+ MSM8X16_WCD_NUM_ANALOG_PWR_CONFIGS,
+};
+
+/* Number of input and output I2S port */
+enum {
+ MSM8X16_WCD_RX1 = 0,
+ MSM8X16_WCD_RX2,
+ MSM8X16_WCD_RX3,
+ MSM8X16_WCD_RX_MAX,
+};
+
+enum {
+ MSM8X16_WCD_TX1 = 0,
+ MSM8X16_WCD_TX2,
+ MSM8X16_WCD_TX3,
+ MSM8X16_WCD_TX4,
+ MSM8X16_WCD_TX_MAX,
+};
+
+enum {
+ /* INTR_REG 0 - Digital Periph */
+ MSM8X16_WCD_IRQ_SPKR_CNP = 0,
+ MSM8X16_WCD_IRQ_SPKR_CLIP,
+ MSM8X16_WCD_IRQ_SPKR_OCP,
+ MSM8X16_WCD_IRQ_MBHC_INSREM_DET1,
+ MSM8X16_WCD_IRQ_MBHC_RELEASE,
+ MSM8X16_WCD_IRQ_MBHC_PRESS,
+ MSM8X16_WCD_IRQ_MBHC_INSREM_DET,
+ MSM8X16_WCD_IRQ_MBHC_HS_DET,
+ /* INTR_REG 1 - Analog Periph */
+ MSM8X16_WCD_IRQ_EAR_OCP,
+ MSM8X16_WCD_IRQ_HPHR_OCP,
+ MSM8X16_WCD_IRQ_HPHL_OCP,
+ MSM8X16_WCD_IRQ_EAR_CNP,
+ MSM8X16_WCD_IRQ_HPHR_CNP,
+ MSM8X16_WCD_IRQ_HPHL_CNP,
+ MSM8X16_WCD_NUM_IRQS,
+};
+
+enum wcd_notify_event {
+ WCD_EVENT_INVALID,
+ /* events for micbias ON and OFF */
+ WCD_EVENT_PRE_MICBIAS_2_OFF,
+ WCD_EVENT_POST_MICBIAS_2_OFF,
+ WCD_EVENT_PRE_MICBIAS_2_ON,
+ WCD_EVENT_POST_MICBIAS_2_ON,
+ /* events for PA ON and OFF */
+ WCD_EVENT_PRE_HPHL_PA_ON,
+ WCD_EVENT_POST_HPHL_PA_OFF,
+ WCD_EVENT_PRE_HPHR_PA_ON,
+ WCD_EVENT_POST_HPHR_PA_OFF,
+ WCD_EVENT_LAST,
+};
+
+enum {
+ ON_DEMAND_MICBIAS = 0,
+ ON_DEMAND_SUPPLIES_MAX,
+};
+
+/*
+ * The delay list is per codec HW specification.
+ * Please add delay in the list in the future instead
+ * of magic number
+ */
+enum {
+ CODEC_DELAY_1_MS = 1000,
+ CODEC_DELAY_1_1_MS = 1100,
+};
+#if 0
+struct msm8x16_wcd_regulator {
+ const char *name;
+ int min_uv;
+ int max_uv;
+ int optimum_ua;
+ bool ondemand;
+ struct regulator *regulator;
+};
+
+struct msm8916_asoc_mach_data {
+ int codec_type;
+ int ext_pa;
+ int us_euro_gpio;
+ int mclk_freq;
+ int lb_mode;
+ atomic_t mclk_rsc_ref;
+ atomic_t mclk_enabled;
+ struct mutex cdc_mclk_mutex;
+ struct delayed_work disable_mclk_work;
+ struct afe_digital_clk_cfg digital_cdc_clk;
+};
+
+struct msm8x16_wcd_pdata {
+ int irq;
+ int irq_base;
+ int num_irqs;
+ int reset_gpio;
+ void *msm8x16_wcd_ahb_base_vaddr;
+ struct wcd9xxx_micbias_setting micbias;
+ struct msm8x16_wcd_regulator regulator[MAX_REGULATOR];
+ u32 mclk_rate;
+};
+
+enum msm8x16_wcd_micbias_num {
+ MSM8X16_WCD_MICBIAS1 = 0,
+};
+
+struct msm8x16_wcd {
+ struct device *dev;
+ struct mutex io_lock;
+ u8 version;
+
+ int reset_gpio;
+ int (*read_dev)(struct snd_soc_codec *codec,
+ unsigned short reg);
+ int (*write_dev)(struct snd_soc_codec *codec,
+ unsigned short reg, u8 val);
+
+ u32 num_of_supplies;
+ struct regulator_bulk_data *supplies;
+
+ u8 idbyte[4];
+
+ int num_irqs;
+ u32 mclk_rate;
+ char __iomem *dig_base;
+};
+
+struct on_demand_supply {
+ struct regulator *supply;
+ atomic_t ref;
+};
+
+struct msm8x16_wcd_priv {
+ struct snd_soc_codec *codec;
+ u16 pmic_rev;
+ u32 adc_count;
+ u32 rx_bias_count;
+ s32 dmic_1_2_clk_cnt;
+ u32 mute_mask;
+ bool mclk_enabled;
+ bool clock_active;
+ bool config_mode_active;
+ bool spk_boost_set;
+ bool ear_pa_boost_set;
+ bool dec_active[NUM_DECIMATORS];
+ struct on_demand_supply on_demand_list[ON_DEMAND_SUPPLIES_MAX];
+ /* mbhc module */
+ struct wcd_mbhc mbhc;
+ struct blocking_notifier_head notifier;
+
+};
+
+extern int msm8x16_wcd_mclk_enable(struct snd_soc_codec *codec, int mclk_enable,
+ bool dapm);
+
+extern int msm8x16_wcd_hs_detect(struct snd_soc_codec *codec,
+ struct wcd_mbhc_config *mbhc_cfg);
+
+extern void msm8x16_wcd_hs_detect_exit(struct snd_soc_codec *codec);
+
+extern int msm8x16_register_notifier(struct snd_soc_codec *codec,
+ struct notifier_block *nblock);
+
+extern int msm8x16_unregister_notifier(struct snd_soc_codec *codec,
+ struct notifier_block *nblock);
+#endif
+#endif
+
diff --git a/sound/soc/codecs/msm8x16_wcd_registers.h b/sound/soc/codecs/msm8x16_wcd_registers.h
new file mode 100644
index 000000000000..03d92c844e49
--- /dev/null
+++ b/sound/soc/codecs/msm8x16_wcd_registers.h
@@ -0,0 +1,518 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef MSM8X16_WCD_REGISTERS_H
+#define MSM8X16_WCD_REGISTERS_H
+
+#define MSM8X16_WCD_A_DIGITAL_REVISION1 (0x000)
+#define MSM8X16_WCD_A_DIGITAL_REVISION1__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_REVISION2 (0x001)
+#define MSM8X16_WCD_A_DIGITAL_REVISION2__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_PERPH_TYPE (0x004)
+#define MSM8X16_WCD_A_DIGITAL_PERPH_TYPE__POR (0x23)
+#define MSM8X16_WCD_A_DIGITAL_PERPH_SUBTYPE (0x005)
+#define MSM8X16_WCD_A_DIGITAL_PERPH_SUBTYPE__POR (0x01)
+#define MSM8X16_WCD_A_DIGITAL_INT_RT_STS (0x010)
+#define MSM8X16_WCD_A_DIGITAL_INT_RT_STS__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_INT_SET_TYPE (0x011)
+#define MSM8X16_WCD_A_DIGITAL_INT_SET_TYPE__POR (0xFF)
+#define MSM8X16_WCD_A_DIGITAL_INT_POLARITY_HIGH (0x012)
+#define MSM8X16_WCD_A_DIGITAL_INT_POLARITY_HIGH__POR (0xFF)
+#define MSM8X16_WCD_A_DIGITAL_INT_POLARITY_LOW (0x013)
+#define MSM8X16_WCD_A_DIGITAL_INT_POLARITY_LOW__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_INT_LATCHED_CLR (0x014)
+#define MSM8X16_WCD_A_DIGITAL_INT_LATCHED_CLR__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_INT_EN_SET (0x015)
+#define MSM8X16_WCD_A_DIGITAL_INT_EN_SET__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_INT_EN_CLR (0x016)
+#define MSM8X16_WCD_A_DIGITAL_INT_EN_CLR__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_INT_LATCHED_STS (0x018)
+#define MSM8X16_WCD_A_DIGITAL_INT_LATCHED_STS__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_INT_PENDING_STS (0x019)
+#define MSM8X16_WCD_A_DIGITAL_INT_PENDING_STS__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_INT_MID_SEL (0x01A)
+#define MSM8X16_WCD_A_DIGITAL_INT_MID_SEL__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_INT_PRIORITY (0x01B)
+#define MSM8X16_WCD_A_DIGITAL_INT_PRIORITY__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_GPIO_MODE (0x040)
+#define MSM8X16_WCD_A_DIGITAL_GPIO_MODE__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_PIN_CTL_OE (0x041)
+#define MSM8X16_WCD_A_DIGITAL_PIN_CTL_OE__POR (0x01)
+#define MSM8X16_WCD_A_DIGITAL_PIN_CTL_DATA (0x042)
+#define MSM8X16_WCD_A_DIGITAL_PIN_CTL_DATA__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_PIN_STATUS (0x043)
+#define MSM8X16_WCD_A_DIGITAL_PIN_STATUS__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_HDRIVE_CTL (0x044)
+#define MSM8X16_WCD_A_DIGITAL_HDRIVE_CTL__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_CDC_RST_CTL (0x046)
+#define MSM8X16_WCD_A_DIGITAL_CDC_RST_CTL__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_CDC_TOP_CLK_CTL (0x048)
+#define MSM8X16_WCD_A_DIGITAL_CDC_TOP_CLK_CTL__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_CDC_ANA_CLK_CTL (0x049)
+#define MSM8X16_WCD_A_DIGITAL_CDC_ANA_CLK_CTL__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL (0x04A)
+#define MSM8X16_WCD_A_DIGITAL_CDC_DIG_CLK_CTL__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_TX1_CTL (0x050)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_TX1_CTL__POR (0x02)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_TX2_CTL (0x051)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_TX2_CTL__POR (0x02)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_HPHR_DAC_CTL (0x052)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_HPHR_DAC_CTL__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX1_CTL (0x053)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX2_CTL (0x054)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX3_CTL (0x055)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX3_CTL__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX_LB_CTL (0x056)
+#define MSM8X16_WCD_A_DIGITAL_CDC_CONN_RX_LB_CTL__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL1 (0x058)
+#define MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL1__POR (0x7C)
+#define MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL2 (0x059)
+#define MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL2__POR (0x7C)
+#define MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL3 (0x05A)
+#define MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL3__POR (0x7C)
+#define MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA0 (0x05B)
+#define MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA0__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA1 (0x05C)
+#define MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA1__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA2 (0x05D)
+#define MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA2__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA3 (0x05E)
+#define MSM8X16_WCD_A_DIGITAL_DEM_BYPASS_DATA3__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_DIG_DEBUG_CTL (0x068)
+#define MSM8X16_WCD_A_DIGITAL_DIG_DEBUG_CTL__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_DIG_DEBUG_EN (0x069)
+#define MSM8X16_WCD_A_DIGITAL_DIG_DEBUG_EN__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_SPARE_0 (0x070)
+#define MSM8X16_WCD_A_DIGITAL_SPARE_0__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_SPARE_1 (0x071)
+#define MSM8X16_WCD_A_DIGITAL_SPARE_1__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_SPARE_2 (0x072)
+#define MSM8X16_WCD_A_DIGITAL_SPARE_2__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_SEC_ACCESS (0x0D0)
+#define MSM8X16_WCD_A_DIGITAL_SEC_ACCESS__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_PERPH_RESET_CTL1 (0x0D8)
+#define MSM8X16_WCD_A_DIGITAL_PERPH_RESET_CTL1__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_PERPH_RESET_CTL2 (0x0D9)
+#define MSM8X16_WCD_A_DIGITAL_PERPH_RESET_CTL2__POR (0x01)
+#define MSM8X16_WCD_A_DIGITAL_PERPH_RESET_CTL3 (0x0DA)
+#define MSM8X16_WCD_A_DIGITAL_PERPH_RESET_CTL3__POR (0x05)
+#define MSM8X16_WCD_A_DIGITAL_PERPH_RESET_CTL4 (0x0DB)
+#define MSM8X16_WCD_A_DIGITAL_PERPH_RESET_CTL4__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_INT_TEST1 (0x0E0)
+#define MSM8X16_WCD_A_DIGITAL_INT_TEST1__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_INT_TEST_VAL (0x0E1)
+#define MSM8X16_WCD_A_DIGITAL_INT_TEST_VAL__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_TRIM_NUM (0x0F0)
+#define MSM8X16_WCD_A_DIGITAL_TRIM_NUM__POR (0x00)
+#define MSM8X16_WCD_A_DIGITAL_TRIM_CTRL (0x0F1)
+#define MSM8X16_WCD_A_DIGITAL_TRIM_CTRL__POR (0x00)
+
+#define MSM8X16_WCD_A_ANALOG_REVISION1 (0x100)
+#define MSM8X16_WCD_A_ANALOG_REVISION1__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_REVISION2 (0x101)
+#define MSM8X16_WCD_A_ANALOG_REVISION2__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_REVISION3 (0x102)
+#define MSM8X16_WCD_A_ANALOG_REVISION3__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_REVISION4 (0x103)
+#define MSM8X16_WCD_A_ANALOG_REVISION4__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_PERPH_TYPE (0x104)
+#define MSM8X16_WCD_A_ANALOG_PERPH_TYPE__POR (0x23)
+#define MSM8X16_WCD_A_ANALOG_PERPH_SUBTYPE (0x105)
+#define MSM8X16_WCD_A_ANALOG_PERPH_SUBTYPE__POR (0x09)
+#define MSM8X16_WCD_A_ANALOG_INT_RT_STS (0x110)
+#define MSM8X16_WCD_A_ANALOG_INT_RT_STS__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_INT_SET_TYPE (0x111)
+#define MSM8X16_WCD_A_ANALOG_INT_SET_TYPE__POR (0x3F)
+#define MSM8X16_WCD_A_ANALOG_INT_POLARITY_HIGH (0x112)
+#define MSM8X16_WCD_A_ANALOG_INT_POLARITY_HIGH__POR (0x3F)
+#define MSM8X16_WCD_A_ANALOG_INT_POLARITY_LOW (0x113)
+#define MSM8X16_WCD_A_ANALOG_INT_POLARITY_LOW__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_INT_LATCHED_CLR (0x114)
+#define MSM8X16_WCD_A_ANALOG_INT_LATCHED_CLR__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_INT_EN_SET (0x115)
+#define MSM8X16_WCD_A_ANALOG_INT_EN_SET__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_INT_EN_CLR (0x116)
+#define MSM8X16_WCD_A_ANALOG_INT_EN_CLR__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_INT_LATCHED_STS (0x118)
+#define MSM8X16_WCD_A_ANALOG_INT_LATCHED_STS__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_INT_PENDING_STS (0x119)
+#define MSM8X16_WCD_A_ANALOG_INT_PENDING_STS__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_INT_MID_SEL (0x11A)
+#define MSM8X16_WCD_A_ANALOG_INT_MID_SEL__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_INT_PRIORITY (0x11B)
+#define MSM8X16_WCD_A_ANALOG_INT_PRIORITY__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_MICB_1_EN (0x140)
+#define MSM8X16_WCD_A_ANALOG_MICB_1_EN__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_MICB_1_VAL (0x141)
+#define MSM8X16_WCD_A_ANALOG_MICB_1_VAL__POR (0x20)
+#define MSM8X16_WCD_A_ANALOG_MICB_1_CTL (0x142)
+#define MSM8X16_WCD_A_ANALOG_MICB_1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_MICB_1_INT_RBIAS (0x143)
+#define MSM8X16_WCD_A_ANALOG_MICB_1_INT_RBIAS__POR (0x49)
+#define MSM8X16_WCD_A_ANALOG_MICB_2_EN (0x144)
+#define MSM8X16_WCD_A_ANALOG_MICB_2_EN__POR (0x20)
+#define MSM8X16_WCD_A_ANALOG_TX_1_2_ATEST_CTL_2 (0x145)
+#define MSM8X16_WCD_A_ANALOG_TX_1_2_ATEST_CTL_2__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_MASTER_BIAS_CTL (0x146)
+#define MSM8X16_WCD_A_ANALOG_MASTER_BIAS_CTL__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_MBHC_DET_CTL_1 (0x147)
+#define MSM8X16_WCD_A_ANALOG_MBHC_DET_CTL_1__POR (0x35)
+#define MSM8X16_WCD_A_ANALOG_MBHC_DET_CTL_2 (0x150)
+#define MSM8X16_WCD_A_ANALOG_MBHC_DET_CTL_2__POR (0x08)
+#define MSM8X16_WCD_A_ANALOG_MBHC_FSM_CTL (0x151)
+#define MSM8X16_WCD_A_ANALOG_MBHC_FSM_CTL__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_MBHC_DBNC_TIMER (0x152)
+#define MSM8X16_WCD_A_ANALOG_MBHC_DBNC_TIMER__POR (0x98)
+#define MSM8X16_WCD_A_ANALOG_MBHC_BTN0_ZDETL_CTL (0x153)
+#define MSM8X16_WCD_A_ANALOG_MBHC_BTN0_ZDETL_CTL__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_MBHC_BTN1_ZDETM_CTL (0x154)
+#define MSM8X16_WCD_A_ANALOG_MBHC_BTN1_ZDETM_CTL__POR (0x20)
+#define MSM8X16_WCD_A_ANALOG_MBHC_BTN2_ZDETH_CTL (0x155)
+#define MSM8X16_WCD_A_ANALOG_MBHC_BTN2_ZDETH_CTL__POR (0x40)
+#define MSM8X16_WCD_A_ANALOG_MBHC_BTN3_CTL (0x156)
+#define MSM8X16_WCD_A_ANALOG_MBHC_BTN3_CTL__POR (0x61)
+#define MSM8X16_WCD_A_ANALOG_MBHC_BTN4_CTL (0x157)
+#define MSM8X16_WCD_A_ANALOG_MBHC_BTN4_CTL__POR (0x80)
+#define MSM8X16_WCD_A_ANALOG_MBHC_BTN_RESULT (0x158)
+#define MSM8X16_WCD_A_ANALOG_MBHC_BTN_RESULT__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_MBHC_ZDET_ELECT_RESULT (0x159)
+#define MSM8X16_WCD_A_ANALOG_MBHC_ZDET_ELECT_RESULT__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_TX_1_EN (0x160)
+#define MSM8X16_WCD_A_ANALOG_TX_1_EN__POR (0x03)
+#define MSM8X16_WCD_A_ANALOG_TX_2_EN (0x161)
+#define MSM8X16_WCD_A_ANALOG_TX_2_EN__POR (0x03)
+#define MSM8X16_WCD_A_ANALOG_TX_1_2_TEST_CTL_1 (0x162)
+#define MSM8X16_WCD_A_ANALOG_TX_1_2_TEST_CTL_1__POR (0xBF)
+#define MSM8X16_WCD_A_ANALOG_TX_1_2_TEST_CTL_2 (0x163)
+#define MSM8X16_WCD_A_ANALOG_TX_1_2_TEST_CTL_2__POR (0x8C)
+#define MSM8X16_WCD_A_ANALOG_TX_1_2_ATEST_CTL (0x164)
+#define MSM8X16_WCD_A_ANALOG_TX_1_2_ATEST_CTL__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_TX_1_2_OPAMP_BIAS (0x165)
+#define MSM8X16_WCD_A_ANALOG_TX_1_2_OPAMP_BIAS__POR (0x6B)
+#define MSM8X16_WCD_A_ANALOG_TX_1_2_TXFE_CLKDIV (0x166)
+#define MSM8X16_WCD_A_ANALOG_TX_1_2_TXFE_CLKDIV__POR (0x51)
+#define MSM8X16_WCD_A_ANALOG_TX_3_EN (0x167)
+#define MSM8X16_WCD_A_ANALOG_TX_3_EN__POR (0x02)
+#define MSM8X16_WCD_A_ANALOG_NCP_EN (0x180)
+#define MSM8X16_WCD_A_ANALOG_NCP_EN__POR (0x26)
+#define MSM8X16_WCD_A_ANALOG_NCP_CLK (0x181)
+#define MSM8X16_WCD_A_ANALOG_NCP_CLK__POR (0x23)
+#define MSM8X16_WCD_A_ANALOG_NCP_DEGLITCH (0x182)
+#define MSM8X16_WCD_A_ANALOG_NCP_DEGLITCH__POR (0x5B)
+#define MSM8X16_WCD_A_ANALOG_NCP_FBCTRL (0x183)
+#define MSM8X16_WCD_A_ANALOG_NCP_FBCTRL__POR (0x08)
+#define MSM8X16_WCD_A_ANALOG_NCP_BIAS (0x184)
+#define MSM8X16_WCD_A_ANALOG_NCP_BIAS__POR (0x29)
+#define MSM8X16_WCD_A_ANALOG_NCP_VCTRL (0x185)
+#define MSM8X16_WCD_A_ANALOG_NCP_VCTRL__POR (0x24)
+#define MSM8X16_WCD_A_ANALOG_NCP_TEST (0x186)
+#define MSM8X16_WCD_A_ANALOG_NCP_TEST__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_NCP_CLIM_ADDR (0x187)
+#define MSM8X16_WCD_A_ANALOG_NCP_CLIM_ADDR__POR (0xD5)
+#define MSM8X16_WCD_A_ANALOG_RX_CLOCK_DIVIDER (0x190)
+#define MSM8X16_WCD_A_ANALOG_RX_CLOCK_DIVIDER__POR (0xE8)
+#define MSM8X16_WCD_A_ANALOG_RX_COM_OCP_CTL (0x191)
+#define MSM8X16_WCD_A_ANALOG_RX_COM_OCP_CTL__POR (0xCF)
+#define MSM8X16_WCD_A_ANALOG_RX_COM_OCP_COUNT (0x192)
+#define MSM8X16_WCD_A_ANALOG_RX_COM_OCP_COUNT__POR (0x6E)
+#define MSM8X16_WCD_A_ANALOG_RX_COM_BIAS_DAC (0x193)
+#define MSM8X16_WCD_A_ANALOG_RX_COM_BIAS_DAC__POR (0x10)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_PA (0x194)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_PA__POR (0x5A)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_LDO_OCP (0x195)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_LDO_OCP__POR (0x69)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_CNP (0x196)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_BIAS_CNP__POR (0x29)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_EN (0x197)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_EN__POR (0x80)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_WG_CTL (0x198)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_WG_CTL__POR (0xDA)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_WG_TIME (0x199)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_CNP_WG_TIME__POR (0x16)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_L_TEST (0x19A)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_L_TEST__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_L_PA_DAC_CTL (0x19B)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_L_PA_DAC_CTL__POR (0x20)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_R_TEST (0x19C)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_R_TEST__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_R_PA_DAC_CTL (0x19D)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_R_PA_DAC_CTL__POR (0x20)
+#define MSM8X16_WCD_A_ANALOG_RX_EAR_CTL (0x19E)
+#define MSM8X16_WCD_A_ANALOG_RX_EAR_CTL__POR (0x12)
+#define MSM8X16_WCD_A_ANALOG_RX_ATEST (0x19F)
+#define MSM8X16_WCD_A_ANALOG_RX_ATEST__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_STATUS (0x1A0)
+#define MSM8X16_WCD_A_ANALOG_RX_HPH_STATUS__POR (0x0C)
+#define MSM8X16_WCD_A_ANALOG_RX_EAR_STATUS (0x1A1)
+#define MSM8X16_WCD_A_ANALOG_RX_EAR_STATUS__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_SPKR_DAC_CTL (0x1B0)
+#define MSM8X16_WCD_A_ANALOG_SPKR_DAC_CTL__POR (0x83)
+#define MSM8X16_WCD_A_ANALOG_SPKR_DRV_CLIP_DET (0x1B1)
+#define MSM8X16_WCD_A_ANALOG_SPKR_DRV_CLIP_DET__POR (0x91)
+#define MSM8X16_WCD_A_ANALOG_SPKR_DRV_CTL (0x1B2)
+#define MSM8X16_WCD_A_ANALOG_SPKR_DRV_CTL__POR (0x29)
+#define MSM8X16_WCD_A_ANALOG_SPKR_ANA_BIAS_SET (0x1B3)
+#define MSM8X16_WCD_A_ANALOG_SPKR_ANA_BIAS_SET__POR (0x4D)
+#define MSM8X16_WCD_A_ANALOG_SPKR_OCP_CTL (0x1B4)
+#define MSM8X16_WCD_A_ANALOG_SPKR_OCP_CTL__POR (0xE1)
+#define MSM8X16_WCD_A_ANALOG_SPKR_PWRSTG_CTL (0x1B5)
+#define MSM8X16_WCD_A_ANALOG_SPKR_PWRSTG_CTL__POR (0x1E)
+#define MSM8X16_WCD_A_ANALOG_SPKR_DRV_MISC (0x1B6)
+#define MSM8X16_WCD_A_ANALOG_SPKR_DRV_MISC__POR (0xCB)
+#define MSM8X16_WCD_A_ANALOG_SPKR_DRV_DBG (0x1B7)
+#define MSM8X16_WCD_A_ANALOG_SPKR_DRV_DBG__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_CURRENT_LIMIT (0x1C0)
+#define MSM8X16_WCD_A_ANALOG_CURRENT_LIMIT__POR (0x02)
+#define MSM8X16_WCD_A_ANALOG_OUTPUT_VOLTAGE (0x1C1)
+#define MSM8X16_WCD_A_ANALOG_OUTPUT_VOLTAGE__POR (0x14)
+#define MSM8X16_WCD_A_ANALOG_BYPASS_MODE (0x1C2)
+#define MSM8X16_WCD_A_ANALOG_BYPASS_MODE__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_BOOST_EN_CTL (0x1C3)
+#define MSM8X16_WCD_A_ANALOG_BOOST_EN_CTL__POR (0x1F)
+#define MSM8X16_WCD_A_ANALOG_SLOPE_COMP_IP_ZERO (0x1C4)
+#define MSM8X16_WCD_A_ANALOG_SLOPE_COMP_IP_ZERO__POR (0x8C)
+#define MSM8X16_WCD_A_ANALOG_RDSON_MAX_DUTY_CYCLE (0x1C5)
+#define MSM8X16_WCD_A_ANALOG_RDSON_MAX_DUTY_CYCLE__POR (0xC0)
+#define MSM8X16_WCD_A_ANALOG_BOOST_TEST1_1 (0x1C6)
+#define MSM8X16_WCD_A_ANALOG_BOOST_TEST1_1__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_BOOST_TEST_2 (0x1C7)
+#define MSM8X16_WCD_A_ANALOG_BOOST_TEST_2__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_SPKR_SAR_STATUS (0x1C8)
+#define MSM8X16_WCD_A_ANALOG_SPKR_SAR_STATUS__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_SPKR_DRV_STATUS (0x1C9)
+#define MSM8X16_WCD_A_ANALOG_SPKR_DRV_STATUS__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_PBUS_ADD_CSR (0x1CE)
+#define MSM8X16_WCD_A_ANALOG_PBUS_ADD_CSR__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_PBUS_ADD_SEL (0x1CF)
+#define MSM8X16_WCD_A_ANALOG_PBUS_ADD_SEL__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_SEC_ACCESS (0x1D0)
+#define MSM8X16_WCD_A_ANALOG_SEC_ACCESS__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_PERPH_RESET_CTL1 (0x1D8)
+#define MSM8X16_WCD_A_ANALOG_PERPH_RESET_CTL1__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_PERPH_RESET_CTL2 (0x1D9)
+#define MSM8X16_WCD_A_ANALOG_PERPH_RESET_CTL2__POR (0x01)
+#define MSM8X16_WCD_A_ANALOG_PERPH_RESET_CTL3 (0x1DA)
+#define MSM8X16_WCD_A_ANALOG_PERPH_RESET_CTL3__POR (0x05)
+#define MSM8X16_WCD_A_ANALOG_PERPH_RESET_CTL4 (0x1DB)
+#define MSM8X16_WCD_A_ANALOG_PERPH_RESET_CTL4__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_INT_TEST1 (0x1E0)
+#define MSM8X16_WCD_A_ANALOG_INT_TEST1__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_INT_TEST_VAL (0x1E1)
+#define MSM8X16_WCD_A_ANALOG_INT_TEST_VAL__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_TRIM_NUM (0x1F0)
+#define MSM8X16_WCD_A_ANALOG_TRIM_NUM__POR (0x04)
+#define MSM8X16_WCD_A_ANALOG_TRIM_CTRL1 (0x1F1)
+#define MSM8X16_WCD_A_ANALOG_TRIM_CTRL1__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_TRIM_CTRL2 (0x1F2)
+#define MSM8X16_WCD_A_ANALOG_TRIM_CTRL2__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_TRIM_CTRL3 (0x1F3)
+#define MSM8X16_WCD_A_ANALOG_TRIM_CTRL3__POR (0x00)
+#define MSM8X16_WCD_A_ANALOG_TRIM_CTRL4 (0x1F4)
+#define MSM8X16_WCD_A_ANALOG_TRIM_CTRL4__POR (0x00)
+
+/* Digital part */
+#define MSM8X16_WCD_A_CDC_CLK_RX_RESET_CTL (0x200)
+#define MSM8X16_WCD_A_CDC_CLK_RX_RESET_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CLK_TX_RESET_B1_CTL (0x204)
+#define MSM8X16_WCD_A_CDC_CLK_TX_RESET_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CLK_DMIC_B1_CTL (0x208)
+#define MSM8X16_WCD_A_CDC_CLK_DMIC_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CLK_RX_I2S_CTL (0x20C)
+#define MSM8X16_WCD_A_CDC_CLK_RX_I2S_CTL__POR (0x13)
+#define MSM8X16_WCD_A_CDC_CLK_TX_I2S_CTL (0x210)
+#define MSM8X16_WCD_A_CDC_CLK_TX_I2S_CTL__POR (0x13)
+#define MSM8X16_WCD_A_CDC_CLK_OTHR_RESET_B1_CTL (0x214)
+#define MSM8X16_WCD_A_CDC_CLK_OTHR_RESET_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CLK_TX_CLK_EN_B1_CTL (0x218)
+#define MSM8X16_WCD_A_CDC_CLK_TX_CLK_EN_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CLK_OTHR_CTL (0x21C)
+#define MSM8X16_WCD_A_CDC_CLK_OTHR_CTL__POR (0x04)
+#define MSM8X16_WCD_A_CDC_CLK_RX_B1_CTL (0x220)
+#define MSM8X16_WCD_A_CDC_CLK_RX_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CLK_MCLK_CTL (0x224)
+#define MSM8X16_WCD_A_CDC_CLK_MCLK_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CLK_PDM_CTL (0x228)
+#define MSM8X16_WCD_A_CDC_CLK_PDM_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CLK_SD_CTL (0x22C)
+#define MSM8X16_WCD_A_CDC_CLK_SD_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX1_B1_CTL (0x240)
+#define MSM8X16_WCD_A_CDC_RX1_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX2_B1_CTL (0x260)
+#define MSM8X16_WCD_A_CDC_RX2_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX3_B1_CTL (0x280)
+#define MSM8X16_WCD_A_CDC_RX3_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX1_B2_CTL (0x244)
+#define MSM8X16_WCD_A_CDC_RX1_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX2_B2_CTL (0x264)
+#define MSM8X16_WCD_A_CDC_RX2_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX3_B2_CTL (0x284)
+#define MSM8X16_WCD_A_CDC_RX3_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX1_B3_CTL (0x248)
+#define MSM8X16_WCD_A_CDC_RX1_B3_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX2_B3_CTL (0x268)
+#define MSM8X16_WCD_A_CDC_RX2_B3_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX3_B3_CTL (0x288)
+#define MSM8X16_WCD_A_CDC_RX3_B3_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX1_B4_CTL (0x24C)
+#define MSM8X16_WCD_A_CDC_RX1_B4_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX2_B4_CTL (0x26C)
+#define MSM8X16_WCD_A_CDC_RX2_B4_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX3_B4_CTL (0x28C)
+#define MSM8X16_WCD_A_CDC_RX3_B4_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX1_B5_CTL (0x250)
+#define MSM8X16_WCD_A_CDC_RX1_B5_CTL__POR (0x68)
+#define MSM8X16_WCD_A_CDC_RX2_B5_CTL (0x270)
+#define MSM8X16_WCD_A_CDC_RX2_B5_CTL__POR (0x68)
+#define MSM8X16_WCD_A_CDC_RX3_B5_CTL (0x290)
+#define MSM8X16_WCD_A_CDC_RX3_B5_CTL__POR (0x68)
+#define MSM8X16_WCD_A_CDC_RX1_B6_CTL (0x254)
+#define MSM8X16_WCD_A_CDC_RX1_B6_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX2_B6_CTL (0x274)
+#define MSM8X16_WCD_A_CDC_RX2_B6_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX3_B6_CTL (0x294)
+#define MSM8X16_WCD_A_CDC_RX3_B6_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX1_VOL_CTL_B1_CTL (0x258)
+#define MSM8X16_WCD_A_CDC_RX1_VOL_CTL_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX2_VOL_CTL_B1_CTL (0x278)
+#define MSM8X16_WCD_A_CDC_RX2_VOL_CTL_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX3_VOL_CTL_B1_CTL (0x298)
+#define MSM8X16_WCD_A_CDC_RX3_VOL_CTL_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX1_VOL_CTL_B2_CTL (0x25C)
+#define MSM8X16_WCD_A_CDC_RX1_VOL_CTL_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX2_VOL_CTL_B2_CTL (0x27C)
+#define MSM8X16_WCD_A_CDC_RX2_VOL_CTL_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_RX3_VOL_CTL_B2_CTL (0x29C)
+#define MSM8X16_WCD_A_CDC_RX3_VOL_CTL_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_TOP_GAIN_UPDATE (0x2A0)
+#define MSM8X16_WCD_A_CDC_TOP_GAIN_UPDATE__POR (0x00)
+#define MSM8X16_WCD_A_CDC_TOP_CTL (0x2A4)
+#define MSM8X16_WCD_A_CDC_TOP_CTL__POR (0x01)
+#define MSM8X16_WCD_A_CDC_DEBUG_DESER1_CTL (0x2E0)
+#define MSM8X16_WCD_A_CDC_DEBUG_DESER1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_DEBUG_DESER2_CTL (0x2E4)
+#define MSM8X16_WCD_A_CDC_DEBUG_DESER2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_DEBUG_B1_CTL_CFG (0x2E8)
+#define MSM8X16_WCD_A_CDC_DEBUG_B1_CTL_CFG__POR (0x00)
+#define MSM8X16_WCD_A_CDC_DEBUG_B2_CTL_CFG (0x2EC)
+#define MSM8X16_WCD_A_CDC_DEBUG_B2_CTL_CFG__POR (0x00)
+#define MSM8X16_WCD_A_CDC_DEBUG_B3_CTL_CFG (0x2F0)
+#define MSM8X16_WCD_A_CDC_DEBUG_B3_CTL_CFG__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B1_CTL (0x300)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B1_CTL (0x340)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B2_CTL (0x304)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B2_CTL (0x344)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B3_CTL (0x308)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B3_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B3_CTL (0x348)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B3_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B4_CTL (0x30C)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B4_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B4_CTL (0x34C)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B4_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B5_CTL (0x310)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B5_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B5_CTL (0x350)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B5_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B6_CTL (0x314)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B6_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B6_CTL (0x354)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B6_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B7_CTL (0x318)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B7_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B7_CTL (0x358)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B7_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B8_CTL (0x31C)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_B8_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B8_CTL (0x35C)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_B8_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR1_CTL (0x320)
+#define MSM8X16_WCD_A_CDC_IIR1_CTL__POR (0x40)
+#define MSM8X16_WCD_A_CDC_IIR2_CTL (0x360)
+#define MSM8X16_WCD_A_CDC_IIR2_CTL__POR (0x40)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_TIMER_CTL (0x324)
+#define MSM8X16_WCD_A_CDC_IIR1_GAIN_TIMER_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_TIMER_CTL (0x364)
+#define MSM8X16_WCD_A_CDC_IIR2_GAIN_TIMER_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR1_COEF_B1_CTL (0x328)
+#define MSM8X16_WCD_A_CDC_IIR1_COEF_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR2_COEF_B1_CTL (0x368)
+#define MSM8X16_WCD_A_CDC_IIR2_COEF_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR1_COEF_B2_CTL (0x32C)
+#define MSM8X16_WCD_A_CDC_IIR1_COEF_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_IIR2_COEF_B2_CTL (0x36C)
+#define MSM8X16_WCD_A_CDC_IIR2_COEF_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_RX1_B1_CTL (0x380)
+#define MSM8X16_WCD_A_CDC_CONN_RX1_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_RX1_B2_CTL (0x384)
+#define MSM8X16_WCD_A_CDC_CONN_RX1_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_RX1_B3_CTL (0x388)
+#define MSM8X16_WCD_A_CDC_CONN_RX1_B3_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_RX2_B1_CTL (0x38C)
+#define MSM8X16_WCD_A_CDC_CONN_RX2_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_RX2_B2_CTL (0x390)
+#define MSM8X16_WCD_A_CDC_CONN_RX2_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_RX2_B3_CTL (0x394)
+#define MSM8X16_WCD_A_CDC_CONN_RX2_B3_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_RX3_B1_CTL (0x398)
+#define MSM8X16_WCD_A_CDC_CONN_RX3_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_RX3_B2_CTL (0x39C)
+#define MSM8X16_WCD_A_CDC_CONN_RX3_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_TX_B1_CTL (0x3A0)
+#define MSM8X16_WCD_A_CDC_CONN_TX_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_EQ1_B1_CTL (0x3A8)
+#define MSM8X16_WCD_A_CDC_CONN_EQ1_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_EQ1_B2_CTL (0x3AC)
+#define MSM8X16_WCD_A_CDC_CONN_EQ1_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_EQ1_B3_CTL (0x3B0)
+#define MSM8X16_WCD_A_CDC_CONN_EQ1_B3_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_EQ1_B4_CTL (0x3B4)
+#define MSM8X16_WCD_A_CDC_CONN_EQ1_B4_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_EQ2_B1_CTL (0x3B8)
+#define MSM8X16_WCD_A_CDC_CONN_EQ2_B1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_EQ2_B2_CTL (0x3BC)
+#define MSM8X16_WCD_A_CDC_CONN_EQ2_B2_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_EQ2_B3_CTL (0x3C0)
+#define MSM8X16_WCD_A_CDC_CONN_EQ2_B3_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_EQ2_B4_CTL (0x3C4)
+#define MSM8X16_WCD_A_CDC_CONN_EQ2_B4_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_CONN_TX_I2S_SD1_CTL (0x3C8)
+#define MSM8X16_WCD_A_CDC_CONN_TX_I2S_SD1_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_TX1_VOL_CTL_TIMER (0x480)
+#define MSM8X16_WCD_A_CDC_TX1_VOL_CTL_TIMER__POR (0x00)
+#define MSM8X16_WCD_A_CDC_TX2_VOL_CTL_TIMER (0x4A0)
+#define MSM8X16_WCD_A_CDC_TX2_VOL_CTL_TIMER__POR (0x00)
+#define MSM8X16_WCD_A_CDC_TX1_VOL_CTL_GAIN (0x484)
+#define MSM8X16_WCD_A_CDC_TX1_VOL_CTL_GAIN__POR (0x00)
+#define MSM8X16_WCD_A_CDC_TX2_VOL_CTL_GAIN (0x4A4)
+#define MSM8X16_WCD_A_CDC_TX2_VOL_CTL_GAIN__POR (0x00)
+#define MSM8X16_WCD_A_CDC_TX1_VOL_CTL_CFG (0x488)
+#define MSM8X16_WCD_A_CDC_TX1_VOL_CTL_CFG__POR (0x00)
+#define MSM8X16_WCD_A_CDC_TX2_VOL_CTL_CFG (0x4A8)
+#define MSM8X16_WCD_A_CDC_TX2_VOL_CTL_CFG__POR (0x00)
+#define MSM8X16_WCD_A_CDC_TX1_MUX_CTL (0x48C)
+#define MSM8X16_WCD_A_CDC_TX1_MUX_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_TX2_MUX_CTL (0x4AC)
+#define MSM8X16_WCD_A_CDC_TX2_MUX_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_TX1_CLK_FS_CTL (0x490)
+#define MSM8X16_WCD_A_CDC_TX1_CLK_FS_CTL__POR (0x03)
+#define MSM8X16_WCD_A_CDC_TX2_CLK_FS_CTL (0x4B0)
+#define MSM8X16_WCD_A_CDC_TX2_CLK_FS_CTL__POR (0x03)
+#define MSM8X16_WCD_A_CDC_TX1_DMIC_CTL (0x494)
+#define MSM8X16_WCD_A_CDC_TX1_DMIC_CTL__POR (0x00)
+#define MSM8X16_WCD_A_CDC_TX2_DMIC_CTL (0x4B4)
+#define MSM8X16_WCD_A_CDC_TX2_DMIC_CTL__POR (0x00)
+#endif
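
The __POR macros above record each register's power-on-reset value. A minimal sketch of how a codec driver might consume these offset/__POR pairs, assuming a regmap-backed register cache; the table name, the selection of registers, and the regmap_config fields mentioned are illustrative assumptions, not taken from this patch:

#include <linux/regmap.h>

/* Illustrative only: reset defaults paired from the offset and __POR macros. */
static const struct reg_default msm8x16_wcd_reg_defaults[] = {
	{ MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL1, MSM8X16_WCD_A_DIGITAL_CDC_RX_CTL1__POR },
	{ MSM8X16_WCD_A_ANALOG_MICB_1_VAL,   MSM8X16_WCD_A_ANALOG_MICB_1_VAL__POR },
	{ MSM8X16_WCD_A_CDC_CLK_RX_I2S_CTL,  MSM8X16_WCD_A_CDC_CLK_RX_I2S_CTL__POR },
};

/*
 * A struct regmap_config could then reference this table so the cache starts
 * out matching the hardware after reset:
 *   .reg_defaults     = msm8x16_wcd_reg_defaults,
 *   .num_reg_defaults = ARRAY_SIZE(msm8x16_wcd_reg_defaults),
 */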