author     Linux Build Service Account <lnxbuild@localhost>  2020-01-02 22:15:58 -0800
committer  Linux Build Service Account <lnxbuild@localhost>  2020-01-02 22:15:58 -0800
commit     d711527304c5fe9ee24a82e0f999d9b262f8b36a (patch)
tree       45788c9303541a4a41d3b6594f2e6bb78bbd1bc8
parent     039a6d18d9a50ff3e4113226f6d31898701c918b (diff)
parent     4accd48caa3578357f54471023066b5705a91f15 (diff)
Merge 4accd48caa3578357f54471023066b5705a91f15 on remote branch (LA.UM.8.11.r1-02200-NICOBAR.0)
Change-Id: Ie2931a10db38e572e1120fe4e149757ec5cbafdc
-rw-r--r--  Documentation/devicetree/bindings/bus/mhi.txt  6
-rw-r--r--  Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt  14
-rw-r--r--  arch/arm/configs/vendor/sdxprairie-perf_defconfig  4
-rw-r--r--  arch/arm/configs/vendor/sdxprairie_defconfig  4
-rw-r--r--  arch/arm/configs/vendor/trinket-perf_defconfig  669
-rw-r--r--  arch/arm/configs/vendor/trinket_defconfig  752
-rw-r--r--  arch/arm/mm/dma-mapping.c  3
-rw-r--r--  arch/arm64/boot/dts/qcom/atoll-coresight.dtsi  29
-rw-r--r--  arch/arm64/boot/dts/qcom/atoll-idp.dtsi  11
-rw-r--r--  arch/arm64/boot/dts/qcom/atoll-npu.dtsi  3
-rw-r--r--  arch/arm64/boot/dts/qcom/atoll-qrd.dtsi  12
-rw-r--r--  arch/arm64/boot/dts/qcom/atoll-sde-display.dtsi  39
-rw-r--r--  arch/arm64/boot/dts/qcom/atoll.dtsi  4
-rw-r--r--  arch/arm64/boot/dts/qcom/dsi-panel-nt36672c-fhd-plus-video.dtsi  113
-rw-r--r--  arch/arm64/boot/dts/qcom/qcs405-iot-sku12.dts  25
-rw-r--r--  arch/arm64/boot/dts/qcom/qcs407-iot-sku12.dts  23
-rw-r--r--  arch/arm64/boot/dts/qcom/sdx-audio-lpass.dtsi  2
-rw-r--r--  arch/arm64/boot/dts/qcom/sdx-wsa881x.dtsi  5
-rw-r--r--  arch/arm64/boot/dts/qcom/sdxprairie-aqc.dtsi  160
-rw-r--r--  arch/arm64/boot/dts/qcom/sdxprairie-mtp-le-cpe.dtsi  12
-rw-r--r--  arch/arm64/boot/dts/qcom/sdxprairie-regulator.dtsi  4
-rw-r--r--  arch/arm64/boot/dts/qcom/sdxprairie-v2-mtp-le-cpe.dtsi  12
-rw-r--r--  arch/arm64/boot/dts/qcom/sm8150-usb.dtsi  1
-rw-r--r--  arch/arm64/include/asm/processor.h  14
-rw-r--r--  arch/arm64/kernel/process.c  29
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c  3
-rw-r--r--  drivers/bluetooth/bluetooth-power.c  1
-rw-r--r--  drivers/bus/mhi/controllers/mhi_arch_qcom.c  40
-rw-r--r--  drivers/bus/mhi/controllers/mhi_qcom.c  24
-rw-r--r--  drivers/bus/mhi/controllers/mhi_qcom.h  2
-rw-r--r--  drivers/bus/mhi/core/mhi_boot.c  27
-rw-r--r--  drivers/bus/mhi/core/mhi_init.c  10
-rw-r--r--  drivers/bus/mhi/core/mhi_internal.h  4
-rw-r--r--  drivers/bus/mhi/core/mhi_main.c  92
-rw-r--r--  drivers/bus/mhi/core/mhi_pm.c  35
-rw-r--r--  drivers/char/diag/diagchar.h  3
-rw-r--r--  drivers/char/diag/diagchar_core.c  17
-rw-r--r--  drivers/char/diag/diagfwd.c  14
-rw-r--r--  drivers/char/diag/diagfwd.h  3
-rw-r--r--  drivers/clk/qcom/gcc-atoll.c  2
-rw-r--r--  drivers/crypto/msm/ice.c  15
-rw-r--r--  drivers/devfreq/governor_msm_adreno_tz.c  9
-rw-r--r--  drivers/devfreq/m4m-hwmon.c  4
-rw-r--r--  drivers/devfreq/msmcci-hwmon.c  4
-rw-r--r--  drivers/gpu/msm/adreno-gpulist.h  2
-rw-r--r--  drivers/i2c/busses/i2c-qcom-geni.c  100
-rw-r--r--  drivers/iio/adc/qcom-spmi-adc5.c  20
-rw-r--r--  drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c  2
-rw-r--r--  drivers/media/platform/msm/camera/cam_icp/cam_icp_context.h  5
-rw-r--r--  drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c  3
-rw-r--r--  drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h  2
-rw-r--r--  drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h  2
-rw-r--r--  drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175_130.h  5
-rw-r--r--  drivers/media/platform/msm/npu/npu_debugfs.c  57
-rw-r--r--  drivers/media/platform/msm/npu_v2/npu_common.h  10
-rw-r--r--  drivers/media/platform/msm/npu_v2/npu_debugfs.c  56
-rw-r--r--  drivers/media/platform/msm/npu_v2/npu_dev.c  124
-rw-r--r--  drivers/media/platform/msm/npu_v2/npu_host_ipc.c  10
-rw-r--r--  drivers/media/platform/msm/npu_v2/npu_hw_access.c  2
-rw-r--r--  drivers/media/platform/msm/npu_v2/npu_mgr.c  868
-rw-r--r--  drivers/media/platform/msm/npu_v2/npu_mgr.h  47
-rw-r--r--  drivers/media/platform/msm/vidc/msm_v4l2_private.c  6
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vidc_common.c  9
-rw-r--r--  drivers/media/platform/msm/vidc/venus_hfi.c  31
-rw-r--r--  drivers/mtd/devices/msm_qpic_nand.c  887
-rw-r--r--  drivers/mtd/devices/msm_qpic_nand.h  27
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c  5
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c  3
-rw-r--r--  drivers/pci/host/pci-msm.c  123
-rw-r--r--  drivers/pinctrl/qcom/pinctrl-qcs405.c  2
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth.c  23
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth_i.h  5
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth_uc.c  123
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa.c  26
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c  22
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_i.h  13
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_utils.c  45
-rw-r--r--  drivers/platform/msm/mhi_dev/mhi.c  94
-rw-r--r--  drivers/platform/msm/mhi_dev/mhi.h  2
-rw-r--r--  drivers/platform/msm/mhi_dev/mhi_uci.c  70
-rw-r--r--  drivers/power/supply/qcom/qg-core.h  1
-rw-r--r--  drivers/power/supply/qcom/qpnp-qg.c  18
-rw-r--r--  drivers/soc/qcom/cdsprm.c  4
-rw-r--r--  drivers/soc/qcom/glink_probe.c  19
-rw-r--r--  drivers/soc/qcom/qmi_rmnet.c  34
-rw-r--r--  drivers/soc/qcom/qmi_rmnet_i.h  1
-rw-r--r--  drivers/soc/qcom/rq_stats.c  4
-rw-r--r--  drivers/tty/serial/msm_geni_serial.c  13
-rw-r--r--  drivers/usb/dwc3/dwc3-msm.c  25
-rw-r--r--  drivers/usb/dwc3/gadget.c  72
-rw-r--r--  drivers/video/backlight/qcom-spmi-wled.c  19
-rw-r--r--  include/linux/bootmem.h  4
-rw-r--r--  include/linux/mhi.h  25
-rw-r--r--  include/linux/msm_mhi_dev.h  13
-rw-r--r--  include/linux/msm_pcie.h  29
-rw-r--r--  include/soc/qcom/qmi_rmnet.h  9
-rw-r--r--  include/uapi/linux/msm_ipa.h  17
-rw-r--r--  include/uapi/linux/msm_npu.h  5
-rw-r--r--  include/uapi/sound/compress_offload.h  2
-rw-r--r--  include/uapi/sound/compress_params.h  8
-rw-r--r--  mm/memblock.c  6
-rw-r--r--  security/selinux/avc.c  54
102 files changed, 4762 insertions, 710 deletions
diff --git a/Documentation/devicetree/bindings/bus/mhi.txt b/Documentation/devicetree/bindings/bus/mhi.txt
index 52c6c932b8de..840f1cddcac1 100644
--- a/Documentation/devicetree/bindings/bus/mhi.txt
+++ b/Documentation/devicetree/bindings/bus/mhi.txt
@@ -41,6 +41,12 @@ Main node properties:
Use "esoc" followed by numbers starting from 0 for external SOC's or
any other relevant names.
+- mhi,sfr-support
+ Usage: optional
+ Value type: <bool>
+ Definition: Set to true if MHI device supports sending subsystem failure
+ reason upon assert in case PCIe link is functional.
+
============================
mhi channel node properties:
============================
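For reference, a boolean binding like the new mhi,sfr-support property is normally read once at probe time. The sketch below is a minimal illustration under assumed names (mhi_probe_ctx and mhi_parse_sfr_support are hypothetical); it is not the mhi_qcom.c code carried by this merge.

#include <linux/of.h>

/* Hypothetical per-device context; only the flag matters for this sketch. */
struct mhi_probe_ctx {
        bool sfr_support;       /* device can report a subsystem failure reason */
};

/* of_property_read_bool() returns true when the property is present. */
static void mhi_parse_sfr_support(const struct device_node *np,
                                  struct mhi_probe_ctx *ctx)
{
        ctx->sfr_support = of_property_read_bool(np, "mhi,sfr-support");
}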
diff --git a/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt b/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
index 21edaa0cf621..d5c50372dee7 100644
--- a/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
+++ b/Documentation/devicetree/bindings/i2c/qcom,i2c-qcom-geni.txt
@@ -15,8 +15,19 @@ Required properties:
- qcom,wrapper-core: Wrapper QUPv3 core containing this I2C controller.
Optional property:
- - qcom,clk-freq-out : Desired I2C bus clock frequency in Hz.
+ - qcom,clk-freq-out: Desired I2C bus clock frequency in Hz.
When missing default to 400000Hz.
+ - qcom,clk-cfg: Array of <u32>. The 2nd through 5th parameters of the array should be
+   set as recommended by the hardware team; the standard frequency parameters are taken
+   care of by the driver itself. This field is needed only if the client frequency is not
+   one of the standard I2C frequencies supported by the driver, or to fine-tune the existing clock parameters.
+ 1st parameter: clk-freq-out, desired I2C bus clock frequency in Hz.
+ 2nd parameter: clk_div, desired I2C bus divider value.
+ 3rd parameter: t_high, desired HIGH period of SCL clock.
+ 4th parameter: t_low, desired LOW period of SCL clock.
+ 5th parameter: t_cycle, desired clock cycle.
+
+ Note: Both qcom,clk-freq-out and qcom,clk-cfg should not be specified at the same time.
Child nodes should conform to i2c bus binding.
@@ -37,4 +48,5 @@ i2c@a94000 {
#size-cells = <0>;
qcom,wrapper-core = <&qupv3_0>;
qcom,clk-freq-out = <400000>;
+ qcom,clk-cfg = <400000 2 5 12 24>; //optional to qcom,clk-freq-out
};
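As a rough, hypothetical sketch of how a driver could consume the five-element qcom,clk-cfg tuple and fall back to qcom,clk-freq-out, something along these lines would work; the struct and function names are assumptions for illustration, not the actual i2c-qcom-geni.c change in this merge.

#include <linux/of.h>
#include <linux/types.h>

struct geni_i2c_clk_cfg {
        u32 clk_freq_out;       /* desired bus frequency in Hz */
        u32 clk_div;            /* serial-engine clock divider */
        u32 t_high;             /* SCL high period */
        u32 t_low;              /* SCL low period */
        u32 t_cycle;            /* total SCL cycle */
};

static int geni_i2c_read_clk_cfg(const struct device_node *np,
                                 struct geni_i2c_clk_cfg *cfg)
{
        u32 vals[5];

        /* The five-element form wins when present. */
        if (!of_property_read_u32_array(np, "qcom,clk-cfg", vals, 5)) {
                cfg->clk_freq_out = vals[0];
                cfg->clk_div = vals[1];
                cfg->t_high = vals[2];
                cfg->t_low = vals[3];
                cfg->t_cycle = vals[4];
                return 0;
        }

        /* Otherwise fall back to the simple property, defaulting to 400 kHz. */
        if (of_property_read_u32(np, "qcom,clk-freq-out", &cfg->clk_freq_out))
                cfg->clk_freq_out = 400000;
        return 0;
}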
diff --git a/arch/arm/configs/vendor/sdxprairie-perf_defconfig b/arch/arm/configs/vendor/sdxprairie-perf_defconfig
index b3d3a172c1a7..248f80d59b2f 100644
--- a/arch/arm/configs/vendor/sdxprairie-perf_defconfig
+++ b/arch/arm/configs/vendor/sdxprairie-perf_defconfig
@@ -35,7 +35,9 @@ CONFIG_ARCH_SDXPRAIRIE=y
CONFIG_PCI_MSM=y
CONFIG_PCI_MSM_MSI=y
CONFIG_PREEMPT=y
+CONFIG_ARM_MODULE_PLTS=y
CONFIG_CMA=y
+CONFIG_ZSMALLOC=y
CONFIG_SECCOMP=y
CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
CONFIG_CPU_FREQ=y
@@ -179,6 +181,7 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_MSM_QPIC_NAND=y
CONFIG_MTD_NAND=y
CONFIG_MTD_UBI=y
+CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_QSEECOM=y
@@ -467,6 +470,7 @@ CONFIG_CRYPTO_CTR=y
CONFIG_CRYPTO_CMAC=y
CONFIG_CRYPTO_SHA512=y
CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_LZ4=y
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=m
CONFIG_CRYPTO_DEV_QCRYPTO=m
diff --git a/arch/arm/configs/vendor/sdxprairie_defconfig b/arch/arm/configs/vendor/sdxprairie_defconfig
index c2498f6d0777..52c84e7a185f 100644
--- a/arch/arm/configs/vendor/sdxprairie_defconfig
+++ b/arch/arm/configs/vendor/sdxprairie_defconfig
@@ -35,7 +35,9 @@ CONFIG_ARCH_SDXPRAIRIE=y
CONFIG_PCI_MSM=y
CONFIG_PCI_MSM_MSI=y
CONFIG_PREEMPT=y
+CONFIG_ARM_MODULE_PLTS=y
CONFIG_CMA=y
+CONFIG_ZSMALLOC=y
CONFIG_SECCOMP=y
CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
CONFIG_CPU_FREQ=y
@@ -177,6 +179,7 @@ CONFIG_MTD_BLOCK=y
CONFIG_MTD_MSM_QPIC_NAND=y
CONFIG_MTD_NAND=y
CONFIG_MTD_UBI=y
+CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_QSEECOM=y
@@ -501,6 +504,7 @@ CONFIG_CRYPTO_CTR=y
CONFIG_CRYPTO_CMAC=y
CONFIG_CRYPTO_SHA512=y
CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_LZ4=y
CONFIG_CRYPTO_DRBG_HASH=y
CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=m
CONFIG_CRYPTO_DEV_QCRYPTO=m
diff --git a/arch/arm/configs/vendor/trinket-perf_defconfig b/arch/arm/configs/vendor/trinket-perf_defconfig
new file mode 100644
index 000000000000..bb27b3b2c7f9
--- /dev/null
+++ b/arch/arm/configs/vendor/trinket-perf_defconfig
@@ -0,0 +1,669 @@
+CONFIG_LOCALVERSION="-perf"
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_FHANDLE is not set
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_SCHED_WALT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_BPF=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_DEFAULT_USE_ENERGY_AWARE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_TRINKET=y
+# CONFIG_VDSO is not set
+CONFIG_PCI_MSM=y
+CONFIG_SMP=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_ARM_PSCI=y
+CONFIG_PREEMPT=y
+CONFIG_HIGHMEM=y
+CONFIG_CMA=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_SECCOMP=y
+CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
+CONFIG_CPU_FREQ_TIMES=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_KERNEL_MODE_NEON=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_SOCKET_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_NF_SOCKET_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_IP_SCTP=y
+CONFIG_L2TP=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_QRTR=y
+CONFIG_QRTR_SMD=y
+CONFIG_SOCKEV_NLMCAST=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+# CONFIG_CFG80211_CRDA_SUPPORT is not set
+CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_WCD_IRQ=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_ZRAM_DEDUP=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_HDCP_QSEECOM=y
+CONFIG_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_MEMORY_STATE_TIME=y
+CONFIG_QPNP_MISC=y
+CONFIG_FPR_FPC=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_RMNET=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_USBNET=y
+CONFIG_WIL6210=m
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_GENL=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_HIMAX_CHIPSET=y
+CONFIG_TOUCHSCREEN_HIMAX_I2C=y
+CONFIG_TOUCHSCREEN_HIMAX_INCELL=y
+CONFIG_TOUCHSCREEN_HIMAX_IC_HX83112=y
+CONFIG_TOUCHSCREEN_HIMAX_DEBUG=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_TCM=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_CORE=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_TOUCH=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_DEVICE=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_TESTING=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_REFLASH=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_RECOVERY=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_ZEROFLASH=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_DIAGNOSTICS=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+CONFIG_SERIAL_MSM_GENI=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+CONFIG_DIAG_CHAR=y
+CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_QCOM_GENI=y
+CONFIG_SPI=y
+CONFIG_SPI_QCOM_GENI=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_SPMI_SIMULATOR=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_PINCTRL_TRINKET=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_QG=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
+CONFIG_QPNP_SMB5=y
+CONFIG_SMB1390_CHARGE_PUMP=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
+CONFIG_QTI_BCL_PMIC5=y
+CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_QTI_ADC_TM=y
+CONFIG_QTI_CX_IPEAK_COOLING_DEVICE=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_REGULATOR_PM8008=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_REFGEN=y
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_MSM_CAMERA=y
+CONFIG_MSM_CAMERA_DEBUG=y
+CONFIG_MSMB_CAMERA=y
+CONFIG_MSMB_CAMERA_DEBUG=y
+CONFIG_MSM_CAMERA_SENSOR=y
+CONFIG_MSM_CPP=y
+CONFIG_MSM_CCI=y
+CONFIG_MSM_CSI20_HEADER=y
+CONFIG_MSM_CSI22_HEADER=y
+CONFIG_MSM_CSI30_HEADER=y
+CONFIG_MSM_CSI31_HEADER=y
+CONFIG_MSM_CSIPHY=y
+CONFIG_MSM_CSID=y
+CONFIG_MSM_EEPROM=y
+CONFIG_MSM_ISPIF=y
+CONFIG_MSM_DUAL_ISP_SYNC=y
+CONFIG_IMX134=y
+CONFIG_IMX132=y
+CONFIG_OV9724=y
+CONFIG_OV5648=y
+CONFIG_GC0339=y
+CONFIG_OV8825=y
+CONFIG_OV8865=y
+CONFIG_s5k4e1=y
+CONFIG_OV12830=y
+CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
+CONFIG_MSMB_JPEG=y
+CONFIG_MSM_FD=y
+CONFIG_MSM_JPEGDMA=y
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_MSM_VIDC_GOVERNORS=y
+CONFIG_MSM_SDE_ROTATOR=y
+CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_DVB_MPQ_SW=y
+CONFIG_DRM=y
+CONFIG_DRM_MSM_REGISTER_LOGGING=y
+CONFIG_DRM_SDE_EVTLOG_DEBUG=y
+CONFIG_DRM_SDE_RSC=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_FB_VIRTUAL=y
+CONFIG_BACKLIGHT_QCOM_SPMI_WLED=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_HID_SONY=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_QCOM_EMU_PHY=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_MSM_HSUSB_PHY=y
+CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=900
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=m
+CONFIG_MMC_RING_BUFFER=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
+CONFIG_MMC_CQ_HCI=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_QTI_TRI_LED=y
+CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_GPI_DMA=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ION=y
+CONFIG_QCOM_GENI_SE=y
+CONFIG_QPNP_REVID=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_USB_BAM=y
+CONFIG_IPA3=y
+CONFIG_IPA_WDI_UNIFIED_API=y
+CONFIG_RMNET_IPA3=y
+CONFIG_RNDIS_IPA=y
+CONFIG_IPA_UT=y
+CONFIG_MSM_11AD=m
+CONFIG_QCOM_MDSS_PLL=y
+CONFIG_COMMON_CLK_QCOM=y
+CONFIG_QCOM_CLK_SMD_RPM=y
+CONFIG_SPMI_PMIC_CLKDIV=y
+CONFIG_CLOCK_CPU_OSM=y
+CONFIG_SM_GCC_TRINKET=y
+CONFIG_SM_GPUCC_TRINKET=y
+CONFIG_SM_VIDEOCC_TRINKET=y
+CONFIG_SM_DISPCC_TRINKET=y
+CONFIG_SM_DEBUGCC_TRINKET=y
+CONFIG_HWSPINLOCK=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_MAILBOX=y
+CONFIG_QCOM_APCS_IPC=y
+CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+CONFIG_RPMSG_QCOM_GLINK_SPI=y
+CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_QCOM_QMI_HELPERS=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_QPNP_PBS=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_QMI_COMM=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y
+CONFIG_SSR_SYSMON_NOTIF_TIMEOUT=20000
+CONFIG_SSR_SUBSYS_NOTIF_TIMEOUT=20000
+CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_DCC_V2=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_ICNSS=y
+CONFIG_ICNSS_QMI=y
+CONFIG_QCOM_EUD=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_QSEE_IPC_IRQ=y
+CONFIG_QCOM_GLINK=y
+CONFIG_QCOM_GLINK_PKT=y
+CONFIG_QTI_RPM_STATS_LOG=y
+CONFIG_MSM_CDSP_LOADER=y
+CONFIG_QCOM_SMCINVOKE=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_AVTIMER=y
+CONFIG_QCOM_FSA4480_I2C=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_QCOM_SMP2P_SLEEPSTATE=y
+CONFIG_QCOM_CDSP_RM=y
+CONFIG_QCOM_CX_IPEAK=y
+CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_QCOMCCI_HWMON=y
+CONFIG_QCOM_M4M_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_IIO=y
+CONFIG_QCOM_SPMI_ADC5=y
+CONFIG_PWM=y
+CONFIG_PWM_QTI_LPG=y
+CONFIG_QCOM_KGSL=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_QTI_MPM=y
+CONFIG_RAS=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_QCOM_QFPROM=y
+CONFIG_NVMEM_SPMI_SDAM=y
+CONFIG_SENSORS_SSC=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_ENCRYPTION=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_FRAME_WARN=2048
+CONFIG_PAGE_OWNER=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_SCHEDSTATS=y
+CONFIG_IPC_LOGGING=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_DUMMY=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_EVENT=y
+CONFIG_PFK=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
diff --git a/arch/arm/configs/vendor/trinket_defconfig b/arch/arm/configs/vendor/trinket_defconfig
new file mode 100644
index 000000000000..72897dae4c33
--- /dev/null
+++ b/arch/arm/configs/vendor/trinket_defconfig
@@ -0,0 +1,752 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_FHANDLE is not set
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_SCHED_WALT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_DEBUG_BLK_CGROUP=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_BPF=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_DEFAULT_USE_ENERGY_AWARE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_TRINKET=y
+# CONFIG_VDSO is not set
+CONFIG_PCI_MSM=y
+CONFIG_PCI_MSM_MSI=y
+CONFIG_SMP=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_ARM_PSCI=y
+CONFIG_PREEMPT=y
+CONFIG_HIGHMEM=y
+CONFIG_CLEANCACHE=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_SECCOMP=y
+CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
+CONFIG_CPU_FREQ_TIMES=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_VFP=y
+CONFIG_NEON=y
+CONFIG_KERNEL_MODE_NEON=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_SOCKET_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_NF_SOCKET_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_IP_SCTP=y
+CONFIG_L2TP=y
+CONFIG_L2TP_DEBUGFS=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_QRTR=y
+CONFIG_QRTR_SMD=y
+CONFIG_SOCKEV_NLMCAST=y
+CONFIG_BT=y
+CONFIG_MSM_BT_POWER=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+# CONFIG_CFG80211_CRDA_SUPPORT is not set
+CONFIG_RFKILL=y
+CONFIG_NFC_NQ=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_WCD_IRQ=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_ZRAM_DEDUP=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_HDCP_QSEECOM=y
+CONFIG_QSEECOM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_MEMORY_STATE_TIME=y
+CONFIG_QPNP_MISC=y
+CONFIG_FPR_FPC=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
+CONFIG_SCSI_UFSHCD_CMD_LOGGING=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_RMNET=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_USBNET=y
+CONFIG_WIL6210=m
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_GENL=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_HIMAX_CHIPSET=y
+CONFIG_TOUCHSCREEN_HIMAX_I2C=y
+CONFIG_TOUCHSCREEN_HIMAX_INCELL=y
+CONFIG_TOUCHSCREEN_HIMAX_IC_HX83112=y
+CONFIG_TOUCHSCREEN_HIMAX_DEBUG=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_TCM=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_CORE=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_TOUCH=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_DEVICE=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_TESTING=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_REFLASH=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_RECOVERY=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_ZEROFLASH=y
+CONFIG_TOUCHSCREEN_SYNAPTICS_TCM_DIAGNOSTICS=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_QPNP_POWER_ON=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+CONFIG_SERIAL_MSM_GENI=y
+CONFIG_SERIAL_MSM_GENI_CONSOLE=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_TTY_PRINTK=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
+CONFIG_DIAG_CHAR=y
+CONFIG_MSM_ADSPRPC=y
+CONFIG_MSM_RDBG=m
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_QCOM_GENI=y
+CONFIG_SPI=y
+CONFIG_SPI_QCOM_GENI=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_SPMI_SIMULATOR=y
+CONFIG_SLIMBUS_MSM_NGD=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_PINCTRL_TRINKET=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_QCOM_DLOAD_MODE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_QPNP_QG=y
+CONFIG_SMB1355_SLAVE_CHARGER=y
+CONFIG_QPNP_SMB5=y
+CONFIG_SMB1390_CHARGE_PUMP=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_LOW_LIMITS=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_QTI_THERMAL_LIMITS_DCVS=y
+CONFIG_QTI_VIRTUAL_SENSOR=y
+CONFIG_QTI_QMI_COOLING_DEVICE=y
+CONFIG_REGULATOR_COOLING_DEVICE=y
+CONFIG_QTI_BCL_PMIC5=y
+CONFIG_QTI_BCL_SOC_DRIVER=y
+CONFIG_QTI_ADC_TM=y
+CONFIG_QTI_CX_IPEAK_COOLING_DEVICE=y
+CONFIG_MFD_I2C_PMIC=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_PROXY_CONSUMER=y
+CONFIG_REGULATOR_PM8008=y
+CONFIG_REGULATOR_QPNP_LCDB=y
+CONFIG_REGULATOR_REFGEN=y
+CONFIG_REGULATOR_RPM_SMD=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_MSM_CAMERA=y
+CONFIG_MSM_CAMERA_DEBUG=y
+CONFIG_MSMB_CAMERA=y
+CONFIG_MSMB_CAMERA_DEBUG=y
+CONFIG_MSM_CAMERA_SENSOR=y
+CONFIG_MSM_CPP=y
+CONFIG_MSM_CCI=y
+CONFIG_MSM_CSI20_HEADER=y
+CONFIG_MSM_CSI22_HEADER=y
+CONFIG_MSM_CSI30_HEADER=y
+CONFIG_MSM_CSI31_HEADER=y
+CONFIG_MSM_CSIPHY=y
+CONFIG_MSM_CSID=y
+CONFIG_MSM_EEPROM=y
+CONFIG_MSM_ISPIF=y
+CONFIG_MSM_DUAL_ISP_SYNC=y
+CONFIG_IMX134=y
+CONFIG_IMX132=y
+CONFIG_OV9724=y
+CONFIG_OV5648=y
+CONFIG_GC0339=y
+CONFIG_OV8825=y
+CONFIG_OV8865=y
+CONFIG_s5k4e1=y
+CONFIG_OV12830=y
+CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
+CONFIG_MSMB_JPEG=y
+CONFIG_MSM_FD=y
+CONFIG_MSM_JPEGDMA=y
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_MSM_VIDC_GOVERNORS=y
+CONFIG_MSM_SDE_ROTATOR=y
+CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_DVB_MPQ_SW=y
+CONFIG_DRM=y
+CONFIG_DRM_MSM_REGISTER_LOGGING=y
+CONFIG_DRM_SDE_EVTLOG_DEBUG=y
+CONFIG_DRM_SDE_RSC=y
+CONFIG_FB_VIRTUAL=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_QCOM_SPMI_WLED=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_USB_AUDIO_QMI=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_HID_SONY=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_QCOM_EMU_PHY=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_MSM_HSUSB_PHY=y
+CONFIG_DUAL_ROLE_USB_INTF=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=900
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_PD_POLICY=y
+CONFIG_QPNP_USB_PDPHY=y
+CONFIG_MMC=y
+CONFIG_MMC_PERF_PROFILING=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=m
+CONFIG_MMC_RING_BUFFER=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SDHCI_MSM_ICE=y
+CONFIG_MMC_CQ_HCI=y
+CONFIG_LEDS_QPNP_FLASH_V2=y
+CONFIG_LEDS_QPNP_HAPTICS=y
+CONFIG_LEDS_QTI_TRI_LED=y
+CONFIG_LEDS_QPNP_VIBRATOR_LDO=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_EDAC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_QPNP=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_GPI_DMA=y
+CONFIG_QCOM_GPI_DMA_DEBUG=y
+CONFIG_DEBUG_DMA_BUF_REF=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ION=y
+CONFIG_QCOM_GENI_SE=y
+CONFIG_QPNP_REVID=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_USB_BAM=y
+CONFIG_IPA3=y
+CONFIG_IPA_DEBUG=y
+CONFIG_IPA_WDI_UNIFIED_API=y
+CONFIG_RMNET_IPA3=y
+CONFIG_RNDIS_IPA=y
+CONFIG_IPA_UT=y
+CONFIG_MSM_11AD=m
+CONFIG_QCOM_MDSS_PLL=y
+CONFIG_COMMON_CLK_QCOM=y
+CONFIG_QCOM_CLK_SMD_RPM=y
+CONFIG_SPMI_PMIC_CLKDIV=y
+CONFIG_CLOCK_CPU_OSM=y
+CONFIG_SM_GCC_TRINKET=y
+CONFIG_SM_GPUCC_TRINKET=y
+CONFIG_SM_VIDEOCC_TRINKET=y
+CONFIG_SM_DISPCC_TRINKET=y
+CONFIG_SM_DEBUGCC_TRINKET=y
+CONFIG_HWSPINLOCK=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_MAILBOX=y
+CONFIG_QCOM_APCS_IPC=y
+CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_IOMMU_TLBSYNC_DEBUG=y
+CONFIG_ARM_SMMU_TESTBUS_DUMP=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+CONFIG_RPMSG_QCOM_GLINK_SPI=y
+CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_QCOM_QMI_HELPERS=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QCOM_WDOG_IPI_ENABLE=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_QPNP_PBS=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_QMI_COMM=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y
+CONFIG_SSR_SYSMON_NOTIF_TIMEOUT=20000
+CONFIG_SSR_SUBSYS_NOTIF_TIMEOUT=20000
+CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_MSM_CORE_HANG_DETECT=y
+CONFIG_QCOM_DCC_V2=y
+CONFIG_MSM_GLADIATOR_HANG_DETECT=y
+CONFIG_MSM_GLADIATOR_ERP=y
+CONFIG_PANIC_ON_GLADIATOR_ERROR=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_ICNSS=y
+CONFIG_ICNSS_DEBUG=y
+CONFIG_ICNSS_QMI=y
+CONFIG_QCOM_EUD=y
+CONFIG_QCOM_BUS_SCALING=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_QSEE_IPC_IRQ=y
+CONFIG_QCOM_GLINK=y
+CONFIG_QCOM_GLINK_PKT=y
+# CONFIG_MSM_JTAGV8 is not set
+CONFIG_QTI_RPM_STATS_LOG=y
+CONFIG_MSM_CDSP_LOADER=y
+CONFIG_QCOM_SMCINVOKE=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_AVTIMER=y
+CONFIG_QCOM_FSA4480_I2C=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_MSM_PERFORMANCE=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_QCOM_SMP2P_SLEEPSTATE=y
+CONFIG_QCOM_CDSP_RM=y
+CONFIG_QCOM_CX_IPEAK=y
+CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_QCOM_BIMC_BWMON=y
+CONFIG_ARM_MEMLAT_MON=y
+CONFIG_QCOMCCI_HWMON=y
+CONFIG_QCOM_M4M_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
+CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON=y
+CONFIG_DEVFREQ_GOV_MEMLAT=y
+CONFIG_QCOM_DEVFREQ_DEVBW=y
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_IIO=y
+CONFIG_QCOM_SPMI_ADC5=y
+CONFIG_PWM=y
+CONFIG_PWM_QTI_LPG=y
+CONFIG_QCOM_KGSL=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_QTI_MPM=y
+CONFIG_RAS=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_QCOM_QFPROM=y
+CONFIG_NVMEM_SPMI_SDAM=y
+CONFIG_SENSORS_SSC=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_ENCRYPTION=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_MODULE_LOAD_INFO=y
+CONFIG_DEBUG_INFO=y
+CONFIG_FRAME_WARN=2048
+CONFIG_PAGE_OWNER=y
+CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_SLUB_DEBUG_PANIC_ON=y
+CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_PAGE_POISONING_ENABLE_DEFAULT=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_FREE=y
+CONFIG_DEBUG_OBJECTS_TIMERS=y
+CONFIG_DEBUG_OBJECTS_WORK=y
+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
+CONFIG_SLUB_DEBUG_ON=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_WQ_WATCHDOG=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANIC_ON_SCHED_BUG=y
+CONFIG_PANIC_ON_RT_THROTTLING=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_LOCK_TORTURE_TEST=m
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_DEBUG_CREDENTIALS=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_FAIL_MMC_REQUEST=y
+CONFIG_UFS_FAULT_INJECTION=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_IPC_LOGGING=y
+CONFIG_QCOM_RTB=y
+CONFIG_QCOM_RTB_SEPARATE_CPUS=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_PREEMPTIRQ_EVENTS=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_LKDTM=m
+CONFIG_ATOMIC64_SELFTEST=m
+CONFIG_TEST_USER_COPY=m
+CONFIG_MEMTEST=y
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+CONFIG_PANIC_ON_DATA_CORRUPTION=y
+CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
+CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_DUMMY=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_REMOTE_ETM_DEFAULT_ENABLE=0
+CONFIG_CORESIGHT_TGU=y
+CONFIG_CORESIGHT_EVENT=y
+CONFIG_PFK=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_HARDENED_USERCOPY_PAGESPAN=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
+CONFIG_CRYPTO_DEV_QCRYPTO=y
+CONFIG_CRYPTO_DEV_QCEDEV=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_XZ_DEC=y
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 00c08ed88f08..b477e3f5faa0 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1392,7 +1392,8 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
start = (addr - bitmap_base) >> PAGE_SHIFT;
- if (addr + size > bitmap_base + mapping_size) {
+ if ((addr + size - 1 > addr) &&
+ (addr + size - 1 > bitmap_base + mapping_size - 1)) {
/*
* The address range to be freed reaches into the iova
* range of the next bitmap. This should not happen as
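The rewritten condition makes the end-of-range test overflow-safe: if bitmap_base + mapping_size wraps past zero (a mapping ending at the very top of the IOVA space), the old comparison would flag every free as reaching into the next bitmap. The standalone demo below uses made-up 32-bit values purely to illustrate the difference and is not part of the patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool overruns_old(uint32_t addr, uint32_t size, uint32_t base, uint32_t span)
{
        return addr + size > base + span;               /* RHS can wrap to 0 */
}

static bool overruns_new(uint32_t addr, uint32_t size, uint32_t base, uint32_t span)
{
        return (addr + size - 1 > addr) &&              /* end address did not wrap */
               (addr + size - 1 > base + span - 1);     /* last byte past end of bitmap */
}

int main(void)
{
        /* A mapping that ends exactly at the top of a 32-bit IOVA space. */
        uint32_t base = 0xfff00000u, span = 0x00100000u;        /* base + span wraps to 0 */
        uint32_t addr = 0xfff80000u, size = 0x00040000u;        /* range fully inside it */

        printf("old check reports overrun: %d\n", overruns_old(addr, size, base, span)); /* 1, spurious */
        printf("new check reports overrun: %d\n", overruns_new(addr, size, base, span)); /* 0, correct */
        return 0;
}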
diff --git a/arch/arm64/boot/dts/qcom/atoll-coresight.dtsi b/arch/arm64/boot/dts/qcom/atoll-coresight.dtsi
index 2ce211c25e65..83b42d78fe21 100644
--- a/arch/arm64/boot/dts/qcom/atoll-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/atoll-coresight.dtsi
@@ -184,7 +184,6 @@
arm,primecell-periphid = <0x0003b968>;
reg = <0x6c47000 0x1000>;
reg-names = "tpdm-base";
- status = "disabled";
coresight-name = "coresight-tpdm-npu";
@@ -204,11 +203,31 @@
reg = <0x6c40000 0x1000>;
reg-names = "tpdm-base";
- status = "disabled";
coresight-name = "coresight-tpdm-npu-llm";
- clocks = <&clock_aop QDSS_CLK>;
- clock-names = "apb_pclk";
+ clocks = <&clock_aop QDSS_CLK>,
+ <&clock_gcc GCC_NPU_AXI_CLK>,
+ <&clock_gcc GCC_NPU_CFG_AHB_CLK>,
+ <&clock_npucc NPU_CC_XO_CLK>,
+ <&clock_npucc NPU_CC_CORE_CLK>,
+ <&clock_npucc NPU_CC_CORE_CLK_SRC>;
+
+ clock-names = "apb_pclk",
+ "npu_axi_clk",
+ "npu_cfg_ahb_clk",
+ "npu_cc_xo_clk",
+ "npu_core_clk",
+ "npu_core_clk_src";
+
+ qcom,proxy-clks = "npu_axi_clk",
+ "npu_cfg_ahb_clk",
+ "npu_cc_xo_clk",
+ "npu_core_clk",
+ "npu_core_clk_src";
+
+ vdd-supply = <&npu_core_gdsc>;
+ vdd_cx-supply = <&VDD_CX_LEVEL>;
+ qcom,proxy-regs ="vdd", "vdd_cx";
port {
tpdm_npu_llm_out_funnel_npu: endpoint {
remote-endpoint =
@@ -225,7 +244,6 @@
coresight-name = "coresight-tpdm-npu-dpm";
- status = "disabled";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
port {
@@ -758,7 +776,6 @@
coresight-name = "coresight-funnel-npu";
- status = "disabled";
clocks = <&clock_aop QDSS_CLK>;
clock-names = "apb_pclk";
ports {
diff --git a/arch/arm64/boot/dts/qcom/atoll-idp.dtsi b/arch/arm64/boot/dts/qcom/atoll-idp.dtsi
index 2cc6e56383f3..334bc12609a7 100644
--- a/arch/arm64/boot/dts/qcom/atoll-idp.dtsi
+++ b/arch/arm64/boot/dts/qcom/atoll-idp.dtsi
@@ -264,6 +264,17 @@
qcom,platform-reset-gpio = <&pm6150l_gpios 3 0>;
};
+&dsi_nt36672c_video {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply_no_labibb>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <255>;
+ qcom,platform-te-gpio = <&tlmm 10 0>;
+ qcom,platform-reset-gpio = <&pm6150l_gpios 3 0>;
+ qcom,platform-bklight-en-gpio = <&pm6150l_gpios 10 0>;
+ qcom,platform-en-gpio = <&pm6150l_gpios 4 0>;
+};
+
&qupv3_se0_i2c {
status = "ok";
qcom,clk-freq-out = <1000000>;
diff --git a/arch/arm64/boot/dts/qcom/atoll-npu.dtsi b/arch/arm64/boot/dts/qcom/atoll-npu.dtsi
index 4b292c57ae53..cfb7d73f9cde 100644
--- a/arch/arm64/boot/dts/qcom/atoll-npu.dtsi
+++ b/arch/arm64/boot/dts/qcom/atoll-npu.dtsi
@@ -67,7 +67,8 @@
#mbox-cells = <1>;
qcom,npubw-devs = <&npu_npu_ddr_bw>, <&npudsp_npu_ddr_bw>;
qcom,npubw-dev-names = "ddr_bw", "dsp_ddr_bw";
- qcom,src-dst-ports = <MSM_BUS_MASTER_NPU MSM_BUS_SLAVE_EBI_CH0>;
+ qcom,src-dst-ports = <MSM_BUS_MASTER_NPU MSM_BUS_SLAVE_EBI_CH0>,
+ <MSM_BUS_MASTER_NPU MSM_BUS_SLAVE_CLK_CTL>;
qcom,npu-pwrlevels {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/qcom/atoll-qrd.dtsi b/arch/arm64/boot/dts/qcom/atoll-qrd.dtsi
index 47878bc69dbd..c1d450b4dea3 100644
--- a/arch/arm64/boot/dts/qcom/atoll-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/atoll-qrd.dtsi
@@ -148,12 +148,12 @@
0x0a 0x184 /* PLL_LOCK_DELAY */
0x19 0xb4 /* PLL_DIGITAL_TIMERS_TWO */
0x40 0x194 /* PLL_BIAS_CONTROL_1 */
- 0x22 0x198 /* PLL_BIAS_CONTROL_2 */
+ 0x18 0x198 /* PLL_BIAS_CONTROL_2 */
0x21 0x214 /* PWR_CTRL2 */
0x08 0x220 /* IMP_CTRL1 */
0x58 0x224 /* IMP_CTRL2 */
- 0x67 0x240 /* TUNE1 */
- 0x29 0x244 /* TUNE2 */
+ 0x47 0x240 /* TUNE1 */
+ 0x28 0x244 /* TUNE2 */
0xca 0x248 /* TUNE3 */
0x04 0x24c /* TUNE4 */
0x03 0x250 /* TUNE5 */
@@ -169,12 +169,12 @@
0x0a 0x184 /* PLL_LOCK_DELAY */
0x19 0xb4 /* PLL_DIGITAL_TIMERS_TWO */
0x40 0x194 /* PLL_BIAS_CONTROL_1 */
- 0x22 0x198 /* PLL_BIAS_CONTROL_2 */
+ 0x18 0x198 /* PLL_BIAS_CONTROL_2 */
0x21 0x214 /* PWR_CTRL2 */
0x08 0x220 /* IMP_CTRL1 */
0x58 0x224 /* IMP_CTRL2 */
- 0x67 0x240 /* TUNE1 */
- 0x29 0x244 /* TUNE2 */
+ 0x47 0x240 /* TUNE1 */
+ 0x28 0x244 /* TUNE2 */
0xca 0x248 /* TUNE3 */
0x04 0x24c /* TUNE4 */
0x03 0x250 /* TUNE5 */
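The tuning tables above read as flat <value register-offset> pairs, as the inline register comments suggest. Purely as a hedged sketch of how such a sequence is typically applied, and not code from this merge, a PHY driver would walk the pairs and write each value to its offset:

#include <linux/io.h>
#include <linux/types.h>

/* Apply a flat <value, offset> sequence to a memory-mapped PHY register block. */
static void qusb_phy_write_seq(void __iomem *base, const u32 *seq, int cnt)
{
        int i;

        for (i = 0; i < cnt; i += 2)
                writel_relaxed(seq[i], base + seq[i + 1]);
}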
diff --git a/arch/arm64/boot/dts/qcom/atoll-sde-display.dtsi b/arch/arm64/boot/dts/qcom/atoll-sde-display.dtsi
index 95933e5424a5..d03c4da02a9e 100644
--- a/arch/arm64/boot/dts/qcom/atoll-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/atoll-sde-display.dtsi
@@ -15,6 +15,7 @@
#include "dsi-panel-sim-video.dtsi"
#include "dsi-panel-sim-cmd.dtsi"
#include "dsi-panel-sim-dsc375-cmd.dtsi"
+#include "dsi-panel-nt36672c-fhd-plus-video.dtsi"
#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
&soc {
@@ -143,6 +144,18 @@
qcom,dsi-panel = <&dsi_sim_dsc_375_cmd>;
};
+ dsi_nt36672c_video_display: qcom,dsi-display@5 {
+ label = "dsi_nt36672c_video_display";
+ qcom,display-type = "primary";
+
+ qcom,dsi-ctrl-num = <0>;
+ qcom,dsi-phy-num = <0>;
+
+ qcom,dsi-select-clocks = "mux_byte_clk0", "mux_pixel_clk0",
+ "cphy_byte_clk0", "cphy_pixel_clk0";
+ qcom,dsi-panel = <&dsi_nt36672c_video>;
+ };
+
sde_dsi: qcom,dsi-display {
compatible = "qcom,dsi-display";
@@ -180,7 +193,8 @@
&dsi_rm69299_visionox_amoled_cmd_display
&dsi_sim_vid_display
&dsi_sim_cmd_display
- &dsi_sim_dsc_375_cmd_display>;
+ &dsi_sim_dsc_375_cmd_display
+ &dsi_nt36672c_video_display>;
};
sde_wb: qcom,wb-display@0 {
@@ -335,3 +349,26 @@
};
};
};
+
+&dsi_nt36672c_video {
+ qcom,mdss-dsi-t-clk-post = <0x00>;
+ qcom,mdss-dsi-t-clk-pre = <0x00>;
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "reg_read";
+ qcom,mdss-dsi-panel-status-command = [06 01 00 01 00 00 01 0a];
+ qcom,mdss-dsi-panel-status-command-state = "dsi_hs_mode";
+ qcom,mdss-dsi-panel-status-value = <0x9c>;
+ qcom,mdss-dsi-panel-on-check-value = <0x9c>;
+ qcom,mdss-dsi-panel-status-read-length = <1>;
+ qcom,dsi-supported-dfps-list = <60 90 50>;
+ qcom,mdss-dsi-pan-enable-dynamic-fps;
+ qcom,mdss-dsi-pan-fps-update = "dfps_immediate_porch_mode_vfp";
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-phy-timings = [00 23 09 09 26 24 09
+ 09 09 06 02 04];
+ qcom,display-topology = <1 0 1>;
+ qcom,default-topology-index = <0>;
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/atoll.dtsi b/arch/arm64/boot/dts/qcom/atoll.dtsi
index 13f77af16b27..2b824fa682ae 100644
--- a/arch/arm64/boot/dts/qcom/atoll.dtsi
+++ b/arch/arm64/boot/dts/qcom/atoll.dtsi
@@ -38,7 +38,7 @@
mem-offline {
compatible = "qcom,mem-offline";
- offline-sizes = <0x1 0x40000000 0x0 0x80000000>,
+ offline-sizes = <0x1 0x40000000 0x0 0x40000000>,
<0x1 0xc0000000 0x0 0xc0000000>,
<0x2 0xc0000000 0x1 0x40000000>;
granule = <512>;
@@ -640,7 +640,7 @@
};
disp_rdump_memory: disp_rdump_region@9c000000 {
- reg = <0x0 0x9c000000 0x0 0x01800000>;
+ reg = <0x0 0x9c000000 0x0 0x01700000>;
label = "disp_rdump_region";
};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt36672c-fhd-plus-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt36672c-fhd-plus-video.dtsi
new file mode 100644
index 000000000000..364b51bd4ed6
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt36672c-fhd-plus-video.dtsi
@@ -0,0 +1,113 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&mdss_mdp {
+ dsi_nt36672c_video: qcom,mdss_dsi_nt36672c_video {
+ qcom,mdss-dsi-panel-name =
+ "nt36672c fhd plus video mode dsi panel";
+ qcom,mdss-dsi-panel-type = "dsi_video_mode";
+ qcom,mdss-dsi-virtual-channel-id = <0>;
+ qcom,mdss-dsi-stream = <0>;
+ qcom,mdss-dsi-bpp = <24>;
+ qcom,mdss-dsi-underflow-color = <0xff>;
+ qcom,mdss-dsi-border-color = <0>;
+ qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+ qcom,mdss-dsi-bllp-eof-power-mode;
+ qcom,mdss-dsi-bllp-power-mode;
+ qcom,mdss-dsi-lane-0-state;
+ qcom,mdss-dsi-lane-1-state;
+ qcom,mdss-dsi-lane-2-state;
+ qcom,mdss-dsi-dma-trigger = "trigger_sw";
+ qcom,mdss-dsi-mdp-trigger = "none";
+ qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 20>;
+ qcom,mdss-pan-physical-width-dimension = <74>;
+ qcom,mdss-pan-physical-height-dimension = <131>;
+
+ qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+ qcom,mdss-dsi-panel-blackness-level = <3230>;
+ qcom,panel-cphy-mode;
+
+ qcom,mdss-dsi-display-timings {
+ timing@0{
+ qcom,mdss-dsi-panel-width = <1080>;
+ qcom,mdss-dsi-panel-height = <2400>;
+ qcom,mdss-dsi-h-front-porch = <202>;
+ qcom,mdss-dsi-h-back-porch = <56>;
+ qcom,mdss-dsi-h-pulse-width = <12>;
+ qcom,mdss-dsi-h-sync-skew = <0>;
+ qcom,mdss-dsi-v-back-porch = <10>;
+ qcom,mdss-dsi-v-front-porch = <1291>;
+ qcom,mdss-dsi-v-pulse-width = <10>;
+ qcom,mdss-dsi-panel-framerate = <60>;
+ qcom,mdss-dsi-on-command = [
+ 29 01 00 00 00 00 02 FF 10
+ 29 01 00 00 00 00 02 FB 01
+ 29 01 00 00 00 00 02 B0 00
+ 29 01 00 00 00 00 02 C0 00
+ 29 01 00 00 00 00 03 C2 1B A0
+
+ 29 01 00 00 00 00 02 FF 25
+ 29 01 00 00 00 00 02 FB 01
+ 29 01 00 00 00 00 02 18 20
+
+ 15 01 00 00 00 00 02 FF 2A
+ 15 01 00 00 00 00 02 FB 01
+ 15 01 00 00 00 00 02 27 80
+ 15 01 00 00 00 00 02 28 FD
+
+ 29 01 00 00 00 00 02 FF F0
+ 29 01 00 00 00 00 02 FB 01
+ 29 01 00 00 00 00 02 5A 00
+ 29 01 00 00 00 00 02 A0 08
+ 29 01 00 00 00 00 02 FF D0
+ 29 01 00 00 00 00 02 FB 01
+ 29 01 00 00 00 00 02 09 AD
+
+ 15 01 00 00 00 00 02 FF 10
+ 15 01 00 00 00 00 02 FB 01
+ 15 01 00 00 00 00 02 51 FF
+ 15 01 00 00 00 00 02 53 2C
+ 15 01 00 00 00 00 02 FF 23
+ 15 01 00 00 00 00 02 FB 01
+
+ 15 01 00 00 00 00 02 0A 00
+ 15 01 00 00 00 00 02 0B 00
+ 15 01 00 00 00 00 02 0C 00
+ 15 01 00 00 00 00 02 0D 00
+
+ 15 01 00 00 00 00 02 11 01
+ 15 01 00 00 00 00 02 12 95
+ 15 01 00 00 00 00 02 15 68
+ 15 01 00 00 00 00 02 16 0B
+ 15 01 00 00 00 00 02 6F 00
+ 15 01 00 00 00 00 02 70 00
+ 15 01 00 00 00 00 02 71 00
+ 15 01 00 00 00 00 02 A0 11
+ 15 01 00 00 00 00 02 FF F0
+ 15 01 00 00 00 00 02 FB 01
+ 15 01 00 00 00 00 02 D2 52
+
+ 29 01 00 00 00 00 02 FF 10
+ 29 01 00 00 00 00 02 35 00
+ 05 01 00 00 78 00 02 11 00
+ 05 01 00 00 14 00 02 29 00
+ ];
+ qcom,mdss-dsi-off-command = [
+ 05 01 00 00 14 00 02 28 00
+ 05 01 00 00 78 00 02 10 00
+ ];
+ qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs405-iot-sku12.dts b/arch/arm64/boot/dts/qcom/qcs405-iot-sku12.dts
index 9546c4300c79..ef746b2c4894 100644
--- a/arch/arm64/boot/dts/qcom/qcs405-iot-sku12.dts
+++ b/arch/arm64/boot/dts/qcom/qcs405-iot-sku12.dts
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -34,3 +34,26 @@
};
};
};
+
+&sdhc_2 {
+ /delete-property/ qcom,devfreq,freq-table;
+ /delete-property/ cd-gpios;
+
+ qcom,clk-rates = <400000 20000000 25000000
+ 50000000 100000000>;
+ /* VDD is an external regulator eLDO5 */
+ vdd-io-supply = <&pms405_l6>;
+ qcom,vdd-io-voltage-level = <1800000 1800000>;
+ post-power-on-delay-ms = <100>;
+
+ qcom,core_3_0v_support;
+ qcom,nonremovable;
+
+ pinctrl-names = "active", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on
+ &sdc2_wlan_on4>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off
+ &sdc2_wlan_off4>;
+
+ status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs407-iot-sku12.dts b/arch/arm64/boot/dts/qcom/qcs407-iot-sku12.dts
index dbaf21baa7c1..c62abd1eb1ee 100644
--- a/arch/arm64/boot/dts/qcom/qcs407-iot-sku12.dts
+++ b/arch/arm64/boot/dts/qcom/qcs407-iot-sku12.dts
@@ -34,3 +34,26 @@
};
};
};
+
+&sdhc_2 {
+ /delete-property/ qcom,devfreq,freq-table;
+ /delete-property/ cd-gpios;
+
+ qcom,clk-rates = <400000 20000000 25000000
+ 50000000 100000000>;
+ /* VDD is an external regulator eLDO5 */
+ vdd-io-supply = <&pms405_l6>;
+ qcom,vdd-io-voltage-level = <1800000 1800000>;
+ post-power-on-delay-ms = <100>;
+
+ qcom,core_3_0v_support;
+ qcom,nonremovable;
+
+ pinctrl-names = "active", "sleep";
+ pinctrl-0 = <&sdc2_clk_on &sdc2_cmd_on &sdc2_data_on
+ &sdc2_wlan_on4>;
+ pinctrl-1 = <&sdc2_clk_off &sdc2_cmd_off &sdc2_data_off
+ &sdc2_wlan_off4>;
+
+ status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdx-audio-lpass.dtsi b/arch/arm64/boot/dts/qcom/sdx-audio-lpass.dtsi
index 2c2319f7c886..c7a184201000 100644
--- a/arch/arm64/boot/dts/qcom/sdx-audio-lpass.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdx-audio-lpass.dtsi
@@ -302,6 +302,7 @@
};
prim_slave: prim_slave_pinctrl {
+ status = "disabled";
compatible = "qcom,msm-cdc-pinctrl";
pinctrl-names = "aud_active", "aud_sleep";
pinctrl-0 = <&pri_ws_active_slave
@@ -330,6 +331,7 @@
};
sec_slave: sec_slave_pinctrl {
+ status = "disabled";
compatible = "qcom,msm-cdc-pinctrl";
pinctrl-names = "aud_active", "aud_sleep";
pinctrl-0 = <&sec_ws_active_slave
diff --git a/arch/arm64/boot/dts/qcom/sdx-wsa881x.dtsi b/arch/arm64/boot/dts/qcom/sdx-wsa881x.dtsi
index 3d4e90a260e5..ddcaf1ab23a1 100644
--- a/arch/arm64/boot/dts/qcom/sdx-wsa881x.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdx-wsa881x.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,18 +18,21 @@
#size-cells = <0>;
wsa881x_0211: wsa881x@20170211 {
+ status = "disabled";
compatible = "qcom,wsa881x";
reg = <0x00 0x20170211>;
qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd1>;
};
wsa881x_0212: wsa881x@20170212 {
+ status = "disabled";
compatible = "qcom,wsa881x";
reg = <0x00 0x20170212>;
qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd2>;
};
wsa881x_0213: wsa881x@21170213 {
+ status = "disabled";
compatible = "qcom,wsa881x";
reg = <0x00 0x21170213>;
qcom,spkr-sd-n-node = <&wsa_spkr_wcd_sd1>;
diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-aqc.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-aqc.dtsi
index ff0802a715b8..4a3e587e86f1 100644
--- a/arch/arm64/boot/dts/qcom/sdxprairie-aqc.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdxprairie-aqc.dtsi
@@ -10,8 +10,28 @@
* GNU General Public License for more details.
*/
+&soc {
+ aqo_proxy_host: aqo_host_proxy@17a00040 {
+ reg = <0x17800200 0>;
+ reg-names = "intc-ispendr-n";
+
+ interrupt-parent = <&intc>;
+ interrupts = <GIC_SPI 292 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,proxy-agent = "host";
+ qcom,proxy-method = "msi";
+ };
+
+ aqo_proxy_uc: aqo_uc_proxy@1ec2080 {
+ qcom,proxy-agent = "uc";
+ qcom,proxy-method = "msi";
+ qcom,proxy-msi-addr = /bits/ 64 <0x01ec2080>;
+ qcom,proxy-msi-data = /bits/ 32 <0x636f6d6d>;
+ };
+};
+
&pcie0_rp {
- aqc_x4: aquantia,aqc107@pcie0_rp {
+ aquantia,aqc107@pcie0_rp {
reg = <0 0 0 0 0>;
compatible = "aquantia,aqc-107";
@@ -50,9 +70,70 @@
qcom,smmu-attr-fastmap;
/* AQC IPA offload driver */
+ qcom,rx-proxy = <&aqo_proxy_host>,
+ <&aqo_proxy_uc>;
+ qcom,rx-proxy-mode = "counter";
+
+ qcom,rx-ring-size = <4096>;
+ qcom,rx-buff-size = <2048>;
+ qcom,rx-int-mod-usecs = <64>;
+
+ qcom,rx-gsi-mod-pc = <10>;
+ qcom,rx-gsi-mod-timer = <32>;
+
+ qcom,tx-ring-size = <4096>;
+ qcom,tx-buff-size = <2048>;
+ qcom,tx-wrb-mod-pc = <25>;
+
+ qcom,tx-gsi-mod-pc = <10>;
+ qcom,tx-gsi-mod-timer = <32>;
+
+ qcom,use-pci-direct;
+ };
+};
+
+&pcie0_bus2_dev1_fn0 {
+ aquantia,aqc107@pcie0_bus2_dev1_fn0 {
+ reg = <0 0 0 0 0>;
+
+ compatible = "aquantia,aqc-107";
- qcom,rx-proxy = <&atd_proxy_host>,
- <&atd_proxy_uc>;
+ pci-ids =
+ "1d6a:0001",
+ "1d6a:d107",
+ "1d6a:07b1",
+ "1d6a:87b1",
+ "1d6a:d108",
+ "1d6a:08b1",
+ "1d6a:88b1",
+ "1d6a:d109",
+ "1d6a:09b1",
+ "1d6a:89b1",
+ "1d6a:d100",
+ "1d6a:00b1",
+ "1d6a:80b1",
+ "1d6a:11b1",
+ "1d6a:91b1",
+ "1d6a:51b1",
+ "1d6a:12b1",
+ "1d6a:92b1",
+ "1d6a:52b1";
+
+ qcom,smmu;
+
+ /* IOVA range is restricted to avoid conflicts with PCI BAR
+ * space and IOVA spaces used by peripherals that are currently
+ * attached to IPA.
+ */
+ qcom,smmu-iova-base = /bits/ 64 <0x80000000>;
+ qcom,smmu-iova-size = /bits/ 64 <0x10000000>;
+
+ qcom,smmu-attr-atomic;
+ qcom,smmu-attr-fastmap;
+
+ /* AQC IPA offload driver */
+ qcom,rx-proxy = <&aqo_proxy_host>,
+ <&aqo_proxy_uc>;
qcom,rx-proxy-mode = "counter";
qcom,rx-ring-size = <4096>;
@@ -70,26 +151,67 @@
qcom,tx-gsi-mod-timer = <32>;
qcom,use-pci-direct;
+ };
+};
+
+&pcie0_bus2_dev2_fn0 {
+ aquantia,aqc107@pcie0_bus2_dev1_fn0 {
+ reg = <0 0 0 0 0>;
+
+ compatible = "aquantia,aqc-107";
+
+ pci-ids =
+ "1d6a:0001",
+ "1d6a:d107",
+ "1d6a:07b1",
+ "1d6a:87b1",
+ "1d6a:d108",
+ "1d6a:08b1",
+ "1d6a:88b1",
+ "1d6a:d109",
+ "1d6a:09b1",
+ "1d6a:89b1",
+ "1d6a:d100",
+ "1d6a:00b1",
+ "1d6a:80b1",
+ "1d6a:11b1",
+ "1d6a:91b1",
+ "1d6a:51b1",
+ "1d6a:12b1",
+ "1d6a:92b1",
+ "1d6a:52b1";
- #address-cells = <1>;
- #size-cells = <1>;
+ qcom,smmu;
- atd_proxy_host: host_proxy@17a00040 {
- reg = <0x17800200 0>;
- reg-names = "intc-ispendr-n";
+ /* IOVA range is restricted to avoid conflicts with PCI BAR
+ * space and IOVA spaces used by peripherals that are currently
+ * attached to IPA.
+ */
+ qcom,smmu-iova-base = /bits/ 64 <0x80000000>;
+ qcom,smmu-iova-size = /bits/ 64 <0x10000000>;
- interrupt-parent = <&intc>;
- interrupts = <GIC_SPI 292 IRQ_TYPE_EDGE_RISING>;
+ qcom,smmu-attr-atomic;
+ qcom,smmu-attr-fastmap;
- qcom,proxy-agent = "host";
- qcom,proxy-method = "msi";
- };
+ /* AQC IPA offload driver */
+ qcom,rx-proxy = <&aqo_proxy_host>,
+ <&aqo_proxy_uc>;
+ qcom,rx-proxy-mode = "counter";
- atd_proxy_uc: uc_proxy@1ec2080 {
- qcom,proxy-agent = "uc";
- qcom,proxy-method = "msi";
- qcom,proxy-msi-addr = /bits/ 64 <0x01ec2080>;
- qcom,proxy-msi-data = /bits/ 32 <0x636f6d6d>;
- };
+ qcom,rx-ring-size = <4096>;
+ qcom,rx-buff-size = <2048>;
+ qcom,rx-int-mod-usecs = <64>;
+
+ qcom,rx-gsi-mod-pc = <10>;
+ qcom,rx-gsi-mod-timer = <32>;
+
+ qcom,tx-ring-size = <4096>;
+ qcom,tx-buff-size = <2048>;
+ qcom,tx-wrb-mod-pc = <25>;
+
+ qcom,tx-gsi-mod-pc = <10>;
+ qcom,tx-gsi-mod-timer = <32>;
+
+ qcom,use-pci-direct;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-mtp-le-cpe.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-mtp-le-cpe.dtsi
index 9c7acd5b6e21..3e062686f13d 100644
--- a/arch/arm64/boot/dts/qcom/sdxprairie-mtp-le-cpe.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdxprairie-mtp-le-cpe.dtsi
@@ -95,3 +95,15 @@
&usb {
qcom,smmu-s1-bypass;
};
+
+&i2c_3 {
+ tavil_codec {
+ status = "disabled";
+ };
+};
+
+&soc {
+ sound-tavil {
+ status = "disabled";
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-regulator.dtsi
index 41e4eb599bdc..dc83e84d5c75 100644
--- a/arch/arm64/boot/dts/qcom/sdxprairie-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdxprairie-regulator.dtsi
@@ -54,9 +54,9 @@
S3E: pmxprairie_s3: regulator-pmxprairie-s3 {
regulator-name = "pmxprairie_s3";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
- regulator-min-microvolt = <950000>;
+ regulator-min-microvolt = <800000>;
regulator-max-microvolt = <1000000>;
- qcom,init-voltage = <950000>;
+ qcom,init-voltage = <800000>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/sdxprairie-v2-mtp-le-cpe.dtsi b/arch/arm64/boot/dts/qcom/sdxprairie-v2-mtp-le-cpe.dtsi
index e2f1953ab9ef..057651acf87d 100644
--- a/arch/arm64/boot/dts/qcom/sdxprairie-v2-mtp-le-cpe.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdxprairie-v2-mtp-le-cpe.dtsi
@@ -92,3 +92,15 @@
&sdx_ext_ipc {
status = "ok";
};
+
+&i2c_3 {
+ tavil_codec {
+ status = "disabled";
+ };
+};
+
+&soc {
+ sound-tavil {
+ status = "disabled";
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/sm8150-usb.dtsi b/arch/arm64/boot/dts/qcom/sm8150-usb.dtsi
index 210ead532d21..f15a7a966a26 100644
--- a/arch/arm64/boot/dts/qcom/sm8150-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sm8150-usb.dtsi
@@ -351,7 +351,6 @@
reg-names = "core_base";
iommus = <&apps_smmu 0x160 0x0>;
- qcom,smmu-s1-bypass;
#address-cells = <1>;
#size-cells = <1>;
ranges;
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 87653c86b2e6..d841273aad7a 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -142,6 +142,16 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
regs->pc = pc;
}
+static inline void set_ssbs_bit(struct pt_regs *regs)
+{
+ regs->pstate |= PSR_SSBS_BIT;
+}
+
+static inline void set_compat_ssbs_bit(struct pt_regs *regs)
+{
+ regs->pstate |= COMPAT_PSR_SSBS_BIT;
+}
+
static inline void start_thread(struct pt_regs *regs, unsigned long pc,
unsigned long sp)
{
@@ -149,7 +159,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
regs->pstate = PSR_MODE_EL0t;
if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
- regs->pstate |= PSR_SSBS_BIT;
+ set_ssbs_bit(regs);
regs->sp = sp;
}
@@ -168,7 +178,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
#endif
if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
- regs->pstate |= COMPAT_PSR_SSBS_BIT;
+ set_compat_ssbs_bit(regs);
regs->compat_sp = sp;
}
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 6f3258a9d0eb..8c8ab812e2ea 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -367,7 +367,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
childregs->pstate |= PSR_UAO_BIT;
if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
- childregs->pstate |= PSR_SSBS_BIT;
+ set_ssbs_bit(childregs);
p->thread.cpu_context.x19 = stack_start;
p->thread.cpu_context.x20 = stk_sz;
@@ -409,6 +409,32 @@ void uao_thread_switch(struct task_struct *next)
}
/*
+ * Force SSBS state on context-switch, since it may be lost after migrating
+ * from a CPU which treats the bit as RES0 in a heterogeneous system.
+ */
+static void ssbs_thread_switch(struct task_struct *next)
+{
+ struct pt_regs *regs = task_pt_regs(next);
+
+ /*
+ * Nothing to do for kernel threads, but 'regs' may be junk
+ * (e.g. idle task) so check the flags and bail early.
+ */
+ if (unlikely(next->flags & PF_KTHREAD))
+ return;
+
+ /* If the mitigation is enabled, then we leave SSBS clear. */
+ if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
+ test_tsk_thread_flag(next, TIF_SSBD))
+ return;
+
+ if (compat_user_mode(regs))
+ set_compat_ssbs_bit(regs);
+ else if (user_mode(regs))
+ set_ssbs_bit(regs);
+}
+
+/*
* We store our current task in sp_el0, which is clobbered by userspace. Keep a
* shadow copy so that we can restore this upon entry from userspace.
*
@@ -436,6 +462,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
contextidr_thread_switch(next);
entry_task_switch(next);
uao_thread_switch(next);
+ ssbs_thread_switch(next);
/*
* Complete any pending TLB or cache maintenance on this CPU in case
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 1559070d6060..f49229b619c1 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -335,6 +335,9 @@ static ssize_t regmap_data_read_file(struct file *file, char __user *user_buf,
else if (*ppos >= map->dump_address * map->debugfs_tot_len
+ map->dump_count * map->debugfs_tot_len)
return 0;
+ else if (*ppos < map->dump_address * map->debugfs_tot_len)
+ return 0;
+
return regmap_read_debugfs(map, 0, map->max_register, user_buf,
new_count, ppos);
}
diff --git a/drivers/bluetooth/bluetooth-power.c b/drivers/bluetooth/bluetooth-power.c
index b7774077d03e..c1726a3ad91f 100644
--- a/drivers/bluetooth/bluetooth-power.c
+++ b/drivers/bluetooth/bluetooth-power.c
@@ -92,6 +92,7 @@ static int bt_vreg_init(struct bt_power_vreg_data *vreg)
rc = PTR_ERR(vreg->reg);
pr_err("%s: regulator_get(%s) failed. rc=%d\n",
__func__, vreg->name, rc);
+ vreg->reg = NULL;
goto out;
}
diff --git a/drivers/bus/mhi/controllers/mhi_arch_qcom.c b/drivers/bus/mhi/controllers/mhi_arch_qcom.c
index b914bcb2747d..6213d07a9164 100644
--- a/drivers/bus/mhi/controllers/mhi_arch_qcom.c
+++ b/drivers/bus/mhi/controllers/mhi_arch_qcom.c
@@ -63,6 +63,37 @@ enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_LVL_ERROR;
#endif
+void mhi_reg_write_work(struct work_struct *w)
+{
+ struct mhi_controller *mhi_cntrl = container_of(w,
+ struct mhi_controller,
+ reg_write_work);
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
+ struct pci_dev *pci_dev = mhi_dev->pci_dev;
+ struct reg_write_info *info =
+ &mhi_cntrl->reg_write_q[mhi_cntrl->read_idx];
+
+ if (!info->valid)
+ return;
+
+ if (mhi_is_active(mhi_cntrl->mhi_dev) && msm_pcie_prevent_l1(pci_dev))
+ return;
+
+ while (info->valid) {
+ if (!mhi_is_active(mhi_cntrl->mhi_dev))
+ return;
+
+ writel_relaxed(info->val, info->reg_addr);
+ info->valid = false;
+ mhi_cntrl->read_idx =
+ (mhi_cntrl->read_idx + 1) &
+ (REG_WRITE_QUEUE_LEN - 1);
+ info = &mhi_cntrl->reg_write_q[mhi_cntrl->read_idx];
+ }
+
+ msm_pcie_allow_l1(pci_dev);
+}
+
static int mhi_arch_pm_notifier(struct notifier_block *nb,
unsigned long event, void *unused)
{
@@ -173,6 +204,7 @@ static int mhi_arch_esoc_ops_power_on(void *priv, unsigned int flags)
return ret;
}
+ mhi_dev->mdm_state = (flags & ESOC_HOOK_MDM_CRASH);
return mhi_pci_probe(pci_dev, NULL);
}
@@ -295,7 +327,8 @@ static void mhi_boot_monitor(void *data, async_cookie_t cookie)
/* wait for device to enter boot stage */
wait_event_timeout(mhi_cntrl->state_event, mhi_cntrl->ee == MHI_EE_AMSS
- || mhi_cntrl->ee == MHI_EE_DISABLE_TRANSITION,
+ || mhi_cntrl->ee == MHI_EE_DISABLE_TRANSITION
+ || mhi_cntrl->power_down,
timeout);
ipc_log_string(arch_info->boot_ipc_log, HLOG "Device current ee = %s\n",
@@ -316,8 +349,9 @@ int mhi_arch_power_up(struct mhi_controller *mhi_cntrl)
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct arch_info *arch_info = mhi_dev->arch_info;
- /* start a boot monitor */
- arch_info->cookie = async_schedule(mhi_boot_monitor, mhi_cntrl);
+ /* start a boot monitor if not in crashed state */
+ if (!mhi_dev->mdm_state)
+ arch_info->cookie = async_schedule(mhi_boot_monitor, mhi_cntrl);
return 0;
}
diff --git a/drivers/bus/mhi/controllers/mhi_qcom.c b/drivers/bus/mhi/controllers/mhi_qcom.c
index 76560369346f..f0c71a621a63 100644
--- a/drivers/bus/mhi/controllers/mhi_qcom.c
+++ b/drivers/bus/mhi/controllers/mhi_qcom.c
@@ -545,8 +545,9 @@ static int mhi_qcom_power_up(struct mhi_controller *mhi_cntrl)
return -EIO;
}
- /* when coming out of SSR, initial ee state is not valid */
+ /* when coming out of SSR, initial states are not valid */
mhi_cntrl->ee = 0;
+ mhi_cntrl->power_down = false;
ret = mhi_arch_power_up(mhi_cntrl);
if (ret)
@@ -779,8 +780,29 @@ static struct mhi_controller *mhi_register_controller(struct pci_dev *pci_dev)
if (ret)
goto error_register;
+ if (mhi_dev->allow_m1)
+ goto skip_offload;
+
+ mhi_cntrl->offload_wq = alloc_ordered_workqueue("offload_wq",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI);
+ if (!mhi_cntrl->offload_wq)
+ goto error_register;
+
+ INIT_WORK(&mhi_cntrl->reg_write_work, mhi_reg_write_work);
+
+ mhi_cntrl->reg_write_q = kcalloc(REG_WRITE_QUEUE_LEN,
+ sizeof(*mhi_cntrl->reg_write_q),
+ GFP_KERNEL);
+ if (!mhi_cntrl->reg_write_q)
+ goto error_free_wq;
+
+ atomic_set(&mhi_cntrl->write_idx, -1);
+
+skip_offload:
return mhi_cntrl;
+error_free_wq:
+ destroy_workqueue(mhi_cntrl->offload_wq);
error_register:
mhi_free_controller(mhi_cntrl);
diff --git a/drivers/bus/mhi/controllers/mhi_qcom.h b/drivers/bus/mhi/controllers/mhi_qcom.h
index edbd9174962d..a3ba3673eb91 100644
--- a/drivers/bus/mhi/controllers/mhi_qcom.h
+++ b/drivers/bus/mhi/controllers/mhi_qcom.h
@@ -56,6 +56,7 @@ struct mhi_dev {
void *arch_info;
bool powered_on;
bool allow_m1;
+ bool mdm_state;
dma_addr_t iova_start;
dma_addr_t iova_stop;
enum mhi_suspend_mode suspend_mode;
@@ -68,6 +69,7 @@ struct mhi_dev {
void mhi_deinit_pci_dev(struct mhi_controller *mhi_cntrl);
int mhi_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *device_id);
+void mhi_reg_write_work(struct work_struct *w);
#ifdef CONFIG_ARCH_QCOM
diff --git a/drivers/bus/mhi/core/mhi_boot.c b/drivers/bus/mhi/core/mhi_boot.c
index d2232ee2a9a4..048031e27355 100644
--- a/drivers/bus/mhi/core/mhi_boot.c
+++ b/drivers/bus/mhi/core/mhi_boot.c
@@ -46,13 +46,14 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
MHI_LOG("BHIe programming for RDDM\n");
- mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
+ mhi_cntrl->write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
upper_32_bits(mhi_buf->dma_addr));
- mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
+ mhi_cntrl->write_reg(mhi_cntrl, base, BHIE_RXVECADDR_LOW_OFFS,
lower_32_bits(mhi_buf->dma_addr));
- mhi_write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS, mhi_buf->len);
+ mhi_cntrl->write_reg(mhi_cntrl, base, BHIE_RXVECSIZE_OFFS,
+ mhi_buf->len);
sequence_id = prandom_u32() & BHIE_RXVECSTATUS_SEQNUM_BMSK;
if (unlikely(!sequence_id))
@@ -122,7 +123,7 @@ static int __mhi_download_rddm_in_panic(struct mhi_controller *mhi_cntrl)
/* Hardware reset; force device to enter rddm */
MHI_LOG(
"Did not enter RDDM, do a host req. reset\n");
- mhi_write_reg(mhi_cntrl, mhi_cntrl->regs,
+ mhi_cntrl->write_reg(mhi_cntrl, mhi_cntrl->regs,
MHI_SOC_RESET_REQ_OFFSET,
MHI_SOC_RESET_REQ);
udelay(delayus);
@@ -198,13 +199,14 @@ static int mhi_fw_load_amss(struct mhi_controller *mhi_cntrl,
MHI_LOG("Starting BHIe Programming\n");
- mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
+ mhi_cntrl->write_reg(mhi_cntrl, base, BHIE_TXVECADDR_HIGH_OFFS,
upper_32_bits(mhi_buf->dma_addr));
- mhi_write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS,
+ mhi_cntrl->write_reg(mhi_cntrl, base, BHIE_TXVECADDR_LOW_OFFS,
lower_32_bits(mhi_buf->dma_addr));
- mhi_write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS, mhi_buf->len);
+ mhi_cntrl->write_reg(mhi_cntrl, base, BHIE_TXVECSIZE_OFFS,
+ mhi_buf->len);
mhi_cntrl->sequence_id = prandom_u32() & BHIE_TXVECSTATUS_SEQNUM_BMSK;
mhi_write_reg_field(mhi_cntrl, base, BHIE_TXVECDB_OFFS,
@@ -262,14 +264,15 @@ static int mhi_fw_load_sbl(struct mhi_controller *mhi_cntrl,
goto invalid_pm_state;
}
- mhi_write_reg(mhi_cntrl, base, BHI_STATUS, 0);
- mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
+ mhi_cntrl->write_reg(mhi_cntrl, base, BHI_STATUS, 0);
+ mhi_cntrl->write_reg(mhi_cntrl, base, BHI_IMGADDR_HIGH,
upper_32_bits(dma_addr));
- mhi_write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
+ mhi_cntrl->write_reg(mhi_cntrl, base, BHI_IMGADDR_LOW,
lower_32_bits(dma_addr));
- mhi_write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
+ mhi_cntrl->write_reg(mhi_cntrl, base, BHI_IMGSIZE, size);
mhi_cntrl->session_id = prandom_u32() & BHI_TXDB_SEQNUM_BMSK;
- mhi_write_reg(mhi_cntrl, base, BHI_IMGTXDB, mhi_cntrl->session_id);
+ mhi_cntrl->write_reg(mhi_cntrl, base, BHI_IMGTXDB,
+ mhi_cntrl->session_id);
read_unlock_bh(pm_lock);
MHI_LOG("Waiting for image transfer completion\n");
diff --git a/drivers/bus/mhi/core/mhi_init.c b/drivers/bus/mhi/core/mhi_init.c
index 5f3fd7b10806..7ecda70761fb 100644
--- a/drivers/bus/mhi/core/mhi_init.c
+++ b/drivers/bus/mhi/core/mhi_init.c
@@ -85,7 +85,7 @@ struct mhi_controller *find_mhi_controller_by_name(const char *name)
list_for_each_entry_safe(mhi_cntrl, tmp_cntrl, &mhi_bus.controller_list,
node) {
- if (!strcmp(name, mhi_cntrl->name))
+ if (mhi_cntrl->name && (!strcmp(name, mhi_cntrl->name)))
return mhi_cntrl;
}
@@ -734,7 +734,7 @@ static int mhi_init_bw_scale(struct mhi_controller *mhi_cntrl)
MHI_LOG("BW_CFG OFFSET:0x%x\n", bw_cfg_offset);
/* advertise host support */
- mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, bw_cfg_offset,
+ mhi_cntrl->write_reg(mhi_cntrl, mhi_cntrl->regs, bw_cfg_offset,
MHI_BW_SCALE_SETUP(er_index));
return 0;
@@ -832,8 +832,8 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
/* setup wake db */
mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
- mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
- mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
+ mhi_cntrl->write_reg(mhi_cntrl, mhi_cntrl->wake_db, 4, 0);
+ mhi_cntrl->write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
mhi_cntrl->wake_set = false;
/* setup bw scale db */
@@ -1439,6 +1439,8 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
}
+ mhi_cntrl->write_reg = mhi_write_reg;
+
/* read the device info if possible */
if (mhi_cntrl->regs) {
ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
diff --git a/drivers/bus/mhi/core/mhi_internal.h b/drivers/bus/mhi/core/mhi_internal.h
index 91437d04e017..eecc8408f967 100644
--- a/drivers/bus/mhi/core/mhi_internal.h
+++ b/drivers/bus/mhi/core/mhi_internal.h
@@ -834,6 +834,8 @@ void mhi_destroy_timesync(struct mhi_controller *mhi_cntrl);
int mhi_create_sysfs(struct mhi_controller *mhi_cntrl);
void mhi_destroy_sysfs(struct mhi_controller *mhi_cntrl);
int mhi_early_notify_device(struct device *dev, void *data);
+void mhi_write_reg_offload(struct mhi_controller *mhi_cntrl,
+ void __iomem *base, u32 offset, u32 val);
/* timesync log support */
static inline void mhi_timesync_log(struct mhi_controller *mhi_cntrl)
@@ -903,6 +905,8 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
struct image_info *img_info);
int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan);
+void mhi_reset_reg_write_q(struct mhi_controller *mhi_cntrl);
+void mhi_force_reg_write(struct mhi_controller *mhi_cntrl);
/* isr handlers */
irqreturn_t mhi_msi_handlr(int irq_number, void *dev);
diff --git a/drivers/bus/mhi/core/mhi_main.c b/drivers/bus/mhi/core/mhi_main.c
index 213abb12fcf1..9a0b0d50b0c7 100644
--- a/drivers/bus/mhi/core/mhi_main.c
+++ b/drivers/bus/mhi/core/mhi_main.c
@@ -100,6 +100,44 @@ int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl,
return -ENXIO;
}
+void mhi_force_reg_write(struct mhi_controller *mhi_cntrl)
+{
+ if (mhi_cntrl->offload_wq)
+ flush_work(&mhi_cntrl->reg_write_work);
+}
+
+void mhi_reset_reg_write_q(struct mhi_controller *mhi_cntrl)
+{
+ cancel_work_sync(&mhi_cntrl->reg_write_work);
+ memset(mhi_cntrl->reg_write_q, 0,
+ sizeof(struct reg_write_info) * REG_WRITE_QUEUE_LEN);
+ mhi_cntrl->read_idx = 0;
+ atomic_set(&mhi_cntrl->write_idx, -1);
+}
+
+static void mhi_reg_write_enqueue(struct mhi_controller *mhi_cntrl,
+ void __iomem *reg_addr, u32 val)
+{
+ u32 q_index = atomic_inc_return(&mhi_cntrl->write_idx);
+
+ q_index = q_index & (REG_WRITE_QUEUE_LEN - 1);
+
+ MHI_ASSERT(mhi_cntrl->reg_write_q[q_index].valid, "queue full idx %d");
+
+ mhi_cntrl->reg_write_q[q_index].reg_addr = reg_addr;
+ mhi_cntrl->reg_write_q[q_index].val = val;
+ mhi_cntrl->reg_write_q[q_index].valid = true;
+}
+
+void mhi_write_reg_offload(struct mhi_controller *mhi_cntrl,
+ void __iomem *base,
+ u32 offset,
+ u32 val)
+{
+ mhi_reg_write_enqueue(mhi_cntrl, base + offset, val);
+ queue_work(mhi_cntrl->offload_wq, &mhi_cntrl->reg_write_work);
+}
+
void mhi_write_reg(struct mhi_controller *mhi_cntrl,
void __iomem *base,
u32 offset,
@@ -124,15 +162,15 @@ void mhi_write_reg_field(struct mhi_controller *mhi_cntrl,
tmp &= ~mask;
tmp |= (val << shift);
- mhi_write_reg(mhi_cntrl, base, offset, tmp);
+ mhi_cntrl->write_reg(mhi_cntrl, base, offset, tmp);
}
void mhi_write_db(struct mhi_controller *mhi_cntrl,
void __iomem *db_addr,
dma_addr_t wp)
{
- mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(wp));
- mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(wp));
+ mhi_cntrl->write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(wp));
+ mhi_cntrl->write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(wp));
}
void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
@@ -449,7 +487,7 @@ int mhi_queue_dma(struct mhi_device *mhi_dev,
struct mhi_buf_info *buf_info;
struct mhi_tre *mhi_tre;
bool ring_db = true;
- int nr_tre;
+ int n_free_tre, n_queued_tre;
if (mhi_is_ring_full(mhi_cntrl, tre_ring))
return -ENOMEM;
@@ -493,9 +531,12 @@ int mhi_queue_dma(struct mhi_device *mhi_dev,
* on RSC channel IPA HW has a minimum credit requirement before
* switching to DB mode
*/
- nr_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
+ n_free_tre = mhi_get_no_free_descriptors(mhi_dev,
+ DMA_FROM_DEVICE);
+ n_queued_tre = tre_ring->elements - n_free_tre;
read_lock_bh(&mhi_chan->lock);
- if (mhi_chan->db_cfg.db_mode && nr_tre < MHI_RSC_MIN_CREDITS)
+ if (mhi_chan->db_cfg.db_mode &&
+ n_queued_tre < MHI_RSC_MIN_CREDITS)
ring_db = false;
read_unlock_bh(&mhi_chan->lock);
} else {
@@ -910,7 +951,7 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
struct mhi_result result;
unsigned long flags = 0;
bool ring_db = true;
- int nr_tre;
+ int n_free_tre, n_queued_tre;
ev_code = MHI_TRE_GET_EV_CODE(event);
buf_ring = &mhi_chan->buf_ring;
@@ -1011,9 +1052,10 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl,
* switching to DB mode
*/
if (mhi_chan->xfer_type == MHI_XFER_RSC_DMA) {
- nr_tre = mhi_get_no_free_descriptors(mhi_chan->mhi_dev,
- DMA_FROM_DEVICE);
- if (nr_tre < MHI_RSC_MIN_CREDITS)
+ n_free_tre = mhi_get_no_free_descriptors(
+ mhi_chan->mhi_dev, DMA_FROM_DEVICE);
+ n_queued_tre = tre_ring->elements - n_free_tre;
+ if (n_queued_tre < MHI_RSC_MIN_CREDITS)
ring_db = false;
}
@@ -1422,29 +1464,28 @@ int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
struct mhi_link_info link_info, *cur_info = &mhi_cntrl->mhi_link_info;
int result, ret = 0;
- mutex_lock(&mhi_cntrl->pm_mutex);
-
if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) {
MHI_LOG("No EV access, PM_STATE:%s\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state));
ret = -EIO;
- goto exit_bw_process;
+ goto exit_no_lock;
}
- /*
- * BW change is not process during suspend since we're suspending link,
- * host will process it during resume
- */
- if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
- ret = -EACCES;
- goto exit_bw_process;
- }
+ ret = __mhi_device_get_sync(mhi_cntrl);
+ if (ret)
+ goto exit_no_lock;
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
spin_lock_bh(&mhi_event->lock);
dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
if (ev_ring->rp == dev_rp) {
spin_unlock_bh(&mhi_event->lock);
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ mhi_cntrl->wake_put(mhi_cntrl, false);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ MHI_VERB("no pending event found\n");
goto exit_bw_process;
}
@@ -1486,16 +1527,19 @@ int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
read_lock_bh(&mhi_cntrl->pm_lock);
if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
- mhi_write_reg(mhi_cntrl, mhi_cntrl->bw_scale_db, 0,
+ mhi_cntrl->write_reg(mhi_cntrl, mhi_cntrl->bw_scale_db, 0,
MHI_BW_SCALE_RESULT(result,
link_info.sequence_num));
+
+ mhi_cntrl->wake_put(mhi_cntrl, false);
read_unlock_bh(&mhi_cntrl->pm_lock);
exit_bw_process:
- MHI_VERB("exit er_index:%u\n", mhi_event->er_index);
-
mutex_unlock(&mhi_cntrl->pm_mutex);
+exit_no_lock:
+ MHI_VERB("exit er_index:%u\n", mhi_event->er_index);
+
return ret;
}
diff --git a/drivers/bus/mhi/core/mhi_pm.c b/drivers/bus/mhi/core/mhi_pm.c
index 859188f7b33b..17f560fcb2b2 100644
--- a/drivers/bus/mhi/core/mhi_pm.c
+++ b/drivers/bus/mhi/core/mhi_pm.c
@@ -156,8 +156,8 @@ void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 1);
} else {
- mhi_write_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
- MHICTRL_MHISTATE_MASK, MHICTRL_MHISTATE_SHIFT, state);
+ mhi_cntrl->write_reg(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+ (state << MHICTRL_MHISTATE_SHIFT));
}
}
@@ -208,7 +208,8 @@ void mhi_deassert_dev_wake(struct mhi_controller *mhi_cntrl, bool override)
{
unsigned long flags;
- MHI_ASSERT(atomic_read(&mhi_cntrl->dev_wake) == 0, "dev_wake == 0");
+ MHI_ASSERT((mhi_is_active(mhi_cntrl->mhi_dev) &&
+ atomic_read(&mhi_cntrl->dev_wake) == 0), "dev_wake == 0");
/* resources not dropping to 0, decrement and exit */
if (likely(atomic_add_unless(&mhi_cntrl->dev_wake, -1, 1)))
@@ -457,6 +458,12 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
wake_up_all(&mhi_cntrl->state_event);
+ /* offload register write if supported */
+ if (mhi_cntrl->offload_wq) {
+ mhi_reset_reg_write_q(mhi_cntrl);
+ mhi_cntrl->write_reg = mhi_write_reg_offload;
+ }
+
/* force MHI to be in M0 state before continuing */
ret = __mhi_device_get_sync(mhi_cntrl);
if (ret)
@@ -534,6 +541,12 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
TO_MHI_STATE_STR(mhi_cntrl->dev_state),
to_mhi_pm_state_str(transition_state));
+ /* restore the default synchronous register write callback */
+ mhi_cntrl->write_reg = mhi_write_reg;
+
+ if (mhi_cntrl->offload_wq)
+ mhi_reset_reg_write_q(mhi_cntrl);
+
/* We must notify MHI control driver so it can clean up first */
if (transition_state == MHI_PM_SYS_ERR_PROCESS) {
/*
@@ -558,6 +571,8 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
mhi_cntrl->ee = MHI_EE_DISABLE_TRANSITION;
mhi_cntrl->dev_state = MHI_STATE_RESET;
}
+ /* notify controller of power down regardless of state transitions */
+ mhi_cntrl->power_down = true;
write_unlock_irq(&mhi_cntrl->pm_lock);
/* wake up any threads waiting for state transitions */
@@ -596,7 +611,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
* device cleares INTVEC as part of RESET processing,
* re-program it
*/
- mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+ mhi_cntrl->write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
}
MHI_LOG("Waiting for all pending event ring processing to complete\n");
@@ -840,6 +855,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
u32 val;
enum mhi_ee current_ee;
enum MHI_ST_TRANSITION next_state;
+ struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
MHI_LOG("Requested to power on\n");
@@ -855,6 +871,10 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
mhi_toggle_dev_wake_nop : mhi_toggle_dev_wake;
}
+ /* clear votes before proceeding for power up */
+ atomic_set(&mhi_dev->dev_vote, 0);
+ atomic_set(&mhi_dev->bus_vote, 0);
+
mutex_lock(&mhi_cntrl->pm_mutex);
mhi_cntrl->pm_state = MHI_PM_DISABLE;
@@ -896,7 +916,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
mhi_cntrl->bhie = mhi_cntrl->regs + val;
}
- mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
+ mhi_cntrl->write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
mhi_cntrl->pm_state = MHI_PM_POR;
mhi_cntrl->ee = MHI_EE_MAX;
current_ee = mhi_get_exec_env(mhi_cntrl);
@@ -968,6 +988,8 @@ void mhi_control_error(struct mhi_controller *mhi_cntrl)
goto exit_control_error;
}
+ mhi_cntrl->dev_state = MHI_STATE_SYS_ERR;
+
/* notify waiters to bail out early since MHI has entered ERROR state */
wake_up_all(&mhi_cntrl->state_event);
@@ -1407,6 +1429,9 @@ int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl)
mhi_trigger_resume(mhi_cntrl);
read_unlock_bh(&mhi_cntrl->pm_lock);
+ /* for offload write make sure wake DB is set before any MHI reg read */
+ mhi_force_reg_write(mhi_cntrl);
+
ret = wait_event_timeout(mhi_cntrl->state_event,
mhi_cntrl->pm_state == MHI_PM_M0 ||
MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state),
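Editor's note on the MHI hunks above: mhi_write_reg() remains the default behind the new mhi_cntrl->write_reg pointer; when the controller cannot use M1 (allow_m1 is false), probe allocates an ordered high-priority workqueue and a REG_WRITE_QUEUE_LEN ring, and the mission-mode transition swaps write_reg to mhi_write_reg_offload so register writes are queued and drained by a worker that first pulls the PCIe link out of L1. A condensed sketch of the producer/consumer pattern these hunks implement (assembled from the fragments above; REG_WRITE_QUEUE_LEN is assumed to be a power of two so the index masking works, and write_idx starts at -1 so the first enqueue lands at slot 0):

	struct reg_write_info { void __iomem *reg_addr; u32 val; bool valid; };

	/* producer: mhi_write_reg_offload() */
	u32 idx = atomic_inc_return(&mhi_cntrl->write_idx) & (REG_WRITE_QUEUE_LEN - 1);
	mhi_cntrl->reg_write_q[idx].reg_addr = base + offset;
	mhi_cntrl->reg_write_q[idx].val = val;
	mhi_cntrl->reg_write_q[idx].valid = true;
	queue_work(mhi_cntrl->offload_wq, &mhi_cntrl->reg_write_work);

	/* consumer: mhi_reg_write_work(), with the PCIe link held out of L1 */
	while (mhi_cntrl->reg_write_q[mhi_cntrl->read_idx].valid) {
		struct reg_write_info *info = &mhi_cntrl->reg_write_q[mhi_cntrl->read_idx];

		writel_relaxed(info->val, info->reg_addr);
		info->valid = false;
		mhi_cntrl->read_idx = (mhi_cntrl->read_idx + 1) &
				      (REG_WRITE_QUEUE_LEN - 1);
	}
	msm_pcie_allow_l1(pci_dev);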
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 7c4225b9b311..17bb945a4b90 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -209,6 +209,9 @@
#define DEFAULT_LOW_WM_VAL 15
#define DEFAULT_HIGH_WM_VAL 85
+#define HDLC_CTXT 1
+#define NON_HDLC_CTXT 2
+
#define TYPE_DATA 0
#define TYPE_CNTL 1
#define TYPE_DCI 2
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 3c87d9b172e4..4315f6dc7a32 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -331,6 +331,14 @@ static int diagchar_open(struct inode *inode, struct file *file)
if (driver) {
mutex_lock(&driver->diagchar_mutex);
+ for (i = 0; i < driver->num_clients; i++) {
+ if (driver->client_map[i].pid == current->tgid) {
+ pr_err_ratelimited("diag: Client already present current->tgid: %d\n",
+ current->tgid);
+ mutex_unlock(&driver->diagchar_mutex);
+ return -EEXIST;
+ }
+ }
for (i = 0; i < driver->num_clients; i++)
if (driver->client_map[i].pid == 0)
break;
@@ -3623,9 +3631,12 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
int proc = 0;
mutex_lock(&driver->diagchar_mutex);
- for (i = 0; i < driver->num_clients; i++)
- if (driver->client_map[i].pid == current->tgid)
+ for (i = 0; i < driver->num_clients; i++) {
+ if (driver->client_map[i].pid == current->tgid) {
index = i;
+ break;
+ }
+ }
mutex_unlock(&driver->diagchar_mutex);
if (index == -1) {
@@ -4427,10 +4438,12 @@ static int __init diagchar_init(void)
driver->pcie_switch_pid = 0;
driver->rsp_buf_ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_CMD, TYPE_CMD);
hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
+ hdlc_data.ctxt |= SET_HDLC_CTXT(HDLC_CTXT);
hdlc_data.len = 0;
hdlc_data.allocated = 0;
hdlc_data.flushed = 0;
non_hdlc_data.ctxt = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);
+ non_hdlc_data.ctxt |= SET_HDLC_CTXT(NON_HDLC_CTXT);
non_hdlc_data.len = 0;
non_hdlc_data.allocated = 0;
non_hdlc_data.flushed = 0;
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index b8194ebaf84f..332e6449deed 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1841,9 +1841,8 @@ static int diagfwd_mux_write_done(unsigned char *buf, int len, int buf_ctxt,
int ctxt)
{
unsigned long flags;
- int peripheral = -1;
- int type = -1;
- int num = -1;
+ int peripheral = -1, type = -1;
+ int num = -1, hdlc_ctxt = -1;
struct diag_apps_data_t *temp = NULL;
if (!buf || len < 0)
@@ -1863,16 +1862,19 @@ static int diagfwd_mux_write_done(unsigned char *buf, int len, int buf_ctxt,
diag_ws_on_copy(DIAG_WS_MUX);
} else if (peripheral == APPS_DATA) {
spin_lock_irqsave(&driver->diagmem_lock, flags);
- if (hdlc_data.allocated)
+ hdlc_ctxt = GET_HDLC_CTXT(buf_ctxt);
+ if ((hdlc_ctxt == HDLC_CTXT) && hdlc_data.allocated)
temp = &hdlc_data;
- else if (non_hdlc_data.allocated)
+ else if ((hdlc_ctxt == NON_HDLC_CTXT) &&
+ non_hdlc_data.allocated)
temp = &non_hdlc_data;
else
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"No apps data buffer is allocated to be freed\n");
if (temp) {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
- "Freeing Apps data buffer after write done hdlc.allocated: %d, non_hdlc.allocated: %d\n",
+ "Freeing Apps data buffer after write done hdlc_ctxt: %d, hdlc.allocated: %d, non_hdlc.allocated: %d\n",
+ hdlc_ctxt,
hdlc_data.allocated, non_hdlc_data.allocated);
diagmem_free(driver, temp->buf, POOL_TYPE_HDLC);
temp->buf = NULL;
diff --git a/drivers/char/diag/diagfwd.h b/drivers/char/diag/diagfwd.h
index 7153f08e2a8c..a6532ad4e1a9 100644
--- a/drivers/char/diag/diagfwd.h
+++ b/drivers/char/diag/diagfwd.h
@@ -25,6 +25,9 @@
#define GET_BUF_NUM(n) ((n & 0x0000FF))
#define GET_PD_CTXT(u) ((u & 0xFF000000) >> 24)
+#define SET_HDLC_CTXT(u) ((u & 0xFF) << 24)
+#define GET_HDLC_CTXT(u) ((u & 0xFF000000) >> 24)
+
#define CHK_OVERFLOW(bufStart, start, end, length) \
((((bufStart) <= (start)) && ((end) - (start) >= (length))) ? 1 : 0)
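Editor's note: the new HDLC tag rides in the top byte of the existing apps-data buffer context word, so the write-done path can free the matching buffer instead of guessing. A minimal sketch of how the hunks above produce and consume it (HDLC_CTXT is 1 and NON_HDLC_CTXT is 2, per diagchar.h):

	int buf_ctxt;

	buf_ctxt  = SET_BUF_CTXT(APPS_DATA, TYPE_DATA, 1);	/* peripheral/type/num */
	buf_ctxt |= SET_HDLC_CTXT(HDLC_CTXT);			/* tag in bits 31..24 */

	/* in diagfwd_mux_write_done(): pick the buffer the tag names */
	if (GET_HDLC_CTXT(buf_ctxt) == HDLC_CTXT && hdlc_data.allocated)
		temp = &hdlc_data;
	else if (GET_HDLC_CTXT(buf_ctxt) == NON_HDLC_CTXT && non_hdlc_data.allocated)
		temp = &non_hdlc_data;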
diff --git a/drivers/clk/qcom/gcc-atoll.c b/drivers/clk/qcom/gcc-atoll.c
index 7d37d989d6b5..acaf43aeaaef 100644
--- a/drivers/clk/qcom/gcc-atoll.c
+++ b/drivers/clk/qcom/gcc-atoll.c
@@ -1405,7 +1405,7 @@ static struct clk_branch gcc_npu_axi_clk = {
static struct clk_branch gcc_npu_bwmon_axi_clk = {
.halt_reg = 0x73008,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x73008,
.enable_mask = BIT(0),
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
index ab9f7f6dec84..fd34c0bc39f5 100644
--- a/drivers/crypto/msm/ice.c
+++ b/drivers/crypto/msm/ice.c
@@ -846,18 +846,27 @@ static int qcom_ice_remove(struct platform_device *pdev)
static int qcom_ice_suspend(struct platform_device *pdev)
{
struct ice_device *ice_dev;
+ int ret = 0;
ice_dev = (struct ice_device *)platform_get_drvdata(pdev);
if (!ice_dev)
return -EINVAL;
- if (atomic_read(&ice_dev->is_ice_busy) != 0)
- wait_event_interruptible_timeout(
+ if (atomic_read(&ice_dev->is_ice_busy) != 0) {
+ ret = wait_event_interruptible_timeout(
ice_dev->block_suspend_ice_queue,
- atomic_read(&ice_dev->is_ice_busy) != 0,
+ atomic_read(&ice_dev->is_ice_busy) == 0,
msecs_to_jiffies(1000));
+ if (!ret) {
+ pr_err("%s: Suspend ICE during an ongoing operation\n",
+ __func__);
+ atomic_set(&ice_dev->is_ice_suspended, 0);
+ return -ETIME;
+ }
+ }
+
atomic_set(&ice_dev->is_ice_suspended, 1);
return 0;
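Editor's note: the ICE suspend fix above hinges on wait_event_interruptible_timeout() semantics: it returns 0 on timeout, a positive remaining-jiffies count once the condition becomes true, and a negative value if interrupted by a signal. The old condition (is_ice_busy != 0) was already true, so the wait returned immediately while ICE was still busy; the corrected pattern is roughly:

	long ret;

	ret = wait_event_interruptible_timeout(ice_dev->block_suspend_ice_queue,
			atomic_read(&ice_dev->is_ice_busy) == 0,	/* wait until idle */
			msecs_to_jiffies(1000));
	if (!ret)
		return -ETIME;		/* still busy after 1s, refuse to suspend */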
diff --git a/drivers/devfreq/governor_msm_adreno_tz.c b/drivers/devfreq/governor_msm_adreno_tz.c
index aa9eae3de3cd..551823bf5d12 100644
--- a/drivers/devfreq/governor_msm_adreno_tz.c
+++ b/drivers/devfreq/governor_msm_adreno_tz.c
@@ -142,6 +142,8 @@ void compute_work_load(struct devfreq_dev_status *stats,
struct devfreq_msm_adreno_tz_data *priv,
struct devfreq *devfreq)
{
+ u64 busy;
+
spin_lock(&sample_lock);
/*
* Keep collecting the stats till the client
@@ -149,9 +151,10 @@ void compute_work_load(struct devfreq_dev_status *stats,
* is done when the entry is read
*/
acc_total += stats->total_time;
- acc_relative_busy += ((u64)stats->busy_time *
- stats->current_frequency) /
- devfreq->profile->freq_table[0];
+ busy = (u64)stats->busy_time * stats->current_frequency;
+ do_div(busy, devfreq->profile->freq_table[0]);
+ acc_relative_busy += busy;
+
spin_unlock(&sample_lock);
}
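Editor's note: the governor change above replaces a 64-bit '/' (which 32-bit ARM kernels cannot lower without libgcc helpers) with do_div(), whose calling convention is easy to misread: the divisor must fit in 32 bits, the dividend is updated in place to the quotient, and the return value is the remainder. A minimal usage sketch matching the hunk:

	u64 busy = (u64)stats->busy_time * stats->current_frequency;

	do_div(busy, devfreq->profile->freq_table[0]);	/* busy /= freq_table[0]; remainder returned, ignored here */
	acc_relative_busy += busy;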
diff --git a/drivers/devfreq/m4m-hwmon.c b/drivers/devfreq/m4m-hwmon.c
index a9fd470dced8..21bcccb3c0fc 100644
--- a/drivers/devfreq/m4m-hwmon.c
+++ b/drivers/devfreq/m4m-hwmon.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015, 2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -221,7 +221,7 @@ static irqreturn_t m4m_hwmon_intr_handler(int irq, void *dev)
return IRQ_NONE;
}
-static int count_to_mrps(unsigned long count, unsigned int us)
+static int count_to_mrps(uint64_t count, unsigned int us)
{
do_div(count, us);
count++;
diff --git a/drivers/devfreq/msmcci-hwmon.c b/drivers/devfreq/msmcci-hwmon.c
index ad0fa8f6e396..030ed0ffdfb9 100644
--- a/drivers/devfreq/msmcci-hwmon.c
+++ b/drivers/devfreq/msmcci-hwmon.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015, 2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -224,7 +224,7 @@ static unsigned long mon_read_count_single(struct msmcci_hwmon *m, int idx)
return count;
}
-static int count_to_mrps(unsigned long count, unsigned int us)
+static int count_to_mrps(uint64_t count, unsigned int us)
{
do_div(count, us);
count++;
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 5f3de8bc5882..afb37a1b1ee6 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -397,7 +397,7 @@ static const struct adreno_gpu_core adreno_gpulist[] = {
.busy_mask = 0xFFFFFFFE,
.gpmufw_name = "a618_gmu.bin",
.gpmu_major = 0x1,
- .gpmu_minor = 0x007,
+ .gpmu_minor = 0x008,
},
{
.gpurev = ADRENO_REV_A640,
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 9d92dd78b645..768585782484 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -88,6 +88,14 @@ enum i2c_se_mode {
GSI_ONLY,
};
+struct geni_i2c_clk_fld {
+ u32 clk_freq_out;
+ u8 clk_div;
+ u8 t_high;
+ u8 t_low;
+ u8 t_cycle;
+};
+
struct geni_i2c_dev {
struct device *dev;
void __iomem *base;
@@ -103,7 +111,6 @@ struct geni_i2c_dev {
int cur_rd;
struct device *wrapper_dev;
void *ipcl;
- int clk_fld_idx;
struct dma_chan *tx_c;
struct dma_chan *rx_c;
struct msm_gpi_tre cfg0_t;
@@ -123,6 +130,7 @@ struct geni_i2c_dev {
struct msm_gpi_dma_async_tx_cb_param rx_cb;
enum i2c_se_mode se_mode;
bool cmd_done;
+ struct geni_i2c_clk_fld geni_i2c_clk_param;
};
struct geni_i2c_err_log {
@@ -147,14 +155,6 @@ static struct geni_i2c_err_log gi2c_log[] = {
[GENI_TIMEOUT] = {-ETIMEDOUT, "I2C TXN timed out"},
};
-struct geni_i2c_clk_fld {
- u32 clk_freq_out;
- u8 clk_div;
- u8 t_high;
- u8 t_low;
- u8 t_cycle;
-};
-
static struct geni_i2c_clk_fld geni_i2c_clk_map[] = {
{KHz(100), 7, 10, 11, 26},
{KHz(400), 2, 5, 12, 24},
@@ -164,34 +164,33 @@ static struct geni_i2c_clk_fld geni_i2c_clk_map[] = {
static int geni_i2c_clk_map_idx(struct geni_i2c_dev *gi2c)
{
int i;
- int ret = 0;
- bool clk_map_present = false;
struct geni_i2c_clk_fld *itr = geni_i2c_clk_map;
for (i = 0; i < ARRAY_SIZE(geni_i2c_clk_map); i++, itr++) {
if (itr->clk_freq_out == gi2c->i2c_rsc.clk_freq_out) {
- clk_map_present = true;
- break;
+ gi2c->geni_i2c_clk_param.clk_freq_out =
+ itr->clk_freq_out;
+ gi2c->geni_i2c_clk_param.clk_div = itr->clk_div;
+ gi2c->geni_i2c_clk_param.t_high = itr->t_high;
+ gi2c->geni_i2c_clk_param.t_low = itr->t_low;
+ gi2c->geni_i2c_clk_param.t_cycle = itr->t_cycle;
+ return 0;
}
}
- if (clk_map_present)
- gi2c->clk_fld_idx = i;
- else
- ret = -EINVAL;
-
- return ret;
+ return -EINVAL;
}
static inline void qcom_geni_i2c_conf(struct geni_i2c_dev *gi2c, int dfs)
{
- struct geni_i2c_clk_fld *itr = geni_i2c_clk_map + gi2c->clk_fld_idx;
-
geni_write_reg(dfs, gi2c->base, SE_GENI_CLK_SEL);
- geni_write_reg((itr->clk_div << 4) | 1, gi2c->base, GENI_SER_M_CLK_CFG);
- geni_write_reg(((itr->t_high << 20) | (itr->t_low << 10) |
- itr->t_cycle), gi2c->base, SE_I2C_SCL_COUNTERS);
+ geni_write_reg((gi2c->geni_i2c_clk_param.clk_div << 4) | 1,
+ gi2c->base, GENI_SER_M_CLK_CFG);
+ geni_write_reg(((gi2c->geni_i2c_clk_param.t_high << 20) |
+ (gi2c->geni_i2c_clk_param.t_low << 10) |
+ gi2c->geni_i2c_clk_param.t_cycle), gi2c->base,
+ SE_I2C_SCL_COUNTERS);
/*
* Ensure Clk config completes before return.
@@ -201,15 +200,12 @@ static inline void qcom_geni_i2c_conf(struct geni_i2c_dev *gi2c, int dfs)
static inline void qcom_geni_i2c_calc_timeout(struct geni_i2c_dev *gi2c)
{
-
- struct geni_i2c_clk_fld *clk_itr = geni_i2c_clk_map + gi2c->clk_fld_idx;
- size_t bit_cnt = gi2c->cur->len*9;
- size_t bit_usec = (bit_cnt*USEC_PER_SEC)/clk_itr->clk_freq_out;
- size_t xfer_max_usec = (bit_usec*I2C_TIMEOUT_SAFETY_COEFFICIENT) +
+ size_t bit_usec = (gi2c->cur->len * 9 * USEC_PER_SEC)/
+ gi2c->geni_i2c_clk_param.clk_freq_out;
+ size_t xfer_max_usec = (bit_usec * I2C_TIMEOUT_SAFETY_COEFFICIENT) +
I2C_TIMEOUT_MIN_USEC;
gi2c->xfer_timeout = usecs_to_jiffies(xfer_max_usec);
-
}
static void geni_i2c_err(struct geni_i2c_dev *gi2c, int err)
@@ -477,18 +473,16 @@ static int geni_i2c_gsi_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
}
if (!gi2c->cfg_sent) {
- struct geni_i2c_clk_fld *itr = geni_i2c_clk_map +
- gi2c->clk_fld_idx;
struct msm_gpi_tre *cfg0 = &gi2c->cfg0_t;
/* config0 */
cfg0->dword[0] = MSM_GPI_I2C_CONFIG0_TRE_DWORD0(I2C_PACK_EN,
- itr->t_cycle,
- itr->t_high,
- itr->t_low);
+ gi2c->geni_i2c_clk_param.t_cycle,
+ gi2c->geni_i2c_clk_param.t_high,
+ gi2c->geni_i2c_clk_param.t_low);
cfg0->dword[1] = MSM_GPI_I2C_CONFIG0_TRE_DWORD1(0, 0);
cfg0->dword[2] = MSM_GPI_I2C_CONFIG0_TRE_DWORD2(0,
- itr->clk_div);
+ gi2c->geni_i2c_clk_param.clk_div);
cfg0->dword[3] = MSM_GPI_I2C_CONFIG0_TRE_DWORD3(0, 0, 0, 0, 1);
gi2c->tx_cb.userdata = gi2c;
@@ -806,6 +800,7 @@ static int geni_i2c_probe(struct platform_device *pdev)
struct device_node *wrapper_ph_node;
int ret;
char boot_marker[40];
+ u32 geni_i2c_clk_map_dt[5];
gi2c = devm_kzalloc(&pdev->dev, sizeof(*gi2c), GFP_KERNEL);
if (!gi2c)
@@ -892,11 +887,29 @@ static int geni_i2c_probe(struct platform_device *pdev)
return ret;
}
- if (of_property_read_u32(pdev->dev.of_node, "qcom,clk-freq-out",
+ if (of_property_read_u32_array(pdev->dev.of_node, "qcom,clk-cfg",
+ geni_i2c_clk_map_dt, 5)) {
+ if (of_property_read_u32(pdev->dev.of_node, "qcom,clk-freq-out",
&gi2c->i2c_rsc.clk_freq_out)) {
- dev_info(&pdev->dev,
- "Bus frequency not specified, default to 400KHz.\n");
- gi2c->i2c_rsc.clk_freq_out = KHz(400);
+ dev_info(&pdev->dev,
+ "Bus frequency not specified, default to 400KHz.\n");
+ gi2c->i2c_rsc.clk_freq_out = KHz(400);
+ }
+ ret = geni_i2c_clk_map_idx(gi2c);
+ if (ret) {
+ dev_err(gi2c->dev, "Invalid clk frequency %d KHz: %d\n",
+ gi2c->i2c_rsc.clk_freq_out, ret);
+ return ret;
+ }
+ } else {
+ gi2c->geni_i2c_clk_param.clk_freq_out = geni_i2c_clk_map_dt[0];
+ gi2c->geni_i2c_clk_param.clk_div = (u8)geni_i2c_clk_map_dt[1];
+ gi2c->geni_i2c_clk_param.t_high = (u8)geni_i2c_clk_map_dt[2];
+ gi2c->geni_i2c_clk_param.t_low = (u8)geni_i2c_clk_map_dt[3];
+ gi2c->geni_i2c_clk_param.t_cycle = (u8)geni_i2c_clk_map_dt[4];
+ gi2c->i2c_rsc.clk_freq_out =
+ gi2c->geni_i2c_clk_param.clk_freq_out;
+ dev_info(&pdev->dev, "Clk-cfg array present\n");
}
gi2c->irq = platform_get_irq(pdev, 0);
@@ -905,13 +918,6 @@ static int geni_i2c_probe(struct platform_device *pdev)
return gi2c->irq;
}
- ret = geni_i2c_clk_map_idx(gi2c);
- if (ret) {
- dev_err(gi2c->dev, "Invalid clk frequency %d KHz: %d\n",
- gi2c->i2c_rsc.clk_freq_out, ret);
- return ret;
- }
-
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (ret) {
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c
index 02073ae3a950..fab210e6bdd5 100644
--- a/drivers/iio/adc/qcom-spmi-adc5.c
+++ b/drivers/iio/adc/qcom-spmi-adc5.c
@@ -85,9 +85,10 @@
* clock rate, fast average samples with no measurement in queue.
* Set the timeout to a max of 100ms.
*/
-#define ADC_CONV_TIME_MIN_US 263
-#define ADC_CONV_TIME_MAX_US 264
-#define ADC_CONV_TIME_RETRY 400
+#define ADC_POLL_DELAY_MIN_US 10000
+#define ADC_POLL_DELAY_MAX_US 10001
+#define ADC_CONV_TIME_RETRY_POLL 40
+#define ADC_CONV_TIME_RETRY 30
#define ADC_CONV_TIMEOUT msecs_to_jiffies(100)
/* CAL peripheral */
@@ -274,13 +275,16 @@ static int adc_read_voltage_data(struct adc_chip *adc, u16 *data)
return ret;
}
-static int adc_poll_wait_eoc(struct adc_chip *adc)
+static int adc_poll_wait_eoc(struct adc_chip *adc, bool poll_only)
{
unsigned int count, retry;
u8 status1;
int ret;
- retry = ADC_CONV_TIME_RETRY;
+ if (poll_only)
+ retry = ADC_CONV_TIME_RETRY_POLL;
+ else
+ retry = ADC_CONV_TIME_RETRY;
for (count = 0; count < retry; count++) {
ret = adc_read(adc, ADC_USR_STATUS1, &status1, 1);
@@ -290,7 +294,7 @@ static int adc_poll_wait_eoc(struct adc_chip *adc)
status1 &= ADC_USR_STATUS1_REQ_STS_EOC_MASK;
if (status1 == ADC_USR_STATUS1_EOC)
return 0;
- usleep_range(ADC_CONV_TIME_MIN_US, ADC_CONV_TIME_MAX_US);
+ usleep_range(ADC_POLL_DELAY_MIN_US, ADC_POLL_DELAY_MAX_US);
}
return -ETIMEDOUT;
@@ -301,7 +305,7 @@ static int adc_wait_eoc(struct adc_chip *adc)
int ret;
if (adc->poll_eoc) {
- ret = adc_poll_wait_eoc(adc);
+ ret = adc_poll_wait_eoc(adc, true);
if (ret < 0) {
pr_err("EOC bit not set\n");
return ret;
@@ -311,7 +315,7 @@ static int adc_wait_eoc(struct adc_chip *adc)
ADC_CONV_TIMEOUT);
if (!ret) {
pr_debug("Did not get completion timeout.\n");
- ret = adc_poll_wait_eoc(adc);
+ ret = adc_poll_wait_eoc(adc, false);
if (ret < 0) {
pr_err("EOC bit not set\n");
return ret;
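Editor's note on the new ADC constants: the poll-only path now allows ADC_CONV_TIME_RETRY_POLL (40) iterations of a roughly 10 ms sleep, about 400 ms total, and the interrupt-timeout fallback allows 30 x 10 ms = 300 ms, versus roughly 400 x 264 us = 105 ms before, so slow conversions are far less likely to be reported as -ETIMEDOUT while the per-iteration register reads are issued an order of magnitude less often.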
diff --git a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
index d9f508e2972b..2e4d1c171149 100644
--- a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
@@ -277,7 +277,7 @@ int cam_icp_context_init(struct cam_icp_context *ctx,
}
rc = cam_context_init(ctx->base, icp_dev_name, CAM_ICP, ctx_id,
- NULL, hw_intf, ctx->req_base, CAM_CTX_REQ_MAX);
+ NULL, hw_intf, ctx->req_base, CAM_ICP_CTX_REQ_MAX);
if (rc) {
CAM_ERR(CAM_ICP, "Camera Context Base init failed");
goto err;
diff --git a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.h b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.h
index 0c3a360c7de5..b8f749811f64 100644
--- a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.h
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
#define _CAM_ICP_CONTEXT_H_
#include "cam_context.h"
+#include "cam_icp_hw_mgr_intf.h"
/**
* struct cam_icp_context - icp context
@@ -26,7 +27,7 @@
struct cam_icp_context {
struct cam_context *base;
struct cam_ctx_ops *state_machine;
- struct cam_ctx_request req_base[CAM_CTX_REQ_MAX];
+ struct cam_ctx_request req_base[CAM_ICP_CTX_REQ_MAX];
uint32_t state;
void *ctxt_to_hw_map;
};
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
index 5a6bc0b644ca..f7375794a71f 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -849,9 +849,6 @@ static bool cam_icp_debug_clk_update(struct cam_icp_clk_info *hw_mgr_clk_info)
icp_hw_mgr.icp_debug_clk != hw_mgr_clk_info->curr_clk) {
hw_mgr_clk_info->base_clk = icp_hw_mgr.icp_debug_clk;
hw_mgr_clk_info->curr_clk = icp_hw_mgr.icp_debug_clk;
- hw_mgr_clk_info->uncompressed_bw = icp_hw_mgr.icp_debug_clk;
- hw_mgr_clk_info->compressed_bw = icp_hw_mgr.icp_debug_clk;
- hw_mgr_clk_info->compressed_bw_ab = icp_hw_mgr.icp_debug_clk;
CAM_DBG(CAM_ICP, "bc = %d cc = %d",
hw_mgr_clk_info->base_clk, hw_mgr_clk_info->curr_clk);
return true;
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
index d5eb96f805c6..7219f2be947c 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -30,7 +30,7 @@
#define CAM_ICP_ROLE_PARENT 1
#define CAM_ICP_ROLE_CHILD 2
-#define CAM_FRAME_CMD_MAX 20
+#define CAM_FRAME_CMD_MAX CAM_ICP_CTX_REQ_MAX
#define CAM_MAX_OUT_RES 6
#define CAM_MAX_IN_RES 8
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
index f4e04ffeea4c..1e365e2ddbad 100644
--- a/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
@@ -25,6 +25,8 @@
#define CAM_ICP_CTX_MAX 54
+#define CAM_ICP_CTX_REQ_MAX 40
+
#define CPAS_IPE1_BIT 0x2000
#define CAM_ICP_DUMP_TAG_MAX_LEN 32
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175_130.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175_130.h
index 0fce972a8410..83e9278addbe 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175_130.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175_130.h
@@ -249,6 +249,9 @@ static struct cam_vfe_bus_ver2_stats_cfg_info stats_175_130_info = {
},
/* CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG */
{
+ },
+ /* CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF */
+ {
.res_index = CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF,
.cfg_offset = 0x00000AE4,
.num_cfg = 0x00000000,
@@ -602,6 +605,8 @@ static struct cam_vfe_bus_ver2_hw_info vfe175_130_bus_hw_info = {
.addr_sync_cfg = 0x0000207C,
.addr_sync_frame_hdr = 0x00002080,
.addr_sync_no_sync = 0x00002084,
+ .debug_status_cfg = 0x0000226C,
+ .debug_status_0 = 0x00002270,
},
.num_client = 24,
.bus_client_reg = {
diff --git a/drivers/media/platform/msm/npu/npu_debugfs.c b/drivers/media/platform/msm/npu/npu_debugfs.c
index 8d39021e75b3..fb1f62f64aa1 100644
--- a/drivers/media/platform/msm/npu/npu_debugfs.c
+++ b/drivers/media/platform/msm/npu/npu_debugfs.c
@@ -246,6 +246,7 @@ static ssize_t npu_debug_off_read(struct file *file,
len = scnprintf(buf, sizeof(buf), "offset=0x%08x cnt=%d\n",
debugfs->reg_off, debugfs->reg_cnt);
+ len = min(len, count);
if (copy_to_user(user_buf, buf, len)) {
pr_err("failed to copy to user\n");
@@ -275,49 +276,21 @@ static ssize_t npu_debug_log_read(struct file *file,
mutex_lock(&debugfs->log_lock);
if (debugfs->log_num_bytes_buffered != 0) {
- if ((debugfs->log_read_index +
- debugfs->log_num_bytes_buffered) >
- debugfs->log_buf_size) {
- /* Wrap around case */
- uint32_t remaining_to_end = debugfs->log_buf_size -
- debugfs->log_read_index;
- uint8_t *src_addr = debugfs->log_buf +
- debugfs->log_read_index;
- uint8_t *dst_addr = user_buf;
-
- if (copy_to_user(dst_addr, src_addr,
- remaining_to_end)) {
- pr_err("%s failed to copy to user\n", __func__);
- mutex_unlock(&debugfs->log_lock);
- return -EFAULT;
- }
- src_addr = debugfs->log_buf;
- dst_addr = user_buf + remaining_to_end;
- if (copy_to_user(dst_addr, src_addr,
- debugfs->log_num_bytes_buffered -
- remaining_to_end)) {
- pr_err("%s failed to copy to user\n", __func__);
- mutex_unlock(&debugfs->log_lock);
- return -EFAULT;
- }
- debugfs->log_read_index =
- debugfs->log_num_bytes_buffered -
- remaining_to_end;
- } else {
- if (copy_to_user(user_buf, (debugfs->log_buf +
- debugfs->log_read_index),
- debugfs->log_num_bytes_buffered)) {
- pr_err("%s failed to copy to user\n", __func__);
- mutex_unlock(&debugfs->log_lock);
- return -EFAULT;
- }
- debugfs->log_read_index +=
- debugfs->log_num_bytes_buffered;
- if (debugfs->log_read_index == debugfs->log_buf_size)
- debugfs->log_read_index = 0;
+ len = min(debugfs->log_num_bytes_buffered,
+ debugfs->log_buf_size - debugfs->log_read_index);
+ len = min(count, len);
+ if (copy_to_user(user_buf, (debugfs->log_buf +
+ debugfs->log_read_index), len)) {
+ pr_err("%s failed to copy to user\n", __func__);
+ mutex_unlock(&debugfs->log_lock);
+ return -EFAULT;
}
- len = debugfs->log_num_bytes_buffered;
- debugfs->log_num_bytes_buffered = 0;
+ debugfs->log_read_index += len;
+ if (debugfs->log_read_index == debugfs->log_buf_size)
+ debugfs->log_read_index = 0;
+
+ debugfs->log_num_bytes_buffered -= len;
+ *ppos += len;
}
/* mutex log unlock */
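The rewritten npu_debug_log_read() above returns at most one contiguous chunk per read(): the copy length is clamped first to the bytes remaining before the ring buffer wraps and then to the caller's count, and the wrapped remainder is left for the next read() call. A minimal sketch of the same single-chunk pattern, with hypothetical structure and field names standing in for the driver's debugfs context:

#include <linux/kernel.h>
#include <linux/uaccess.h>

/* Hypothetical stand-in for the driver's debugfs log context. */
struct log_ring {
	u8 *buf;
	size_t buf_size;
	size_t read_idx;
	size_t bytes_buffered;
};

static ssize_t log_ring_read(struct log_ring *r, char __user *ubuf, size_t count)
{
	size_t len;

	if (!r->bytes_buffered)
		return 0;

	/* copy only up to the physical end of the buffer ... */
	len = min(r->bytes_buffered, r->buf_size - r->read_idx);
	/* ... and never more than the caller asked for */
	len = min(count, len);

	if (copy_to_user(ubuf, r->buf + r->read_idx, len))
		return -EFAULT;

	r->read_idx = (r->read_idx + len) % r->buf_size;	/* wrap exactly at the end */
	r->bytes_buffered -= len;
	return len;	/* caller issues another read() for the wrapped part */
}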
diff --git a/drivers/media/platform/msm/npu_v2/npu_common.h b/drivers/media/platform/msm/npu_v2/npu_common.h
index 28f9cc8d7990..854d83cc01f4 100644
--- a/drivers/media/platform/msm/npu_v2/npu_common.h
+++ b/drivers/media/platform/msm/npu_v2/npu_common.h
@@ -113,8 +113,6 @@ struct npu_debugfs_ctx {
struct dentry *root;
uint32_t reg_off;
uint32_t reg_cnt;
- char *buf;
- size_t buf_len;
uint8_t *log_buf;
struct mutex log_lock;
uint32_t log_num_bytes_buffered;
@@ -123,6 +121,12 @@ struct npu_debugfs_ctx {
uint32_t log_buf_size;
};
+struct npu_debugfs_reg_ctx {
+ char *buf;
+ size_t buf_len;
+ struct npu_device *npu_dev;
+};
+
struct npu_mbox {
struct mbox_client client;
struct mbox_chan *chan;
@@ -339,5 +343,5 @@ void disable_fw(struct npu_device *npu_dev);
int load_fw(struct npu_device *npu_dev);
int unload_fw(struct npu_device *npu_dev);
int npu_set_bw(struct npu_device *npu_dev, int new_ib, int new_ab);
-
+int npu_process_kevent(struct npu_client *client, struct npu_kevent *kevt);
#endif /* _NPU_COMMON_H */
diff --git a/drivers/media/platform/msm/npu_v2/npu_debugfs.c b/drivers/media/platform/msm/npu_v2/npu_debugfs.c
index 137e733981e1..16d176ccabb0 100644
--- a/drivers/media/platform/msm/npu_v2/npu_debugfs.c
+++ b/drivers/media/platform/msm/npu_v2/npu_debugfs.c
@@ -29,6 +29,7 @@
*/
static int npu_debug_open(struct inode *inode, struct file *file);
static int npu_debug_release(struct inode *inode, struct file *file);
+static int npu_debug_reg_open(struct inode *inode, struct file *file);
static int npu_debug_reg_release(struct inode *inode, struct file *file);
static ssize_t npu_debug_reg_read(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos);
@@ -47,7 +48,7 @@ static ssize_t npu_debug_ctrl_write(struct file *file,
static struct npu_device *g_npu_dev;
static const struct file_operations npu_reg_fops = {
- .open = npu_debug_open,
+ .open = npu_debug_reg_open,
.release = npu_debug_reg_release,
.read = npu_debug_reg_read,
};
@@ -89,16 +90,28 @@ static int npu_debug_release(struct inode *inode, struct file *file)
return 0;
}
-static int npu_debug_reg_release(struct inode *inode, struct file *file)
+static int npu_debug_reg_open(struct inode *inode, struct file *file)
{
- struct npu_device *npu_dev = file->private_data;
- struct npu_debugfs_ctx *debugfs;
+ struct npu_debugfs_reg_ctx *reg_ctx;
- debugfs = &npu_dev->debugfs_ctx;
+ reg_ctx = kzalloc(sizeof(*reg_ctx), GFP_KERNEL);
+ if (!reg_ctx)
+ return -ENOMEM;
+
+ /* non-seekable */
+ file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+ reg_ctx->npu_dev = inode->i_private;
+ file->private_data = reg_ctx;
+ return 0;
+}
- kfree(debugfs->buf);
- debugfs->buf_len = 0;
- debugfs->buf = NULL;
+static int npu_debug_reg_release(struct inode *inode, struct file *file)
+{
+ struct npu_debugfs_reg_ctx *reg_ctx = file->private_data;
+
+ kfree(reg_ctx->buf);
+ kfree(reg_ctx);
+ file->private_data = NULL;
return 0;
}
@@ -108,7 +121,8 @@ static int npu_debug_reg_release(struct inode *inode, struct file *file)
static ssize_t npu_debug_reg_read(struct file *file,
char __user *user_buf, size_t count, loff_t *ppos)
{
- struct npu_device *npu_dev = file->private_data;
+ struct npu_debugfs_reg_ctx *reg_ctx = file->private_data;
+ struct npu_device *npu_dev = reg_ctx->npu_dev;
struct npu_debugfs_ctx *debugfs;
size_t len;
@@ -117,16 +131,16 @@ static ssize_t npu_debug_reg_read(struct file *file,
if (debugfs->reg_cnt == 0)
return 0;
- if (!debugfs->buf) {
+ if (!reg_ctx->buf) {
char dump_buf[64];
char *ptr;
int cnt, tot, off;
- debugfs->buf_len = sizeof(dump_buf) *
+ reg_ctx->buf_len = sizeof(dump_buf) *
DIV_ROUND_UP(debugfs->reg_cnt, ROW_BYTES);
- debugfs->buf = kzalloc(debugfs->buf_len, GFP_KERNEL);
+ reg_ctx->buf = kzalloc(reg_ctx->buf_len, GFP_KERNEL);
- if (!debugfs->buf)
+ if (!reg_ctx->buf)
return -ENOMEM;
ptr = (char *) (npu_dev->core_io.base + debugfs->reg_off);
@@ -140,28 +154,28 @@ static ssize_t npu_debug_reg_read(struct file *file,
hex_dump_to_buffer(ptr, min(cnt, ROW_BYTES),
ROW_BYTES, GROUP_BYTES, dump_buf,
sizeof(dump_buf), false);
- len = scnprintf(debugfs->buf + tot,
- debugfs->buf_len - tot, "0x%08x: %s\n",
+ len = scnprintf(reg_ctx->buf + tot,
+ reg_ctx->buf_len - tot, "0x%08x: %s\n",
((int) (unsigned long) ptr) -
((int) (unsigned long) npu_dev->core_io.base),
dump_buf);
ptr += ROW_BYTES;
tot += len;
- if (tot >= debugfs->buf_len)
+ if (tot >= reg_ctx->buf_len)
break;
}
npu_disable_core_power(npu_dev);
- debugfs->buf_len = tot;
+ reg_ctx->buf_len = tot;
}
- if (*ppos >= debugfs->buf_len)
+ if (*ppos >= reg_ctx->buf_len)
return 0; /* done reading */
- len = min(count, debugfs->buf_len - (size_t) *ppos);
- NPU_DBG("read %zi %zi\n", count, debugfs->buf_len - (size_t) *ppos);
- if (copy_to_user(user_buf, debugfs->buf + *ppos, len)) {
+ len = min(count, reg_ctx->buf_len - (size_t) *ppos);
+ NPU_DBG("read %zi %zi\n", count, reg_ctx->buf_len - (size_t) *ppos);
+ if (copy_to_user(user_buf, reg_ctx->buf + *ppos, len)) {
NPU_ERR("failed to copy to user\n");
return -EFAULT;
}
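The npu_v2 register-dump interface above moves its text buffer out of the shared npu_debugfs_ctx into a context allocated per open(), so concurrent readers no longer share one buffer and the buffer's lifetime matches the file descriptor's. A hedged sketch of that per-open pattern (structure and function names are illustrative; nonseekable_open() is shown in place of the patch's manual FMODE_LSEEK clearing):

#include <linux/fs.h>
#include <linux/slab.h>

/* Hypothetical per-open context: one dump buffer per file descriptor. */
struct dump_ctx {
	char *buf;	/* rendered register dump, built on first read */
	size_t buf_len;
	void *dev;	/* device handle taken from inode->i_private */
};

static int dump_open(struct inode *inode, struct file *file)
{
	struct dump_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;

	ctx->dev = inode->i_private;	/* set by debugfs_create_file() */
	file->private_data = ctx;
	return nonseekable_open(inode, file);
}

static int dump_release(struct inode *inode, struct file *file)
{
	struct dump_ctx *ctx = file->private_data;

	kfree(ctx->buf);	/* safe: kfree(NULL) is a no-op */
	kfree(ctx);
	file->private_data = NULL;
	return 0;
}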
diff --git a/drivers/media/platform/msm/npu_v2/npu_dev.c b/drivers/media/platform/msm/npu_v2/npu_dev.c
index a161381eb49f..c0d028d28603 100644
--- a/drivers/media/platform/msm/npu_v2/npu_dev.c
+++ b/drivers/media/platform/msm/npu_v2/npu_dev.c
@@ -13,6 +13,7 @@
/*
* Includes
*/
+#include <dt-bindings/msm/msm-bus-ids.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -111,8 +112,8 @@ static int npu_of_parse_pwrlevels(struct npu_device *npu_dev,
static int npu_pwrctrl_init(struct npu_device *npu_dev);
static int npu_probe(struct platform_device *pdev);
static int npu_remove(struct platform_device *pdev);
-static int npu_suspend(struct platform_device *dev, pm_message_t state);
-static int npu_resume(struct platform_device *dev);
+static int npu_pm_suspend(struct device *dev);
+static int npu_pm_resume(struct device *dev);
static int __init npu_init(void);
static void __exit npu_exit(void);
@@ -188,17 +189,17 @@ static const struct of_device_id npu_dt_match[] = {
{}
};
+static const struct dev_pm_ops npu_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(npu_pm_suspend, npu_pm_resume)
+};
+
static struct platform_driver npu_driver = {
.probe = npu_probe,
.remove = npu_remove,
-#if defined(CONFIG_PM)
- .suspend = npu_suspend,
- .resume = npu_resume,
-#endif
.driver = {
.name = "msm_npu",
.of_match_table = npu_dt_match,
- .pm = NULL,
+ .pm = &npu_pm_ops,
},
};
@@ -1310,28 +1311,6 @@ static int npu_exec_network_v2(struct npu_client *client,
return ret;
}
-static int npu_process_kevent(struct npu_kevent *kevt)
-{
- int ret = 0;
-
- switch (kevt->evt.type) {
- case MSM_NPU_EVENT_TYPE_EXEC_V2_DONE:
- ret = copy_to_user((void __user *)kevt->reserved[1],
- (void *)&kevt->reserved[0],
- kevt->evt.u.exec_v2_done.stats_buf_size);
- if (ret) {
- NPU_ERR("fail to copy to user\n");
- kevt->evt.u.exec_v2_done.stats_buf_size = 0;
- ret = -EFAULT;
- }
- break;
- default:
- break;
- }
-
- return ret;
-}
-
static int npu_receive_event(struct npu_client *client,
unsigned long arg)
{
@@ -1347,7 +1326,7 @@ static int npu_receive_event(struct npu_client *client,
kevt = list_first_entry(&client->evt_list,
struct npu_kevent, list);
list_del(&kevt->list);
- npu_process_kevent(kevt);
+ npu_process_kevent(client, kevt);
ret = copy_to_user(argp, &kevt->evt,
sizeof(struct msm_npu_event));
if (ret) {
@@ -1463,6 +1442,21 @@ static int npu_get_property(struct npu_client *client,
case MSM_NPU_PROP_ID_HARDWARE_VERSION:
prop.prop_param[0] = npu_dev->hw_version;
break;
+ case MSM_NPU_PROP_ID_IPC_QUEUE_INFO:
+ ret = npu_host_get_ipc_queue_size(npu_dev,
+ prop.prop_param[0]);
+ if (ret < 0) {
+ NPU_ERR("Can't get ipc queue %d size\n",
+ prop.prop_param[0]);
+ return ret;
+ }
+
+ prop.prop_param[1] = ret;
+ break;
+ case MSM_NPU_PROP_ID_DRV_FEATURE:
+ prop.prop_param[0] = MSM_NPU_FEATURE_MULTI_EXECUTE |
+ MSM_NPU_FEATURE_ASYNC_EXECUTE;
+ break;
default:
ret = npu_host_get_fw_property(client->npu_dev, &prop);
if (ret) {
@@ -1580,8 +1574,9 @@ static int npu_parse_dt_clock(struct npu_device *npu_dev)
sizeof(core_clks[i].clk_name));
core_clks[i].clk = devm_clk_get(&pdev->dev, clock_name);
if (IS_ERR(core_clks[i].clk)) {
- NPU_ERR("unable to get clk: %s\n", clock_name);
- rc = -EINVAL;
+ if (PTR_ERR(core_clks[i].clk) != -EPROBE_DEFER)
+ NPU_ERR("unable to get clk: %s\n", clock_name);
+ rc = PTR_ERR(core_clks[i].clk);
break;
}
@@ -1642,15 +1637,15 @@ regulator_err:
static int npu_parse_dt_bw(struct npu_device *npu_dev)
{
- int ret, len;
- uint32_t ports[2];
+ int ret, len, num_paths, i;
+ uint32_t ports[MAX_PATHS * 2];
struct platform_device *pdev = npu_dev->pdev;
struct npu_bwctrl *bwctrl = &npu_dev->bwctrl;
if (of_find_property(pdev->dev.of_node, "qcom,src-dst-ports", &len)) {
len /= sizeof(ports[0]);
- if (len != 2) {
- NPU_ERR("Unexpected number of ports\n");
+ if (len % 2 || len > ARRAY_SIZE(ports)) {
+ NPU_ERR("Unexpected number of ports %d\n", len);
return -EINVAL;
}
@@ -1660,6 +1655,7 @@ static int npu_parse_dt_bw(struct npu_device *npu_dev)
NPU_ERR("Failed to read bw property\n");
return ret;
}
+ num_paths = len / 2;
} else {
NPU_ERR("can't find bw property\n");
return -EINVAL;
@@ -1672,13 +1668,15 @@ static int npu_parse_dt_bw(struct npu_device *npu_dev)
bwctrl->bw_data.name = dev_name(&pdev->dev);
bwctrl->bw_data.active_only = false;
- bwctrl->bw_levels[0].vectors[0].src = ports[0];
- bwctrl->bw_levels[0].vectors[0].dst = ports[1];
- bwctrl->bw_levels[1].vectors[0].src = ports[0];
- bwctrl->bw_levels[1].vectors[0].dst = ports[1];
- bwctrl->bw_levels[0].num_paths = 1;
- bwctrl->bw_levels[1].num_paths = 1;
- bwctrl->num_paths = 1;
+ for (i = 0; i < num_paths; i++) {
+ bwctrl->bw_levels[0].vectors[i].src = ports[2 * i];
+ bwctrl->bw_levels[0].vectors[i].dst = ports[2 * i + 1];
+ bwctrl->bw_levels[1].vectors[i].src = ports[2 * i];
+ bwctrl->bw_levels[1].vectors[i].dst = ports[2 * i + 1];
+ }
+ bwctrl->bw_levels[0].num_paths = num_paths;
+ bwctrl->bw_levels[1].num_paths = num_paths;
+ bwctrl->num_paths = num_paths;
bwctrl->bus_client = msm_bus_scale_register_client(&bwctrl->bw_data);
if (!bwctrl->bus_client) {
@@ -1693,7 +1691,7 @@ static int npu_parse_dt_bw(struct npu_device *npu_dev)
int npu_set_bw(struct npu_device *npu_dev, int new_ib, int new_ab)
{
- int i, ret;
+ int i, j, ret;
struct npu_bwctrl *bwctrl = &npu_dev->bwctrl;
if (!bwctrl->bus_client) {
@@ -1706,10 +1704,17 @@ int npu_set_bw(struct npu_device *npu_dev, int new_ib, int new_ab)
i = (bwctrl->cur_idx + 1) % DBL_BUF;
- bwctrl->bw_levels[i].vectors[0].ib = new_ib * MBYTE;
- bwctrl->bw_levels[i].vectors[0].ab = new_ab / bwctrl->num_paths * MBYTE;
- bwctrl->bw_levels[i].vectors[1].ib = new_ib * MBYTE;
- bwctrl->bw_levels[i].vectors[1].ab = new_ab / bwctrl->num_paths * MBYTE;
+ for (j = 0; j < bwctrl->num_paths; j++) {
+ if ((bwctrl->bw_levels[i].vectors[j].dst ==
+ MSM_BUS_SLAVE_CLK_CTL) && (new_ib > 0)) {
+ bwctrl->bw_levels[i].vectors[j].ib = 1;
+ bwctrl->bw_levels[i].vectors[j].ab = 1;
+ } else {
+ bwctrl->bw_levels[i].vectors[j].ib = new_ib * MBYTE;
+ bwctrl->bw_levels[i].vectors[j].ab =
+ new_ab * MBYTE / bwctrl->num_paths;
+ }
+ }
ret = msm_bus_scale_client_update_request(bwctrl->bus_client, i);
if (ret) {
@@ -2198,7 +2203,7 @@ static int npu_probe(struct platform_device *pdev)
npu_dev->pdev = pdev;
mutex_init(&npu_dev->dev_lock);
- platform_set_drvdata(pdev, npu_dev);
+ dev_set_drvdata(&pdev->dev, npu_dev);
res = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "core");
if (!res) {
@@ -2425,6 +2430,7 @@ error_class_create:
unregister_chrdev_region(npu_dev->dev_num, 1);
npu_mbox_deinit(npu_dev);
error_get_dev_num:
+ dev_set_drvdata(&pdev->dev, NULL);
return rc;
}
@@ -2442,7 +2448,7 @@ static int npu_remove(struct platform_device *pdev)
device_destroy(npu_dev->class, npu_dev->dev_num);
class_destroy(npu_dev->class);
unregister_chrdev_region(npu_dev->dev_num, 1);
- platform_set_drvdata(pdev, NULL);
+ dev_set_drvdata(&pdev->dev, NULL);
npu_mbox_deinit(npu_dev);
msm_bus_scale_unregister_client(npu_dev->bwctrl.bus_client);
@@ -2454,17 +2460,27 @@ static int npu_remove(struct platform_device *pdev)
/*
* Suspend/Resume
*/
-#if defined(CONFIG_PM)
-static int npu_suspend(struct platform_device *dev, pm_message_t state)
+static int npu_pm_suspend(struct device *dev)
{
+ struct npu_device *npu_dev;
+
+ npu_dev = dev_get_drvdata(dev);
+ if (!npu_dev) {
+ NPU_ERR("invalid NPU dev\n");
+ return -EINVAL;
+ }
+
+ NPU_DBG("suspend npu\n");
+ npu_host_suspend(npu_dev);
+
return 0;
}
-static int npu_resume(struct platform_device *dev)
+static int npu_pm_resume(struct device *dev)
{
+ NPU_DBG("resume npu\n");
return 0;
}
-#endif
/*
* Module Entry Points
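The suspend/resume rework above drops the legacy platform_driver .suspend/.resume hooks and the CONFIG_PM guards in favour of dev_pm_ops wired up through SET_SYSTEM_SLEEP_PM_OPS, with the device state recovered via dev_get_drvdata() as set in probe. A minimal, self-contained sketch of the same wiring for a hypothetical platform driver (all foo_* names are made up):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>

struct foo_device { bool powered; };	/* hypothetical driver state */

static int foo_probe(struct platform_device *pdev)
{
	struct foo_device *foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return -ENOMEM;
	dev_set_drvdata(&pdev->dev, foo);	/* what the PM callbacks look up */
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	return 0;
}

static int foo_pm_suspend(struct device *dev)
{
	struct foo_device *foo = dev_get_drvdata(dev);

	if (!foo)
		return -EINVAL;
	foo->powered = false;	/* quiesce the hardware here */
	return 0;
}

static int foo_pm_resume(struct device *dev)
{
	return 0;	/* hardware re-initialized lazily on next use */
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_pm_suspend, foo_pm_resume)
};

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,
	.driver	= {
		.name	= "foo",
		.pm	= &foo_pm_ops,
	},
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL v2");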
diff --git a/drivers/media/platform/msm/npu_v2/npu_host_ipc.c b/drivers/media/platform/msm/npu_v2/npu_host_ipc.c
index e2fc1ff9740a..bf36cbdcd4af 100644
--- a/drivers/media/platform/msm/npu_v2/npu_host_ipc.c
+++ b/drivers/media/platform/msm/npu_v2/npu_host_ipc.c
@@ -412,3 +412,13 @@ int npu_host_ipc_post_init(struct npu_device *npu_dev)
{
return 0;
}
+
+int npu_host_get_ipc_queue_size(struct npu_device *npu_dev, uint32_t q_idx)
+{
+ if (q_idx >= ARRAY_SIZE(npu_q_setup)) {
+ NPU_ERR("Invalid ipc queue index %d\n", q_idx);
+ return -EINVAL;
+ }
+
+ return npu_q_setup[q_idx].size;
+}
diff --git a/drivers/media/platform/msm/npu_v2/npu_hw_access.c b/drivers/media/platform/msm/npu_v2/npu_hw_access.c
index e0269210fe6b..75a3c83798ca 100644
--- a/drivers/media/platform/msm/npu_v2/npu_hw_access.c
+++ b/drivers/media/platform/msm/npu_v2/npu_hw_access.c
@@ -148,6 +148,8 @@ void npu_mem_write(struct npu_device *npu_dev, void *dst, void *src,
dst_off += 1;
}
}
+
+ __iowmb();
}
int32_t npu_mem_read(struct npu_device *npu_dev, void *src, void *dst,
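The __iowmb() added to npu_mem_write() above orders the device-memory stores ahead of whatever write follows, typically the IPC doorbell that tells the firmware the message is ready. A hedged sketch of that ordering pattern using relaxed MMIO accessors plus the same explicit barrier the patch relies on for this arm64 target (the register layout here is invented):

#include <linux/io.h>
#include <linux/types.h>

/* Invented layout: a message RAM region followed by a doorbell register. */
static void post_msg(void __iomem *msg_ram, void __iomem *doorbell,
		     const u32 *msg, int words)
{
	int i;

	for (i = 0; i < words; i++)
		writel_relaxed(msg[i], msg_ram + 4 * i);

	/* make the payload visible to the device before ringing the doorbell */
	__iowmb();
	writel_relaxed(1, doorbell);
}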
diff --git a/drivers/media/platform/msm/npu_v2/npu_mgr.c b/drivers/media/platform/msm/npu_v2/npu_mgr.c
index d914ebfe826e..8170d94970fe 100644
--- a/drivers/media/platform/msm/npu_v2/npu_mgr.c
+++ b/drivers/media/platform/msm/npu_v2/npu_mgr.c
@@ -61,16 +61,36 @@ static void host_session_msg_hdlr(struct npu_device *npu_dev);
static void host_session_log_hdlr(struct npu_device *npu_dev);
static int host_error_hdlr(struct npu_device *npu_dev, bool force);
static int npu_send_network_cmd(struct npu_device *npu_dev,
- struct npu_network *network, void *cmd_ptr, bool async, bool force);
+ struct npu_network *network, void *cmd_ptr,
+ struct npu_network_cmd *cmd);
static int npu_send_misc_cmd(struct npu_device *npu_dev, uint32_t q_idx,
- void *cmd_ptr);
+ void *cmd_ptr, struct npu_misc_cmd *cmd);
static int npu_queue_event(struct npu_client *client, struct npu_kevent *evt);
static int npu_notify_aop(struct npu_device *npu_dev, bool on);
static int npu_notify_fw_pwr_state(struct npu_device *npu_dev,
uint32_t pwr_level, bool post);
static int load_fw_nolock(struct npu_device *npu_dev, bool enable);
-static void disable_fw_nolock(struct npu_device *npu_dev);
+static int disable_fw_nolock(struct npu_device *npu_dev);
static int update_dcvs_activity(struct npu_device *npu_dev, uint32_t activity);
+static void npu_queue_network_cmd(struct npu_network *network,
+ struct npu_network_cmd *cmd);
+static void npu_dequeue_network_cmd(struct npu_network *network,
+ struct npu_network_cmd *cmd);
+static struct npu_network_cmd *npu_find_network_cmd(struct npu_network *network,
+ uint32_t trans_id);
+static struct npu_network_cmd *npu_alloc_network_cmd(struct npu_host_ctx *ctx,
+ uint32_t stats_buf_size);
+static void npu_free_network_cmd(struct npu_host_ctx *ctx,
+ struct npu_network_cmd *cmd);
+static struct npu_misc_cmd *npu_alloc_misc_cmd(struct npu_host_ctx *ctx);
+static void npu_free_misc_cmd(struct npu_host_ctx *ctx,
+ struct npu_misc_cmd *cmd);
+static void npu_queue_misc_cmd(struct npu_host_ctx *ctx,
+ struct npu_misc_cmd *cmd);
+static void npu_dequeue_misc_cmd(struct npu_host_ctx *ctx,
+ struct npu_misc_cmd *cmd);
+static struct npu_misc_cmd *npu_find_misc_cmd(struct npu_host_ctx *ctx,
+ uint32_t trans_id);
/*
* Function Definitions - Init / Deinit
@@ -80,6 +100,7 @@ static int wait_npu_cpc_power_off(struct npu_device *npu_dev)
{
uint32_t reg_val = NPU_CPC_PWR_ON;
uint32_t wait_cnt = 0, max_wait_ms;
+ struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
max_wait_ms = NPU_FW_TIMEOUT_MS;
@@ -90,10 +111,16 @@ static int wait_npu_cpc_power_off(struct npu_device *npu_dev)
break;
}
+ if ((host_ctx->wdg_irq_sts != 0) ||
+ (host_ctx->err_irq_sts != 0)) {
+ NPU_WARN("fw is in bad state, skip wait\n");
+ return -EIO;
+ }
+
wait_cnt += NPU_FW_TIMEOUT_POLL_INTERVAL_MS;
if (wait_cnt > max_wait_ms) {
NPU_ERR("timeout wait for cpc power off\n");
- return -EPERM;
+ return -ETIMEDOUT;
}
msleep(NPU_FW_TIMEOUT_POLL_INTERVAL_MS);
} while (1);
@@ -310,7 +337,10 @@ static int enable_fw_nolock(struct npu_device *npu_dev)
reinit_completion(&host_ctx->fw_bringup_done);
ret = npu_notify_fw_pwr_state(npu_dev, npu_dev->pwrctrl.active_pwrlevel,
true);
- if (ret) {
+ if (ret == -ETIMEDOUT) {
+ NPU_ERR("notify fw power state timed out\n");
+ goto enable_pw_fail;
+ } else if (ret) {
NPU_ERR("notify fw power state failed\n");
goto notify_fw_pwr_fail;
}
@@ -320,7 +350,7 @@ static int enable_fw_nolock(struct npu_device *npu_dev)
if (!ret) {
NPU_ERR("Wait for fw bringup timedout\n");
ret = -ETIMEDOUT;
- goto notify_fw_pwr_fail;
+ goto enable_pw_fail;
} else {
ret = 0;
}
@@ -358,17 +388,21 @@ int enable_fw(struct npu_device *npu_dev)
ret = enable_fw_nolock(npu_dev);
mutex_unlock(&host_ctx->lock);
+ if (ret == -ETIMEDOUT) {
+		NPU_ERR("Enable fw timed out, force SSR\n");
+ host_error_hdlr(npu_dev, true);
+ }
return ret;
}
-static void disable_fw_nolock(struct npu_device *npu_dev)
+static int disable_fw_nolock(struct npu_device *npu_dev)
{
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
int ret = 0;
if (!host_ctx->fw_ref_cnt) {
NPU_WARN("fw_ref_cnt is 0\n");
- return;
+ return ret;
}
host_ctx->fw_ref_cnt--;
@@ -376,16 +410,20 @@ static void disable_fw_nolock(struct npu_device *npu_dev)
if (host_ctx->fw_state != FW_ENABLED) {
NPU_ERR("fw is not enabled\n");
- return;
+ return ret;
}
if (host_ctx->fw_ref_cnt > 0)
- return;
+ return ret;
/* turn on auto ACK for warm shuts down */
npu_cc_reg_write(npu_dev, NPU_CC_NPU_CPC_RSC_CTRL, 3);
reinit_completion(&host_ctx->fw_shutdown_done);
- if (npu_notify_fw_pwr_state(npu_dev, NPU_PWRLEVEL_OFF, false)) {
+ ret = npu_notify_fw_pwr_state(npu_dev, NPU_PWRLEVEL_OFF, false);
+ if (ret == -ETIMEDOUT) {
+ NPU_ERR("notify fw pwr off timed out\n");
+ goto fail;
+ } else if (ret) {
NPU_WARN("notify fw pwr off failed\n");
msleep(500);
}
@@ -393,10 +431,15 @@ static void disable_fw_nolock(struct npu_device *npu_dev)
if (!host_ctx->auto_pil_disable) {
ret = wait_for_completion_timeout(
&host_ctx->fw_shutdown_done, NW_RSC_TIMEOUT_MS);
- if (!ret)
+ if (!ret) {
NPU_ERR("Wait for fw shutdown timedout\n");
- else
+ ret = -ETIMEDOUT;
+ goto fail;
+ } else {
ret = wait_npu_cpc_power_off(npu_dev);
+ if (ret)
+ goto fail;
+ }
}
npu_disable_irq(npu_dev);
@@ -413,15 +456,24 @@ static void disable_fw_nolock(struct npu_device *npu_dev)
host_ctx->fw_state = FW_UNLOADED;
NPU_DBG("fw is unloaded\n");
}
+
+fail:
+ return ret;
}
void disable_fw(struct npu_device *npu_dev)
{
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+ int ret = 0;
mutex_lock(&host_ctx->lock);
- disable_fw_nolock(npu_dev);
+ ret = disable_fw_nolock(npu_dev);
mutex_unlock(&host_ctx->lock);
+
+ if (ret == -ETIMEDOUT) {
+		NPU_ERR("disable fw timed out, force SSR\n");
+ host_error_hdlr(npu_dev, true);
+ }
}
/* notify fw current power level */
@@ -432,6 +484,7 @@ static int npu_notify_fw_pwr_state(struct npu_device *npu_dev,
struct ipc_cmd_notify_pwr_pkt pwr_notify_pkt;
int ret = 0;
uint32_t reg_val;
+ struct npu_misc_cmd *misc_cmd = NULL;
/* Clear PWR_NOTIFY bits before sending cmd */
reg_val = REGR(npu_dev, REG_NPU_FW_CTRL_STATUS);
@@ -456,8 +509,17 @@ static int npu_notify_fw_pwr_state(struct npu_device *npu_dev,
pwr_notify_pkt.notify_type = post ?
NPU_POWER_POST_NOTIFY : NPU_POWER_PRE_NOTIFY;
+ misc_cmd = npu_alloc_misc_cmd(host_ctx);
+ if (!misc_cmd) {
+ NPU_ERR("Can't allocate misc_cmd\n");
+ return -ENOMEM;
+ }
+
+ misc_cmd->cmd_type = NPU_IPC_CMD_NOTIFY_PWR;
+ misc_cmd->trans_id = pwr_notify_pkt.header.trans_id;
+
ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_CMD_HIGH_PRIORITY,
- &pwr_notify_pkt);
+ &pwr_notify_pkt, misc_cmd);
if (ret) {
NPU_ERR("NPU_IPC_CMD_NOTIFY_PWR sent failed: %d\n", ret);
@@ -471,11 +533,10 @@ static int npu_notify_fw_pwr_state(struct npu_device *npu_dev,
ret = -EPERM;
}
}
-
- /* allow to send another misc cmd if timedout */
- host_ctx->misc_cmd_pending = false;
}
+ npu_free_misc_cmd(host_ctx, misc_cmd);
+
return ret;
}
@@ -606,11 +667,10 @@ int npu_host_update_power(struct npu_device *npu_dev)
int npu_host_init(struct npu_device *npu_dev)
{
- int sts = 0;
+ int ret = 0;
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
memset(host_ctx, 0, sizeof(*host_ctx));
- init_completion(&host_ctx->misc_cmd_done);
init_completion(&host_ctx->fw_deinit_done);
init_completion(&host_ctx->fw_bringup_done);
init_completion(&host_ctx->fw_shutdown_done);
@@ -625,7 +685,7 @@ int npu_host_init(struct npu_device *npu_dev)
&host_ctx->nb);
if (IS_ERR(host_ctx->notif_hdle)) {
NPU_ERR("register event notification failed\n");
- sts = PTR_ERR(host_ctx->notif_hdle);
+ ret = PTR_ERR(host_ctx->notif_hdle);
host_ctx->notif_hdle = NULL;
goto fail;
}
@@ -634,7 +694,7 @@ int npu_host_init(struct npu_device *npu_dev)
host_ctx->wq_pri =
alloc_workqueue("npu_ipc_wq", WQ_HIGHPRI | WQ_UNBOUND, 0);
if (!host_ctx->wq || !host_ctx->wq_pri) {
- sts = -EPERM;
+ ret = -EPERM;
goto fail;
} else {
INIT_WORK(&host_ctx->ipc_irq_work, npu_ipc_irq_work);
@@ -646,25 +706,47 @@ int npu_host_init(struct npu_device *npu_dev)
npu_disable_fw_work);
}
- host_ctx->ipc_msg_buf = kzalloc(NPU_IPC_BUF_LENGTH, GFP_KERNEL);
- if (!host_ctx->ipc_msg_buf) {
- NPU_ERR("Failed to allocate ipc buffer\n");
- sts = -ENOMEM;
+ host_ctx->network_cmd_cache = kmem_cache_create("network_cmd_cache",
+ sizeof(struct npu_network_cmd), 0, 0, NULL);
+ if (!host_ctx->network_cmd_cache) {
+ NPU_ERR("Failed to create network_cmd_cache\n");
+ ret = -ENOMEM;
goto fail;
}
- host_ctx->prop_buf = kzalloc(sizeof(struct msm_npu_property),
- GFP_KERNEL);
- if (!host_ctx->prop_buf) {
- sts = -ENOMEM;
+ host_ctx->misc_cmd_cache = kmem_cache_create("misc_cmd_cache",
+ sizeof(struct npu_misc_cmd), 0, 0, NULL);
+ if (!host_ctx->misc_cmd_cache) {
+ NPU_ERR("Failed to create misc_cmd_cache\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ host_ctx->stats_buf_cache = kmem_cache_create(
+ "stats_buf_cache", NPU_MAX_STATS_BUF_SIZE, 0, 0, NULL);
+ if (!host_ctx->stats_buf_cache) {
+ NPU_ERR("Failed to create stats_buf_cache\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ host_ctx->ipc_msg_buf = kzalloc(NPU_IPC_BUF_LENGTH, GFP_KERNEL);
+ if (!host_ctx->ipc_msg_buf) {
+ NPU_ERR("Failed to allocate ipc buffer\n");
+ ret = -ENOMEM;
goto fail;
}
+ INIT_LIST_HEAD(&host_ctx->misc_cmd_list);
host_ctx->auto_pil_disable = false;
- return sts;
+ return 0;
+
fail:
kfree(host_ctx->ipc_msg_buf);
+ kmem_cache_destroy(host_ctx->stats_buf_cache);
+ kmem_cache_destroy(host_ctx->network_cmd_cache);
+ kmem_cache_destroy(host_ctx->misc_cmd_cache);
if (host_ctx->wq)
destroy_workqueue(host_ctx->wq);
if (host_ctx->wq_pri)
@@ -673,15 +755,17 @@ fail:
subsys_notif_unregister_notifier(host_ctx->notif_hdle,
&host_ctx->nb);
mutex_destroy(&host_ctx->lock);
- return sts;
+ return ret;
}
void npu_host_deinit(struct npu_device *npu_dev)
{
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
- kfree(host_ctx->prop_buf);
kfree(host_ctx->ipc_msg_buf);
+ kmem_cache_destroy(host_ctx->stats_buf_cache);
+ kmem_cache_destroy(host_ctx->network_cmd_cache);
+ kmem_cache_destroy(host_ctx->misc_cmd_cache);
destroy_workqueue(host_ctx->wq);
destroy_workqueue(host_ctx->wq_pri);
subsys_notif_unregister_notifier(host_ctx->notif_hdle, &host_ctx->nb);
@@ -777,6 +861,8 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
struct npu_network *network = NULL;
struct npu_kevent kevt;
+ struct npu_network_cmd *cmd;
+ struct npu_misc_cmd *misc_cmd;
bool fw_alive = true;
int i, ret = 0;
@@ -790,10 +876,11 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
if (host_ctx->wdg_irq_sts) {
NPU_INFO("watchdog irq triggered\n");
- npu_dump_debug_info(npu_dev);
fw_alive = false;
}
+ npu_dump_debug_info(npu_dev);
+
/*
* if fw is still alive, notify fw before power off
* otherwise if ssr happens or notify fw returns failure
@@ -827,6 +914,9 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
goto fw_start_done;
}
+ host_ctx->wdg_irq_sts = 0;
+ host_ctx->err_irq_sts = 0;
+
/* Keep reading ctrl status until NPU is ready */
if (wait_for_status_ready(npu_dev, REG_NPU_FW_CTRL_STATUS,
FW_CTRL_STATUS_MAIN_THREAD_READY_VAL, false)) {
@@ -853,29 +943,41 @@ fw_start_done:
}
complete(&host_ctx->fw_deinit_done);
- host_ctx->wdg_irq_sts = 0;
- host_ctx->err_irq_sts = 0;
/* flush all pending npu cmds */
for (i = 0; i < MAX_LOADED_NETWORK; i++) {
network = &host_ctx->networks[i];
- if (network->is_valid && network->cmd_pending &&
- network->fw_error) {
- if (network->cmd_async) {
- NPU_DBG("async cmd, queue ssr event\n");
- kevt.evt.type = MSM_NPU_EVENT_TYPE_SSR;
- kevt.evt.u.ssr.network_hdl =
- network->network_hdl;
- if (npu_queue_event(network->client, &kevt))
- NPU_ERR("queue npu event failed\n");
- } else {
- NPU_DBG("complete network %llx\n",
- network->id);
- complete(&network->cmd_done);
+ if (!network->is_valid || !network->fw_error)
+ continue;
+
+ if (network->is_async) {
+ NPU_DBG("async cmd, queue ssr event\n");
+ kevt.evt.type = MSM_NPU_EVENT_TYPE_SSR;
+ kevt.evt.u.ssr.network_hdl =
+ network->network_hdl;
+ if (npu_queue_event(network->client, &kevt))
+ NPU_ERR("queue npu event failed\n");
+
+ while (!list_empty(&network->cmd_list)) {
+ cmd = list_first_entry(&network->cmd_list,
+ struct npu_network_cmd, list);
+ npu_dequeue_network_cmd(network, cmd);
+ npu_free_network_cmd(host_ctx, cmd);
+ }
+ } else {
+ list_for_each_entry(cmd, &network->cmd_list, list) {
+ NPU_DBG("complete network %llx trans_id %d\n",
+ network->id, cmd->trans_id);
+ complete(&cmd->cmd_done);
}
}
}
- complete_all(&host_ctx->misc_cmd_done);
+
+ list_for_each_entry(misc_cmd, &host_ctx->misc_cmd_list, list) {
+ NPU_DBG("complete misc cmd trans_id %d\n",
+ misc_cmd->trans_id);
+ complete(&misc_cmd->cmd_done);
+ }
mutex_unlock(&host_ctx->lock);
return ret;
@@ -989,6 +1091,7 @@ static void turn_off_fw_logging(struct npu_device *npu_dev)
{
struct ipc_cmd_log_state_pkt log_packet;
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+ struct npu_misc_cmd *misc_cmd = NULL;
int ret = 0;
mutex_lock(&host_ctx->lock);
@@ -999,16 +1102,25 @@ static void turn_off_fw_logging(struct npu_device *npu_dev)
log_packet.header.flags = 0xF;
log_packet.log_state.module_msk = 0;
log_packet.log_state.level_msk = 0;
+
+ misc_cmd = npu_alloc_misc_cmd(host_ctx);
+ if (!misc_cmd) {
+ NPU_ERR("Can't allocate misc_cmd\n");
+ return;
+ }
+
+ misc_cmd->cmd_type = NPU_IPC_CMD_CONFIG_LOG;
+ misc_cmd->trans_id = log_packet.header.trans_id;
+
ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_CMD_HIGH_PRIORITY,
- &log_packet);
+ &log_packet, misc_cmd);
NPU_DBG("NPU_IPC_CMD_CONFIG_LOG sent status: %d\n", ret);
if (ret)
NPU_ERR("npu_host_ipc_send_cmd failed\n");
- else
- host_ctx->misc_cmd_pending = false;
+ npu_free_misc_cmd(host_ctx, misc_cmd);
mutex_unlock(&host_ctx->lock);
}
@@ -1038,7 +1150,13 @@ static int wait_for_status_ready(struct npu_device *npu_dev,
if (!wait_cnt) {
NPU_ERR("timeout wait for status %x[%x] in reg %x\n",
status_bits, ctrl_sts, status_reg);
- return -EPERM;
+ return -ETIMEDOUT;
+ }
+
+ if ((host_ctx->wdg_irq_sts != 0) ||
+ (host_ctx->err_irq_sts != 0)) {
+ NPU_WARN("fw is in bad state, skip wait\n");
+ return -EIO;
}
if (poll)
@@ -1123,15 +1241,9 @@ static struct npu_network *alloc_network(struct npu_host_ctx *ctx,
memset(network, 0, sizeof(struct npu_network));
network->id = i + 1;
- init_completion(&network->cmd_done);
network->is_valid = true;
network->client = client;
- network->stats_buf = kzalloc(NPU_MAX_STATS_BUF_SIZE,
- GFP_KERNEL);
- if (!network->stats_buf) {
- memset(network, 0, sizeof(struct npu_network));
- return NULL;
- }
+ INIT_LIST_HEAD(&network->cmd_list);
ctx->network_num++;
return network;
@@ -1194,14 +1306,23 @@ static void free_network(struct npu_host_ctx *ctx, struct npu_client *client,
int64_t id)
{
struct npu_network *network = NULL;
+ struct npu_network_cmd *cmd;
WARN_ON(!mutex_is_locked(&ctx->lock));
network = get_network_by_id(ctx, client, id);
if (network) {
network_put(network);
+ while (!list_empty(&network->cmd_list)) {
+ cmd = list_first_entry(&network->cmd_list,
+ struct npu_network_cmd, list);
+ NPU_WARN("Free cmd %x type %x\n", cmd->cmd_id,
+ cmd->cmd_type);
+ npu_dequeue_network_cmd(network, cmd);
+ npu_free_network_cmd(ctx, cmd);
+ }
+
if (atomic_read(&network->ref_cnt) == 0) {
- kfree(network->stats_buf);
memset(network, 0, sizeof(struct npu_network));
ctx->network_num--;
} else {
@@ -1214,6 +1335,41 @@ static void free_network(struct npu_host_ctx *ctx, struct npu_client *client,
/*
* Function Definitions - IPC
*/
+static struct npu_network_cmd *npu_alloc_network_cmd(struct npu_host_ctx *ctx,
+ uint32_t stats_buf_size)
+{
+ struct npu_network_cmd *cmd = NULL;
+
+ cmd = kmem_cache_zalloc(ctx->network_cmd_cache, GFP_KERNEL);
+ if (!cmd) {
+ NPU_ERR("Can't allocate network cmd\n");
+ return NULL;
+ }
+
+ init_completion(&cmd->cmd_done);
+
+ if (stats_buf_size == 0)
+ return cmd;
+
+ cmd->stats_buf = kmem_cache_zalloc(ctx->stats_buf_cache,
+ GFP_KERNEL);
+ if (!cmd->stats_buf) {
+ kmem_cache_free(ctx->network_cmd_cache, cmd);
+ return NULL;
+ }
+ cmd->stats_buf_size = stats_buf_size;
+
+ return cmd;
+}
+
+static void npu_free_network_cmd(struct npu_host_ctx *ctx,
+ struct npu_network_cmd *cmd)
+{
+ if (cmd->stats_buf)
+ kmem_cache_free(ctx->stats_buf_cache, cmd->stats_buf);
+ kmem_cache_free(ctx->network_cmd_cache, cmd);
+}
+
static int npu_queue_event(struct npu_client *client, struct npu_kevent *evt)
{
struct npu_kevent *kevt = kmalloc(sizeof(*kevt), GFP_KERNEL);
@@ -1231,12 +1387,145 @@ static int npu_queue_event(struct npu_client *client, struct npu_kevent *evt)
return 0;
}
+static void npu_queue_network_cmd(struct npu_network *network,
+ struct npu_network_cmd *cmd)
+{
+ INIT_LIST_HEAD(&cmd->list);
+ list_add_tail(&cmd->list, &network->cmd_list);
+}
+
+static void npu_dequeue_network_cmd(struct npu_network *network,
+ struct npu_network_cmd *cmd)
+{
+ list_del(&cmd->list);
+}
+
+static struct npu_network_cmd *npu_find_network_cmd(struct npu_network *network,
+ uint32_t trans_id)
+{
+ struct npu_network_cmd *cmd;
+
+ list_for_each_entry(cmd, &network->cmd_list, list) {
+ if (cmd->trans_id == trans_id) {
+ NPU_DBG("find cmd for trans_id %d\n", trans_id);
+ return cmd;
+ }
+ }
+
+ NPU_ERR("can't find cmd for trans_id %d\n", trans_id);
+ return NULL;
+}
+
+static struct npu_misc_cmd *npu_alloc_misc_cmd(struct npu_host_ctx *ctx)
+{
+ struct npu_misc_cmd *cmd = NULL;
+
+ cmd = kmem_cache_zalloc(ctx->misc_cmd_cache, GFP_KERNEL);
+ if (!cmd)
+ return NULL;
+
+ init_completion(&cmd->cmd_done);
+
+ return cmd;
+}
+
+static void npu_free_misc_cmd(struct npu_host_ctx *ctx,
+ struct npu_misc_cmd *cmd)
+{
+ kmem_cache_free(ctx->misc_cmd_cache, cmd);
+}
+
+static void npu_queue_misc_cmd(struct npu_host_ctx *ctx,
+ struct npu_misc_cmd *cmd)
+{
+ INIT_LIST_HEAD(&cmd->list);
+ list_add_tail(&cmd->list, &ctx->misc_cmd_list);
+}
+
+static void npu_dequeue_misc_cmd(struct npu_host_ctx *ctx,
+ struct npu_misc_cmd *cmd)
+{
+ list_del(&cmd->list);
+}
+
+static struct npu_misc_cmd *npu_find_misc_cmd(struct npu_host_ctx *ctx,
+ uint32_t trans_id)
+{
+ struct npu_misc_cmd *cmd;
+
+ list_for_each_entry(cmd, &ctx->misc_cmd_list, list) {
+ if (cmd->trans_id == trans_id) {
+ NPU_DBG("find misc cmd for trans_id %d\n", trans_id);
+ return cmd;
+ }
+ }
+
+ NPU_ERR("can't find misc cmd for trans_id %d\n", trans_id);
+ return NULL;
+}
+
+int npu_process_kevent(struct npu_client *client, struct npu_kevent *kevt)
+{
+ struct npu_device *npu_dev = client->npu_dev;
+ struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+ int ret = 0;
+
+ mutex_lock(&host_ctx->lock);
+
+ switch (kevt->evt.type) {
+ case MSM_NPU_EVENT_TYPE_EXEC_V2_DONE:
+ {
+ struct npu_network_cmd *cmd = NULL;
+ struct npu_network *network;
+
+ network = get_network_by_hdl(host_ctx,
+ client, kevt->reserved[0]);
+ if (!network) {
+ NPU_ERR("Can't find network %x\n", kevt->reserved[0]);
+ ret = -EINVAL;
+ break;
+ }
+
+ cmd = npu_find_network_cmd(network, kevt->reserved[1]);
+ if (!cmd) {
+ NPU_ERR("can't find exec cmd with trans_id:%d\n",
+ kevt->reserved[1]);
+ network_put(network);
+ ret = -EINVAL;
+ break;
+ }
+
+ kevt->evt.reserved[0] = cmd->cmd_id;
+ ret = copy_to_user((void __user *)cmd->stats_buf_u,
+ (void *)cmd->stats_buf,
+ kevt->evt.u.exec_v2_done.stats_buf_size);
+ if (ret) {
+ NPU_ERR("fail to copy to user\n");
+ kevt->evt.u.exec_v2_done.stats_buf_size = 0;
+ ret = -EFAULT;
+ }
+
+ npu_dequeue_network_cmd(network, cmd);
+ npu_free_network_cmd(host_ctx, cmd);
+ network_put(network);
+ break;
+ }
+ default:
+ break;
+ }
+ mutex_unlock(&host_ctx->lock);
+
+ return ret;
+}
+
static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
{
uint32_t msg_id;
struct npu_network *network = NULL;
struct npu_kevent kevt;
struct npu_device *npu_dev = host_ctx->npu_dev;
+ struct npu_network_cmd *network_cmd = NULL;
+ struct npu_misc_cmd *misc_cmd = NULL;
msg_id = msg[1];
switch (msg_id) {
@@ -1257,24 +1546,19 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
break;
}
- if (network->trans_id != exe_rsp_pkt->header.trans_id) {
- NPU_ERR("execute_pkt trans_id is not match %d:%d\n",
- network->trans_id,
+ network_cmd = npu_find_network_cmd(network,
+ exe_rsp_pkt->header.trans_id);
+ if (!network_cmd) {
+ NPU_ERR("can't find exec cmd with trans_id:%d\n",
exe_rsp_pkt->header.trans_id);
- NPU_ERR("execute_pkt network hdl check %d:%d\n",
- network->network_hdl,
- exe_rsp_pkt->network_hdl);
- NPU_ERR("execute_pkt network_id check %x\n",
- network->id);
network_put(network);
break;
}
- network->cmd_pending = false;
- network->cmd_ret_status = exe_rsp_pkt->header.status;
+ network_cmd->ret_status = exe_rsp_pkt->header.status;
- if (!network->cmd_async) {
- complete(&network->cmd_done);
+ if (!network_cmd->async) {
+ complete(&network_cmd->cmd_done);
} else {
NPU_DBG("async cmd, queue event\n");
kevt.evt.type = MSM_NPU_EVENT_TYPE_EXEC_DONE;
@@ -1307,15 +1591,12 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
break;
}
- if (network->trans_id != exe_rsp_pkt->header.trans_id) {
- NPU_ERR("execute_pkt_v2 trans_id is not match %d:%d\n",
- network->trans_id,
- exe_rsp_pkt->header.trans_id);
- NPU_ERR("execute_pkt_v2 network hdl check %d:%d\n",
- network->network_hdl,
+ network_cmd = npu_find_network_cmd(network,
+ exe_rsp_pkt->header.trans_id);
+ if (!network_cmd) {
+ NPU_ERR("can't find exec cmd with trans_id:%d:%d\n",
+ exe_rsp_pkt->header.trans_id,
exe_rsp_pkt->network_hdl);
- NPU_ERR("execute_pkt_v2 network id check %x\n",
- network->id);
network_put(network);
break;
}
@@ -1324,17 +1605,16 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
stats_size = exe_rsp_pkt->header.size - sizeof(*exe_rsp_pkt);
NPU_DBG("stats_size %d:%d\n", exe_rsp_pkt->header.size,
stats_size);
- stats_size = stats_size < network->stats_buf_size ?
- stats_size : network->stats_buf_size;
+ stats_size = stats_size < network_cmd->stats_buf_size ?
+ stats_size : network_cmd->stats_buf_size;
if (stats_size)
- memcpy(network->stats_buf, exe_rsp_pkt->stats_data,
+ memcpy(network_cmd->stats_buf, exe_rsp_pkt->stats_data,
stats_size);
- network->stats_buf_size = stats_size;
- network->cmd_pending = false;
- network->cmd_ret_status = exe_rsp_pkt->header.status;
+ network_cmd->stats_buf_size = stats_size;
+ network_cmd->ret_status = exe_rsp_pkt->header.status;
- if (network->cmd_async) {
+ if (network_cmd->async) {
NPU_DBG("async cmd, queue event\n");
kevt.evt.type = MSM_NPU_EVENT_TYPE_EXEC_V2_DONE;
kevt.evt.u.exec_v2_done.network_hdl =
@@ -1342,13 +1622,12 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
kevt.evt.u.exec_v2_done.exec_result =
exe_rsp_pkt->header.status;
kevt.evt.u.exec_v2_done.stats_buf_size = stats_size;
- kevt.reserved[0] = (uint64_t)network->stats_buf;
- kevt.reserved[1] =
- (uint64_t __user)network->stats_buf_u;
+ kevt.reserved[0] = (uint64_t)network->network_hdl;
+ kevt.reserved[1] = (uint64_t)network_cmd->trans_id;
if (npu_queue_event(network->client, &kevt))
NPU_ERR("queue npu event failed\n");
} else {
- complete(&network->cmd_done);
+ complete(&network_cmd->cmd_done);
}
network_put(network);
break;
@@ -1375,19 +1654,19 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
break;
}
- if (network->trans_id != load_rsp_pkt->header.trans_id) {
- NPU_ERR("load_rsp_pkt trans_id is not match %d:%d\n",
- network->trans_id,
+ network_cmd = npu_find_network_cmd(network,
+ load_rsp_pkt->header.trans_id);
+ if (!network_cmd) {
+ NPU_ERR("can't find load cmd with trans_id:%d\n",
load_rsp_pkt->header.trans_id);
network_put(network);
break;
}
network->network_hdl = load_rsp_pkt->network_hdl;
- network->cmd_pending = false;
- network->cmd_ret_status = load_rsp_pkt->header.status;
+ network_cmd->ret_status = load_rsp_pkt->header.status;
- complete(&network->cmd_done);
+ complete(&network_cmd->cmd_done);
network_put(network);
break;
}
@@ -1408,18 +1687,18 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
break;
}
- if (network->trans_id != unload_rsp_pkt->header.trans_id) {
- NPU_ERR("unload_rsp_pkt trans_id is not match %d:%d\n",
- network->trans_id,
+ network_cmd = npu_find_network_cmd(network,
+ unload_rsp_pkt->header.trans_id);
+ if (!network_cmd) {
+ NPU_ERR("can't find unload cmd with trans_id:%d\n",
unload_rsp_pkt->header.trans_id);
network_put(network);
break;
}
- network->cmd_pending = false;
- network->cmd_ret_status = unload_rsp_pkt->header.status;
+ network_cmd->ret_status = unload_rsp_pkt->header.status;
- complete(&network->cmd_done);
+ complete(&network_cmd->cmd_done);
network_put(network);
break;
}
@@ -1430,9 +1709,17 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
NPU_DBG("NPU_IPC_MSG_LOOPBACK_DONE loopbackParams: 0x%x\n",
lb_rsp_pkt->loopbackParams);
- host_ctx->misc_cmd_result = lb_rsp_pkt->header.status;
- host_ctx->misc_cmd_pending = false;
- complete_all(&host_ctx->misc_cmd_done);
+
+ misc_cmd = npu_find_misc_cmd(host_ctx,
+ lb_rsp_pkt->header.trans_id);
+ if (!misc_cmd) {
+ NPU_ERR("can't find loopback cmd with trans_id:%d\n",
+ lb_rsp_pkt->header.trans_id);
+ break;
+ }
+
+ misc_cmd->ret_status = lb_rsp_pkt->header.status;
+ complete_all(&misc_cmd->cmd_done);
break;
}
case NPU_IPC_MSG_SET_PROPERTY_DONE:
@@ -1446,10 +1733,16 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
prop_rsp_pkt->prop_id,
param[0]);
- host_ctx->misc_cmd_result = prop_rsp_pkt->header.status;
- host_ctx->misc_cmd_pending = false;
+ misc_cmd = npu_find_misc_cmd(host_ctx,
+ prop_rsp_pkt->header.trans_id);
+ if (!misc_cmd) {
+ NPU_ERR("can't find set_prop cmd with trans_id:%d\n",
+ prop_rsp_pkt->header.trans_id);
+ break;
+ }
- complete_all(&host_ctx->misc_cmd_done);
+ misc_cmd->ret_status = prop_rsp_pkt->header.status;
+ complete(&misc_cmd->cmd_done);
break;
}
case NPU_IPC_MSG_GET_PROPERTY_DONE:
@@ -1466,17 +1759,28 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
prop_rsp_pkt->num_params,
prop_rsp_pkt->prop_param[0]);
- host_ctx->misc_cmd_result = prop_rsp_pkt->header.status;
- host_ctx->misc_cmd_pending = false;
+ misc_cmd = npu_find_misc_cmd(host_ctx,
+ prop_rsp_pkt->header.trans_id);
+ if (!misc_cmd) {
+ NPU_ERR("can't find get_prop cmd with trans_id:%d\n",
+ prop_rsp_pkt->header.trans_id);
+ break;
+ }
+
+ misc_cmd->ret_status = prop_rsp_pkt->header.status;
if (prop_rsp_pkt->num_params > 0) {
/* Copy prop data to kernel buffer */
prop_size = prop_rsp_pkt->header.size -
sizeof(struct ipc_msg_header_pkt);
- memcpy(host_ctx->prop_buf, prop_data, prop_size);
+ if (prop_size > sizeof(struct msm_npu_property)) {
+ NPU_WARN("Invalid prop size %d\n", prop_size);
+ prop_size = sizeof(struct msm_npu_property);
+ }
+ memcpy(&misc_cmd->u.prop, prop_data, prop_size);
}
- complete_all(&host_ctx->misc_cmd_done);
+ complete_all(&misc_cmd->cmd_done);
break;
}
case NPU_IPC_MSG_GENERAL_NOTIFY:
@@ -1620,7 +1924,8 @@ int32_t npu_host_unmap_buf(struct npu_client *client,
}
static int npu_send_network_cmd(struct npu_device *npu_dev,
- struct npu_network *network, void *cmd_ptr, bool async, bool force)
+ struct npu_network *network, void *cmd_ptr,
+ struct npu_network_cmd *cmd)
{
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
int ret = 0;
@@ -1631,29 +1936,22 @@ static int npu_send_network_cmd(struct npu_device *npu_dev,
(host_ctx->fw_state != FW_ENABLED)) {
NPU_ERR("fw is in error state or disabled\n");
ret = -EIO;
- } else if (network->cmd_pending && !force) {
- NPU_ERR("Another cmd is pending\n");
- ret = -EBUSY;
} else {
- network->cmd_async = async;
- network->cmd_ret_status = 0;
- network->cmd_pending = true;
- network->trans_id = atomic_read(&host_ctx->ipc_trans_id);
- reinit_completion(&network->cmd_done);
+ if (cmd)
+ reinit_completion(&cmd->cmd_done);
NPU_DBG("Send cmd %d network id %llx trans id %d\n",
((struct ipc_cmd_header_pkt *)cmd_ptr)->cmd_type,
- network->id, network->trans_id);
+ network->id,
+ ((struct ipc_cmd_header_pkt *)cmd_ptr)->trans_id);
ret = npu_host_ipc_send_cmd(npu_dev,
IPC_QUEUE_APPS_EXEC, cmd_ptr);
- if (ret)
- network->cmd_pending = false;
}
return ret;
}
static int npu_send_misc_cmd(struct npu_device *npu_dev, uint32_t q_idx,
- void *cmd_ptr)
+ void *cmd_ptr, struct npu_misc_cmd *cmd)
{
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
int ret = 0;
@@ -1663,17 +1961,10 @@ static int npu_send_misc_cmd(struct npu_device *npu_dev, uint32_t q_idx,
if (host_ctx->fw_error || (host_ctx->fw_state != FW_ENABLED)) {
NPU_ERR("fw is in error state or disabled\n");
ret = -EIO;
- } else if (host_ctx->misc_cmd_pending) {
- NPU_ERR("Another misc cmd is pending\n");
- ret = -EBUSY;
} else {
- NPU_DBG("Send cmd %d\n",
- ((struct ipc_cmd_header_pkt *)cmd_ptr)->cmd_type);
- host_ctx->misc_cmd_pending = true;
- reinit_completion(&host_ctx->misc_cmd_done);
+ NPU_DBG("Send cmd %d\n", cmd->cmd_type);
+ reinit_completion(&cmd->cmd_done);
ret = npu_host_ipc_send_cmd(npu_dev, q_idx, cmd_ptr);
- if (ret)
- host_ctx->misc_cmd_pending = false;
}
return ret;
@@ -1711,8 +2002,9 @@ static uint32_t find_networks_perf_mode(struct npu_host_ctx *host_ctx)
max_perf_mode = 1;
} else {
/* find the max level among all the networks */
- for (i = 0; i < host_ctx->network_num; i++) {
- if ((network->cur_perf_mode != 0) &&
+ for (i = 0; i < MAX_LOADED_NETWORK; i++) {
+ if ((network->id != 0) &&
+ (network->cur_perf_mode != 0) &&
(network->cur_perf_mode > max_perf_mode))
max_perf_mode = network->cur_perf_mode;
network++;
@@ -1760,6 +2052,7 @@ int32_t npu_host_set_fw_property(struct npu_device *npu_dev,
struct ipc_cmd_prop_pkt *prop_packet = NULL;
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
uint32_t num_of_params, pkt_size;
+ struct npu_misc_cmd *misc_cmd = NULL;
prop_id = property->prop_id;
num_of_params = min_t(uint32_t, property->num_of_params,
@@ -1802,18 +2095,29 @@ int32_t npu_host_set_fw_property(struct npu_device *npu_dev,
prop_packet->prop_param[i] = property->prop_param[i];
mutex_lock(&host_ctx->lock);
+ misc_cmd = npu_alloc_misc_cmd(host_ctx);
+ if (!misc_cmd) {
+ NPU_ERR("Can't allocate misc_cmd\n");
+ ret = -ENOMEM;
+ goto set_prop_exit;
+ }
+
+ misc_cmd->cmd_type = NPU_IPC_CMD_SET_PROPERTY;
+ misc_cmd->trans_id = prop_packet->header.trans_id;
+ npu_queue_misc_cmd(host_ctx, misc_cmd);
+
ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_APPS_EXEC,
- prop_packet);
+ prop_packet, misc_cmd);
NPU_DBG("NPU_IPC_CMD_SET_PROPERTY sent status: %d\n", ret);
if (ret) {
NPU_ERR("NPU_IPC_CMD_SET_PROPERTY failed\n");
- goto set_prop_exit;
+ goto free_misc_cmd;
}
mutex_unlock(&host_ctx->lock);
ret = wait_for_completion_interruptible_timeout(
- &host_ctx->misc_cmd_done,
+ &misc_cmd->cmd_done,
(host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
@@ -1821,16 +2125,19 @@ int32_t npu_host_set_fw_property(struct npu_device *npu_dev,
if (!ret) {
NPU_ERR("NPU_IPC_CMD_SET_PROPERTY time out\n");
ret = -ETIMEDOUT;
- goto set_prop_exit;
+ goto free_misc_cmd;
} else if (ret < 0) {
NPU_ERR("Wait for set_property done interrupted by signal\n");
- goto set_prop_exit;
+ goto free_misc_cmd;
}
- ret = host_ctx->misc_cmd_result;
+ ret = misc_cmd->ret_status;
if (ret)
NPU_ERR("set fw property failed %d\n", ret);
+free_misc_cmd:
+ npu_dequeue_misc_cmd(host_ctx, misc_cmd);
+ npu_free_misc_cmd(host_ctx, misc_cmd);
set_prop_exit:
mutex_unlock(&host_ctx->lock);
kfree(prop_packet);
@@ -1845,6 +2152,13 @@ int32_t npu_host_get_fw_property(struct npu_device *npu_dev,
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
struct msm_npu_property *prop_from_fw;
uint32_t num_of_params, pkt_size;
+ struct npu_misc_cmd *misc_cmd = NULL;
+
+ if (property->prop_id < MSM_NPU_FW_PROP_ID_START) {
+ NPU_ERR("Not supported fw property id %x\n",
+ property->prop_id);
+ return -EINVAL;
+ }
num_of_params = min_t(uint32_t, property->num_of_params,
(uint32_t)PROP_PARAM_MAX_SIZE);
@@ -1867,18 +2181,29 @@ int32_t npu_host_get_fw_property(struct npu_device *npu_dev,
prop_packet->prop_param[i] = property->prop_param[i];
mutex_lock(&host_ctx->lock);
+ misc_cmd = npu_alloc_misc_cmd(host_ctx);
+ if (!misc_cmd) {
+ NPU_ERR("Can't allocate misc_cmd\n");
+ ret = -ENOMEM;
+ goto get_prop_exit;
+ }
+
+ misc_cmd->cmd_type = NPU_IPC_CMD_GET_PROPERTY;
+ misc_cmd->trans_id = prop_packet->header.trans_id;
+ npu_queue_misc_cmd(host_ctx, misc_cmd);
+
ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_APPS_EXEC,
- prop_packet);
+ prop_packet, misc_cmd);
NPU_DBG("NPU_IPC_CMD_GET_PROPERTY sent status: %d\n", ret);
if (ret) {
NPU_ERR("NPU_IPC_CMD_GET_PROPERTY failed\n");
- goto get_prop_exit;
+ goto free_misc_cmd;
}
mutex_unlock(&host_ctx->lock);
ret = wait_for_completion_interruptible_timeout(
- &host_ctx->misc_cmd_done,
+ &misc_cmd->cmd_done,
(host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
@@ -1886,16 +2211,16 @@ int32_t npu_host_get_fw_property(struct npu_device *npu_dev,
if (!ret) {
pr_err_ratelimited("npu: NPU_IPC_CMD_GET_PROPERTY time out\n");
ret = -ETIMEDOUT;
- goto get_prop_exit;
+ goto free_misc_cmd;
} else if (ret < 0) {
NPU_ERR("Wait for get_property done interrupted by signal\n");
- goto get_prop_exit;
+ goto free_misc_cmd;
}
- ret = host_ctx->misc_cmd_result;
+ ret = misc_cmd->ret_status;
if (!ret) {
/* Return prop data retrieved from fw to user */
- prop_from_fw = (struct msm_npu_property *)(host_ctx->prop_buf);
+ prop_from_fw = &misc_cmd->u.prop;
if (property->prop_id == prop_from_fw->prop_id &&
property->network_hdl == prop_from_fw->network_hdl) {
property->num_of_params = num_of_params;
@@ -1907,6 +2232,9 @@ int32_t npu_host_get_fw_property(struct npu_device *npu_dev,
NPU_ERR("get fw property failed %d\n", ret);
}
+free_misc_cmd:
+ npu_dequeue_misc_cmd(host_ctx, misc_cmd);
+ npu_free_misc_cmd(host_ctx, misc_cmd);
get_prop_exit:
mutex_unlock(&host_ctx->lock);
kfree(prop_packet);
@@ -1917,13 +2245,13 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
struct msm_npu_load_network_ioctl_v2 *load_ioctl,
struct msm_npu_patch_info_v2 *patch_info)
{
- int ret = 0, i;
+ int ret = 0, retry_cnt = 1, i;
struct npu_device *npu_dev = client->npu_dev;
struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
struct npu_network *network;
struct ipc_cmd_load_pkt_v2 *load_packet = NULL;
- struct ipc_cmd_unload_pkt unload_packet;
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+ struct npu_network_cmd *load_cmd = NULL;
uint32_t num_patch_params, pkt_size;
ret = enable_fw(npu_dev);
@@ -1988,16 +2316,31 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
load_packet->buf_pkt.num_layers = network->num_layers;
load_packet->num_patch_params = num_patch_params;
- ret = npu_send_network_cmd(npu_dev, network, load_packet, false, false);
+ load_cmd = npu_alloc_network_cmd(host_ctx, 0);
+ if (!load_cmd) {
+ NPU_ERR("Can't allocate load_cmd\n");
+ ret = -ENOMEM;
+ goto error_free_network;
+ }
+
+ load_cmd->cmd_id = 0;
+ load_cmd->cmd_type = NPU_IPC_CMD_LOAD_V2;
+ load_cmd->trans_id = load_packet->header.trans_id;
+ load_cmd->async = false;
+ npu_queue_network_cmd(network, load_cmd);
+
+ /* NPU_IPC_CMD_LOAD_V2 will go onto IPC_QUEUE_APPS_EXEC */
+ ret = npu_send_network_cmd(npu_dev, network, load_packet, load_cmd);
if (ret) {
NPU_ERR("NPU_IPC_CMD_LOAD_V2 sent failed: %d\n", ret);
- goto error_free_network;
+ goto free_load_cmd;
}
mutex_unlock(&host_ctx->lock);
+retry:
ret = wait_for_completion_timeout(
- &network->cmd_done,
+ &load_cmd->cmd_done,
(host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
@@ -2006,47 +2349,59 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
if (network->fw_error) {
ret = -EIO;
NPU_ERR("fw is in error state during load_v2 network\n");
- goto error_free_network;
+ goto free_load_cmd;
}
if (!ret) {
NPU_ERR("npu: NPU_IPC_CMD_LOAD time out %lld:%d\n",
- network->id, network->trans_id);
- npu_dump_debug_info(npu_dev);
+ network->id, load_cmd->trans_id);
+ if (retry_cnt > 0) {
+ NPU_WARN("Retry IPC queue\n");
+ retry_cnt--;
+ mutex_unlock(&host_ctx->lock);
+ host_session_msg_hdlr(npu_dev);
+ goto retry;
+ }
+
ret = -ETIMEDOUT;
- goto error_load_network;
+ goto free_load_cmd;
}
- ret = network->cmd_ret_status;
- if (ret)
- goto error_free_network;
+ ret = load_cmd->ret_status;
+ if (ret) {
+ NPU_ERR("load network failed status %d\n", ret);
+ goto free_load_cmd;
+ }
load_ioctl->network_hdl = network->network_hdl;
network->is_active = true;
kfree(load_packet);
+ npu_dequeue_network_cmd(network, load_cmd);
+ npu_free_network_cmd(host_ctx, load_cmd);
network_put(network);
mutex_unlock(&host_ctx->lock);
return ret;
-error_load_network:
- NPU_DBG("Unload network %lld\n", network->id);
- /* send NPU_IPC_CMD_UNLOAD command to fw */
- unload_packet.header.cmd_type = NPU_IPC_CMD_UNLOAD;
- unload_packet.header.size = sizeof(struct ipc_cmd_unload_pkt);
- unload_packet.header.trans_id =
- atomic_add_return(1, &host_ctx->ipc_trans_id);
- unload_packet.header.flags = 0;
- unload_packet.network_hdl = (uint32_t)network->network_hdl;
- npu_send_network_cmd(npu_dev, network, &unload_packet, false, true);
- /* wait 200 ms to make sure fw has processed this command */
- msleep(200);
+free_load_cmd:
+ npu_dequeue_network_cmd(network, load_cmd);
+ npu_free_network_cmd(host_ctx, load_cmd);
error_free_network:
kfree(load_packet);
network_put(network);
free_network(host_ctx, client, network->id);
err_deinit_fw:
mutex_unlock(&host_ctx->lock);
+
+ /*
+ * treat load network timed out as error in order to
+ * force SSR
+ */
+ if (ret == -ETIMEDOUT) {
+ NPU_ERR("Error handling after load network failure\n");
+ host_error_hdlr(npu_dev, true);
+ }
+
disable_fw(npu_dev);
return ret;
}
@@ -2054,11 +2409,12 @@ err_deinit_fw:
int32_t npu_host_unload_network(struct npu_client *client,
struct msm_npu_unload_network_ioctl *unload)
{
- int ret = 0;
+ int ret = 0, retry_cnt = 1;
struct npu_device *npu_dev = client->npu_dev;
struct ipc_cmd_unload_pkt unload_packet;
struct npu_network *network;
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+ struct npu_network_cmd *unload_cmd = NULL;
/* get the corresponding network for ipc trans id purpose */
mutex_lock(&host_ctx->lock);
@@ -2090,8 +2446,22 @@ int32_t npu_host_unload_network(struct npu_client *client,
unload_packet.header.flags = 0;
unload_packet.network_hdl = (uint32_t)network->network_hdl;
- ret = npu_send_network_cmd(npu_dev, network, &unload_packet, false,
- false);
+ unload_cmd = npu_alloc_network_cmd(host_ctx, 0);
+ if (!unload_cmd) {
+ NPU_ERR("Can't allocate unload_cmd\n");
+ ret = -ENOMEM;
+ goto free_network;
+ }
+
+ unload_cmd->cmd_id = 0;
+ unload_cmd->cmd_type = NPU_IPC_CMD_UNLOAD;
+ unload_cmd->trans_id = unload_packet.header.trans_id;
+ unload_cmd->async = false;
+ npu_queue_network_cmd(network, unload_cmd);
+
+ /* NPU_IPC_CMD_UNLOAD will go onto IPC_QUEUE_APPS_EXEC */
+ ret = npu_send_network_cmd(npu_dev, network, &unload_packet,
+ unload_cmd);
if (ret) {
NPU_ERR("NPU_IPC_CMD_UNLOAD sent failed: %d\n", ret);
@@ -2101,17 +2471,20 @@ int32_t npu_host_unload_network(struct npu_client *client,
*/
if (ret == -EBUSY) {
NPU_ERR("Network is running, retry later\n");
+ npu_dequeue_network_cmd(network, unload_cmd);
+ npu_free_network_cmd(host_ctx, unload_cmd);
network_put(network);
mutex_unlock(&host_ctx->lock);
return ret;
}
- goto free_network;
+ goto free_unload_cmd;
}
mutex_unlock(&host_ctx->lock);
+retry:
ret = wait_for_completion_timeout(
- &network->cmd_done,
+ &unload_cmd->cmd_done,
(host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
@@ -2125,16 +2498,25 @@ int32_t npu_host_unload_network(struct npu_client *client,
if (!ret) {
NPU_ERR("npu: NPU_IPC_CMD_UNLOAD time out %llx:%d\n",
- network->id, network->trans_id);
- npu_dump_debug_info(npu_dev);
- network->cmd_pending = false;
+ network->id, unload_cmd->trans_id);
+ if (retry_cnt > 0) {
+ NPU_WARN("Retry IPC queue\n");
+ retry_cnt--;
+ mutex_unlock(&host_ctx->lock);
+ host_session_msg_hdlr(npu_dev);
+ goto retry;
+ }
+
ret = -ETIMEDOUT;
- goto free_network;
+ goto free_unload_cmd;
}
- ret = network->cmd_ret_status;
+ ret = unload_cmd->ret_status;
NPU_DBG("unload network status %d\n", ret);
+free_unload_cmd:
+ npu_dequeue_network_cmd(network, unload_cmd);
+ npu_free_network_cmd(host_ctx, unload_cmd);
free_network:
/*
* free the network on the kernel if the corresponding ACO
@@ -2149,6 +2531,15 @@ free_network:
mutex_unlock(&host_ctx->lock);
+ /*
+ * treat unload network timed out as error in order to
+ * force SSR
+ */
+ if (ret == -ETIMEDOUT) {
+		NPU_ERR("Error handling after unload network failure\n");
+ host_error_hdlr(npu_dev, true);
+ }
+
disable_fw(npu_dev);
return ret;
@@ -2160,12 +2551,13 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
{
struct npu_device *npu_dev = client->npu_dev;
struct ipc_cmd_execute_pkt_v2 *exec_packet;
+ struct npu_network_cmd *exec_cmd = NULL;
int32_t ret;
struct npu_network *network;
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
uint32_t num_patch_params, pkt_size;
bool async_ioctl = !!exec_ioctl->async;
- int i;
+ int i, retry_cnt = 1;
mutex_lock(&host_ctx->lock);
network = get_network_by_hdl(host_ctx, client,
@@ -2188,6 +2580,14 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
goto exec_v2_done;
}
+ if (network->is_async && !async_ioctl) {
+ NPU_ERR("network is in async mode\n");
+ ret = -EINVAL;
+ goto exec_v2_done;
+ }
+
+ network->is_async = async_ioctl;
+
NPU_DBG("execute_v2 network %lld\n", network->id);
num_patch_params = exec_ioctl->patch_buf_info_num;
pkt_size = num_patch_params * sizeof(struct npu_patch_params_v2) +
@@ -2226,18 +2626,28 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
exec_packet->network_hdl = network->network_hdl;
exec_packet->num_patch_params = num_patch_params;
- network->stats_buf_u = (void __user *)exec_ioctl->stats_buf_addr;
- network->stats_buf_size = exec_ioctl->stats_buf_size;
+ exec_cmd = npu_alloc_network_cmd(host_ctx, exec_ioctl->stats_buf_size);
+ if (!exec_cmd) {
+ NPU_ERR("Can't allocate exec_cmd\n");
+ ret = -ENOMEM;
+ goto free_exec_packet;
+ }
+
+ exec_cmd->stats_buf_u = (void __user *)exec_ioctl->stats_buf_addr;
+ exec_cmd->cmd_id = exec_ioctl->async;
+ exec_cmd->cmd_type = NPU_IPC_CMD_EXECUTE_V2;
+ exec_cmd->trans_id = exec_packet->header.trans_id;
+ exec_cmd->async = async_ioctl;
+ npu_queue_network_cmd(network, exec_cmd);
NPU_DBG("Execute_v2 flags %x stats_buf_size %d\n",
exec_packet->header.flags, exec_ioctl->stats_buf_size);
- ret = npu_send_network_cmd(npu_dev, network, exec_packet, async_ioctl,
- false);
+ ret = npu_send_network_cmd(npu_dev, network, exec_packet, exec_cmd);
if (ret) {
NPU_ERR("NPU_IPC_CMD_EXECUTE_V2 sent failed: %d\n", ret);
- goto free_exec_packet;
+ goto free_exec_cmd;
}
if (async_ioctl) {
@@ -2247,8 +2657,9 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
mutex_unlock(&host_ctx->lock);
+retry:
ret = wait_for_completion_timeout(
- &network->cmd_done,
+ &exec_cmd->cmd_done,
(host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
@@ -2256,32 +2667,42 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
if (network->fw_error) {
ret = -EIO;
NPU_ERR("fw is in error state during execute_v2 network\n");
- goto free_exec_packet;
+ goto free_exec_cmd;
}
if (!ret) {
NPU_ERR("npu: %llx:%d NPU_IPC_CMD_EXECUTE_V2 time out\n",
- network->id, network->trans_id);
- npu_dump_debug_info(npu_dev);
- network->cmd_pending = false;
+ network->id, exec_cmd->trans_id);
+ if (retry_cnt > 0) {
+ NPU_WARN("Retry IPC queue\n");
+ retry_cnt--;
+ mutex_unlock(&host_ctx->lock);
+ host_session_msg_hdlr(npu_dev);
+ goto retry;
+ }
+
ret = -ETIMEDOUT;
goto free_exec_packet;
}
- ret = network->cmd_ret_status;
- if (!ret) {
- exec_ioctl->stats_buf_size = network->stats_buf_size;
- if (copy_to_user(
- (void __user *)exec_ioctl->stats_buf_addr,
- network->stats_buf,
- exec_ioctl->stats_buf_size)) {
- NPU_ERR("copy stats to user failed\n");
- exec_ioctl->stats_buf_size = 0;
- }
- } else {
+ ret = exec_cmd->ret_status;
+ if (ret) {
NPU_ERR("execution failed %d\n", ret);
+ goto free_exec_cmd;
}
+ exec_ioctl->stats_buf_size = exec_cmd->stats_buf_size;
+ if (copy_to_user(
+ (void __user *)exec_ioctl->stats_buf_addr,
+ exec_cmd->stats_buf,
+ exec_ioctl->stats_buf_size)) {
+ NPU_ERR("copy stats to user failed\n");
+ exec_ioctl->stats_buf_size = 0;
+ }
+
+free_exec_cmd:
+ npu_dequeue_network_cmd(network, exec_cmd);
+ npu_free_network_cmd(host_ctx, exec_cmd);
free_exec_packet:
kfree(exec_packet);
exec_v2_done:
@@ -2304,6 +2725,7 @@ int32_t npu_host_loopback_test(struct npu_device *npu_dev)
{
struct ipc_cmd_loopback_pkt loopback_packet;
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+ struct npu_misc_cmd *misc_cmd = NULL;
int32_t ret;
ret = enable_fw(npu_dev);
@@ -2319,17 +2741,29 @@ int32_t npu_host_loopback_test(struct npu_device *npu_dev)
loopback_packet.header.flags = 0;
loopback_packet.loopbackParams = 15;
- ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_APPS_EXEC, &loopback_packet);
+ misc_cmd = npu_alloc_misc_cmd(host_ctx);
+ if (!misc_cmd) {
+ NPU_ERR("Can't allocate misc_cmd\n");
+ ret = -ENOMEM;
+ goto loopback_exit;
+ }
+
+ misc_cmd->cmd_type = NPU_IPC_CMD_LOOPBACK;
+ misc_cmd->trans_id = loopback_packet.header.trans_id;
+ npu_queue_misc_cmd(host_ctx, misc_cmd);
+
+ ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_APPS_EXEC, &loopback_packet,
+ misc_cmd);
if (ret) {
NPU_ERR("NPU_IPC_CMD_LOOPBACK sent failed: %d\n", ret);
- goto loopback_exit;
+ goto free_misc_cmd;
}
mutex_unlock(&host_ctx->lock);
ret = wait_for_completion_interruptible_timeout(
- &host_ctx->misc_cmd_done,
+ &misc_cmd->cmd_done,
(host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
@@ -2342,9 +2776,12 @@ int32_t npu_host_loopback_test(struct npu_device *npu_dev)
} else if (ret < 0) {
NPU_ERR("Wait for loopback done interrupted by signal\n");
} else {
- ret = host_ctx->misc_cmd_result;
+ ret = misc_cmd->ret_status;
}
+free_misc_cmd:
+ npu_dequeue_misc_cmd(host_ctx, misc_cmd);
+ npu_free_misc_cmd(host_ctx, misc_cmd);
loopback_exit:
mutex_unlock(&host_ctx->lock);
disable_fw(npu_dev);
@@ -2464,3 +2901,10 @@ int32_t npu_host_get_perf_mode(struct npu_client *client, uint32_t network_hdl)
return param_val;
}
+
+void npu_host_suspend(struct npu_device *npu_dev)
+{
+ struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+
+ flush_delayed_work(&host_ctx->disable_fw_work);
+}
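/*
 * Editorial sketch (not part of the patch above): the per-command lifecycle
 * this change introduces in npu_mgr.c. Helper names and the cmd_done /
 * ret_status fields are taken from the hunks above; the exact helper
 * signatures, locking and error codes are assumptions for illustration only.
 */
static int example_npu_cmd_lifecycle(struct npu_device *npu_dev,
				     struct npu_host_ctx *host_ctx,
				     struct npu_network *network,
				     void *ipc_packet, uint32_t trans_id)
{
	struct npu_network_cmd *cmd;
	int ret;

	/* one tracking object per in-flight command, instead of per-network state */
	cmd = npu_alloc_network_cmd(host_ctx, 0);
	if (!cmd)
		return -ENOMEM;

	cmd->cmd_type = NPU_IPC_CMD_UNLOAD;	/* example command type */
	cmd->trans_id = trans_id;
	npu_queue_network_cmd(network, cmd);

	ret = npu_send_network_cmd(npu_dev, network, ipc_packet, cmd);
	if (ret)
		goto out;

	/* each command waits on its own completion, not network->cmd_done */
	if (!wait_for_completion_timeout(&cmd->cmd_done, NW_CMD_TIMEOUT))
		ret = -ETIMEDOUT;
	else
		ret = cmd->ret_status;
out:
	npu_dequeue_network_cmd(network, cmd);
	npu_free_network_cmd(host_ctx, cmd);
	return ret;
}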
diff --git a/drivers/media/platform/msm/npu_v2/npu_mgr.h b/drivers/media/platform/msm/npu_v2/npu_mgr.h
index 8f9322a0054a..acb1e6f1c936 100644
--- a/drivers/media/platform/msm/npu_v2/npu_mgr.h
+++ b/drivers/media/platform/msm/npu_v2/npu_mgr.h
@@ -44,6 +44,33 @@
/*
* Data Structures
*/
+
+struct npu_network_cmd {
+ struct list_head list;
+ uint32_t cmd_type;
+ uint32_t cmd_id;
+ uint32_t trans_id;
+ bool async;
+ struct completion cmd_done;
+ /* stats buf info */
+ uint32_t stats_buf_size;
+ void __user *stats_buf_u;
+ void *stats_buf;
+ int ret_status;
+};
+
+struct npu_misc_cmd {
+ struct list_head list;
+ uint32_t cmd_type;
+ uint32_t trans_id;
+ union {
+ struct msm_npu_property prop;
+ uint32_t data[32];
+ } u;
+ struct completion cmd_done;
+ int ret_status;
+};
+
struct npu_network {
uint64_t id;
int buf_hdl;
@@ -55,19 +82,13 @@ struct npu_network {
uint32_t cur_perf_mode;
uint32_t init_perf_mode;
uint32_t num_layers;
- void *stats_buf;
- void __user *stats_buf_u;
- uint32_t stats_buf_size;
- uint32_t trans_id;
atomic_t ref_cnt;
bool is_valid;
bool is_active;
bool fw_error;
- bool cmd_pending;
- bool cmd_async;
- int cmd_ret_status;
- struct completion cmd_done;
+ bool is_async;
struct npu_client *client;
+ struct list_head cmd_list;
};
enum fw_state {
@@ -92,14 +113,15 @@ struct npu_host_ctx {
struct delayed_work disable_fw_work;
struct workqueue_struct *wq;
struct workqueue_struct *wq_pri;
- struct completion misc_cmd_done;
struct completion fw_deinit_done;
struct completion fw_bringup_done;
struct completion fw_shutdown_done;
struct completion npu_power_up_done;
- void *prop_buf;
int32_t network_num;
struct npu_network networks[MAX_LOADED_NETWORK];
+ struct kmem_cache *network_cmd_cache;
+ struct kmem_cache *misc_cmd_cache;
+ struct kmem_cache *stats_buf_cache;
bool sys_cache_disable;
bool auto_pil_disable;
uint32_t fw_dbg_mode;
@@ -111,13 +133,12 @@ struct npu_host_ctx {
uint32_t wdg_irq_sts;
bool fw_error;
bool cancel_work;
- bool misc_cmd_pending;
- uint32_t misc_cmd_result;
struct notifier_block nb;
void *notif_hdle;
spinlock_t bridge_mbox_lock;
bool bridge_mbox_pwr_on;
void *ipc_msg_buf;
+ struct list_head misc_cmd_list;
};
struct npu_device;
@@ -136,6 +157,7 @@ int npu_host_ipc_send_cmd(struct npu_device *npu_dev, uint32_t queueIndex,
void *pCmd);
int npu_host_ipc_read_msg(struct npu_device *npu_dev, uint32_t queueIndex,
uint32_t *pMsg);
+int npu_host_get_ipc_queue_size(struct npu_device *npu_dev, uint32_t q_idx);
int32_t npu_host_get_info(struct npu_device *npu_dev,
struct msm_npu_get_info_ioctl *get_info_ioctl);
@@ -163,6 +185,7 @@ int npu_host_update_power(struct npu_device *npu_dev);
int32_t npu_host_set_perf_mode(struct npu_client *client, uint32_t network_hdl,
uint32_t perf_mode);
int32_t npu_host_get_perf_mode(struct npu_client *client, uint32_t network_hdl);
+void npu_host_suspend(struct npu_device *npu_dev);
void npu_dump_debug_info(struct npu_device *npu_dev);
void npu_dump_ipc_packet(struct npu_device *npu_dev, void *cmd_ptr);
diff --git a/drivers/media/platform/msm/vidc/msm_v4l2_private.c b/drivers/media/platform/msm/vidc/msm_v4l2_private.c
index 89d6771d78f5..c9400a08a4e3 100644
--- a/drivers/media/platform/msm/vidc/msm_v4l2_private.c
+++ b/drivers/media/platform/msm/vidc/msm_v4l2_private.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,7 +17,7 @@ static int convert_from_user(struct msm_vidc_arg *kp, unsigned long arg)
{
int rc = 0;
int i;
- struct msm_vidc_arg __user *up = compat_ptr(arg);
+ struct msm_vidc_arg __user *up = (struct msm_vidc_arg __user *)arg;
if (!kp || !up) {
dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
@@ -109,7 +109,7 @@ static int convert_to_user(struct msm_vidc_arg *kp, unsigned long arg)
{
int rc = 0;
int i;
- struct msm_vidc_arg __user *up = compat_ptr(arg);
+ struct msm_vidc_arg __user *up = (struct msm_vidc_arg __user *)arg;
if (!kp || !up) {
dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index ff824d7fad03..96e0442aa26e 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -6048,6 +6048,7 @@ exit:
void msm_comm_print_inst_info(struct msm_vidc_inst *inst)
{
struct msm_vidc_buffer *mbuf;
+ struct msm_vidc_cvp_buffer *cbuf;
struct internal_buf *buf;
bool is_decode = false;
enum vidc_ports port;
@@ -6103,6 +6104,14 @@ void msm_comm_print_inst_info(struct msm_vidc_inst *inst)
buf->buffer_type, buf->smem.device_addr,
buf->smem.size);
mutex_unlock(&inst->outputbufs.lock);
+
+ mutex_lock(&inst->cvpbufs.lock);
+ dprintk(VIDC_ERR, "cvp buffer list:\n");
+ list_for_each_entry(cbuf, &inst->cvpbufs.list, list)
+ dprintk(VIDC_ERR, "index: %u fd: %u offset: %u addr: %x\n",
+ cbuf->buf.index, cbuf->buf.fd,
+ cbuf->buf.offset, cbuf->smem.device_addr);
+ mutex_unlock(&inst->cvpbufs.lock);
}
int msm_comm_session_continue(void *instance)
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index 576f2d8236db..25afda57c514 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -2278,34 +2278,6 @@ static int venus_hfi_core_release(void *dev)
return rc;
}
-static int __get_q_size(struct venus_hfi_device *dev, unsigned int q_index)
-{
- struct hfi_queue_header *queue;
- struct vidc_iface_q_info *q_info;
- u32 write_ptr, read_ptr;
-
- if (q_index >= VIDC_IFACEQ_NUMQ) {
- dprintk(VIDC_ERR, "Invalid q index: %d\n", q_index);
- return -ENOENT;
- }
-
- q_info = &dev->iface_queues[q_index];
- if (!q_info) {
- dprintk(VIDC_ERR, "cannot read shared Q's\n");
- return -ENOENT;
- }
-
- queue = (struct hfi_queue_header *)q_info->q_hdr;
- if (!queue) {
- dprintk(VIDC_ERR, "queue not present\n");
- return -ENOENT;
- }
-
- write_ptr = (u32)queue->qhdr_write_idx;
- read_ptr = (u32)queue->qhdr_read_idx;
- return read_ptr - write_ptr;
-}
-
static void __core_clear_interrupt(struct venus_hfi_device *device)
{
u32 intr_status = 0, mask = 0;
@@ -3692,8 +3664,7 @@ static int __response_handler(struct venus_hfi_device *device)
*session_id = session->session_id;
}
- if (packet_count >= max_packets &&
- __get_q_size(device, VIDC_IFACEQ_MSGQ_IDX)) {
+ if (packet_count >= max_packets) {
dprintk(VIDC_WARN,
"Too many packets in message queue to handle at once, deferring read\n");
break;
diff --git a/drivers/mtd/devices/msm_qpic_nand.c b/drivers/mtd/devices/msm_qpic_nand.c
index 1de9ab22dca2..8444e5c094fe 100644
--- a/drivers/mtd/devices/msm_qpic_nand.c
+++ b/drivers/mtd/devices/msm_qpic_nand.c
@@ -1549,6 +1549,852 @@ out:
}
/*
+ *
+ * Function to prepare the series of SPS command descriptors required for a
+ * page read operation with the enhanced pagescope read feature.
+ */
+static void msm_nand_prep_read_cmd_desc_pagescope(struct mtd_oob_ops *ops,
+ struct msm_nand_rw_params *args,
+ struct msm_nand_rw_reg_data *data,
+ struct msm_nand_info *info,
+ struct msm_nand_rw_cmd_desc *cmd_list,
+ uint32_t ecc_parity_bytes)
+{
+ struct msm_nand_chip *chip = &info->nand_chip;
+ uint32_t rdata;
+ /* read_location register parameters */
+ uint32_t offset, size, last_read;
+ struct sps_command_element *curr_ce, *start_ce;
+ uint32_t *flags_ptr, *num_ce_ptr;
+ uint32_t auto_status_value = 0x0;
+
+ curr_ce = start_ce = &cmd_list->setup_desc.ce[0];
+ num_ce_ptr = &cmd_list->setup_desc.num_ce;
+ flags_ptr = &cmd_list->setup_desc.flags;
+ *flags_ptr = CMD_LCK;
+ cmd_list->count = 1;
+
+ auto_status_value = (NAND_FLASH_STATUS_EN |
+ NANDC_BUFFER_STATUS_EN |
+ NAND_ERASED_CW_DETECT_STATUS_EN |
+ NAND_FLASH_STATUS_LAST_CW_EN |
+ NANDC_BUFFER_STATUS_LAST_CW_EN |
+ NAND_ERASED_CW_DETECT_STATUS_LAST_CW_EN);
+
+ msm_nand_prep_ce(curr_ce, MSM_NAND_DEV0_CFG0(info), WRITE,
+ data->cfg0);
+ curr_ce++;
+
+ msm_nand_prep_ce(curr_ce, MSM_NAND_DEV0_CFG1(info), WRITE,
+ data->cfg1);
+ curr_ce++;
+
+ msm_nand_prep_ce(curr_ce, MSM_NAND_DEV0_ECC_CFG(info), WRITE,
+ data->ecc_bch_cfg);
+ curr_ce++;
+
+ msm_nand_prep_ce(curr_ce, MSM_NAND_AUTO_STATUS_EN(info), WRITE,
+ auto_status_value);
+ curr_ce++;
+
+ msm_nand_prep_ce(curr_ce, MSM_NAND_ADDR0(info), WRITE,
+ data->addr0);
+ curr_ce++;
+
+ msm_nand_prep_ce(curr_ce, MSM_NAND_ADDR1(info), WRITE,
+ data->addr1);
+ curr_ce++;
+ *num_ce_ptr = curr_ce - start_ce;
+
+ /* Prepare next set of command descriptors */
+
+ curr_ce = start_ce = &cmd_list->cw_desc[0].ce[0];
+ num_ce_ptr = &cmd_list->cw_desc[0].num_ce;
+ flags_ptr = &cmd_list->cw_desc[0].flags;
+ *flags_ptr = CMD;
+ cmd_list->count++;
+
+ if (ops->mode == MTD_OPS_RAW) {
+ if (ecc_parity_bytes) {
+ rdata = (BYTES_517 << 0) | (ecc_parity_bytes << 16)
+ | (1 << 31);
+ msm_nand_prep_ce(curr_ce,
+ MSM_NAND_READ_LOCATION_0(info),
+ WRITE, rdata);
+ curr_ce++;
+
+ msm_nand_prep_ce(curr_ce,
+ MSM_NAND_READ_LOCATION_LAST_CW_0(info),
+ WRITE, rdata);
+ curr_ce++;
+
+ } else {
+ rdata = (0 << 0) | (chip->cw_size << 16) | (1 << 31);
+ msm_nand_prep_ce(curr_ce,
+ MSM_NAND_READ_LOCATION_0(info),
+ WRITE, rdata);
+ curr_ce++;
+ msm_nand_prep_ce(curr_ce,
+ MSM_NAND_READ_LOCATION_LAST_CW_0(info),
+ WRITE, rdata);
+ curr_ce++;
+
+ }
+ }
+ if (ops->mode == MTD_OPS_AUTO_OOB) {
+ if (ops->datbuf) {
+ offset = 0;
+ size = BYTES_516;
+ last_read = 1;
+ rdata = (offset << 0) | (size << 16) |
+ (last_read << 31);
+ msm_nand_prep_ce(curr_ce,
+ MSM_NAND_READ_LOCATION_0(info),
+ WRITE, rdata);
+ curr_ce++;
+ size = (BYTES_512 - ((args->cwperpage - 1) << 2));
+ last_read = (ops->oobbuf ? 0 : 1);
+ rdata = (offset << 0) | (size << 16) |
+ (last_read << 31);
+ msm_nand_prep_ce(curr_ce,
+ MSM_NAND_READ_LOCATION_LAST_CW_0(info),
+ WRITE, rdata);
+ curr_ce++;
+ }
+
+ if (ops->oobbuf) {
+ last_read = 1;
+ offset = BYTES_512 - ((args->cwperpage - 1) << 2);
+ size = (args->cwperpage) << 2;
+ if (size > args->oob_len_cmd)
+ size = args->oob_len_cmd;
+ args->oob_len_cmd -= size;
+ rdata = (offset << 0) | (size << 16) |
+ (last_read << 31);
+
+ if (!ops->datbuf) {
+ msm_nand_prep_ce(curr_ce,
+ MSM_NAND_READ_LOCATION_0(info),
+ WRITE, rdata);
+ curr_ce++;
+ msm_nand_prep_ce(curr_ce,
+ MSM_NAND_READ_LOCATION_LAST_CW_0(info),
+ WRITE, rdata);
+ curr_ce++;
+ } else {
+ msm_nand_prep_ce(curr_ce,
+ MSM_NAND_READ_LOCATION_LAST_CW_1(info),
+ WRITE, rdata);
+ curr_ce++;
+ }
+ }
+ }
+
+ msm_nand_prep_ce(curr_ce, MSM_NAND_FLASH_CMD(info), WRITE,
+ data->cmd);
+ curr_ce++;
+
+ *flags_ptr |= NWD;
+ msm_nand_prep_ce(curr_ce, MSM_NAND_EXEC_CMD(info), WRITE, data->exec);
+ curr_ce++;
+ *num_ce_ptr = curr_ce - start_ce;
+}
+
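/*
 * Editorial sketch (not part of the patch): how the READ_LOCATION_n values
 * written above are packed. The layout (offset in bits 0-15, size in bits
 * 16-30, LAST_READ in bit 31) is inferred from the shifts used in
 * msm_nand_prep_read_cmd_desc_pagescope() and is shown for illustration only.
 */
static inline uint32_t example_read_location_val(uint32_t offset, uint32_t size,
						 bool last_read)
{
	return (offset & 0xFFFF) | ((size & 0x7FFF) << 16) |
	       ((last_read ? 1U : 0U) << 31);
}
/*
 * e.g. the MTD_OPS_AUTO_OOB data descriptor above is equivalent to
 * example_read_location_val(0, BYTES_516, true).
 */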
+/*
+ * Function to submit read status descriptors to the Data Producer
+ * Status Pipe for the enhanced pagescope read feature.
+ */
+static int msm_nand_submit_read_status_desc(struct mtd_oob_ops *ops,
+ struct msm_nand_rw_params *args,
+ struct msm_nand_info *info,
+ uint32_t curr_cw,
+ struct msm_nand_read_status_desc *status_desc)
+{
+
+ struct msm_nand_chip *chip = &info->nand_chip;
+ struct sps_pipe *data_pipe_handle = NULL;
+ uint32_t sps_flags = 0;
+ int err = 0;
+
+ /*
+ * As per QPIC2.0 HPG, Data Producer Status Pipe is used
+ * only to submit status descriptors for read page operations.
+ */
+ if (args->read)
+ data_pipe_handle = info->sps.data_prod_stat.handle;
+
+ if (ops->mode == MTD_OPS_RAW) {
+ if (args->read) {
+ if (curr_cw == (args->cwperpage - 1))
+ sps_flags |= SPS_IOVEC_FLAG_INT;
+
+ err = sps_transfer_one(data_pipe_handle,
+ msm_virt_to_dma(chip, status_desc),
+ sizeof(*status_desc), NULL, sps_flags);
+ if (err)
+ goto out;
+ }
+ } else if (ops->mode == MTD_OPS_AUTO_OOB) {
+ if (ops->datbuf) {
+ if ((curr_cw == (args->cwperpage - 1)) && !ops->oobbuf)
+ sps_flags |= SPS_IOVEC_FLAG_INT;
+
+ err = sps_transfer_one(data_pipe_handle,
+ msm_virt_to_dma(chip, status_desc),
+ sizeof(*status_desc), NULL, sps_flags);
+ if (err)
+ goto out;
+ }
+ if (ops->oobbuf && (curr_cw == (args->cwperpage - 1))) {
+ status_desc++;
+ sps_flags |= SPS_IOVEC_FLAG_INT;
+ err = sps_transfer_one(data_pipe_handle,
+ msm_virt_to_dma(chip, status_desc),
+ sizeof(*status_desc), NULL, sps_flags);
+ if (err)
+ goto out;
+ }
+ }
+out:
+ if (err)
+ pr_err("Failed to submit status descriptor for codeword=%d\n",
+ curr_cw);
+ return err;
+}
+
+/*
+ * Refer msm_nand_is_erased_page() for comments.
+ * It only differs from it in using pagescope read commands.
+ */
+static int msm_nand_is_erased_page_ps(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops,
+ struct msm_nand_rw_params *rw_params,
+ bool *erased_page)
+{
+ struct msm_nand_info *info = mtd->priv;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ uint32_t cwperpage = (mtd->writesize >> 9);
+ int err, submitted_num_desc = 0;
+ uint32_t n = 0, num_zero_bits = 0, total_ecc_byte_cnt;
+ struct msm_nand_rw_reg_data data;
+ struct sps_iovec *iovec;
+ struct msm_nand_sps_cmd *sps_cmd;
+ struct sps_iovec iovec_temp;
+ struct mtd_oob_ops raw_ops;
+
+ /*
+ * The following commands will be sent only once, for every single
+ * page read operation using pagescope feature - addr0, addr1,
+ * dev0_cfg0, dev0_cfg1, dev0_ecc_cfg, auto_status, flash,
+ * read_location_0, read_location_1, read_location_last_cw_0,
+ * read_location_last_cw_1, exec.
+ */
+ struct msm_nand_rw_cmd_desc *cmd_list = NULL;
+ struct msm_nand_read_status_desc *status_desc = NULL;
+ uint32_t flash_cmd = 0x0;
+ struct {
+ struct msm_nand_sps_cmd cmd;
+ struct sps_transfer xfer;
+ struct sps_iovec cmd_iovec[MAX_DESC];
+ struct {
+ uint32_t count;
+ struct msm_nand_cmd_setup_desc setup_desc;
+ struct msm_nand_cmd_cw_desc cw_desc[MAX_DESC];
+ } cmd_list;
+ struct {
+ uint32_t flash_status;
+ uint32_t buffer_status;
+ uint32_t erased_cw_status;
+ /* This extra +1 is for oobbuf case */
+ } result[MAX_CW_PER_PAGE + 1];
+ } *dma_buffer;
+ uint8_t *ecc;
+
+ total_ecc_byte_cnt = (chip->ecc_parity_bytes * cwperpage);
+ memcpy(&raw_ops, ops, sizeof(struct mtd_oob_ops));
+ raw_ops.mode = MTD_OPS_RAW;
+ ecc = kzalloc(total_ecc_byte_cnt, GFP_KERNEL);
+
+ wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
+ chip, sizeof(*dma_buffer))));
+
+ sps_cmd = &dma_buffer->cmd;
+ memset(&data, 0, sizeof(struct msm_nand_rw_reg_data));
+ msm_nand_update_rw_reg_data(chip, &raw_ops, rw_params, &data);
+
+ if (rw_params->read) {
+ if (raw_ops.mode != MTD_OPS_RAW)
+ data.cmd = MSM_NAND_CMD_PAGE_READ_ECC_PS;
+ else
+ data.cmd = MSM_NAND_CMD_PAGE_READ_ALL_PS;
+ }
+
+ cmd_list = (struct msm_nand_rw_cmd_desc *)&dma_buffer->cmd_list;
+ status_desc =
+ (struct msm_nand_read_status_desc *)&dma_buffer->result[0];
+
+ /* map the ecc for dma operations */
+ rw_params->ecc_dma_addr_curr = rw_params->ecc_dma_addr =
+ dma_map_single(chip->dev, ecc, total_ecc_byte_cnt,
+ DMA_FROM_DEVICE);
+
+ data.addr0 = (rw_params->page << 16) | rw_params->oob_col;
+ data.addr1 = (rw_params->page >> 16) & 0xff;
+ for (n = rw_params->start_sector; n < cwperpage; n++) {
+ dma_buffer->result[n].flash_status = 0xeeeeeeee;
+ dma_buffer->result[n].buffer_status = 0xeeeeeeee;
+ dma_buffer->result[n].erased_cw_status = 0xeeeeee00;
+ }
+ msm_nand_prep_read_cmd_desc_pagescope(&raw_ops, rw_params, &data,
+ info, cmd_list, chip->ecc_parity_bytes);
+
+ dma_buffer->xfer.iovec_count = cmd_list->count;
+ dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+ dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+ &dma_buffer->cmd_iovec);
+ iovec = dma_buffer->xfer.iovec;
+
+ iovec->addr = msm_virt_to_dma(chip,
+ &cmd_list->setup_desc.ce[0]);
+ iovec->size = sizeof(struct sps_command_element) *
+ cmd_list->setup_desc.num_ce;
+ iovec->flags = cmd_list->setup_desc.flags;
+ iovec++;
+ for (n = 0; n < (cmd_list->count - 1); n++) {
+ iovec->addr = msm_virt_to_dma(chip,
+ &cmd_list->cw_desc[n].ce[0]);
+ iovec->size = sizeof(struct sps_command_element) *
+ cmd_list->cw_desc[n].num_ce;
+ iovec->flags = cmd_list->cw_desc[n].flags;
+ iovec++;
+ }
+ mutex_lock(&info->lock);
+ err = msm_nand_get_device(chip->dev);
+ if (err)
+ goto unlock_mutex;
+ /* Submit data descriptors */
+ for (n = rw_params->start_sector; n < cwperpage; n++) {
+ err = msm_nand_submit_rw_data_desc(&raw_ops,
+ rw_params, info, n,
+ chip->ecc_parity_bytes);
+ if (err) {
+ pr_err("Failed to submit data descs %d\n", err);
+ panic("error in nand driver\n");
+ goto put_dev;
+ }
+ }
+ submitted_num_desc = cwperpage - rw_params->start_sector;
+ /* Submit Data Status Descriptors */
+ for (n = rw_params->start_sector; n < cwperpage; n++) {
+ err = msm_nand_submit_read_status_desc(&raw_ops,
+ rw_params, info,
+ n, status_desc);
+ if (err) {
+ pr_err("Failed to submit data status descs %d\n", err);
+ panic("error in nand driver\n");
+ goto put_dev;
+ }
+ status_desc++;
+ }
+
+ /* Submit command descriptors */
+ err = sps_transfer(info->sps.cmd_pipe.handle,
+ &dma_buffer->xfer);
+ if (err) {
+ pr_err("Failed to submit commands %d\n", err);
+ goto put_dev;
+ }
+ /* Poll for command descriptors completion */
+ err = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
+ info->sps.cmd_pipe.index,
+ dma_buffer->xfer.iovec_count,
+ &iovec_temp);
+ if (err) {
+ pr_err("Failed to get iovec for pipe %d: (err:%d)\n",
+ (info->sps.cmd_pipe.index), err);
+ goto put_dev;
+ }
+ /* Poll for data descriptors completion */
+ err = msm_nand_sps_get_iovec(info->sps.data_prod.handle,
+ info->sps.data_prod.index, submitted_num_desc,
+ &iovec_temp);
+ if (err) {
+ pr_err("Failed to get iovec for pipe %d: (err:%d)\n",
+ (info->sps.data_prod.index), err);
+ goto put_dev;
+ }
+ /*
+ * Poll for data status descriptor completion;
+ * the number of descriptors is the same as for data descriptors.
+ */
+ err = msm_nand_sps_get_iovec(info->sps.data_prod_stat.handle,
+ info->sps.data_prod_stat.index, submitted_num_desc,
+ &iovec_temp);
+ if (err) {
+ pr_err("Failed to get iovec for pipe %d: (err:%d)\n",
+ (info->sps.data_prod_stat.index), err);
+ goto put_dev;
+ }
+ /*
+ * Due to a H/W bug in QPIC 2.0, the command pipe must be
+ * unlocked only after all the status descriptors have been
+ * collected on the status descriptor pipe (pipe #3).
+ */
+
+ /* Unlock the command pipe now */
+ msm_nand_prep_single_desc(sps_cmd, MSM_NAND_AUTO_STATUS_EN(info),
+ WRITE, flash_cmd, INT_UNLCK);
+ err = sps_transfer_one(info->sps.cmd_pipe.handle,
+ msm_virt_to_dma(chip, &sps_cmd->ce),
+ sizeof(struct sps_command_element), NULL,
+ sps_cmd->flags);
+ if (err) {
+ pr_err("Failed to unlock cmd desc. pipe: %d\n", err);
+ goto put_dev;
+ }
+ err = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
+ info->sps.cmd_pipe.index, 1, &iovec_temp);
+ if (err) {
+ pr_err("Failed to get iovec for cmd desc. err:%d\n", err);
+ goto put_dev;
+ }
+ err = msm_nand_put_device(chip->dev);
+ mutex_unlock(&info->lock);
+ if (err)
+ goto free_dma;
+
+ pr_debug("addr0: 0x%08x, addr1: 0x%08x\n", data.addr0, data.addr1);
+ for (n = rw_params->start_sector; n < cwperpage; n++)
+ pr_debug("cw %d: flash_sts %x buffr_sts %x, erased_cw_status: %x\n",
+ n, dma_buffer->result[n].flash_status,
+ dma_buffer->result[n].buffer_status,
+ dma_buffer->result[n].erased_cw_status);
+
+ goto free_dma;
+put_dev:
+ msm_nand_put_device(chip->dev);
+unlock_mutex:
+ mutex_unlock(&info->lock);
+free_dma:
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+ /* unmap ecc dma memory */
+ dma_unmap_single(chip->dev, rw_params->ecc_dma_addr,
+ total_ecc_byte_cnt, DMA_FROM_DEVICE);
+ /* check for bit flips in ecc data */
+ for (n = rw_params->start_sector; n < cwperpage; n++) {
+ uint8_t *ecc_temp = ecc;
+ int last_pos = 0, next_pos = 0;
+ int ecc_bytes_percw_in_bits = (chip->ecc_parity_bytes * 8);
+
+ do {
+ last_pos = find_next_zero_bit((void *)ecc_temp,
+ ecc_bytes_percw_in_bits, next_pos);
+
+ if (last_pos < ecc_bytes_percw_in_bits)
+ num_zero_bits++;
+
+ if (num_zero_bits > 4) {
+ *erased_page = false;
+ goto free_mem;
+ }
+
+ next_pos = last_pos + 1;
+ } while (last_pos < ecc_bytes_percw_in_bits);
+
+ num_zero_bits = last_pos = next_pos = 0;
+ ecc_temp += chip->ecc_parity_bytes;
+ }
+
+ if ((n == cwperpage) && (num_zero_bits <= 4))
+ *erased_page = true;
+free_mem:
+ kfree(ecc);
+ return err;
+}
+
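/*
 * Editorial sketch (not part of the patch): the erased-page heuristic that
 * msm_nand_is_erased_page_ps() applies to the ECC parity bytes read back
 * above. An erased NAND page reads back as (nearly) all 1s, so a codeword
 * whose parity bytes contain more than four 0 bits is treated as programmed
 * data rather than as an erased page with bit flips. The driver walks the
 * DMA'd buffer with find_next_zero_bit(); this standalone equivalent is for
 * illustration only.
 */
static bool example_cw_looks_erased(const uint8_t *ecc, unsigned int ecc_bytes)
{
	unsigned int i, zero_bits = 0;

	for (i = 0; i < ecc_bytes; i++)
		zero_bits += 8 - hweight8(ecc[i]);	/* count 0 bits per codeword */

	return zero_bits <= 4;
}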
+/*
+ * Function that gets called from upper layers such as MTD/YAFFS2 to read a
+ * page with main and/or spare data.
+ * This variant is used for the enhanced pagescope read feature.
+ */
+static int msm_nand_read_pagescope(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct msm_nand_info *info = mtd->priv;
+ struct msm_nand_chip *chip = &info->nand_chip;
+ struct flash_identification *flash_dev = &info->flash_dev;
+ uint32_t cwperpage = (mtd->writesize >> 9);
+ int err = 0, pageerr = 0, rawerr = 0, submitted_num_desc = 0;
+ uint32_t n = 0, pages_read = 0, flash_cmd = 0x0;
+ uint32_t ecc_errors = 0, total_ecc_errors = 0, ecc_capability;
+ struct msm_nand_rw_params rw_params;
+ struct msm_nand_rw_reg_data data;
+ struct sps_iovec *iovec;
+ struct msm_nand_sps_cmd *sps_cmd;
+ struct sps_iovec iovec_temp;
+ bool erased_page;
+ uint64_t fix_data_in_pages = 0;
+
+ /*
+ * The following commands will be sent only once, for every single
+ * page read operation using pagescope feature - addr0, addr1,
+ * dev0_cfg0, dev0_cfg1, dev0_ecc_cfg, auto_status, flash,
+ * read_location_0, read_location_1, read_location_last_cw_0,
+ * read_location_last_cw_1, exec.
+ */
+ struct {
+ struct msm_nand_sps_cmd cmd;
+ struct sps_transfer xfer;
+ struct sps_iovec cmd_iovec[MAX_DESC];
+ struct {
+ uint32_t count;
+ struct msm_nand_cmd_setup_desc setup_desc;
+ struct msm_nand_cmd_cw_desc cw_desc[MAX_DESC];
+ } cmd_list;
+ struct {
+ uint32_t flash_status;
+ uint32_t buffer_status;
+ uint32_t erased_cw_status;
+ /* This extra +1 is for oobbuf case */
+ } result[MAX_CW_PER_PAGE + 1];
+ } *dma_buffer;
+ struct msm_nand_rw_cmd_desc *cmd_list = NULL;
+ struct msm_nand_read_status_desc *status_desc = NULL;
+
+ memset(&rw_params, 0, sizeof(struct msm_nand_rw_params));
+ err = msm_nand_validate_mtd_params(mtd, true, from, ops, &rw_params);
+ if (err)
+ goto validate_mtd_params_failed;
+
+ wait_event(chip->dma_wait_queue, (dma_buffer = msm_nand_get_dma_buffer(
+ chip, sizeof(*dma_buffer))));
+
+ rw_params.oob_col = rw_params.start_sector * chip->cw_size;
+ if (chip->cfg1 & (1 << WIDE_FLASH))
+ rw_params.oob_col >>= 1;
+ sps_cmd = &dma_buffer->cmd;
+
+ memset(&data, 0, sizeof(struct msm_nand_rw_reg_data));
+ msm_nand_update_rw_reg_data(chip, ops, &rw_params, &data);
+
+ if (rw_params.read) {
+ if (ops->mode != MTD_OPS_RAW)
+ data.cmd = MSM_NAND_CMD_PAGE_READ_ECC_PS;
+ else
+ data.cmd = MSM_NAND_CMD_PAGE_READ_ALL_PS;
+ }
+
+ cmd_list = (struct msm_nand_rw_cmd_desc *)&dma_buffer->cmd_list;
+ status_desc =
+ (struct msm_nand_read_status_desc *)&dma_buffer->result[0];
+ ecc_capability = flash_dev->ecc_capability;
+
+ while (rw_params.page_count-- > 0) {
+
+ erased_page = false;
+ data.addr0 = (rw_params.page << 16) | rw_params.oob_col;
+ data.addr1 = (rw_params.page >> 16) & 0xff;
+
+ for (n = rw_params.start_sector; n < cwperpage; n++) {
+ dma_buffer->result[n].flash_status = 0xeeeeeeee;
+ dma_buffer->result[n].buffer_status = 0xeeeeeeee;
+ dma_buffer->result[n].erased_cw_status = 0xeeeeee00;
+ }
+ msm_nand_prep_read_cmd_desc_pagescope(ops, &rw_params,
+ &data, info,
+ cmd_list, 0);
+ dma_buffer->xfer.iovec_count = cmd_list->count;
+ dma_buffer->xfer.iovec = dma_buffer->cmd_iovec;
+ dma_buffer->xfer.iovec_phys = msm_virt_to_dma(chip,
+ &dma_buffer->cmd_iovec);
+ iovec = dma_buffer->xfer.iovec;
+
+ iovec->addr = msm_virt_to_dma(chip,
+ &cmd_list->setup_desc.ce[0]);
+ iovec->size = sizeof(struct sps_command_element) *
+ cmd_list->setup_desc.num_ce;
+ iovec->flags = cmd_list->setup_desc.flags;
+ iovec++;
+ for (n = 0; n < (cmd_list->count - 1); n++) {
+ iovec->addr = msm_virt_to_dma(chip,
+ &cmd_list->cw_desc[n].ce[0]);
+ iovec->size = sizeof(struct sps_command_element) *
+ cmd_list->cw_desc[n].num_ce;
+ iovec->flags = cmd_list->cw_desc[n].flags;
+ iovec++;
+ }
+ mutex_lock(&info->lock);
+ err = msm_nand_get_device(chip->dev);
+ if (err)
+ goto unlock_mutex;
+ /* Submit data descriptors */
+ for (n = rw_params.start_sector; n < cwperpage; n++) {
+ err = msm_nand_submit_rw_data_desc(ops,
+ &rw_params, info, n, 0);
+ if (err) {
+ pr_err("Failed to submit data descs %d\n", err);
+ panic("error in nand driver\n");
+ goto put_dev;
+ }
+ }
+ if (ops->mode == MTD_OPS_RAW) {
+ submitted_num_desc = cwperpage - rw_params.start_sector;
+ } else if (ops->mode == MTD_OPS_AUTO_OOB) {
+ if (ops->datbuf)
+ submitted_num_desc = cwperpage -
+ rw_params.start_sector;
+ if (ops->oobbuf)
+ submitted_num_desc++;
+ }
+ /* Submit Data Status Descriptors */
+ for (n = rw_params.start_sector; n < cwperpage; n++) {
+ err = msm_nand_submit_read_status_desc(ops,
+ &rw_params, info,
+ n, status_desc);
+ if (err) {
+ pr_err("Failed to submit data status descs %d\n",
+ err);
+ panic("error in nand driver\n");
+ goto put_dev;
+ }
+ status_desc++;
+ }
+ /* Submit command descriptors */
+ err = sps_transfer(info->sps.cmd_pipe.handle,
+ &dma_buffer->xfer);
+ if (err) {
+ pr_err("Failed to submit commands %d\n", err);
+ goto put_dev;
+ }
+ /* Poll for command descriptors completion */
+ err = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
+ info->sps.cmd_pipe.index,
+ dma_buffer->xfer.iovec_count,
+ &iovec_temp);
+ if (err) {
+ pr_err("Failed to get iovec for pipe %d: (err: %d)\n",
+ (info->sps.cmd_pipe.index), err);
+ goto put_dev;
+ }
+ /* Poll for data descriptors completion */
+ err = msm_nand_sps_get_iovec(info->sps.data_prod.handle,
+ info->sps.data_prod.index, submitted_num_desc,
+ &iovec_temp);
+ if (err) {
+ pr_err("Failed to get iovec for pipe %d: (err: %d)\n",
+ (info->sps.data_prod.index), err);
+ goto put_dev;
+ }
+ /*
+ * Poll for data status descriptor completion;
+ * the number of descriptors is the same as for data descriptors.
+ */
+ err = msm_nand_sps_get_iovec(info->sps.data_prod_stat.handle,
+ info->sps.data_prod_stat.index, submitted_num_desc,
+ &iovec_temp);
+ if (err) {
+ pr_err("Failed to get iovec for pipe %d: (err: %d)\n",
+ (info->sps.data_prod_stat.index), err);
+ goto put_dev;
+ }
+ /*
+ * Due to a H/W bug in QPIC 2.0, the command pipe must be
+ * unlocked only after all the status descriptors have been
+ * collected on the status descriptor pipe (pipe #3).
+ */
+
+ /* Unlock the command pipe now */
+ msm_nand_prep_single_desc(sps_cmd,
+ MSM_NAND_AUTO_STATUS_EN(info),
+ WRITE, flash_cmd, INT_UNLCK);
+ err = sps_transfer_one(info->sps.cmd_pipe.handle,
+ msm_virt_to_dma(chip, &sps_cmd->ce),
+ sizeof(struct sps_command_element),
+ NULL, sps_cmd->flags);
+ if (err) {
+ pr_err("Failed to unlock cmd desc. pipe: %d\n", err);
+ goto put_dev;
+ }
+ err = msm_nand_sps_get_iovec(info->sps.cmd_pipe.handle,
+ info->sps.cmd_pipe.index, 1, &iovec_temp);
+ if (err) {
+ pr_err("Failed to get iovec for cmd desc. err:%d\n",
+ err);
+ goto put_dev;
+ }
+ err = msm_nand_put_device(chip->dev);
+ mutex_unlock(&info->lock);
+ if (err)
+ goto free_dma;
+ /* Check for flash status errors */
+ pageerr = rawerr = 0;
+ for (n = rw_params.start_sector; n < cwperpage; n++) {
+ if (dma_buffer->result[n].flash_status & (FS_OP_ERR |
+ FS_MPU_ERR)) {
+ rawerr = -EIO;
+ /*
+ * Check if ECC error was due to an erased
+ * codeword. If so, ignore the error.
+ *
+ * NOTE: There is a bug in erased page
+ * detection hardware block when reading
+ * only spare data. In order to work around
+ * this issue, instead of using PAGE_ALL_ERASED
+ * bit to check for whether a whole page is
+ * erased or not, we use CODEWORD_ALL_ERASED
+ * and CODEWORD_ERASED bits together and check
+ * whether each codeword that has the FS_OP_ERR bit
+ * set is an erased codeword or not.
+ */
+ if ((dma_buffer->result[n].erased_cw_status &
+ ERASED_CW) == ERASED_CW) {
+ /*
+ * At least one code word is detected
+ * as an erased code word.
+ */
+ pr_debug("erased codeword detected - ignore ecc error\n");
+ continue;
+ }
+ pageerr = rawerr;
+ break;
+ }
+ }
+ /* check for uncorrectable errors */
+ if (pageerr) {
+ for (n = rw_params.start_sector; n < cwperpage; n++) {
+ if (dma_buffer->result[n].buffer_status &
+ BS_UNCORRECTABLE_BIT) {
+ /*
+ * Check if page is actually
+ * erased or not.
+ */
+ err = msm_nand_is_erased_page_ps(mtd,
+ from, ops,
+ &rw_params,
+ &erased_page);
+ if (err)
+ goto free_dma;
+ if (!erased_page) {
+ mtd->ecc_stats.failed++;
+ pageerr = -EBADMSG;
+ break;
+ }
+ pageerr = 0;
+ pr_debug("Uncorrectable ECC errors detected on an erased page and has been fixed.\n");
+ break;
+ }
+ }
+ }
+
+ if (rawerr && !pageerr && erased_page) {
+ /*
+ * This means an erased page had bit flips and now
+ * those bit-flips need to be cleared in the data
+ * being sent to upper layers. This will keep track
+ * of those pages and at the end, the data will be
+ * fixed before this function returns.
+ * Note that a whole page worth of data will be fixed
+ * and this will only handle about 64 pages being read
+ * at a time i.e. one erase block worth of pages.
+ */
+ fix_data_in_pages |= BIT(rw_params.page_count);
+ }
+ /* check for correctable errors */
+ if (!rawerr) {
+ for (n = rw_params.start_sector; n < cwperpage; n++) {
+ ecc_errors =
+ dma_buffer->result[n].buffer_status
+ & BS_CORRECTABLE_ERR_MSK;
+ if (ecc_errors) {
+ total_ecc_errors += ecc_errors;
+ mtd->ecc_stats.corrected += ecc_errors;
+ /*
+ * Since the NAND device can show ECC
+ * errors even on the first ever write,
+ * reporting EUCLEAN when the error
+ * count is below the ECC capability
+ * of the device is not useful.
+ *
+ * Also don't report EUCLEAN unless
+ * enable_euclean is set.
+ */
+ if (enable_euclean &&
+ ecc_errors >= ecc_capability)
+ pageerr = -EUCLEAN;
+ }
+ }
+ }
+ if (pageerr && (pageerr != -EUCLEAN || err == 0))
+ err = pageerr;
+
+ if (rawerr && !pageerr) {
+ pr_debug("%llx %x %x empty page\n",
+ (loff_t)rw_params.page * mtd->writesize,
+ ops->len, ops->ooblen);
+ } else {
+ for (n = rw_params.start_sector; n < cwperpage; n++)
+ pr_debug("cw %d: flash_sts %x buffr_sts %x, erased_cw_status: %x, pageerr: %d, rawerr: %d\n",
+ n, dma_buffer->result[n].flash_status,
+ dma_buffer->result[n].buffer_status,
+ dma_buffer->result[n].erased_cw_status,
+ pageerr, rawerr);
+ }
+ if (err && err != -EUCLEAN && err != -EBADMSG)
+ goto free_dma;
+ pages_read++;
+ rw_params.page++;
+ }
+ goto free_dma;
+put_dev:
+ msm_nand_put_device(chip->dev);
+unlock_mutex:
+ mutex_unlock(&info->lock);
+free_dma:
+ msm_nand_release_dma_buffer(chip, dma_buffer, sizeof(*dma_buffer));
+ if (ops->oobbuf)
+ dma_unmap_page(chip->dev, rw_params.oob_dma_addr,
+ ops->ooblen, DMA_FROM_DEVICE);
+ if (ops->datbuf)
+ dma_unmap_page(chip->dev, rw_params.data_dma_addr,
+ ops->len, DMA_BIDIRECTIONAL);
+ /*
+ * If any erased pages were detected with ECC errors, the data read
+ * back for them is most likely not all 0xff, so memset those pages
+ * to all 0xff.
+ */
+ while (fix_data_in_pages) {
+ int temp_page = 0, oobsize = rw_params.cwperpage << 2;
+ int count = 0, offset = 0;
+
+ temp_page = fix_data_in_pages & BIT_MASK(0);
+ fix_data_in_pages = fix_data_in_pages >> 1;
+ count++;
+ if (!temp_page)
+ continue;
+ offset = (count - 1) * mtd->writesize;
+ if (ops->datbuf)
+ memset((ops->datbuf + offset), 0xff, mtd->writesize);
+ offset = (count - 1) * oobsize;
+ if (ops->oobbuf)
+ memset(ops->oobbuf + offset, 0xff, oobsize);
+ }
+validate_mtd_params_failed:
+ if (ops->mode != MTD_OPS_RAW)
+ ops->retlen = mtd->writesize * pages_read;
+ else
+ ops->retlen = (mtd->writesize + mtd->oobsize) * pages_read;
+ ops->oobretlen = ops->ooblen - rw_params.oob_len_data;
+ if (err)
+ pr_err("0x%llx datalen 0x%x ooblen %x err %d corrected %d\n",
+ from, ops->datbuf ? ops->len : 0, ops->ooblen, err,
+ total_ecc_errors);
+ pr_debug("ret %d, retlen %d oobretlen %d\n",
+ err, ops->retlen, ops->oobretlen);
+ return err;
+}
+
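/*
 * Editorial sketch (not part of the patch): how the fix_data_in_pages bitmap
 * built in msm_nand_read_pagescope() is consumed. One bit is recorded per
 * page of the request; after the transfer, each flagged page is overwritten
 * with 0xff so an erased page that read back with bit flips is reported as
 * clean. Buffer layout and names are illustrative assumptions.
 */
static void example_fix_erased_pages(uint8_t *databuf, size_t writesize,
				     uint64_t fix_bitmap)
{
	unsigned int page;

	for (page = 0; fix_bitmap; fix_bitmap >>= 1, page++) {
		if (!(fix_bitmap & 1))
			continue;
		/* erased page with bit flips: force the data back to all 0xff */
		memset(databuf + page * writesize, 0xff, writesize);
	}
}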
+/*
* Read ECC bytes and check whether page is erased or not.
*
* The NAND devices manufactured with newer process node technology are
@@ -2189,6 +3035,7 @@ static int msm_nand_read_partial_page(struct mtd_info *mtd,
size_t actual_len, ret_len;
int is_euclean = 0;
int is_ebadmsg = 0;
+ struct msm_nand_info *info = mtd->priv;
actual_len = ops->len;
ret_len = 0;
@@ -2220,7 +3067,10 @@ static int msm_nand_read_partial_page(struct mtd_info *mtd,
no_copy = false;
ops->datbuf = no_copy ? actual_buf : bounce_buf;
- err = msm_nand_read_oob(mtd, aligned_from, ops);
+ if (info->nand_chip.caps & MSM_NAND_CAP_PAGE_SCOPE_READ)
+ err = msm_nand_read_pagescope(mtd, aligned_from, ops);
+ else
+ err = msm_nand_read_oob(mtd, aligned_from, ops);
if (err == -EUCLEAN) {
is_euclean = 1;
err = 0;
@@ -2277,6 +3127,7 @@ static int msm_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
int is_ebadmsg = 0;
struct mtd_oob_ops ops;
unsigned char *bounce_buf = NULL;
+ struct msm_nand_info *info = mtd->priv;
ops.mode = MTD_OPS_AUTO_OOB;
ops.retlen = 0;
@@ -2308,7 +3159,14 @@ static int msm_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
ops.datbuf = (uint8_t *) buf;
no_copy = true;
}
- ret = msm_nand_read_oob(mtd, from, &ops);
+ if (info->nand_chip.caps &
+ MSM_NAND_CAP_PAGE_SCOPE_READ) {
+ ret = msm_nand_read_pagescope(mtd,
+ from, &ops);
+ } else {
+ ret = msm_nand_read_oob(mtd,
+ from, &ops);
+ }
if (ret == -EUCLEAN) {
is_euclean = 1;
ret = 0;
@@ -2348,7 +3206,11 @@ static int msm_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
} else {
ops.len = len;
ops.datbuf = (uint8_t *)buf;
- ret = msm_nand_read_oob(mtd, from, &ops);
+ if (info->nand_chip.caps &
+ MSM_NAND_CAP_PAGE_SCOPE_READ)
+ ret = msm_nand_read_pagescope(mtd, from, &ops);
+ else
+ ret = msm_nand_read_oob(mtd, from, &ops);
*retlen = ops.retlen;
}
} else {
@@ -3269,7 +4131,9 @@ static int msm_nand_scan(struct mtd_info *mtd)
mtd->_block_markbad = msm_nand_block_markbad;
mtd->_read = msm_nand_read;
mtd->_write = msm_nand_write;
- mtd->_read_oob = msm_nand_read_oob;
+ mtd->_read_oob = (chip->caps & MSM_NAND_CAP_PAGE_SCOPE_READ) ?
+ msm_nand_read_pagescope :
+ msm_nand_read_oob;
mtd->_write_oob = msm_nand_write_oob;
mtd->owner = THIS_MODULE;
out:
@@ -3776,14 +4640,28 @@ static int msm_nand_probe(struct platform_device *pdev)
}
info->nand_chip.qpic_version = qpic_version.qpic_major;
if (info->nand_chip.qpic_version >= 2) {
+ info->nand_chip.caps = MSM_NAND_CAP_PAGE_SCOPE_READ;
+ mutex_lock(&info->lock);
+ err = msm_nand_get_device(info->nand_chip.dev);
+ if (err) {
+ pr_err("Failed to get the device err=%d\n", err);
+ mutex_unlock(&info->lock);
+ goto free_bam;
+ }
err = msm_nand_init_endpoint(info,
&info->sps.data_prod_stat,
SPS_DATA_PROD_STAT_PIPE_INDEX);
if (err) {
pr_err("Failed to configure read status pipe err=%d\n",
err);
+ msm_nand_put_device(info->nand_chip.dev);
+ mutex_unlock(&info->lock);
goto free_bam;
}
+ err = msm_nand_put_device(info->nand_chip.dev);
+ mutex_unlock(&info->lock);
+ if (err)
+ goto free_bam;
}
err = msm_nand_parse_smem_ptable(&nr_parts);
if (err < 0) {
@@ -3810,6 +4688,7 @@ static int msm_nand_probe(struct platform_device *pdev)
info->nand_phys, info->bam_phys, info->bam_irq);
pr_info("Allocated DMA buffer at virt_addr 0x%pK, phys_addr 0x%x\n",
info->nand_chip.dma_virt_addr, info->nand_chip.dma_phys_addr);
+ pr_info("Host capabilities:0x%08x\n", info->nand_chip.caps);
msm_nand_init_sysfs(dev);
msm_nand_init_perf_stats(info);
goto out;
diff --git a/drivers/mtd/devices/msm_qpic_nand.h b/drivers/mtd/devices/msm_qpic_nand.h
index dcdb81abc926..d6c6d4080e48 100644
--- a/drivers/mtd/devices/msm_qpic_nand.h
+++ b/drivers/mtd/devices/msm_qpic_nand.h
@@ -182,6 +182,14 @@
#define MSM_NAND_READ_LOCATION_1(info) MSM_NAND_REG(info, 0x30F24)
#define MSM_NAND_READ_LOCATION_LAST_CW_0(info) MSM_NAND_REG(info, 0x30F40)
#define MSM_NAND_READ_LOCATION_LAST_CW_1(info) MSM_NAND_REG(info, 0x30F44)
+#define MSM_NAND_AUTO_STATUS_EN(info) MSM_NAND_REG(info, 0x3002c)
+
+#define NAND_FLASH_STATUS_EN BIT(0)
+#define NANDC_BUFFER_STATUS_EN BIT(1)
+#define NAND_ERASED_CW_DETECT_STATUS_EN BIT(3)
+#define NAND_FLASH_STATUS_LAST_CW_EN BIT(16)
+#define NANDC_BUFFER_STATUS_LAST_CW_EN BIT(17)
+#define NAND_ERASED_CW_DETECT_STATUS_LAST_CW_EN BIT(19)
/* device commands */
#define MSM_NAND_CMD_PAGE_READ 0x32
@@ -194,6 +202,11 @@
#define MSM_NAND_CMD_BLOCK_ERASE 0x3A
#define MSM_NAND_CMD_FETCH_ID 0x0B
+/* device read commands for pagescope */
+
+#define MSM_NAND_CMD_PAGE_READ_ECC_PS 0x800033
+#define MSM_NAND_CMD_PAGE_READ_ALL_PS 0x800034
+
/* Version Mask */
#define MSM_NAND_VERSION_MAJOR_MASK 0xF0000000
#define MSM_NAND_VERSION_MAJOR_SHIFT 28
@@ -232,6 +245,17 @@ struct msm_nand_rw_cmd_desc {
};
/*
+ * Structure that holds the flash, buffer, and
+ * erased codeword status after every codeword
+ * read during a pagescope read operation.
+ */
+struct msm_nand_read_status_desc {
+ uint32_t flash_status;
+ uint32_t buffer_status;
+ uint32_t erased_cw_status;
+};
+
+/*
* Structure that defines the NAND controller properties as per the
* NAND flash device/chip that is attached.
*/
@@ -258,6 +282,9 @@ struct msm_nand_chip {
uint32_t ecc_bch_cfg;
uint32_t ecc_cfg_raw;
uint32_t qpic_version; /* To store the qpic controller version */
+ uint32_t caps; /* General host capabilities */
+#define MSM_NAND_CAP_PAGE_SCOPE_READ BIT(0)
+#define MSM_NAND_CAP_MULTI_PAGE_READ BIT(1)
};
/* Structure that defines an SPS end point for a NANDc BAM pipe. */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index d177a5b17938..093fcbca23c1 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -264,6 +264,8 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head)
unregister_netdevice(dev);
+ qmi_rmnet_qos_exit_post();
+
rmnet_unregister_real_device(real_dev, port);
}
@@ -301,11 +303,14 @@ static void rmnet_force_unassociate_device(struct net_device *dev)
hlist_del(&ep->hlnode);
kfree(ep);
}
+
/* Unregistering devices in context before freeing port.
* If this API becomes non-context their order should switch.
*/
unregister_netdevice_many(&list);
+ qmi_rmnet_qos_exit_post();
+
rmnet_unregister_real_device(real_dev, port);
}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index f1a814bbae29..6f549fc3fbd9 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -128,8 +128,7 @@ static void rmnet_vnd_uninit(struct net_device *dev)
qos = priv->qos_info;
RCU_INIT_POINTER(priv->qos_info, NULL);
- synchronize_rcu();
- qmi_rmnet_qos_exit(dev, qos);
+ qmi_rmnet_qos_exit_pre(qos);
}
static void rmnet_get_stats64(struct net_device *dev,
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index b10822453945..9167d64c2bec 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -714,6 +714,8 @@ struct msm_pcie_dev_t {
struct mutex recovery_lock;
spinlock_t wakeup_lock;
spinlock_t irq_lock;
+ struct mutex aspm_lock;
+ int prevent_l1;
ulong linkdown_counter;
ulong link_turned_on_counter;
ulong link_turned_off_counter;
@@ -1370,6 +1372,8 @@ static void msm_pcie_show_status(struct msm_pcie_dev_t *dev)
dev->linkdown_counter);
PCIE_DBG_FS(dev, "wake_counter: %lu\n",
dev->wake_counter);
+ PCIE_DBG_FS(dev, "prevent_l1: %d\n",
+ dev->prevent_l1);
PCIE_DBG_FS(dev, "target_link_speed: 0x%x\n",
dev->target_link_speed);
PCIE_DBG_FS(dev, "link_turned_on_counter: %lu\n",
@@ -6153,20 +6157,6 @@ static int msm_pcie_link_retrain(struct msm_pcie_dev_t *pcie_dev,
u32 cnt_max = 1000; /* 100ms timeout */
u32 link_status_lbms_mask = PCI_EXP_LNKSTA_LBMS << PCI_EXP_LNKCTL;
- cnt = 0;
- /* confirm link is in L0 */
- while (((readl_relaxed(pcie_dev->parf + PCIE20_PARF_LTSSM) &
- MSM_PCIE_LTSSM_MASK)) != MSM_PCIE_LTSSM_L0) {
- if (unlikely(cnt++ >= cnt_max)) {
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d: failed to transition to L0\n",
- pcie_dev->rc_idx);
- return -EIO;
- }
-
- usleep_range(100, 105);
- }
-
/* link retrain */
msm_pcie_config_clear_set_dword(pci_dev,
pci_dev->pcie_cap + PCI_EXP_LNKCTL,
@@ -6215,6 +6205,99 @@ static int msm_pcie_set_link_width(struct msm_pcie_dev_t *pcie_dev,
return 0;
}
+void msm_pcie_allow_l1(struct pci_dev *pci_dev)
+{
+ struct pci_dev *root_pci_dev;
+ struct msm_pcie_dev_t *pcie_dev;
+
+ root_pci_dev = pci_find_pcie_root_port(pci_dev);
+ if (!root_pci_dev)
+ return;
+
+ pcie_dev = PCIE_BUS_PRIV_DATA(root_pci_dev->bus);
+
+ mutex_lock(&pcie_dev->aspm_lock);
+ if (unlikely(--pcie_dev->prevent_l1 < 0))
+ PCIE_ERR(pcie_dev,
+ "PCIe: RC%d: %02x:%02x.%01x: unbalanced prevent_l1: %d < 0\n",
+ pcie_dev->rc_idx, pci_dev->bus->number,
+ PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn),
+ pcie_dev->prevent_l1);
+
+ if (pcie_dev->prevent_l1) {
+ mutex_unlock(&pcie_dev->aspm_lock);
+ return;
+ }
+
+ msm_pcie_write_mask(pcie_dev->parf + PCIE20_PARF_PM_CTRL, BIT(5), 0);
+ /* enable L1 */
+ msm_pcie_write_mask(pcie_dev->dm_core +
+ (root_pci_dev->pcie_cap + PCI_EXP_LNKCTL),
+ 0, PCI_EXP_LNKCTL_ASPM_L1);
+
+ PCIE_DBG2(pcie_dev, "PCIe: RC%d: %02x:%02x.%01x: exit\n",
+ pcie_dev->rc_idx, pci_dev->bus->number,
+ PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
+ mutex_unlock(&pcie_dev->aspm_lock);
+}
+EXPORT_SYMBOL(msm_pcie_allow_l1);
+
+int msm_pcie_prevent_l1(struct pci_dev *pci_dev)
+{
+ struct pci_dev *root_pci_dev;
+ struct msm_pcie_dev_t *pcie_dev;
+ u32 cnt = 0;
+ u32 cnt_max = 1000; /* 100ms timeout */
+ int ret = 0;
+
+ root_pci_dev = pci_find_pcie_root_port(pci_dev);
+ if (!root_pci_dev)
+ return -ENODEV;
+
+ pcie_dev = PCIE_BUS_PRIV_DATA(root_pci_dev->bus);
+
+ /* disable L1 */
+ mutex_lock(&pcie_dev->aspm_lock);
+ if (pcie_dev->prevent_l1++) {
+ mutex_unlock(&pcie_dev->aspm_lock);
+ return 0;
+ }
+
+ msm_pcie_write_mask(pcie_dev->dm_core +
+ (root_pci_dev->pcie_cap + PCI_EXP_LNKCTL),
+ PCI_EXP_LNKCTL_ASPM_L1, 0);
+ msm_pcie_write_mask(pcie_dev->parf + PCIE20_PARF_PM_CTRL, 0, BIT(5));
+
+ /* confirm link is in L0 */
+ while (((readl_relaxed(pcie_dev->parf + PCIE20_PARF_LTSSM) &
+ MSM_PCIE_LTSSM_MASK)) != MSM_PCIE_LTSSM_L0) {
+ if (unlikely(cnt++ >= cnt_max)) {
+ PCIE_ERR(pcie_dev,
+ "PCIe: RC%d: %02x:%02x.%01x: failed to transition to L0\n",
+ pcie_dev->rc_idx, pci_dev->bus->number,
+ PCI_SLOT(pci_dev->devfn),
+ PCI_FUNC(pci_dev->devfn));
+ ret = -EIO;
+ goto err;
+ }
+
+ usleep_range(100, 105);
+ }
+
+ PCIE_DBG2(pcie_dev, "PCIe: RC%d: %02x:%02x.%01x: exit\n",
+ pcie_dev->rc_idx, pci_dev->bus->number,
+ PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
+ mutex_unlock(&pcie_dev->aspm_lock);
+
+ return 0;
+err:
+ mutex_unlock(&pcie_dev->aspm_lock);
+ msm_pcie_allow_l1(pci_dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(msm_pcie_prevent_l1);
+
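/*
 * Editorial sketch (not part of the patch): msm_pcie_prevent_l1() and
 * msm_pcie_allow_l1() are reference counted through pcie_dev->prevent_l1, so
 * every successful prevent call must be paired with exactly one allow call.
 * A minimal usage pattern for a client driver, assuming pdev is its own
 * struct pci_dev:
 */
static int example_access_requiring_l0(struct pci_dev *pdev)
{
	int ret;

	ret = msm_pcie_prevent_l1(pdev);	/* link held in L0 on success */
	if (ret)
		return ret;

	/* ... perform the accesses that must not race with L1 entry ... */

	msm_pcie_allow_l1(pdev);		/* drop the reference; L1 may re-enter */
	return 0;
}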
int msm_pcie_set_link_bandwidth(struct pci_dev *pci_dev, u16 target_link_speed,
u16 target_link_width)
{
@@ -6264,9 +6347,10 @@ int msm_pcie_set_link_bandwidth(struct pci_dev *pci_dev, u16 target_link_speed,
PCI_EXP_LNKSTA_CLS,
target_link_speed);
- /* disable link L1. Need to be in L0 for gen switch */
- msm_pcie_config_l1(pcie_dev, root_pci_dev, false);
- msm_pcie_write_mask(pcie_dev->parf + PCIE20_PARF_PM_CTRL, 0, BIT(5));
+ /* need to be in L0 for gen switch */
+ ret = msm_pcie_prevent_l1(root_pci_dev);
+ if (ret)
+ return ret;
if (target_link_speed > current_link_speed)
msm_pcie_scale_link_bandwidth(pcie_dev, target_link_speed);
@@ -6289,9 +6373,7 @@ int msm_pcie_set_link_bandwidth(struct pci_dev *pci_dev, u16 target_link_speed,
if (target_link_speed < current_link_speed)
msm_pcie_scale_link_bandwidth(pcie_dev, target_link_speed);
out:
- /* re-enable link L1 */
- msm_pcie_write_mask(pcie_dev->parf + PCIE20_PARF_PM_CTRL, BIT(5), 0);
- msm_pcie_config_l1(pcie_dev, root_pci_dev, true);
+ msm_pcie_allow_l1(root_pci_dev);
return ret;
}
@@ -6555,6 +6637,7 @@ static int __init pcie_init(void)
mutex_init(&msm_pcie_dev[i].setup_lock);
mutex_init(&msm_pcie_dev[i].clk_lock);
mutex_init(&msm_pcie_dev[i].recovery_lock);
+ mutex_init(&msm_pcie_dev[i].aspm_lock);
spin_lock_init(&msm_pcie_dev[i].wakeup_lock);
spin_lock_init(&msm_pcie_dev[i].irq_lock);
msm_pcie_dev[i].drv_ready = false;
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs405.c b/drivers/pinctrl/qcom/pinctrl-qcs405.c
index 7e974592ed5e..01b77297cbba 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcs405.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcs405.c
@@ -1654,7 +1654,7 @@ static const struct msm_pingroup qcs405_groups[] = {
NA, NA, NA, NA, NA, NA, NA),
[92] = PINGROUP(92, EAST, i2s_1_data3, i2s_1_data4_dsd4,
NA, NA, NA, NA, NA, qdss_cti_trig_in_a1, NA),
- [93] = PINGROUP(93, EAST, i2s_1_data5_dsd5, pwm_led22, i2s_1,
+ [93] = PINGROUP(93, EAST, i2s_1, pwm_led22, i2s_1_data5_dsd5,
NA, NA, NA, NA, NA, qdss_tracedata_b),
[94] = PINGROUP(94, EAST, i2s_1, pwm_led23, i2s_1, NA,
qdss_cti_trig_out_a0, NA, rgmi_dll2, NA, NA),
diff --git a/drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth.c b/drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth.c
index d99fc644f641..25f498986d63 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth.c
@@ -103,6 +103,11 @@ static int ipa_eth_init_device(struct ipa_eth_device *eth_dev)
return rc;
}
+ rc = ipa_eth_uc_stats_init(eth_dev);
+ if (rc)
+ ipa_eth_dev_err(eth_dev,
+ "Failed to init uC stats monitor, continuing.");
+
ipa_eth_dev_log(eth_dev, "Initialized device");
eth_dev->of_state = IPA_ETH_OF_ST_INITED;
@@ -120,6 +125,11 @@ static int ipa_eth_deinit_device(struct ipa_eth_device *eth_dev)
if (eth_dev->of_state != IPA_ETH_OF_ST_INITED)
return -EFAULT;
+ rc = ipa_eth_uc_stats_deinit(eth_dev);
+ if (rc)
+ ipa_eth_dev_err(eth_dev,
+ "Failed to deinit uC stats monitor, continuing.");
+
rc = ipa_eth_offload_deinit(eth_dev);
if (rc) {
ipa_eth_dev_err(eth_dev, "Failed to deinit offload");
@@ -187,6 +197,11 @@ static int ipa_eth_start_device(struct ipa_eth_device *eth_dev)
return rc;
}
+ rc = ipa_eth_uc_stats_start(eth_dev);
+ if (rc)
+ ipa_eth_dev_err(eth_dev,
+ "Failed to start uC stats monitor, continuing.");
+
ipa_eth_dev_log(eth_dev, "Started device");
eth_dev->of_state = IPA_ETH_OF_ST_STARTED;
@@ -204,6 +219,11 @@ static int ipa_eth_stop_device(struct ipa_eth_device *eth_dev)
if (eth_dev->of_state != IPA_ETH_OF_ST_STARTED)
return -EFAULT;
+ rc = ipa_eth_uc_stats_stop(eth_dev);
+ if (rc)
+ ipa_eth_dev_err(eth_dev,
+ "Failed to stop uC stats monitor, continuing.");
+
rc = ipa_eth_ep_unregister_interface(eth_dev);
if (rc) {
ipa_eth_dev_err(eth_dev, "Failed to unregister IPA interface");
@@ -322,8 +342,7 @@ static void ipa_eth_device_refresh_work(struct work_struct *work)
void ipa_eth_device_refresh_sched(struct ipa_eth_device *eth_dev)
{
- if (present(eth_dev))
- queue_work(ipa_eth_wq, &eth_dev->refresh);
+ queue_work(ipa_eth_wq, &eth_dev->refresh);
}
void ipa_eth_device_refresh_sync(struct ipa_eth_device *eth_dev)
diff --git a/drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth_i.h b/drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth_i.h
index f2e2f567953b..6736c740f8e4 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth_i.h
@@ -194,6 +194,11 @@ int ipa_eth_pm_deactivate(struct ipa_eth_device *eth_dev);
int ipa_eth_pm_vote_bw(struct ipa_eth_device *eth_dev);
+int ipa_eth_uc_stats_init(struct ipa_eth_device *eth_dev);
+int ipa_eth_uc_stats_deinit(struct ipa_eth_device *eth_dev);
+int ipa_eth_uc_stats_start(struct ipa_eth_device *eth_dev);
+int ipa_eth_uc_stats_stop(struct ipa_eth_device *eth_dev);
+
/* ipa_eth_utils.c APIs */
const char *ipa_eth_device_event_name(enum ipa_eth_device_event event);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth_uc.c b/drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth_uc.c
index 1fda16c2fe21..20bc35a9ac1e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ethernet/ipa_eth_uc.c
@@ -109,3 +109,126 @@ int ipa_eth_uc_send_cmd(enum ipa_eth_uc_op op, u32 protocol,
return rc;
}
EXPORT_SYMBOL(ipa_eth_uc_send_cmd);
+
+/* Define IPA hardware constants not available from IPA header files */
+
+#define IPA_HW_DIR_PRODUCER 0
+#define IPA_HW_DIR_CONSUMER 1
+
+#define IPA_HW_CH_ID_INVALID 0xFF
+#define IPA_HW_PROTOCOL_INVALID 0xFFFFFFFF
+
+static u32 find_uc_protocol(struct ipa_eth_device *eth_dev)
+{
+ int protocol;
+ struct ipa_eth_channel *ch;
+
+ list_for_each_entry(ch, &eth_dev->rx_channels, channel_list) {
+ protocol = ipa_get_prot_id(ch->ipa_client);
+ if (protocol >= 0)
+ return lower_32_bits(protocol);
+ }
+
+ list_for_each_entry(ch, &eth_dev->tx_channels, channel_list) {
+ protocol = ipa_get_prot_id(ch->ipa_client);
+ if (protocol >= 0)
+ return lower_32_bits(protocol);
+ }
+
+ return IPA_HW_PROTOCOL_INVALID;
+}
+
+static int find_client_channel(enum ipa_client_type client)
+{
+ const struct ipa_gsi_ep_config *gsi_ep_cfg;
+
+ gsi_ep_cfg = ipa3_get_gsi_ep_info(client);
+ if (!gsi_ep_cfg)
+ return -EFAULT;
+
+ return gsi_ep_cfg->ipa_gsi_chan_num;
+}
+
+int ipa_eth_uc_stats_init(struct ipa_eth_device *eth_dev)
+{
+ return 0;
+}
+
+int ipa_eth_uc_stats_deinit(struct ipa_eth_device *eth_dev)
+{
+ u32 protocol = find_uc_protocol(eth_dev);
+
+ if (protocol == IPA_HW_PROTOCOL_INVALID)
+ return -EFAULT;
+
+ return ipa_uc_debug_stats_dealloc(protocol);
+}
+
+static void __fill_stats_info(
+ struct ipa_eth_channel *ch,
+ struct IpaOffloadStatschannel_info *ch_info,
+ bool start)
+{
+ ch_info->dir = IPA_ETH_CH_IS_RX(ch) ?
+ IPA_HW_DIR_CONSUMER : IPA_HW_DIR_PRODUCER;
+
+ if (start) {
+ int gsi_ch = find_client_channel(ch->ipa_client);
+
+ if (gsi_ch < 0) {
+ ipa_eth_dev_err(ch->eth_dev,
+ "Failed to determine GSI channel for client %d",
+ ch->ipa_client);
+ gsi_ch = IPA_HW_CH_ID_INVALID;
+ }
+
+ ch_info->ch_id = (u8) gsi_ch;
+ } else {
+ ch_info->ch_id = IPA_HW_CH_ID_INVALID;
+ }
+}
+
+static int ipa_eth_uc_stats_control(struct ipa_eth_device *eth_dev, bool start)
+{
+ int stats_idx = 0;
+ struct ipa_eth_channel *ch;
+ u32 protocol = find_uc_protocol(eth_dev);
+ struct IpaHwOffloadStatsAllocCmdData_t stats_info;
+
+ if (protocol == IPA_HW_PROTOCOL_INVALID) {
+ ipa_eth_dev_err(eth_dev, "Failed find to uC protocol");
+ return -EFAULT;
+ }
+
+ memset(&stats_info, 0, sizeof(stats_info));
+
+ stats_info.protocol = protocol;
+
+ list_for_each_entry(ch, &eth_dev->rx_channels, channel_list) {
+ if (stats_idx == IPA_MAX_CH_STATS_SUPPORTED)
+ break;
+
+ __fill_stats_info(ch,
+ &stats_info.ch_id_info[stats_idx++], start);
+ }
+
+ list_for_each_entry(ch, &eth_dev->tx_channels, channel_list) {
+ if (stats_idx == IPA_MAX_CH_STATS_SUPPORTED)
+ break;
+
+ __fill_stats_info(ch,
+ &stats_info.ch_id_info[stats_idx++], start);
+ }
+
+ return ipa_uc_debug_stats_alloc(stats_info);
+}
+
+int ipa_eth_uc_stats_start(struct ipa_eth_device *eth_dev)
+{
+ return ipa_eth_uc_stats_control(eth_dev, true);
+}
+
+int ipa_eth_uc_stats_stop(struct ipa_eth_device *eth_dev)
+{
+ return ipa_eth_uc_stats_control(eth_dev, false);
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 9c0643d0156f..8bfbab5c9e55 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -41,6 +41,7 @@
#include <asm/cacheflush.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/of_irq.h>
+#include <linux/ctype.h>
#ifdef CONFIG_ARM64
@@ -2676,6 +2677,11 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
break;
+ case IPA_IOC_APP_CLOCK_VOTE:
+ retval = ipa3_app_clk_vote(
+ (enum ipa_app_clock_vote_type) arg);
+ break;
+
default:
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return -ENOTTY;
@@ -4440,6 +4446,9 @@ long compat_ipa3_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case IPA_IOC_GET_NAT_IN_SRAM_INFO32:
cmd = IPA_IOC_GET_NAT_IN_SRAM_INFO;
break;
+ case IPA_IOC_APP_CLOCK_VOTE32:
+ cmd = IPA_IOC_APP_CLOCK_VOTE;
+ break;
case IPA_IOC_COMMIT_HDR:
case IPA_IOC_RESET_HDR:
case IPA_IOC_COMMIT_RT:
@@ -5970,6 +5979,8 @@ static ssize_t ipa3_write(struct file *file, const char __user *buf,
char dbg_buff[32] = { 0 };
+ int i = 0;
+
if (sizeof(dbg_buff) < count + 1)
return -EFAULT;
@@ -5989,6 +6000,17 @@ static ssize_t ipa3_write(struct file *file, const char __user *buf,
if (ipa3_is_ready())
return count;
+ /* Ignore empty ipa_config file */
+ for (i = 0 ; i < count ; ++i) {
+ if (!isspace(dbg_buff[i]))
+ break;
+ }
+
+ if (i == count) {
+ IPADBG("Empty ipa_config file\n");
+ return count;
+ }
+
/* Check MHI configuration on MDM devices */
if (!ipa3_is_msm_device()) {
@@ -6731,7 +6753,11 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
/* proxy vote for modem is added in ipa3_post_init() phase */
if (ipa3_ctx->ipa_hw_type != IPA_HW_v4_0)
ipa3_proxy_clk_unvote();
+
+ mutex_init(&ipa3_ctx->app_clock_vote.mutex);
+
return 0;
+
fail_cdev_add:
fail_gsi_pre_fw_load_init:
ipa_eth_exit();
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 2c8b32bca46c..1017a6c3e913 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -2466,6 +2466,22 @@ done:
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
}
+static ssize_t ipa3_read_app_clk_vote(
+ struct file *file,
+ char __user *ubuf,
+ size_t count,
+ loff_t *ppos)
+{
+ int cnt =
+ scnprintf(
+ dbg_buff,
+ IPA_MAX_MSG_LEN,
+ "%u\n",
+ ipa3_ctx->app_clock_vote.cnt);
+
+ return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
static void ipa_dump_status(struct ipahal_pkt_status *status)
{
IPA_DUMP_STATUS_FIELD(status_opcode);
@@ -2764,7 +2780,11 @@ static const struct ipa3_debugfs_file debugfs_files[] = {
"usb_gsi_stats", IPA_READ_ONLY_MODE, NULL, {
.read = ipa3_read_usb_gsi_stats,
}
- }
+ }, {
+ "app_clk_vote_cnt", IPA_READ_ONLY_MODE, NULL, {
+ .read = ipa3_read_app_clk_vote,
+ }
+ },
};
void ipa3_debugfs_pre_init(void)
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index bc3b0233dbcd..1b6584dca521 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -252,6 +252,8 @@ enum {
# define __cpuc_flush_dcache_area __flush_dcache_area
#endif
+#define IPA_APP_VOTE_MAX 500
+
#define IPA_SMP2P_OUT_CLK_RSP_CMPLT_IDX 0
#define IPA_SMP2P_OUT_CLK_VOTE_IDX 1
#define IPA_SMP2P_SMEM_STATE_MASK 3
@@ -443,6 +445,9 @@ enum {
#define IPA_IOC_GET_NAT_IN_SRAM_INFO32 _IOWR(IPA_IOC_MAGIC, \
IPA_IOCTL_GET_NAT_IN_SRAM_INFO, \
compat_uptr_t)
+#define IPA_IOC_APP_CLOCK_VOTE32 _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_APP_CLOCK_VOTE, \
+ compat_uptr_t)
#endif /* #ifdef CONFIG_COMPAT */
#define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311
@@ -1718,6 +1723,11 @@ struct ipa3_pc_mbox_data {
struct mbox_chan *mbox;
};
+struct ipa3_app_clock_vote {
+ struct mutex mutex;
+ u32 cnt;
+};
+
/**
* struct ipa3_context - IPA context
* @cdev: cdev context
@@ -1808,6 +1818,7 @@ struct ipa3_pc_mbox_data {
* @flt_rt_counters: the counters usage info for flt rt stats
* @wdi3_ctx: IPA wdi3 context
* @gsi_info: channel/protocol info for GSI offloading uC stats
+ * @app_clock_vote: holds the userspace application clock vote count
* IPA context - holds all relevant info about IPA driver and its state
*/
struct ipa3_context {
@@ -1979,6 +1990,7 @@ struct ipa3_context {
gsi_info[IPA_HW_PROTOCOL_MAX];
bool ipa_mhi_proxy;
bool ipa_wan_skb_page;
+ struct ipa3_app_clock_vote app_clock_vote;
};
struct ipa3_plat_drv_res {
@@ -2480,6 +2492,7 @@ int ipa3_del_ipv6ct_table(struct ipa_ioc_nat_ipv6ct_table_del *del);
int ipa3_nat_mdfy_pdn(struct ipa_ioc_nat_pdn_entry *mdfy_pdn);
int ipa3_nat_get_sram_info(struct ipa_nat_in_sram_info *info_ptr);
+int ipa3_app_clk_vote(enum ipa_app_clock_vote_type vote_type);
/*
* Messaging
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 71a94d5cfafa..cbff7bc35408 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -8246,3 +8246,48 @@ int ipa3_get_prot_id(enum ipa_client_type client)
return prot_id;
}
+int ipa3_app_clk_vote(
+ enum ipa_app_clock_vote_type vote_type)
+{
+ const char *str_ptr = "APP_VOTE";
+ int ret = 0;
+
+ IPADBG("In\n");
+
+ mutex_lock(&ipa3_ctx->app_clock_vote.mutex);
+
+ switch (vote_type) {
+ case IPA_APP_CLK_VOTE:
+ if ((ipa3_ctx->app_clock_vote.cnt + 1) <= IPA_APP_VOTE_MAX) {
+ ipa3_ctx->app_clock_vote.cnt++;
+ IPA_ACTIVE_CLIENTS_INC_SPECIAL(str_ptr);
+ } else {
+ IPAERR_RL("App vote count max hit\n");
+ ret = -EPERM;
+ break;
+ }
+ break;
+ case IPA_APP_CLK_DEVOTE:
+ if (ipa3_ctx->app_clock_vote.cnt) {
+ ipa3_ctx->app_clock_vote.cnt--;
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL(str_ptr);
+ }
+ break;
+ case IPA_APP_CLK_RESET_VOTE:
+ while (ipa3_ctx->app_clock_vote.cnt > 0) {
+ IPA_ACTIVE_CLIENTS_DEC_SPECIAL(str_ptr);
+ ipa3_ctx->app_clock_vote.cnt--;
+ }
+ break;
+ default:
+ IPAERR_RL("Unknown vote_type(%u)\n", vote_type);
+ ret = -EPERM;
+ break;
+ }
+
+ mutex_unlock(&ipa3_ctx->app_clock_vote.mutex);
+
+ IPADBG("Out\n");
+
+ return ret;
+}
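
ipa3_app_clk_vote() backs the new IPA_IOC_APP_CLOCK_VOTE ioctl: each IPA_APP_CLK_VOTE increments a mutex-protected counter (bounded by IPA_APP_VOTE_MAX) and takes one active-clients reference, IPA_APP_CLK_DEVOTE releases one, and IPA_APP_CLK_RESET_VOTE drops them all. A minimal userspace sketch of a vote/devote pair follows; it assumes this tree's UAPI header is installed and that the IPA character device is exposed at /dev/ipa (the device path is an assumption, not taken from this patch).

/* Hypothetical userspace sketch: vote/devote IPA clocks via the new ioctl.
 * Assumes <linux/msm_ipa.h> from this tree's UAPI headers and that the IPA
 * driver exposes its char device at /dev/ipa (device path is an assumption).
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/msm_ipa.h>

int main(void)
{
	int fd = open("/dev/ipa", O_RDWR);

	if (fd < 0) {
		perror("open /dev/ipa");
		return 1;
	}

	/* Take one application clock vote; the driver caps the count at
	 * IPA_APP_VOTE_MAX (500) and returns -EPERM beyond that.
	 */
	if (ioctl(fd, IPA_IOC_APP_CLOCK_VOTE, IPA_APP_CLK_VOTE) < 0)
		perror("IPA_APP_CLK_VOTE");

	/* ... IPA-dependent work runs while the clocks are held ... */

	/* Drop the vote again so the clocks can be gated. */
	if (ioctl(fd, IPA_IOC_APP_CLOCK_VOTE, IPA_APP_CLK_DEVOTE) < 0)
		perror("IPA_APP_CLK_DEVOTE");

	close(fd);
	return 0;
}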
diff --git a/drivers/platform/msm/mhi_dev/mhi.c b/drivers/platform/msm/mhi_dev/mhi.c
index 8157373cc883..4435ac91e612 100644
--- a/drivers/platform/msm/mhi_dev/mhi.c
+++ b/drivers/platform/msm/mhi_dev/mhi.c
@@ -66,6 +66,10 @@
#define TR_RING_ELEMENT_SZ sizeof(struct mhi_dev_transfer_ring_element)
#define RING_ELEMENT_TYPE_SZ sizeof(union mhi_dev_ring_element_type)
+#define MHI_DEV_CH_CLOSE_TIMEOUT_MIN 5000
+#define MHI_DEV_CH_CLOSE_TIMEOUT_MAX 5100
+#define MHI_DEV_CH_CLOSE_TIMEOUT_COUNT 30
+
uint32_t bhi_imgtxdb;
enum mhi_msg_level mhi_msg_lvl = MHI_MSG_ERROR;
enum mhi_msg_level mhi_ipc_msg_lvl = MHI_MSG_VERBOSE;
@@ -75,7 +79,8 @@ static struct mhi_dev *mhi_ctx;
static void mhi_hwc_cb(void *priv, enum ipa_mhi_event_type event,
unsigned long data);
static void mhi_ring_init_cb(void *user_data);
-static void mhi_update_state_info(uint32_t uevent_idx, enum mhi_ctrl_info info);
+static void mhi_update_state_info(enum mhi_ctrl_info info);
+static void mhi_update_state_info_ch(uint32_t ch_id, enum mhi_ctrl_info info);
static int mhi_deinit(struct mhi_dev *mhi);
static void mhi_dev_resume_init_with_link_up(struct ep_pcie_notify *notify);
static int mhi_dev_pcie_notify_event;
@@ -698,8 +703,7 @@ static int mhi_enable_int(void)
pr_err("Failed to enable command db: %d\n", rc);
return rc;
}
- mhi_update_state_info(MHI_DEV_UEVENT_CTRL,
- MHI_STATE_CONNECTED);
+ mhi_update_state_info(MHI_STATE_CONNECTED);
if (!mhi_ctx->mhi_int)
ep_pcie_mask_irq_event(mhi_ctx->phandle,
EP_PCIE_INT_EVT_MHI_A7, true);
@@ -1206,7 +1210,7 @@ static int mhi_dev_process_stop_cmd(struct mhi_dev_ring *ring, uint32_t ch_id,
return 0;
} else if (mhi->ch_ctx_cache[ch_id].ch_type ==
MHI_DEV_CH_TYPE_INBOUND_CHANNEL &&
- mhi->ch[ch_id].wr_request_active) {
+ (mhi->ch[ch_id].pend_wr_count > 0)) {
mhi_log(MHI_MSG_INFO, "Pending inbound transaction\n");
return 0;
}
@@ -1345,7 +1349,7 @@ send_start_completion_event:
if (rc)
pr_err("Error sending command completion event\n");
- mhi_update_state_info(ch_id, MHI_STATE_CONNECTED);
+ mhi_update_state_info_ch(ch_id, MHI_STATE_CONNECTED);
/* Trigger callback to clients */
mhi_dev_trigger_cb(ch_id);
mhi_uci_chan_state_notify(mhi, ch_id, MHI_STATE_CONNECTED);
@@ -1401,7 +1405,7 @@ send_start_completion_event:
pr_err("stop event send failed\n");
mutex_unlock(&ch->ch_lock);
- mhi_update_state_info(ch_id, MHI_STATE_DISCONNECTED);
+ mhi_update_state_info_ch(ch_id, MHI_STATE_DISCONNECTED);
/* Trigger callback to clients */
mhi_dev_trigger_cb(ch_id);
mhi_uci_chan_state_notify(mhi, ch_id,
@@ -1478,7 +1482,7 @@ send_start_completion_event:
if (rc)
pr_err("Error sending command completion event\n");
mutex_unlock(&ch->ch_lock);
- mhi_update_state_info(ch_id, MHI_STATE_DISCONNECTED);
+ mhi_update_state_info_ch(ch_id, MHI_STATE_DISCONNECTED);
mhi_dev_trigger_cb(ch_id);
mhi_uci_chan_state_notify(mhi, ch_id,
MHI_STATE_DISCONNECTED);
@@ -1758,9 +1762,23 @@ static void mhi_dev_transfer_completion_cb(void *mreq)
rd_offset = req->rd_offset;
ch->curr_ereq->context = ch;
+ if (mhi->ch_ctx_cache[ch->ch_id].ch_type ==
+ MHI_DEV_CH_TYPE_INBOUND_CHANNEL)
+ ch->pend_wr_count--;
+
dma_unmap_single(&mhi_ctx->pdev->dev, req->dma,
req->len, DMA_FROM_DEVICE);
+ /*
+ * Channel got closed with transfers pending
+ * Do not trigger callback or send cmpl to host
+ */
+ if (ch->state == MHI_DEV_CH_CLOSED) {
+ mhi_log(MHI_MSG_DBG, "Ch %d closed with %d writes pending\n",
+ ch->ch_id, ch->pend_wr_count + 1);
+ return;
+ }
+
/* Trigger client call back */
req->client_cb(req);
@@ -2159,7 +2177,7 @@ int mhi_dev_resume(struct mhi_dev *mhi)
mhi_ctx->write_to_host(mhi, &data_transfer, NULL,
MHI_DEV_DMA_SYNC);
}
- mhi_update_state_info(MHI_DEV_UEVENT_CTRL, MHI_STATE_CONNECTED);
+ mhi_update_state_info(MHI_STATE_CONNECTED);
atomic_set(&mhi->is_suspended, 0);
@@ -2306,9 +2324,27 @@ int mhi_dev_channel_isempty(struct mhi_dev_client *handle)
}
EXPORT_SYMBOL(mhi_dev_channel_isempty);
+bool mhi_dev_channel_has_pending_write(struct mhi_dev_client *handle)
+{
+ struct mhi_dev_channel *ch;
+
+ if (!handle) {
+ mhi_log(MHI_MSG_ERROR, "Invalid channel access\n");
+ return false;
+ }
+
+ ch = handle->channel;
+ if (!ch)
+ return false;
+
+ return ch->pend_wr_count ? true : false;
+}
+EXPORT_SYMBOL(mhi_dev_channel_has_pending_write);
+
void mhi_dev_close_channel(struct mhi_dev_client *handle)
{
struct mhi_dev_channel *ch;
+ int count = 0;
if (!handle) {
mhi_log(MHI_MSG_ERROR, "Invalid channel access:%d\n", -ENODEV);
@@ -2316,8 +2352,20 @@ void mhi_dev_close_channel(struct mhi_dev_client *handle)
}
ch = handle->channel;
+ do {
+ if (ch->pend_wr_count) {
+ usleep_range(MHI_DEV_CH_CLOSE_TIMEOUT_MIN,
+ MHI_DEV_CH_CLOSE_TIMEOUT_MAX);
+ } else
+ break;
+ } while (++count < MHI_DEV_CH_CLOSE_TIMEOUT_COUNT);
+
mutex_lock(&ch->ch_lock);
+ if (ch->pend_wr_count)
+ mhi_log(MHI_MSG_ERROR, "%d writes pending for channel %d\n",
+ ch->pend_wr_count, ch->ch_id);
+
if (ch->state != MHI_DEV_CH_PENDING_START)
if ((ch->ch_type == MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL &&
!mhi_dev_channel_isempty(handle)) || ch->tre_loc)
@@ -2526,6 +2574,7 @@ int mhi_dev_write_channel(struct mhi_req *wreq)
size_t bytes_to_write = 0;
size_t bytes_written = 0;
uint32_t tre_len = 0, suspend_wait_timeout = 0;
+ bool async_wr_sched = false;
if (WARN_ON(!wreq || !wreq->client || !wreq->buf)) {
pr_err("%s: invalid parameters\n", __func__);
@@ -2569,12 +2618,12 @@ int mhi_dev_write_channel(struct mhi_req *wreq)
handle_client = wreq->client;
ch = handle_client->channel;
- ch->wr_request_active = true;
ring = ch->ring;
mutex_lock(&ch->ch_lock);
+ ch->pend_wr_count++;
if (ch->state == MHI_DEV_CH_STOPPED) {
mhi_log(MHI_MSG_ERROR,
"channel %d already stopped\n", wreq->chan);
@@ -2625,7 +2674,8 @@ int mhi_dev_write_channel(struct mhi_req *wreq)
"Error while writing chan (%d) rc %d\n",
wreq->chan, rc);
goto exit;
- }
+ } else if (wreq->mode == DMA_ASYNC)
+ async_wr_sched = true;
bytes_written += bytes_to_write;
usr_buf_remaining -= bytes_to_write;
@@ -2665,7 +2715,8 @@ int mhi_dev_write_channel(struct mhi_req *wreq)
}
}
exit:
- ch->wr_request_active = false;
+ if (wreq->mode == DMA_SYNC || !async_wr_sched)
+ ch->pend_wr_count--;
mutex_unlock(&ch->ch_lock);
mutex_unlock(&mhi_ctx->mhi_write_test);
return bytes_written;
@@ -2844,7 +2895,7 @@ static void mhi_dev_enable(struct work_struct *work)
enable_irq(mhi_ctx->mhi_irq);
}
- mhi_update_state_info(MHI_DEV_UEVENT_CTRL, MHI_STATE_CONFIGURED);
+ mhi_update_state_info(MHI_STATE_CONFIGURED);
}
static void mhi_ring_init_cb(void *data)
@@ -2902,22 +2953,22 @@ int mhi_register_state_cb(void (*mhi_state_cb)
}
EXPORT_SYMBOL(mhi_register_state_cb);
-static void mhi_update_state_info(uint32_t uevent_idx, enum mhi_ctrl_info info)
+static void mhi_update_state_info_ch(uint32_t ch_id, enum mhi_ctrl_info info)
{
struct mhi_dev_client_cb_reason reason;
- if (uevent_idx == MHI_DEV_UEVENT_CTRL)
- mhi_ctx->ctrl_info = info;
-
- channel_state_info[uevent_idx].ctrl_info = info;
-
- if (uevent_idx == MHI_CLIENT_QMI_OUT ||
- uevent_idx == MHI_CLIENT_QMI_IN) {
+ channel_state_info[ch_id].ctrl_info = info;
+ if (ch_id == MHI_CLIENT_QMI_OUT || ch_id == MHI_CLIENT_QMI_IN) {
/* For legacy reasons for QTI client */
reason.reason = MHI_DEV_CTRL_UPDATE;
uci_ctrl_update(&reason);
}
+}
+
+static void mhi_update_state_info(enum mhi_ctrl_info info)
+{
+ mhi_ctx->ctrl_info = info;
}
int mhi_ctrl_state_info(uint32_t idx, uint32_t *info)
@@ -3456,8 +3507,7 @@ static int mhi_dev_probe(struct platform_device *pdev)
mutex_init(&mhi_ctx->mhi_lock);
mhi_uci_init();
- mhi_update_state_info(MHI_DEV_UEVENT_CTRL,
- MHI_STATE_CONFIGURED);
+ mhi_update_state_info(MHI_STATE_CONFIGURED);
}
if (mhi_ctx->use_edma) {
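
mhi_dev_close_channel() now drains in-flight writes before tearing the channel down: it polls pend_wr_count up to MHI_DEV_CH_CLOSE_TIMEOUT_COUNT times, sleeping 5000-5100 us per iteration, and only logs an error if writes are still pending when the budget runs out. The standalone userspace analogue below mirrors that bounded loop; usleep() stands in for the kernel's usleep_range(), and fake_completion() is a hypothetical stand-in for mhi_dev_transfer_completion_cb() decrementing the counter.

/* Standalone analogue of the bounded drain loop added to
 * mhi_dev_close_channel(): poll a pending-write counter a fixed number of
 * times, sleeping ~5 ms per iteration, then give up and log.
 */
#include <stdio.h>
#include <unistd.h>

#define CLOSE_TIMEOUT_US	5000	/* mirrors MHI_DEV_CH_CLOSE_TIMEOUT_MIN */
#define CLOSE_TIMEOUT_COUNT	30	/* mirrors MHI_DEV_CH_CLOSE_TIMEOUT_COUNT */

static int pend_wr_count = 3;		/* pretend three async writes are in flight */

static void fake_completion(void)
{
	/* In the driver, mhi_dev_transfer_completion_cb() decrements this. */
	if (pend_wr_count)
		pend_wr_count--;
}

int main(void)
{
	int count = 0;

	do {
		if (!pend_wr_count)
			break;
		usleep(CLOSE_TIMEOUT_US);
		fake_completion();
	} while (++count < CLOSE_TIMEOUT_COUNT);

	if (pend_wr_count)
		printf("%d writes still pending, closing anyway\n", pend_wr_count);
	else
		printf("all writes drained after %d sleep cycles\n", count);
	return 0;
}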
diff --git a/drivers/platform/msm/mhi_dev/mhi.h b/drivers/platform/msm/mhi_dev/mhi.h
index f792e3d786ce..2d5b7108c7e8 100644
--- a/drivers/platform/msm/mhi_dev/mhi.h
+++ b/drivers/platform/msm/mhi_dev/mhi.h
@@ -460,7 +460,7 @@ struct mhi_dev_channel {
uint32_t tre_bytes_left;
/* td size being read/written from/to so far */
uint32_t td_size;
- bool wr_request_active;
+ uint32_t pend_wr_count;
bool skip_td;
};
diff --git a/drivers/platform/msm/mhi_dev/mhi_uci.c b/drivers/platform/msm/mhi_dev/mhi_uci.c
index cbe0acee8ddf..ac8901689fa9 100644
--- a/drivers/platform/msm/mhi_dev/mhi_uci.c
+++ b/drivers/platform/msm/mhi_dev/mhi_uci.c
@@ -46,6 +46,10 @@
#define MHI_UCI_AT_CTRL_READ_TIMEOUT msecs_to_jiffies(1000)
#define MHI_UCI_WRITE_REQ_AVAIL_TIMEOUT msecs_to_jiffies(1000)
+#define MHI_UCI_RELEASE_TIMEOUT_MIN 5000
+#define MHI_UCI_RELEASE_TIMEOUT_MAX 5100
+#define MHI_UCI_RELEASE_TIMEOUT_COUNT 30
+
enum uci_dbg_level {
UCI_DBG_VERBOSE = 0x0,
UCI_DBG_INFO = 0x1,
@@ -960,39 +964,55 @@ static int mhi_uci_client_release(struct inode *mhi_inode,
struct file *file_handle)
{
struct uci_client *uci_handle = file_handle->private_data;
+ int count = 0;
if (!uci_handle)
return -EINVAL;
- if (atomic_sub_return(1, &uci_handle->ref_count) == 0) {
- uci_log(UCI_DBG_DBG,
- "Last client left, closing channel 0x%x\n",
- iminor(mhi_inode));
- if (atomic_read(&uci_handle->mhi_chans_open)) {
- atomic_set(&uci_handle->mhi_chans_open, 0);
+ if (atomic_sub_return(1, &uci_handle->ref_count)) {
+ uci_log(UCI_DBG_DBG, "Client close chan %d, ref count 0x%x\n",
+ iminor(mhi_inode),
+ atomic_read(&uci_handle->ref_count));
+ return 0;
+ }
- if (!(uci_handle->f_flags & O_SYNC))
- kfree(uci_handle->wreqs);
- mutex_lock(&uci_handle->out_chan_lock);
- mhi_dev_close_channel(uci_handle->out_handle);
- wake_up(&uci_handle->write_wq);
- mutex_unlock(&uci_handle->out_chan_lock);
+ uci_log(UCI_DBG_DBG,
+ "Last client left, closing channel 0x%x\n",
+ iminor(mhi_inode));
- mutex_lock(&uci_handle->in_chan_lock);
- mhi_dev_close_channel(uci_handle->in_handle);
- wake_up(&uci_handle->read_wq);
- mutex_unlock(&uci_handle->in_chan_lock);
+ do {
+ if (mhi_dev_channel_has_pending_write(uci_handle->out_handle))
+ usleep_range(MHI_UCI_RELEASE_TIMEOUT_MIN,
+ MHI_UCI_RELEASE_TIMEOUT_MAX);
+ else
+ break;
+ } while (++count < MHI_UCI_RELEASE_TIMEOUT_COUNT);
- }
- atomic_set(&uci_handle->read_data_ready, 0);
- atomic_set(&uci_handle->write_data_ready, 0);
- file_handle->private_data = NULL;
- } else {
- uci_log(UCI_DBG_DBG,
- "Client close chan %d, ref count 0x%x\n",
- iminor(mhi_inode),
- atomic_read(&uci_handle->ref_count));
+ if (count == MHI_UCI_RELEASE_TIMEOUT_COUNT) {
+ uci_log(UCI_DBG_DBG, "Channel %d has pending writes\n",
+ iminor(mhi_inode));
}
+
+ if (atomic_read(&uci_handle->mhi_chans_open)) {
+ atomic_set(&uci_handle->mhi_chans_open, 0);
+
+ if (!(uci_handle->f_flags & O_SYNC))
+ kfree(uci_handle->wreqs);
+ mutex_lock(&uci_handle->out_chan_lock);
+ mhi_dev_close_channel(uci_handle->out_handle);
+ wake_up(&uci_handle->write_wq);
+ mutex_unlock(&uci_handle->out_chan_lock);
+
+ mutex_lock(&uci_handle->in_chan_lock);
+ mhi_dev_close_channel(uci_handle->in_handle);
+ wake_up(&uci_handle->read_wq);
+ mutex_unlock(&uci_handle->in_chan_lock);
+ }
+
+ atomic_set(&uci_handle->read_data_ready, 0);
+ atomic_set(&uci_handle->write_data_ready, 0);
+ file_handle->private_data = NULL;
+
return 0;
}
diff --git a/drivers/power/supply/qcom/qg-core.h b/drivers/power/supply/qcom/qg-core.h
index 2e3bf40424a1..25bdbab9aae8 100644
--- a/drivers/power/supply/qcom/qg-core.h
+++ b/drivers/power/supply/qcom/qg-core.h
@@ -158,6 +158,7 @@ struct qpnp_qg {
int ibat_tcss_entry;
int soc_tcss;
int tcss_entry_count;
+ int max_fcc_limit_ma;
u32 fifo_done_count;
u32 wa_flags;
u32 seq_no;
diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c
index 6253bd6b92cf..e15ef2bdafd5 100644
--- a/drivers/power/supply/qcom/qpnp-qg.c
+++ b/drivers/power/supply/qcom/qpnp-qg.c
@@ -279,6 +279,7 @@ static int qg_store_soc_params(struct qpnp_qg *chip)
return rc;
}
+#define MAX_FIFO_CNT_FOR_ESR 50
static int qg_config_s2_state(struct qpnp_qg *chip,
enum s2_state requested_state, bool state_enable,
bool process_fifo)
@@ -336,6 +337,9 @@ static int qg_config_s2_state(struct qpnp_qg *chip,
return -EINVAL;
}
+ if (fifo_length)
+ qg_esr_mod_count = MAX_FIFO_CNT_FOR_ESR / fifo_length;
+
rc = qg_master_hold(chip, true);
if (rc < 0) {
pr_err("Failed to hold master, rc=%d\n", rc);
@@ -2935,6 +2939,13 @@ static int qg_load_battery_profile(struct qpnp_qg *chip)
chip->bp.fastchg_curr_ma = -EINVAL;
}
+ /*
+ * Update the max fcc values based on QG subtype including
+ * error margins.
+ */
+ chip->bp.fastchg_curr_ma = min(chip->max_fcc_limit_ma,
+ chip->bp.fastchg_curr_ma);
+
rc = of_property_read_u32(profile_node, "qcom,qg-batt-profile-ver",
&chip->bp.qg_profile_version);
if (rc < 0) {
@@ -3306,6 +3317,8 @@ static int qg_sanitize_sdam(struct qpnp_qg *chip)
}
#define ADC_CONV_DLY_512MS 0xA
+#define IBAT_5A_FCC_MA 4800
+#define IBAT_10A_FCC_MA 9600
static int qg_hw_init(struct qpnp_qg *chip)
{
int rc, temp;
@@ -3319,6 +3332,11 @@ static int qg_hw_init(struct qpnp_qg *chip)
return rc;
}
+ if (chip->qg_subtype == QG_ADC_IBAT_5A)
+ chip->max_fcc_limit_ma = IBAT_5A_FCC_MA;
+ else
+ chip->max_fcc_limit_ma = IBAT_10A_FCC_MA;
+
rc = qg_set_wa_flags(chip);
if (rc < 0) {
pr_err("Failed to update PMIC type flags, rc=%d\n", rc);
diff --git a/drivers/soc/qcom/cdsprm.c b/drivers/soc/qcom/cdsprm.c
index 73406b19ac14..d715e69a4384 100644
--- a/drivers/soc/qcom/cdsprm.c
+++ b/drivers/soc/qcom/cdsprm.c
@@ -543,8 +543,8 @@ static void process_delayed_rm_request(struct work_struct *work)
(curr_timestamp < timestamp)) {
if ((timestamp - curr_timestamp) <
(gcdsprm.qos_max_ms * SYS_CLK_TICKS_PER_MS))
- time_ms = (timestamp - curr_timestamp) /
- SYS_CLK_TICKS_PER_MS;
+ time_ms = div_u64((timestamp - curr_timestamp),
+ SYS_CLK_TICKS_PER_MS);
else
break;
gcdsprm.dt_state = CDSP_DELAY_THREAD_BEFORE_SLEEP;
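
The cdsprm change swaps a plain 64-bit division for div_u64(): on 32-bit ARM builds the kernel does not link the compiler's __aeabi_uldivmod helper, so u64 divides must go through div_u64()/div_s64() (the same reasoning applies to the div_s64() conversions in the WLED driver later in this patch). The userspace sketch below simply mirrors the arithmetic with explicit 64-bit types; the local div_u64() is a stand-in for the kernel helper, and the 19.2 MHz tick rate is an assumed value for illustration.

/* Userspace mirror of the converted expression in
 * process_delayed_rm_request(); explicit 64-bit types replace u64.
 */
#include <stdint.h>
#include <stdio.h>

#define SYS_CLK_TICKS_PER_MS 19200ULL	/* assumption: 19.2 MHz system clock */

static uint64_t div_u64(uint64_t dividend, uint32_t divisor)
{
	return dividend / divisor;	/* userspace stand-in for the kernel helper */
}

int main(void)
{
	uint64_t curr_timestamp = 1000ULL * SYS_CLK_TICKS_PER_MS;
	uint64_t timestamp = 1250ULL * SYS_CLK_TICKS_PER_MS;
	uint64_t time_ms = div_u64(timestamp - curr_timestamp,
				   SYS_CLK_TICKS_PER_MS);

	printf("deferred RM request fires in %llu ms\n",
	       (unsigned long long)time_ms);
	return 0;
}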
diff --git a/drivers/soc/qcom/glink_probe.c b/drivers/soc/qcom/glink_probe.c
index 36540a1b56db..7b4d99d2ef6d 100644
--- a/drivers/soc/qcom/glink_probe.c
+++ b/drivers/soc/qcom/glink_probe.c
@@ -24,6 +24,7 @@
#define GLINK_PROBE_LOG_PAGE_CNT 4
static void *glink_ilc;
+static DEFINE_MUTEX(ssr_lock);
#define GLINK_INFO(x, ...) \
do { \
@@ -126,14 +127,15 @@ static int glink_ssr_ssr_cb(struct notifier_block *this,
{
struct glink_ssr_nb *nb = container_of(this, struct glink_ssr_nb, nb);
struct glink_ssr *ssr = nb->ssr;
- struct device *dev = ssr->dev;
+ struct device *dev;
struct do_cleanup_msg msg;
int ret;
- if (!dev || !ssr->ept)
- return NOTIFY_DONE;
-
kref_get(&ssr->refcount);
+ mutex_lock(&ssr_lock);
+ dev = ssr->dev;
+ if (!dev || !ssr->ept)
+ goto out;
if (code == SUBSYS_AFTER_SHUTDOWN) {
ssr->seq_num++;
@@ -153,8 +155,7 @@ static int glink_ssr_ssr_cb(struct notifier_block *this,
if (ret) {
GLINK_ERR(dev, "fail to send do cleanup to %s %d\n",
nb->ssr_label, ret);
- kref_put(&ssr->refcount, glink_ssr_release);
- return NOTIFY_DONE;
+ goto out;
}
ret = wait_for_completion_timeout(&ssr->completion, HZ);
@@ -162,6 +163,8 @@ static int glink_ssr_ssr_cb(struct notifier_block *this,
GLINK_ERR(dev, "timeout waiting for cleanup resp\n");
}
+out:
+ mutex_unlock(&ssr_lock);
kref_put(&ssr->refcount, glink_ssr_release);
return NOTIFY_DONE;
}
@@ -274,10 +277,12 @@ static void glink_ssr_remove(struct rpmsg_device *rpdev)
{
struct glink_ssr *ssr = dev_get_drvdata(&rpdev->dev);
+ mutex_lock(&ssr_lock);
ssr->dev = NULL;
ssr->ept = NULL;
- dev_set_drvdata(&rpdev->dev, NULL);
+ mutex_unlock(&ssr_lock);
+ dev_set_drvdata(&rpdev->dev, NULL);
schedule_work(&ssr->unreg_work);
}
diff --git a/drivers/soc/qcom/qmi_rmnet.c b/drivers/soc/qcom/qmi_rmnet.c
index 6713013cd5dc..91c11d25fdd3 100644
--- a/drivers/soc/qcom/qmi_rmnet.c
+++ b/drivers/soc/qcom/qmi_rmnet.c
@@ -58,6 +58,7 @@ MODULE_PARM_DESC(rmnet_wq_frequency, "Frequency of PS check in ms");
#ifdef CONFIG_QCOM_QMI_DFC
static unsigned int qmi_rmnet_scale_factor = 5;
+static LIST_HEAD(qos_cleanup_list);
#endif
static int
@@ -139,8 +140,7 @@ qmi_rmnet_has_pending(struct qmi_info *qmi)
#ifdef CONFIG_QCOM_QMI_DFC
static void
-qmi_rmnet_clean_flow_list(struct qmi_info *qmi, struct net_device *dev,
- struct qos_info *qos)
+qmi_rmnet_clean_flow_list(struct qos_info *qos)
{
struct rmnet_bearer_map *bearer, *br_tmp;
struct rmnet_flow_map *itm, *fl_tmp;
@@ -510,10 +510,6 @@ struct rmnet_bearer_map *qmi_rmnet_get_bearer_noref(struct qos_info *qos_info,
}
#else
-static inline void qmi_rmnet_clean_flow_list(struct qos_info *qos)
-{
-}
-
static inline void
qmi_rmnet_update_flow_map(struct rmnet_flow_map *itm,
struct rmnet_flow_map *new_map)
@@ -923,19 +919,27 @@ void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id)
}
EXPORT_SYMBOL(qmi_rmnet_qos_init);
-void qmi_rmnet_qos_exit(struct net_device *dev, void *qos)
+void qmi_rmnet_qos_exit_pre(void *qos)
{
- void *port = rmnet_get_rmnet_port(dev);
- struct qmi_info *qmi = rmnet_get_qmi_pt(port);
- struct qos_info *qos_info = (struct qos_info *)qos;
-
- if (!qmi || !qos)
+ if (!qos)
return;
- qmi_rmnet_clean_flow_list(qmi, dev, qos_info);
- kfree(qos);
+ list_add(&((struct qos_info *)qos)->list, &qos_cleanup_list);
+}
+EXPORT_SYMBOL(qmi_rmnet_qos_exit_pre);
+
+void qmi_rmnet_qos_exit_post(void)
+{
+ struct qos_info *qos, *tmp;
+
+ synchronize_rcu();
+ list_for_each_entry_safe(qos, tmp, &qos_cleanup_list, list) {
+ list_del(&qos->list);
+ qmi_rmnet_clean_flow_list(qos);
+ kfree(qos);
+ }
}
-EXPORT_SYMBOL(qmi_rmnet_qos_exit);
+EXPORT_SYMBOL(qmi_rmnet_qos_exit_post);
#endif
#ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE
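
The qos teardown is now split into two phases: qmi_rmnet_qos_exit_pre() only unlinks the qos_info object and parks it on qos_cleanup_list, and qmi_rmnet_qos_exit_post() frees everything once synchronize_rcu() guarantees no reader still holds a pointer. A standalone analogue of that deferred-free pattern is sketched below; a minimal singly linked list replaces the kernel's list_head, and the grace-period wait is reduced to a comment.

/* Userspace analogue of the qos_exit_pre()/qos_exit_post() split: objects
 * are parked on a cleanup list first and only freed after readers are done.
 */
#include <stdio.h>
#include <stdlib.h>

struct qos_info {
	int mux_id;
	struct qos_info *next;
};

static struct qos_info *cleanup_list;

static void qos_exit_pre(struct qos_info *qos)
{
	/* Step 1: make the object unreachable to new lookups, defer the free. */
	qos->next = cleanup_list;
	cleanup_list = qos;
}

static void qos_exit_post(void)
{
	struct qos_info *qos, *tmp;

	/* Step 2: wait for in-flight readers (synchronize_rcu() in the driver),
	 * then walk the parked objects and free them.
	 */
	for (qos = cleanup_list; qos; qos = tmp) {
		tmp = qos->next;
		printf("freeing qos for mux_id %d\n", qos->mux_id);
		free(qos);
	}
	cleanup_list = NULL;
}

int main(void)
{
	struct qos_info *qos = malloc(sizeof(*qos));

	if (!qos)
		return 1;
	qos->mux_id = 1;
	qos_exit_pre(qos);
	qos_exit_post();
	return 0;
}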
diff --git a/drivers/soc/qcom/qmi_rmnet_i.h b/drivers/soc/qcom/qmi_rmnet_i.h
index cf00cc6c6a78..f788a3464400 100644
--- a/drivers/soc/qcom/qmi_rmnet_i.h
+++ b/drivers/soc/qcom/qmi_rmnet_i.h
@@ -71,6 +71,7 @@ struct mq_map {
};
struct qos_info {
+ struct list_head list;
u8 mux_id;
struct net_device *real_dev;
struct list_head flow_head;
diff --git a/drivers/soc/qcom/rq_stats.c b/drivers/soc/qcom/rq_stats.c
index 388382055964..279947b579e8 100644
--- a/drivers/soc/qcom/rq_stats.c
+++ b/drivers/soc/qcom/rq_stats.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2015, 2017-2018 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2015, 2017-2019 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -29,7 +29,7 @@ static void def_work_fn(struct work_struct *work)
static ssize_t show_def_timer_ms(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- int64_t diff;
+ uint64_t diff;
unsigned int udiff;
diff = ktime_to_ns(ktime_get()) - rq_info.def_start_time;
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 062458ecf76f..c4aad4e02b72 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -116,7 +116,7 @@
#define UART_CONSOLE_CORE2X_VOTE (960)
#define WAKEBYTE_TIMEOUT_MSEC (2000)
-#define WAIT_XFER_MAX_ITER (50)
+#define WAIT_XFER_MAX_ITER (2)
#define WAIT_XFER_MAX_TIMEOUT_US (10000)
#define WAIT_XFER_MIN_TIMEOUT_US (9000)
#define IPC_LOG_PWR_PAGES (6)
@@ -205,6 +205,8 @@ static int msm_geni_serial_runtime_resume(struct device *dev);
static int msm_geni_serial_runtime_suspend(struct device *dev);
static int uart_line_id;
static int msm_geni_serial_get_ver_info(struct uart_port *uport);
+static void msm_geni_serial_set_manual_flow(bool enable,
+ struct msm_geni_serial_port *port);
#define GET_DEV_PORT(uport) \
container_of(uport, struct msm_geni_serial_port, uport)
@@ -305,6 +307,12 @@ static void wait_for_transfers_inflight(struct uart_port *uport)
{
int iter = 0;
struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
+ unsigned int geni_status;
+
+ geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
+ /* Possible stop rx is called before this. */
+ if (!(geni_status & S_GENI_CMD_ACTIVE))
+ return;
while (iter < WAIT_XFER_MAX_ITER) {
if (check_transfers_inflight(uport)) {
@@ -630,6 +638,7 @@ static void msm_geni_serial_poll_cancel_tx(struct uart_port *uport)
static void msm_geni_serial_abort_rx(struct uart_port *uport)
{
unsigned int irq_clear = S_CMD_DONE_EN;
+ struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
geni_abort_s_cmd(uport->membase);
/* Ensure this goes through before polling. */
@@ -638,6 +647,8 @@ static void msm_geni_serial_abort_rx(struct uart_port *uport)
msm_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
S_GENI_CMD_ABORT, false);
geni_write_reg_nolog(irq_clear, uport->membase, SE_GENI_S_IRQ_CLEAR);
+ /* FORCE_DEFAULT makes RFR default high, hence set it low manually */
+ msm_geni_serial_set_manual_flow(true, port);
geni_write_reg(FORCE_DEFAULT, uport->membase, GENI_FORCE_DEFAULT_REG);
}
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 8e24a09ddbcf..df51c6b21fe5 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -1519,6 +1519,11 @@ static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
switch (op) {
case GSI_EP_OP_PREPARE_TRBS:
+ if (!dwc->pullups_connected) {
+ dbg_log_string("No Pullup\n");
+ return -ESHUTDOWN;
+ }
+
request = (struct usb_gsi_request *)op_data;
ret = gsi_prepare_trbs(ep, request);
break;
@@ -1527,12 +1532,22 @@ static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
gsi_free_trbs(ep, request);
break;
case GSI_EP_OP_CONFIG:
+ if (!dwc->pullups_connected) {
+ dbg_log_string("No Pullup\n");
+ return -ESHUTDOWN;
+ }
+
request = (struct usb_gsi_request *)op_data;
spin_lock_irqsave(&dwc->lock, flags);
gsi_configure_ep(ep, request);
spin_unlock_irqrestore(&dwc->lock, flags);
break;
case GSI_EP_OP_STARTXFER:
+ if (!dwc->pullups_connected) {
+ dbg_log_string("No Pullup\n");
+ return -ESHUTDOWN;
+ }
+
spin_lock_irqsave(&dwc->lock, flags);
ret = gsi_startxfer_for_ep(ep);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1545,6 +1560,11 @@ static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
gsi_store_ringbase_dbl_info(ep, request);
break;
case GSI_EP_OP_ENABLE_GSI:
+ if (!dwc->pullups_connected) {
+ dbg_log_string("No Pullup\n");
+ return -ESHUTDOWN;
+ }
+
gsi_enable(ep);
break;
case GSI_EP_OP_GET_CH_INFO:
@@ -1552,6 +1572,11 @@ static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
gsi_get_channel_info(ep, ch_info);
break;
case GSI_EP_OP_RING_DB:
+ if (!dwc->pullups_connected) {
+ dbg_log_string("No Pullup\n");
+ return -ESHUTDOWN;
+ }
+
request = (struct usb_gsi_request *)op_data;
gsi_ring_db(ep, request);
break;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 123f3cebd316..da0689d0e13d 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -835,19 +835,6 @@ static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
dbg_log_string("START for %s(%d)", dep->name, dep->number);
dwc3_stop_active_transfer(dwc, dep->number, true);
- /* - giveback all requests to gadget driver */
- while (!list_empty(&dep->started_list)) {
- req = next_request(&dep->started_list);
-
- dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
- }
-
- while (!list_empty(&dep->pending_list)) {
- req = next_request(&dep->pending_list);
-
- dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
- }
-
if (dep->number == 1 && dwc->ep0state != EP0_SETUP_PHASE) {
unsigned int dir;
@@ -862,6 +849,19 @@ static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
dwc->eps[1]->trb_enqueue = 0;
}
+ /* - giveback all requests to gadget driver */
+ while (!list_empty(&dep->started_list)) {
+ req = next_request(&dep->started_list);
+ if (req)
+ dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
+ }
+
+ while (!list_empty(&dep->pending_list)) {
+ req = next_request(&dep->pending_list);
+ if (req)
+ dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
+ }
+
dbg_log_string("DONE for %s(%d)", dep->name, dep->number);
}
@@ -2073,6 +2073,38 @@ static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
return 0;
}
+/**
+ * dwc3_device_core_soft_reset - Issues device core soft reset
+ * @dwc: pointer to our context structure
+ */
+static int dwc3_device_core_soft_reset(struct dwc3 *dwc)
+{
+ u32 reg;
+ int retries = 10;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg |= DWC3_DCTL_CSFTRST;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ do {
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ if (!(reg & DWC3_DCTL_CSFTRST))
+ goto done;
+
+ usleep_range(1000, 1100);
+ } while (--retries);
+
+ dev_err(dwc->dev, "%s timedout\n", __func__);
+
+ return -ETIMEDOUT;
+
+done:
+ /* phy sync delay as per data book */
+ msleep(50);
+
+ return 0;
+}
+
static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
{
u32 reg, reg1;
@@ -2239,6 +2271,10 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
ret = dwc3_gadget_run_stop(dwc, is_on, false);
spin_unlock_irqrestore(&dwc->lock, flags);
+ if (!is_on && ret == -ETIMEDOUT) {
+ dev_err(dwc->dev, "%s: Core soft reset...\n", __func__);
+ dwc3_device_core_soft_reset(dwc);
+ }
enable_irq(dwc->irq);
pm_runtime_mark_last_busy(dwc->dev);
@@ -2323,6 +2359,7 @@ static int dwc3_gadget_vbus_session(struct usb_gadget *_gadget, int is_active)
{
struct dwc3 *dwc = gadget_to_dwc(_gadget);
unsigned long flags;
+ int ret = 0;
if (dwc->dr_mode != USB_DR_MODE_OTG)
return -EPERM;
@@ -2345,9 +2382,9 @@ static int dwc3_gadget_vbus_session(struct usb_gadget *_gadget, int is_active)
* Both vbus was activated by otg and pullup was
* signaled by the gadget driver.
*/
- dwc3_gadget_run_stop(dwc, 1, false);
+ ret = dwc3_gadget_run_stop(dwc, 1, false);
} else {
- dwc3_gadget_run_stop(dwc, 0, false);
+ ret = dwc3_gadget_run_stop(dwc, 0, false);
}
}
@@ -2361,6 +2398,11 @@ static int dwc3_gadget_vbus_session(struct usb_gadget *_gadget, int is_active)
}
spin_unlock_irqrestore(&dwc->lock, flags);
+ if (!is_active && ret == -ETIMEDOUT) {
+ dev_err(dwc->dev, "%s: Core soft reset...\n", __func__);
+ dwc3_device_core_soft_reset(dwc);
+ }
+
return 0;
}
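
dwc3_device_core_soft_reset() follows the usual set-and-poll pattern: write DWC3_DCTL_CSFTRST, then poll up to ten times with a ~1 ms sleep until the controller clears the bit, returning -ETIMEDOUT otherwise; the pullup and vbus_session paths above invoke it only when run/stop itself timed out. The standalone sketch below mirrors that loop; FAKE_CSFTRST and the reg_read()/reg_write() helpers are stand-ins for the real register accessors, and the bit is cleared in software here so the example terminates.

/* Standalone analogue of dwc3_device_core_soft_reset(): set a self-clearing
 * control bit, poll with a bounded retry count, time out otherwise.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define FAKE_CSFTRST (1u << 30)		/* mirrors the role of DWC3_DCTL_CSFTRST */

static uint32_t fake_dctl;

static uint32_t reg_read(void)		{ return fake_dctl; }
static void reg_write(uint32_t v)	{ fake_dctl = v; }

static int core_soft_reset(void)
{
	int retries = 10;

	reg_write(reg_read() | FAKE_CSFTRST);

	do {
		/* Simulate the controller clearing the bit after a few polls. */
		if (retries < 8)
			fake_dctl &= ~FAKE_CSFTRST;

		if (!(reg_read() & FAKE_CSFTRST))
			return 0;

		usleep(1000);		/* kernel code uses usleep_range(1000, 1100) */
	} while (--retries);

	return -ETIMEDOUT;
}

int main(void)
{
	printf("soft reset %s\n", core_soft_reset() ? "timed out" : "done");
	return 0;
}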
diff --git a/drivers/video/backlight/qcom-spmi-wled.c b/drivers/video/backlight/qcom-spmi-wled.c
index 97a6cd1dd474..24409117db1d 100644
--- a/drivers/video/backlight/qcom-spmi-wled.c
+++ b/drivers/video/backlight/qcom-spmi-wled.c
@@ -1510,7 +1510,8 @@ static int wled_get_max_avail_current(struct led_classdev *led_cdev,
int *max_current)
{
struct wled *wled;
- int rc, ocv_mv, r_bat_mohms, i_bat_ma, i_sink_ma = 0, max_fsc_ma;
+ int rc, ocv_mv, r_bat_mohms, i_bat_ma;
+ int64_t max_fsc_ma, i_sink_ma = 0;
int64_t p_out_string, p_out, p_in, v_safe_mv, i_flash_ma, v_ph_mv;
if (!strcmp(led_cdev->name, "wled_switch"))
@@ -1556,7 +1557,7 @@ static int wled_get_max_avail_current(struct led_classdev *led_cdev,
p_out_string = ((wled->leds_per_string * V_LED_MV) + V_HDRM_MV) *
I_FLASH_MAX_MA;
p_out = p_out_string * wled->num_strings;
- p_in = (p_out * 1000) / EFF_FACTOR;
+ p_in = div_s64(p_out * 1000, EFF_FACTOR);
pr_debug("p_out_string: %lld, p_out: %lld, p_in: %lld\n", p_out_string,
p_out, p_in);
@@ -1568,8 +1569,9 @@ static int wled_get_max_avail_current(struct led_classdev *led_cdev,
return 0;
}
- i_flash_ma = p_in / v_safe_mv;
- v_ph_mv = ocv_mv - ((i_bat_ma + i_flash_ma) * r_bat_mohms) / 1000;
+ i_flash_ma = div_s64(p_in, v_safe_mv);
+ v_ph_mv = ocv_mv - div_s64(((i_bat_ma + i_flash_ma) * r_bat_mohms),
+ 1000);
pr_debug("v_safe: %lld, i_flash: %lld, v_ph: %lld\n", v_safe_mv,
i_flash_ma, v_ph_mv);
@@ -1578,19 +1580,22 @@ static int wled_get_max_avail_current(struct led_classdev *led_cdev,
if (wled->num_strings == 3 && wled->leds_per_string == 8) {
if (v_ph_mv < 3410) {
/* For 8s3p, I_sink(mA) = 25.396 * Vph(V) - 26.154 */
- i_sink_ma = (((25396 * v_ph_mv) / 1000) - 26154) / 1000;
+ i_sink_ma = div_s64((div_s64((25396 * v_ph_mv),
+ 1000) - 26154), 1000);
i_sink_ma *= wled->num_strings;
}
} else if (wled->num_strings == 3 && wled->leds_per_string == 6) {
if (v_ph_mv < 2800) {
/* For 6s3p, I_sink(mA) = 41.311 * Vph(V) - 52.334 */
- i_sink_ma = (((41311 * v_ph_mv) / 1000) - 52334) / 1000;
+ i_sink_ma = div_s64((div_s64((41311 * v_ph_mv),
+ 1000) - 52334), 1000);
i_sink_ma *= wled->num_strings;
}
} else if (wled->num_strings == 4 && wled->leds_per_string == 6) {
if (v_ph_mv < 3400) {
/* For 6s4p, I_sink(mA) = 26.24 * Vph(V) - 24.834 */
- i_sink_ma = (((26240 * v_ph_mv) / 1000) - 24834) / 1000;
+ i_sink_ma = div_s64((div_s64((26240 * v_ph_mv),
+ 1000) - 24834), 1000);
i_sink_ma *= wled->num_strings;
}
} else if (v_ph_mv < 3200) {
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index fdf40ca04b3c..1b18baa4978c 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -9,6 +9,7 @@
#include <linux/mm_types.h>
#include <asm/dma.h>
#include <asm/processor.h>
+#include <linux/memblock.h>
/*
* simple boot-time physical memory area allocator.
@@ -172,6 +173,9 @@ void __memblock_free_late(phys_addr_t base, phys_addr_t size);
static inline void * __init memblock_virt_alloc(
phys_addr_t size, phys_addr_t align)
{
+ memblock_dbg("%s: %llu bytes align=0x%llx %pF\n",
+ __func__, (u64)size, (u64)align, (void *)_RET_IP_);
+
return memblock_virt_alloc_try_nid(size, align, BOOTMEM_LOW_LIMIT,
BOOTMEM_ALLOC_ACCESSIBLE,
NUMA_NO_NODE);
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 64b5c1400bdf..31bceb7ea99c 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -22,6 +22,8 @@ struct mhi_timesync;
struct mhi_buf_info;
struct mhi_sfr_info;
+#define REG_WRITE_QUEUE_LEN 1024
+
/**
* enum MHI_CB - MHI callback
* @MHI_CB_IDLE: MHI entered idle state
@@ -149,6 +151,19 @@ struct image_info {
};
/**
+ * struct reg_write_info - offload reg write info
+ * @reg_addr: register address
+ * @val: value to be written to register
+ * @valid: entry is valid or not
+ */
+struct reg_write_info {
+ void __iomem *reg_addr;
+ u32 val;
+ bool valid;
+};
+
+/**
* struct mhi_controller - Master controller structure for external modem
* @dev: Device associated with this controller
* @of_node: DT that has MHI configuration information
@@ -313,6 +328,8 @@ struct mhi_controller {
void (*tsync_log)(struct mhi_controller *mhi_cntrl, u64 remote_time);
int (*bw_scale)(struct mhi_controller *mhi_cntrl,
struct mhi_link_info *link_info);
+ void (*write_reg)(struct mhi_controller *mhi_cntrl, void __iomem *base,
+ u32 offset, u32 val);
/* channel to control DTR messaging */
struct mhi_device *dtr_dev;
@@ -339,10 +356,18 @@ struct mhi_controller {
/* controller specific data */
const char *name;
+ bool power_down;
void *priv_data;
void *log_buf;
struct dentry *dentry;
struct dentry *parent;
+
+ /* for reg write offload */
+ struct workqueue_struct *offload_wq;
+ struct work_struct reg_write_work;
+ struct reg_write_info *reg_write_q;
+ atomic_t write_idx;
+ u32 read_idx;
};
/**
diff --git a/include/linux/msm_mhi_dev.h b/include/linux/msm_mhi_dev.h
index 3ec1e6d5e6e9..1f3b680453a4 100644
--- a/include/linux/msm_mhi_dev.h
+++ b/include/linux/msm_mhi_dev.h
@@ -195,6 +195,13 @@ int mhi_dev_write_channel(struct mhi_req *wreq);
int mhi_dev_channel_isempty(struct mhi_dev_client *handle);
/**
+* mhi_dev_channel_has_pending_write() - Checks if there are any pending writes
+* to be completed on inbound channel
+* @handle: Client Handle issued during mhi_dev_open_channel
+*/
+bool mhi_dev_channel_has_pending_write(struct mhi_dev_client *handle);
+
+/**
* mhi_ctrl_state_info() - Provide MHI state info
* @idx: Channel number idx. Look at channel_state_info and
* pass the index for the corresponding channel.
@@ -244,6 +251,12 @@ static inline int mhi_dev_channel_isempty(struct mhi_dev_client *handle)
return -EINVAL;
};
+static inline bool mhi_dev_channel_has_pending_write
+ (struct mhi_dev_client *handle)
+{
+ return false;
+}
+
static inline int mhi_ctrl_state_info(uint32_t idx, uint32_t *info)
{
return -EINVAL;
diff --git a/include/linux/msm_pcie.h b/include/linux/msm_pcie.h
index 58d4fb22733d..a5020e767d13 100644
--- a/include/linux/msm_pcie.h
+++ b/include/linux/msm_pcie.h
@@ -82,6 +82,26 @@ static inline int msm_msi_init(struct device *dev)
#ifdef CONFIG_PCI_MSM
/**
+ * msm_pcie_allow_l1 - allow PCIe link to re-enter L1
+ * @pci_dev: client's pci device structure
+ *
+ * This function gives PCIe clients the control to allow the link to re-enter
+ * L1. Should only be used after msm_pcie_prevent_l1 has been called.
+ */
+void msm_pcie_allow_l1(struct pci_dev *pci_dev);
+
+/**
+ * msm_pcie_request_not_enter_l1 - keeps PCIe link out of L1
+ * @pci_dev: client's pci device structure
+ *
+ * This function gives PCIe clients the control to exit and prevent the link
+ * from entering L1.
+ *
+ * Return 0 on success, negative value on error
+ */
+int msm_pcie_prevent_l1(struct pci_dev *pci_dev);
+
+/**
* msm_pcie_set_link_bandwidth - updates the number of lanes and speed of PCIe
* link.
* @pci_dev: client's pci device structure
@@ -218,6 +238,15 @@ static inline int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr,
return -ENODEV;
}
+static inline void msm_pcie_allow_l1(struct pci_dev *pci_dev)
+{
+}
+
+static inline int msm_pcie_prevent_l1(struct pci_dev *pci_dev)
+{
+ return -ENODEV;
+}
+
static inline int msm_pcie_l1ss_timeout_disable(struct pci_dev *pci_dev)
{
return -ENODEV;
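
msm_pcie_prevent_l1() and msm_pcie_allow_l1() are meant to be used as a pair: a client takes the link out of L1 before latency-critical MMIO work and lets it re-enter L1 afterwards. The sketch below shows that pairing; the stub bodies and the reduced struct pci_dev exist only so the example compiles standalone and are not the driver's implementation.

/* Pairing sketch for the new L1-control hooks. The stubs stand in for the
 * real PCIe driver; struct pci_dev is reduced to a tag for illustration.
 */
#include <stdio.h>

struct pci_dev { const char *name; };

static int msm_pcie_prevent_l1(struct pci_dev *pci_dev)
{
	printf("%s: link held out of L1\n", pci_dev->name);
	return 0;			/* real driver: 0 on success, -errno on error */
}

static void msm_pcie_allow_l1(struct pci_dev *pci_dev)
{
	printf("%s: link may re-enter L1\n", pci_dev->name);
}

int main(void)
{
	struct pci_dev dev = { .name = "mhi-endpoint" };

	if (msm_pcie_prevent_l1(&dev))
		return 1;

	/* ... latency-sensitive doorbell/MMIO work goes here ... */

	msm_pcie_allow_l1(&dev);
	return 0;
}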
diff --git a/include/soc/qcom/qmi_rmnet.h b/include/soc/qcom/qmi_rmnet.h
index eb7fc1c80ff0..dbfdca1006de 100644
--- a/include/soc/qcom/qmi_rmnet.h
+++ b/include/soc/qcom/qmi_rmnet.h
@@ -53,7 +53,8 @@ qmi_rmnet_all_flows_enabled(struct net_device *dev)
#ifdef CONFIG_QCOM_QMI_DFC
void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id);
-void qmi_rmnet_qos_exit(struct net_device *dev, void *qos);
+void qmi_rmnet_qos_exit_pre(void *qos);
+void qmi_rmnet_qos_exit_post(void);
void qmi_rmnet_burst_fc_check(struct net_device *dev,
int ip_type, u32 mark, unsigned int len);
int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb);
@@ -64,7 +65,11 @@ qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id)
return NULL;
}
-static inline void qmi_rmnet_qos_exit(struct net_device *dev, void *qos)
+static inline void qmi_rmnet_qos_exit_pre(void *qos)
+{
+}
+
+static inline void qmi_rmnet_qos_exit_post(void)
{
}
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index afefb53d9aaf..55bae183af1c 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -123,6 +123,7 @@
#define IPA_IOCTL_FNR_COUNTER_DEALLOC 75
#define IPA_IOCTL_FNR_COUNTER_QUERY 76
#define IPA_IOCTL_GET_NAT_IN_SRAM_INFO 77
+#define IPA_IOCTL_APP_CLOCK_VOTE 78
/**
* max size of the header to be inserted
@@ -2814,6 +2815,10 @@ struct ipa_odl_modem_config {
IPA_IOCTL_GET_NAT_IN_SRAM_INFO, \
struct ipa_nat_in_sram_info)
+#define IPA_IOC_APP_CLOCK_VOTE _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_APP_CLOCK_VOTE, \
+ uint32_t)
+
/*
* unique magic number of the Tethering bridge ioctls
*/
@@ -2919,6 +2924,18 @@ struct ipa_nat_in_sram_info {
uint32_t best_nat_in_sram_size_rqst;
};
+/**
+ * enum ipa_app_clock_vote_type
+ *
+ * The types of votes that can be accepted by the
+ * IPA_IOC_APP_CLOCK_VOTE ioctl
+ */
+enum ipa_app_clock_vote_type {
+ IPA_APP_CLK_DEVOTE = 0,
+ IPA_APP_CLK_VOTE = 1,
+ IPA_APP_CLK_RESET_VOTE = 2,
+};
+
#define TETH_BRIDGE_IOC_SET_BRIDGE_MODE _IOW(TETH_BRIDGE_IOC_MAGIC, \
TETH_BRIDGE_IOCTL_SET_BRIDGE_MODE, \
struct teth_ioc_set_bridge_mode *)
diff --git a/include/uapi/linux/msm_npu.h b/include/uapi/linux/msm_npu.h
index 9e88b6aaee93..91118ffa4238 100644
--- a/include/uapi/linux/msm_npu.h
+++ b/include/uapi/linux/msm_npu.h
@@ -73,6 +73,8 @@
#define MSM_NPU_PROP_ID_PERF_MODE_MAX (MSM_NPU_PROP_ID_START + 2)
#define MSM_NPU_PROP_ID_DRV_VERSION (MSM_NPU_PROP_ID_START + 3)
#define MSM_NPU_PROP_ID_HARDWARE_VERSION (MSM_NPU_PROP_ID_START + 4)
+#define MSM_NPU_PROP_ID_IPC_QUEUE_INFO (MSM_NPU_PROP_ID_START + 5)
+#define MSM_NPU_PROP_ID_DRV_FEATURE (MSM_NPU_PROP_ID_START + 6)
#define MSM_NPU_FW_PROP_ID_START 0x1000
#define MSM_NPU_PROP_ID_DCVS_MODE (MSM_NPU_FW_PROP_ID_START + 0)
@@ -81,6 +83,9 @@
#define MSM_NPU_PROP_ID_HW_VERSION (MSM_NPU_FW_PROP_ID_START + 3)
#define MSM_NPU_PROP_ID_FW_VERSION (MSM_NPU_FW_PROP_ID_START + 4)
+/* features supported by driver */
+#define MSM_NPU_FEATURE_MULTI_EXECUTE 0x1
+#define MSM_NPU_FEATURE_ASYNC_EXECUTE 0x2
#define PROP_PARAM_MAX_SIZE 8
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index 8d1a5bb2687b..e83c4185ac6c 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -173,6 +173,7 @@ enum sndrv_compress_encoder {
SNDRV_COMPRESS_ENABLE_ADJUST_SESSION_CLOCK = 10,
SNDRV_COMPRESS_ADJUST_SESSION_CLOCK = 11,
SNDRV_COMPRESS_LATENCY_MODE = 12,
+ SNDRV_COMPRESS_IN_TTP_OFFSET = 13,
};
#define SNDRV_COMPRESS_MIN_BLK_SIZE SNDRV_COMPRESS_MIN_BLK_SIZE
@@ -186,6 +187,7 @@ enum sndrv_compress_encoder {
SNDRV_COMPRESS_ENABLE_ADJUST_SESSION_CLOCK
#define SNDRV_COMPRESS_ADJUST_SESSION_CLOCK SNDRV_COMPRESS_ADJUST_SESSION_CLOCK
#define SNDRV_COMPRESS_LATENCY_MODE SNDRV_COMPRESS_LATENCY_MODE
+#define SNDRV_COMPRESS_IN_TTP_OFFSET SNDRV_COMPRESS_IN_TTP_OFFSET
/**
* struct snd_compr_metadata - compressed stream metadata
diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h
index a9f8cc24a357..ac164e037a62 100644
--- a/include/uapi/sound/compress_params.h
+++ b/include/uapi/sound/compress_params.h
@@ -424,6 +424,13 @@ struct snd_dec_aptx {
__u32 nap;
};
+/** struct snd_dec_dsd - codec for DSD format
+ * @blk_size: DSD channel block size
+ */
+struct snd_dec_dsd {
+ __u32 blk_size;
+};
+
/** struct snd_dec_pcm - codec options for PCM format
* @num_channels: Number of channels
* @ch_map: Channel map for the above corresponding channels
@@ -452,6 +459,7 @@ union snd_codec_options {
struct snd_dec_thd truehd;
struct snd_dec_pcm pcm_dec;
struct snd_dec_amrwb_plus amrwbplus;
+ struct snd_dec_dsd dsd_dec;
};
/** struct snd_codec_desc - description of codec capabilities
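
The new snd_dec_dsd member extends union snd_codec_options, so userspace selects the DSD block size the same way it passes other decoder options, via snd_codec.options. A minimal sketch follows; it assumes this tree's updated <sound/compress_params.h> UAPI header is installed, the 4096-byte block size is a placeholder, and the actual set-params ioctl call is omitted.

/* Minimal userspace sketch of filling the new DSD decoder option.
 * Only the option field is shown; handing the struct to the driver via the
 * compress-offload set-params path is outside this sketch.
 */
#include <stdio.h>
#include <string.h>
#include <sound/compress_params.h>

int main(void)
{
	struct snd_codec codec;

	memset(&codec, 0, sizeof(codec));
	codec.options.dsd_dec.blk_size = 4096;	/* placeholder channel block size */

	printf("DSD block size requested: %u\n",
	       codec.options.dsd_dec.blk_size);
	return 0;
}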
diff --git a/mm/memblock.c b/mm/memblock.c
index 0e1b8f2b2aeb..621726756042 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1161,6 +1161,8 @@ phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
phys_addr_t start, phys_addr_t end,
ulong flags)
{
+ memblock_dbg("%s: size: %llu align: %llu %pF\n",
+ __func__, (u64)size, (u64)align, (void *)_RET_IP_);
return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
flags);
}
@@ -1190,6 +1192,8 @@ again:
phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
+ memblock_dbg("%s: size: %llu align: %llu %pF\n",
+ __func__, (u64)size, (u64)align, (void *)_RET_IP_);
return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
MEMBLOCK_NONE);
}
@@ -1209,6 +1213,8 @@ phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys
phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
+ memblock_dbg("%s: size: %llu align: %llu %pF\n",
+ __func__, (u64)size, (u64)align, (void *)_RET_IP_);
return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 491ede7d156e..da1c5d34fa9e 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -663,40 +663,38 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass,
struct avc_node *pos, *node = NULL;
int hvalue;
unsigned long flag;
+ spinlock_t *lock;
+ struct hlist_head *head;
if (avc_latest_notif_update(avd->seqno, 1))
- goto out;
+ return NULL;
node = avc_alloc_node();
- if (node) {
- struct hlist_head *head;
- spinlock_t *lock;
- int rc = 0;
-
- hvalue = avc_hash(ssid, tsid, tclass);
- avc_node_populate(node, ssid, tsid, tclass, avd);
- rc = avc_xperms_populate(node, xp_node);
- if (rc) {
- kmem_cache_free(avc_node_cachep, node);
- return NULL;
- }
- head = &avc_cache.slots[hvalue];
- lock = &avc_cache.slots_lock[hvalue];
+ if (!node)
+ return NULL;
- spin_lock_irqsave(lock, flag);
- hlist_for_each_entry(pos, head, list) {
- if (pos->ae.ssid == ssid &&
- pos->ae.tsid == tsid &&
- pos->ae.tclass == tclass) {
- avc_node_replace(node, pos);
- goto found;
- }
+ avc_node_populate(node, ssid, tsid, tclass, avd);
+ if (avc_xperms_populate(node, xp_node)) {
+ avc_node_kill(node);
+ return NULL;
+ }
+
+ hvalue = avc_hash(ssid, tsid, tclass);
+ head = &avc_cache.slots[hvalue];
+ lock = &avc_cache.slots_lock[hvalue];
+ spin_lock_irqsave(lock, flag);
+ hlist_for_each_entry(pos, head, list) {
+ if (pos->ae.ssid == ssid &&
+ pos->ae.tsid == tsid &&
+ pos->ae.tclass == tclass) {
+ avc_node_replace(node, pos);
+ goto found;
}
- hlist_add_head_rcu(&node->list, head);
-found:
- spin_unlock_irqrestore(lock, flag);
}
-out:
+
+ hlist_add_head_rcu(&node->list, head);
+found:
+ spin_unlock_irqrestore(lock, flag);
return node;
}
@@ -865,7 +863,7 @@ static int avc_update_node(u32 event, u32 perms, u8 driver, u8 xperm, u32 ssid,
if (orig->ae.xp_node) {
rc = avc_xperms_populate(node, orig->ae.xp_node);
if (rc) {
- kmem_cache_free(avc_node_cachep, node);
+ avc_node_kill(node);
goto out_unlock;
}
}