Diffstat (limited to 'drivers/remoteproc')
-rw-r--r--  drivers/remoteproc/Kconfig                       170
-rw-r--r--  drivers/remoteproc/Makefile                       11
-rw-r--r--  drivers/remoteproc/imx_dsp_rproc.c              1121
-rw-r--r--  drivers/remoteproc/imx_rproc.c                   604
-rw-r--r--  drivers/remoteproc/imx_rproc.h                    39
-rw-r--r--  drivers/remoteproc/ingenic_rproc.c               254
-rw-r--r--  drivers/remoteproc/keystone_remoteproc.c           4
-rw-r--r--  drivers/remoteproc/meson_mx_ao_arc.c             261
-rw-r--r--  drivers/remoteproc/mtk_common.h                  141
-rw-r--r--  drivers/remoteproc/mtk_scp.c                     994
-rw-r--r--  drivers/remoteproc/mtk_scp_ipi.c                 218
-rw-r--r--  drivers/remoteproc/omap_remoteproc.c            1197
-rw-r--r--  drivers/remoteproc/omap_remoteproc.h              50
-rw-r--r--  drivers/remoteproc/pru_rproc.c                   919
-rw-r--r--  drivers/remoteproc/pru_rproc.h                    46
-rw-r--r--  drivers/remoteproc/qcom_common.c                 296
-rw-r--r--  drivers/remoteproc/qcom_common.h                  18
-rw-r--r--  drivers/remoteproc/qcom_pil_info.c               129
-rw-r--r--  drivers/remoteproc/qcom_pil_info.h                 9
-rw-r--r--  drivers/remoteproc/qcom_q6v5.c                   109
-rw-r--r--  drivers/remoteproc/qcom_q6v5.h                    14
-rw-r--r--  drivers/remoteproc/qcom_q6v5_adsp.c              269
-rw-r--r--  drivers/remoteproc/qcom_q6v5_mss.c               939
-rw-r--r--  drivers/remoteproc/qcom_q6v5_pas.c               568
-rw-r--r--  drivers/remoteproc/qcom_q6v5_wcss.c              620
-rw-r--r--  drivers/remoteproc/qcom_sysmon.c                 234
-rw-r--r--  drivers/remoteproc/qcom_wcnss.c                  190
-rw-r--r--  drivers/remoteproc/qcom_wcnss.h                    4
-rw-r--r--  drivers/remoteproc/qcom_wcnss_iris.c             119
-rw-r--r--  drivers/remoteproc/rcar_rproc.c                  224
-rw-r--r--  drivers/remoteproc/remoteproc_cdev.c             126
-rw-r--r--  drivers/remoteproc/remoteproc_core.c            1147
-rw-r--r--  drivers/remoteproc/remoteproc_coredump.c         469
-rw-r--r--  drivers/remoteproc/remoteproc_debugfs.c          152
-rw-r--r--  drivers/remoteproc/remoteproc_elf_helpers.h      122
-rw-r--r--  drivers/remoteproc/remoteproc_elf_loader.c       192
-rw-r--r--  drivers/remoteproc/remoteproc_internal.h          82
-rw-r--r--  drivers/remoteproc/remoteproc_sysfs.c            189
-rw-r--r--  drivers/remoteproc/remoteproc_virtio.c            39
-rw-r--r--  drivers/remoteproc/st_remoteproc.c                 2
-rw-r--r--  drivers/remoteproc/st_slim_rproc.c                 6
-rw-r--r--  drivers/remoteproc/stm32_rproc.c                 288
-rw-r--r--  drivers/remoteproc/ti_k3_dsp_remoteproc.c        908
-rw-r--r--  drivers/remoteproc/ti_k3_r5_remoteproc.c        1797
-rw-r--r--  drivers/remoteproc/ti_sci_proc.h                 104
-rw-r--r--  drivers/remoteproc/wkup_m3_rproc.c                46
46 files changed, 14363 insertions(+), 1077 deletions(-)
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index 94afdde4bc9f..166019786653 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -14,18 +14,59 @@ config REMOTEPROC
if REMOTEPROC
+config REMOTEPROC_CDEV
+ bool "Remoteproc character device interface"
+ help
+ Say y here to have a character device interface for the remoteproc
+ framework. Userspace can boot/shutdown remote processors through
+ this interface.
+
+ It's safe to say N if you don't want to use this interface.
+
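
For illustration, a minimal userspace sketch of driving this character device. This is a hedged example, not part of the patch: the node name /dev/remoteproc0 is an assumption, and the interface accepts the strings "start" and "stop" written to the device.

/*
 * Userspace sketch, assuming the cdev appears as /dev/remoteproc0
 * and accepts "start"/"stop" writes to boot or shut down the core.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/remoteproc0", O_RDWR);

	if (fd < 0)
		return 1;
	write(fd, "start", strlen("start"));	/* boot the remote core */
	/* ... communicate with the remote processor ... */
	write(fd, "stop", strlen("stop"));	/* shut it down again */
	close(fd);
	return 0;
}
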
config IMX_REMOTEPROC
- tristate "IMX6/7 remoteproc support"
+ tristate "i.MX remoteproc support"
+ depends on ARCH_MXC
+ depends on HAVE_ARM_SMCCC
+ select MAILBOX
+ help
+ Say y here to support i.MX's remote processors via the remote
+ processor framework.
+
+ It's safe to say N here.
+
+config IMX_DSP_REMOTEPROC
+ tristate "i.MX DSP remoteproc support"
depends on ARCH_MXC
+ depends on HAVE_ARM_SMCCC
+ select MAILBOX
help
- Say y here to support iMX's remote processors (Cortex M4
- on iMX7D) via the remote processor framework.
+ Say y here to support i.MX's DSP remote processors via the remote
+ processor framework.
+
+ It's safe to say N here.
+
+config INGENIC_VPU_RPROC
+ tristate "Ingenic JZ47xx VPU remoteproc support"
+ depends on MIPS || COMPILE_TEST
+ help
+ Say y or m here to support the VPU in the JZ47xx SoCs from Ingenic.
+
+ This can be either built-in or a loadable module.
+ If unsure say N.
+
+config MTK_SCP
+ tristate "Mediatek SCP support"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select RPMSG_MTK_SCP
+ help
+ Say y here to support MediaTek's System Companion Processor (SCP) via
+ the remote processor framework.
It's safe to say N here.
config OMAP_REMOTEPROC
tristate "OMAP remoteproc support"
- depends on ARCH_OMAP4 || SOC_OMAP5
+ depends on ARCH_OMAP4 || SOC_OMAP5 || SOC_DRA7XX
depends on OMAP_IOMMU
select MAILBOX
select OMAP2PLUS_MBOX
@@ -42,6 +83,18 @@ config OMAP_REMOTEPROC
It's safe to say N here if you're not interested in multimedia
offloading or just want a bare minimum kernel.
+config OMAP_REMOTEPROC_WATCHDOG
+ bool "OMAP remoteproc watchdog timer"
+ depends on OMAP_REMOTEPROC
+ default n
+ help
+ Say Y here to enable the watchdog timer for remote processors.
+
+ This option controls the watchdog functionality for the remote
+ processors in OMAP. Dedicated OMAP DMTimers are used by the remote
+ processors and trigger the timer interrupt upon watchdog
+ detection.
+
config WKUP_M3_RPROC
tristate "AMx3xx Wakeup M3 remoteproc support"
depends on SOC_AM33XX || SOC_AM43XX
@@ -85,6 +138,32 @@ config KEYSTONE_REMOTEPROC
It's safe to say N here if you're not interested in the Keystone
DSPs or just want to use a bare minimum kernel.
+config MESON_MX_AO_ARC_REMOTEPROC
+ tristate "Amlogic Meson6/8/8b/8m2 AO ARC remote processor support"
+ depends on HAS_IOMEM
+ depends on (ARM && ARCH_MESON) || COMPILE_TEST
+ select GENERIC_ALLOCATOR
+ help
+ Say m or y here to have support for the AO ARC remote processor
+ on Amlogic Meson6/Meson8/Meson8b/Meson8m2 SoCs. This is
+ typically used for system suspend.
+ If unsure say N.
+
+config PRU_REMOTEPROC
+ tristate "TI PRU remoteproc support"
+ depends on TI_PRUSS
+ default TI_PRUSS
+ help
+ Support for the TI PRU remote processors present within a PRU-ICSS
+ subsystem, exposed via the remote processor framework.
+
+ Say Y or M here to support the Programmable Realtime Unit (PRU)
+ processors on various TI SoCs. It's safe to say N here if you're
+ not interested in the PRU or if you are unsure.
+
+config QCOM_PIL_INFO
+ tristate
+
config QCOM_RPROC_COMMON
tristate
@@ -97,65 +176,83 @@ config QCOM_Q6V5_ADSP
tristate "Qualcomm Technology Inc ADSP Peripheral Image Loader"
depends on OF && ARCH_QCOM
depends on QCOM_SMEM
- depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
+ depends on RPMSG_QCOM_SMD || RPMSG_QCOM_SMD=n
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SYSMON || QCOM_SYSMON=n
+ depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
select MFD_SYSCON
+ select QCOM_PIL_INFO
select QCOM_MDT_LOADER
select QCOM_Q6V5_COMMON
select QCOM_RPROC_COMMON
help
Say y here to support the Peripheral Image Loader
- for the Qualcomm Technology Inc. ADSP remote processors.
+ for the non-TrustZone part of Qualcomm Technology Inc. ADSP and CDSP
+ remote processors. The TrustZone part is handled by the
+ QCOM_Q6V5_PAS driver.
config QCOM_Q6V5_MSS
tristate "Qualcomm Hexagon V5 self-authenticating modem subsystem support"
depends on OF && ARCH_QCOM
depends on QCOM_SMEM
- depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
+ depends on RPMSG_QCOM_SMD || RPMSG_QCOM_SMD=n
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SYSMON || QCOM_SYSMON=n
+ depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
select MFD_SYSCON
select QCOM_MDT_LOADER
+ select QCOM_PIL_INFO
select QCOM_Q6V5_COMMON
select QCOM_RPROC_COMMON
select QCOM_SCM
help
Say y here to support the Qualcomm self-authenticating modem
- subsystem based on Hexagon V5.
+ subsystem based on Hexagon V5. The TrustZone-based system is
+ handled by the QCOM_Q6V5_PAS driver.
config QCOM_Q6V5_PAS
tristate "Qualcomm Hexagon v5 Peripheral Authentication Service support"
depends on OF && ARCH_QCOM
depends on QCOM_SMEM
- depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
+ depends on RPMSG_QCOM_SMD || RPMSG_QCOM_SMD=n
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SYSMON || QCOM_SYSMON=n
+ depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
select MFD_SYSCON
+ select QCOM_PIL_INFO
select QCOM_MDT_LOADER
select QCOM_Q6V5_COMMON
select QCOM_RPROC_COMMON
select QCOM_SCM
help
- Say y here to support the TrustZone based Peripherial Image Loader
+ Say y here to support the TrustZone based Peripheral Image Loader
for the Qualcomm Hexagon v5 based remote processors. This is commonly
- used to control subsystems such as ADSP, Compute and Sensor.
+ used to control subsystems such as ADSP (Audio DSP),
+ CDSP (Compute DSP), MPSS (Modem Peripheral SubSystem), and
+ SLPI (Sensor Low Power Island).
config QCOM_Q6V5_WCSS
tristate "Qualcomm Hexagon based WCSS Peripheral Image Loader"
depends on OF && ARCH_QCOM
depends on QCOM_SMEM
- depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
+ depends on RPMSG_QCOM_SMD || RPMSG_QCOM_SMD=n
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SYSMON || QCOM_SYSMON=n
+ depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
select MFD_SYSCON
select QCOM_MDT_LOADER
+ select QCOM_PIL_INFO
select QCOM_Q6V5_COMMON
select QCOM_RPROC_COMMON
select QCOM_SCM
help
Say y here to support the Qualcomm Peripheral Image Loader for the
- Hexagon V5 based WCSS remote processors.
+ Hexagon V5 based WCSS remote processors on e.g. IPQ8074. This is
+ a non-TrustZone wireless subsystem.
config QCOM_SYSMON
tristate "Qualcomm sysmon driver"
@@ -175,16 +272,31 @@ config QCOM_SYSMON
config QCOM_WCNSS_PIL
tristate "Qualcomm WCNSS Peripheral Image Loader"
depends on OF && ARCH_QCOM
- depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n)
+ depends on RPMSG_QCOM_SMD || RPMSG_QCOM_SMD=n
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SMEM
depends on QCOM_SYSMON || QCOM_SYSMON=n
+ depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
select QCOM_MDT_LOADER
+ select QCOM_PIL_INFO
select QCOM_RPROC_COMMON
select QCOM_SCM
help
- Say y here to support the Peripheral Image Loader for the Qualcomm
- Wireless Connectivity Subsystem.
+ Say y here to support the Peripheral Image Loader for loading WCNSS
+ firmware and booting the core on e.g. MSM8974 and MSM8916. The firmware is
+ verified and booted with the help of the Peripheral Authentication
+ System (PAS) in TrustZone.
+
+config RCAR_REMOTEPROC
+ tristate "Renesas R-Car Gen3 remoteproc support"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ help
+ Say y here to support the R-Car realtime processor via the
+ remote processor framework. An ELF firmware can be loaded
+ through the remoteproc sysfs entries, and the remote processor
+ can be started and stopped.
+ This can be either built-in or a loadable module.
+ If compiled as module (M), the module name is rcar_rproc.
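
As a companion to the help text above, a small userspace sketch of the sysfs flow it describes. The instance index (remoteproc0) and firmware file name are assumptions for illustration:

/*
 * Sketch of driving the remoteproc sysfs interface from userspace:
 * pick the ELF image under /lib/firmware, then boot the core.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static void write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return;
	(void)write(fd, val, strlen(val));	/* best-effort in this sketch */
	close(fd);
}

int main(void)
{
	write_str("/sys/class/remoteproc/remoteproc0/firmware", "rcar_fw.elf");
	write_str("/sys/class/remoteproc/remoteproc0/state", "start");
	return 0;
}
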
config ST_REMOTEPROC
tristate "ST remoteproc support"
@@ -214,6 +326,32 @@ config STM32_RPROC
This can be either built-in or a loadable module.
+config TI_K3_DSP_REMOTEPROC
+ tristate "TI K3 DSP remoteproc support"
+ depends on ARCH_K3
+ select MAILBOX
+ select OMAP2PLUS_MBOX
+ help
+ Say m here to support TI's C66x and C71x DSP remote processor
+ subsystems on various SoCs of the TI K3 family through the
+ remote processor framework.
+
+ It's safe to say N here if you're not interested in utilizing
+ the DSP slave processors.
+
+config TI_K3_R5_REMOTEPROC
+ tristate "TI K3 R5 remoteproc support"
+ depends on ARCH_K3
+ select MAILBOX
+ select OMAP2PLUS_MBOX
+ help
+ Say m here to support TI's R5F remote processor subsystems
+ on various SoCs of the TI K3 family through the remote
+ processor framework.
+
+ It's safe to say N here if you're not interested in utilizing
+ a slave processor.
+
endif # REMOTEPROC
endmenu
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 00f09e658cb3..5478c7cb9e07 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -5,15 +5,23 @@
obj-$(CONFIG_REMOTEPROC) += remoteproc.o
remoteproc-y := remoteproc_core.o
+remoteproc-y += remoteproc_coredump.o
remoteproc-y += remoteproc_debugfs.o
remoteproc-y += remoteproc_sysfs.o
remoteproc-y += remoteproc_virtio.o
remoteproc-y += remoteproc_elf_loader.o
+obj-$(CONFIG_REMOTEPROC_CDEV) += remoteproc_cdev.o
obj-$(CONFIG_IMX_REMOTEPROC) += imx_rproc.o
+obj-$(CONFIG_IMX_DSP_REMOTEPROC) += imx_dsp_rproc.o
+obj-$(CONFIG_INGENIC_VPU_RPROC) += ingenic_rproc.o
+obj-$(CONFIG_MTK_SCP) += mtk_scp.o mtk_scp_ipi.o
obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o
obj-$(CONFIG_WKUP_M3_RPROC) += wkup_m3_rproc.o
obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o
obj-$(CONFIG_KEYSTONE_REMOTEPROC) += keystone_remoteproc.o
+obj-$(CONFIG_MESON_MX_AO_ARC_REMOTEPROC) += meson_mx_ao_arc.o
+obj-$(CONFIG_PRU_REMOTEPROC) += pru_rproc.o
+obj-$(CONFIG_QCOM_PIL_INFO) += qcom_pil_info.o
obj-$(CONFIG_QCOM_RPROC_COMMON) += qcom_common.o
obj-$(CONFIG_QCOM_Q6V5_COMMON) += qcom_q6v5.o
obj-$(CONFIG_QCOM_Q6V5_ADSP) += qcom_q6v5_adsp.o
@@ -24,6 +32,9 @@ obj-$(CONFIG_QCOM_SYSMON) += qcom_sysmon.o
obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss_pil.o
qcom_wcnss_pil-y += qcom_wcnss.o
qcom_wcnss_pil-y += qcom_wcnss_iris.o
+obj-$(CONFIG_RCAR_REMOTEPROC) += rcar_rproc.o
obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o
obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o
obj-$(CONFIG_STM32_RPROC) += stm32_rproc.o
+obj-$(CONFIG_TI_K3_DSP_REMOTEPROC) += ti_k3_dsp_remoteproc.o
+obj-$(CONFIG_TI_K3_R5_REMOTEPROC) += ti_k3_r5_remoteproc.o
diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c
new file mode 100644
index 000000000000..ca0817f8e41e
--- /dev/null
+++ b/drivers/remoteproc/imx_dsp_rproc.c
@@ -0,0 +1,1121 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright 2021 NXP */
+
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/arm-smccc.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/firmware/imx/sci.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/remoteproc.h>
+#include <linux/slab.h>
+
+#include "imx_rproc.h"
+#include "remoteproc_elf_helpers.h"
+#include "remoteproc_internal.h"
+
+#define DSP_RPROC_CLK_MAX 5
+
+#define REMOTE_IS_READY BIT(0)
+#define REMOTE_READY_WAIT_MAX_RETRIES 500
+
+/* att flags */
+/* DSP own area */
+#define ATT_OWN BIT(31)
+/* DSP instruction area */
+#define ATT_IRAM BIT(30)
+
+/* Definitions for i.MX8MP */
+/* DAP registers */
+#define IMX8M_DAP_DEBUG 0x28800000
+#define IMX8M_DAP_DEBUG_SIZE (64 * 1024)
+#define IMX8M_DAP_PWRCTL (0x4000 + 0x3020)
+#define IMX8M_PWRCTL_CORERESET BIT(16)
+
+/* DSP audio mix registers */
+#define IMX8M_AudioDSP_REG0 0x100
+#define IMX8M_AudioDSP_REG1 0x104
+#define IMX8M_AudioDSP_REG2 0x108
+#define IMX8M_AudioDSP_REG3 0x10c
+
+#define IMX8M_AudioDSP_REG2_RUNSTALL BIT(5)
+#define IMX8M_AudioDSP_REG2_PWAITMODE BIT(1)
+
+/* Definitions for i.MX8ULP */
+#define IMX8ULP_SIM_LPAV_REG_SYSCTRL0 0x8
+#define IMX8ULP_SYSCTRL0_DSP_DBG_RST BIT(25)
+#define IMX8ULP_SYSCTRL0_DSP_PLAT_CLK_EN BIT(19)
+#define IMX8ULP_SYSCTRL0_DSP_PBCLK_EN BIT(18)
+#define IMX8ULP_SYSCTRL0_DSP_CLK_EN BIT(17)
+#define IMX8ULP_SYSCTRL0_DSP_RST BIT(16)
+#define IMX8ULP_SYSCTRL0_DSP_OCD_HALT BIT(14)
+#define IMX8ULP_SYSCTRL0_DSP_STALL BIT(13)
+
+#define IMX8ULP_SIP_HIFI_XRDC 0xc200000e
+
+/*
+ * enum - Predefined Mailbox Messages
+ *
+ * @RP_MBOX_SUSPEND_SYSTEM: system suspend request for the remote processor
+ *
+ * @RP_MBOX_SUSPEND_ACK: successful response from remote processor for a
+ * suspend request
+ *
+ * @RP_MBOX_RESUME_SYSTEM: system resume request for the remote processor
+ *
+ * @RP_MBOX_RESUME_ACK: successful response from remote processor for a
+ * resume request
+ */
+enum imx_dsp_rp_mbox_messages {
+ RP_MBOX_SUSPEND_SYSTEM = 0xFF11,
+ RP_MBOX_SUSPEND_ACK = 0xFF12,
+ RP_MBOX_RESUME_SYSTEM = 0xFF13,
+ RP_MBOX_RESUME_ACK = 0xFF14,
+};
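
For illustration, a sketch of what the firmware side of this suspend/resume protocol might look like. Everything here is a hypothetical stand-in except the message values, which come from the enum above; fw_mbox_send(), fw_save_context() and fw_restore_context() are not real driver or firmware APIs.

#include <stdint.h>

extern void fw_save_context(void);
extern void fw_restore_context(void);
extern void fw_mbox_send(uint32_t msg);

/* Hypothetical firmware-side dispatcher for the PM messages above. */
void fw_handle_pm_message(uint32_t msg)
{
	switch (msg) {
	case 0xFF11:	/* RP_MBOX_SUSPEND_SYSTEM from the host */
		fw_save_context();
		fw_mbox_send(0xFF12);	/* RP_MBOX_SUSPEND_ACK */
		break;
	case 0xFF13:	/* RP_MBOX_RESUME_SYSTEM from the host */
		fw_restore_context();
		fw_mbox_send(0xFF14);	/* RP_MBOX_RESUME_ACK */
		break;
	}
}
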
+
+/**
+ * struct imx_dsp_rproc - DSP remote processor state
+ * @regmap: regmap handler
+ * @rproc: rproc handler
+ * @dsp_dcfg: device configuration pointer
+ * @clks: clocks needed by this device
+ * @cl: mailbox client to request the mailbox channel
+ * @cl_rxdb: mailbox client to request the mailbox channel for doorbell
+ * @tx_ch: mailbox tx channel handle
+ * @rx_ch: mailbox rx channel handle
+ * @rxdb_ch: mailbox rx doorbell channel handle
+ * @pd_dev: power domain device
+ * @pd_dev_link: power domain device link
+ * @ipc_handle: System Control Unit ipc handle
+ * @rproc_work: work for processing virtio interrupts
+ * @pm_comp: completion primitive to sync for suspend response
+ * @num_domains: power domain number
+ * @flags: control flags
+ */
+struct imx_dsp_rproc {
+ struct regmap *regmap;
+ struct rproc *rproc;
+ const struct imx_dsp_rproc_dcfg *dsp_dcfg;
+ struct clk_bulk_data clks[DSP_RPROC_CLK_MAX];
+ struct mbox_client cl;
+ struct mbox_client cl_rxdb;
+ struct mbox_chan *tx_ch;
+ struct mbox_chan *rx_ch;
+ struct mbox_chan *rxdb_ch;
+ struct device **pd_dev;
+ struct device_link **pd_dev_link;
+ struct imx_sc_ipc *ipc_handle;
+ struct work_struct rproc_work;
+ struct completion pm_comp;
+ int num_domains;
+ u32 flags;
+};
+
+/**
+ * struct imx_dsp_rproc_dcfg - DSP remote processor configuration
+ * @dcfg: imx_rproc_dcfg handler
+ * @reset: reset callback function
+ */
+struct imx_dsp_rproc_dcfg {
+ const struct imx_rproc_dcfg *dcfg;
+ int (*reset)(struct imx_dsp_rproc *priv);
+};
+
+static const struct imx_rproc_att imx_dsp_rproc_att_imx8qm[] = {
+ /* dev addr , sys addr , size , flags */
+ { 0x596e8000, 0x556e8000, 0x00008000, ATT_OWN },
+ { 0x596f0000, 0x556f0000, 0x00008000, ATT_OWN },
+ { 0x596f8000, 0x556f8000, 0x00000800, ATT_OWN | ATT_IRAM},
+ { 0x55700000, 0x55700000, 0x00070000, ATT_OWN },
+ /* DDR (Data) */
+ { 0x80000000, 0x80000000, 0x60000000, 0},
+};
+
+static const struct imx_rproc_att imx_dsp_rproc_att_imx8qxp[] = {
+ /* dev addr , sys addr , size , flags */
+ { 0x596e8000, 0x596e8000, 0x00008000, ATT_OWN },
+ { 0x596f0000, 0x596f0000, 0x00008000, ATT_OWN },
+ { 0x596f8000, 0x596f8000, 0x00000800, ATT_OWN | ATT_IRAM},
+ { 0x59700000, 0x59700000, 0x00070000, ATT_OWN },
+ /* DDR (Data) */
+ { 0x80000000, 0x80000000, 0x60000000, 0},
+};
+
+static const struct imx_rproc_att imx_dsp_rproc_att_imx8mp[] = {
+ /* dev addr , sys addr , size , flags */
+ { 0x3b6e8000, 0x3b6e8000, 0x00008000, ATT_OWN },
+ { 0x3b6f0000, 0x3b6f0000, 0x00008000, ATT_OWN },
+ { 0x3b6f8000, 0x3b6f8000, 0x00000800, ATT_OWN | ATT_IRAM},
+ { 0x3b700000, 0x3b700000, 0x00040000, ATT_OWN },
+ /* DDR (Data) */
+ { 0x40000000, 0x40000000, 0x80000000, 0},
+};
+
+static const struct imx_rproc_att imx_dsp_rproc_att_imx8ulp[] = {
+ /* dev addr , sys addr , size , flags */
+ { 0x21170000, 0x21170000, 0x00010000, ATT_OWN | ATT_IRAM},
+ { 0x21180000, 0x21180000, 0x00010000, ATT_OWN },
+ /* DDR (Data) */
+ { 0x0c000000, 0x80000000, 0x10000000, 0},
+ { 0x30000000, 0x90000000, 0x10000000, 0},
+};
+
+/* Reset function for DSP on i.MX8MP */
+static int imx8mp_dsp_reset(struct imx_dsp_rproc *priv)
+{
+ void __iomem *dap = ioremap_wc(IMX8M_DAP_DEBUG, IMX8M_DAP_DEBUG_SIZE);
+ int pwrctl;
+
+ /* Put DSP into reset and stall */
+ pwrctl = readl(dap + IMX8M_DAP_PWRCTL);
+ pwrctl |= IMX8M_PWRCTL_CORERESET;
+ writel(pwrctl, dap + IMX8M_DAP_PWRCTL);
+
+ /* Keep reset asserted for 10 cycles */
+ usleep_range(1, 2);
+
+ regmap_update_bits(priv->regmap, IMX8M_AudioDSP_REG2,
+ IMX8M_AudioDSP_REG2_RUNSTALL,
+ IMX8M_AudioDSP_REG2_RUNSTALL);
+
+ /* Take the DSP out of reset and keep stalled for FW loading */
+ pwrctl = readl(dap + IMX8M_DAP_PWRCTL);
+ pwrctl &= ~IMX8M_PWRCTL_CORERESET;
+ writel(pwrctl, dap + IMX8M_DAP_PWRCTL);
+
+ iounmap(dap);
+ return 0;
+}
+
+/* Reset function for DSP on i.MX8ULP */
+static int imx8ulp_dsp_reset(struct imx_dsp_rproc *priv)
+{
+ struct arm_smccc_res res;
+
+ /* Put DSP into reset and stall */
+ regmap_update_bits(priv->regmap, IMX8ULP_SIM_LPAV_REG_SYSCTRL0,
+ IMX8ULP_SYSCTRL0_DSP_RST, IMX8ULP_SYSCTRL0_DSP_RST);
+ regmap_update_bits(priv->regmap, IMX8ULP_SIM_LPAV_REG_SYSCTRL0,
+ IMX8ULP_SYSCTRL0_DSP_STALL,
+ IMX8ULP_SYSCTRL0_DSP_STALL);
+
+ /* Configure resources of DSP through TFA */
+ arm_smccc_smc(IMX8ULP_SIP_HIFI_XRDC, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ /* Take the DSP out of reset and keep stalled for FW loading */
+ regmap_update_bits(priv->regmap, IMX8ULP_SIM_LPAV_REG_SYSCTRL0,
+ IMX8ULP_SYSCTRL0_DSP_RST, 0);
+ regmap_update_bits(priv->regmap, IMX8ULP_SIM_LPAV_REG_SYSCTRL0,
+ IMX8ULP_SYSCTRL0_DSP_DBG_RST, 0);
+
+ return 0;
+}
+
+/* Specific configuration for i.MX8MP */
+static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8mp = {
+ .src_reg = IMX8M_AudioDSP_REG2,
+ .src_mask = IMX8M_AudioDSP_REG2_RUNSTALL,
+ .src_start = 0,
+ .src_stop = IMX8M_AudioDSP_REG2_RUNSTALL,
+ .att = imx_dsp_rproc_att_imx8mp,
+ .att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8mp),
+ .method = IMX_RPROC_MMIO,
+};
+
+static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8mp = {
+ .dcfg = &dsp_rproc_cfg_imx8mp,
+ .reset = imx8mp_dsp_reset,
+};
+
+/* Specific configuration for i.MX8ULP */
+static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8ulp = {
+ .src_reg = IMX8ULP_SIM_LPAV_REG_SYSCTRL0,
+ .src_mask = IMX8ULP_SYSCTRL0_DSP_STALL,
+ .src_start = 0,
+ .src_stop = IMX8ULP_SYSCTRL0_DSP_STALL,
+ .att = imx_dsp_rproc_att_imx8ulp,
+ .att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8ulp),
+ .method = IMX_RPROC_MMIO,
+};
+
+static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8ulp = {
+ .dcfg = &dsp_rproc_cfg_imx8ulp,
+ .reset = imx8ulp_dsp_reset,
+};
+
+/* Specific configuration for i.MX8QXP */
+static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8qxp = {
+ .att = imx_dsp_rproc_att_imx8qxp,
+ .att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8qxp),
+ .method = IMX_RPROC_SCU_API,
+};
+
+static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8qxp = {
+ .dcfg = &dsp_rproc_cfg_imx8qxp,
+};
+
+/* Specific configuration for i.MX8QM */
+static const struct imx_rproc_dcfg dsp_rproc_cfg_imx8qm = {
+ .att = imx_dsp_rproc_att_imx8qm,
+ .att_size = ARRAY_SIZE(imx_dsp_rproc_att_imx8qm),
+ .method = IMX_RPROC_SCU_API,
+};
+
+static const struct imx_dsp_rproc_dcfg imx_dsp_rproc_cfg_imx8qm = {
+ .dcfg = &dsp_rproc_cfg_imx8qm,
+};
+
+static int imx_dsp_rproc_ready(struct rproc *rproc)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+ int i;
+
+ if (!priv->rxdb_ch)
+ return 0;
+
+ for (i = 0; i < REMOTE_READY_WAIT_MAX_RETRIES; i++) {
+ if (priv->flags & REMOTE_IS_READY)
+ return 0;
+ usleep_range(100, 200);
+ }
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * Start function for rproc_ops
+ *
+ * There is a handshake in the start procedure: when the DSP starts,
+ * it sends a doorbell message to this driver, the REMOTE_IS_READY
+ * flag is set, and then the driver can kick messages to the DSP.
+ */
+static int imx_dsp_rproc_start(struct rproc *rproc)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+ const struct imx_dsp_rproc_dcfg *dsp_dcfg = priv->dsp_dcfg;
+ const struct imx_rproc_dcfg *dcfg = dsp_dcfg->dcfg;
+ struct device *dev = rproc->dev.parent;
+ int ret;
+
+ switch (dcfg->method) {
+ case IMX_RPROC_MMIO:
+ ret = regmap_update_bits(priv->regmap,
+ dcfg->src_reg,
+ dcfg->src_mask,
+ dcfg->src_start);
+ break;
+ case IMX_RPROC_SCU_API:
+ ret = imx_sc_pm_cpu_start(priv->ipc_handle,
+ IMX_SC_R_DSP,
+ true,
+ rproc->bootaddr);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (ret)
+ dev_err(dev, "Failed to enable remote core!\n");
+ else
+ ret = imx_dsp_rproc_ready(rproc);
+
+ return ret;
+}
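
For context, this start path is normally reached through the core remoteproc API from a client driver. A hedged sketch follows; the "fsl,dsp" phandle property is a hypothetical client-side binding, not defined by this driver, and error handling is simplified:

#include <linux/of.h>
#include <linux/remoteproc.h>

/* Sketch of a client driver booting the DSP via the remoteproc core. */
static int client_boot_dsp(struct device_node *np)
{
	struct rproc *dsp;
	phandle ph;
	int ret;

	if (of_property_read_u32(np, "fsl,dsp", &ph))
		return -EINVAL;

	dsp = rproc_get_by_phandle(ph);		/* takes a reference */
	if (!dsp)
		return -EPROBE_DEFER;

	ret = rproc_boot(dsp);	/* runs prepare/load/start shown above */
	if (ret)
		rproc_put(dsp);

	return ret;
}
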
+
+/*
+ * Stop function for rproc_ops
+ * It clears the REMOTE_IS_READY flag.
+ */
+static int imx_dsp_rproc_stop(struct rproc *rproc)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+ const struct imx_dsp_rproc_dcfg *dsp_dcfg = priv->dsp_dcfg;
+ const struct imx_rproc_dcfg *dcfg = dsp_dcfg->dcfg;
+ struct device *dev = rproc->dev.parent;
+ int ret = 0;
+
+ /* Make sure work is finished */
+ flush_work(&priv->rproc_work);
+
+ if (rproc->state == RPROC_CRASHED) {
+ priv->flags &= ~REMOTE_IS_READY;
+ return 0;
+ }
+
+ switch (dcfg->method) {
+ case IMX_RPROC_MMIO:
+ ret = regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask,
+ dcfg->src_stop);
+ break;
+ case IMX_RPROC_SCU_API:
+ ret = imx_sc_pm_cpu_start(priv->ipc_handle,
+ IMX_SC_R_DSP,
+ false,
+ rproc->bootaddr);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (ret)
+ dev_err(dev, "Failed to stop remote core\n");
+ else
+ priv->flags &= ~REMOTE_IS_READY;
+
+ return ret;
+}
+
+/**
+ * imx_dsp_rproc_sys_to_da() - internal memory translation helper
+ * @priv: private data pointer
+ * @sys: system address (DDR address)
+ * @len: length of the memory buffer
+ * @da: device address to translate
+ *
+ * Convert a system address (DDR address) to a device address (the
+ * DSP view), since the device may remap memory.
+ */
+static int imx_dsp_rproc_sys_to_da(struct imx_dsp_rproc *priv, u64 sys,
+ size_t len, u64 *da)
+{
+ const struct imx_dsp_rproc_dcfg *dsp_dcfg = priv->dsp_dcfg;
+ const struct imx_rproc_dcfg *dcfg = dsp_dcfg->dcfg;
+ int i;
+
+ /* Parse address translation table */
+ for (i = 0; i < dcfg->att_size; i++) {
+ const struct imx_rproc_att *att = &dcfg->att[i];
+
+ if (sys >= att->sa && sys + len <= att->sa + att->size) {
+ unsigned int offset = sys - att->sa;
+
+ *da = att->da + offset;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
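
A worked example against the i.MX8QM table above: system address 0x556e8100 falls inside the first entry (sa 0x556e8000, size 0x8000), so the 0x100 offset is applied to that entry's device address, giving da 0x596e8100. A standalone sketch of the same arithmetic, with the window values copied from imx_dsp_rproc_att_imx8qm[0]; the helper itself is illustrative, not driver code:

/* Returns the translated da, or 0 if sys/len miss the window. */
static u64 example_sys_to_da(u64 sys, u64 len)
{
	const u64 entry_da = 0x596e8000, entry_sa = 0x556e8000;
	const u64 entry_size = 0x8000;

	if (sys >= entry_sa && sys + len <= entry_sa + entry_size)
		return entry_da + (sys - entry_sa);

	return 0;	/* no matching translation window */
}

/* example_sys_to_da(0x556e8100, 0x10) == 0x596e8100 */
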
+
+/* Main virtqueue message work function
+ *
+ * This function is executed upon scheduling of the i.MX DSP remoteproc
+ * driver's workqueue. The workqueue is scheduled by the mailbox rx
+ * handler.
+ *
+ * This work function processes both the Tx and Rx virtqueue indices on
+ * every invocation. The rproc_vq_interrupt function can detect if there
+ * are new unprocessed messages or not (returns IRQ_NONE vs IRQ_HANDLED),
+ * but there is no need to check for these return values. The index 0
+ * triggering will process all pending Rx buffers, and the index 1 triggering
+ * will process all newly available Tx buffers and will wake up any potentially
+ * blocked senders.
+ *
+ * NOTE:
+ * The current logic is based on an inherent design assumption of supporting
+ * only 2 vrings, but this can be changed if needed.
+ */
+static void imx_dsp_rproc_vq_work(struct work_struct *work)
+{
+ struct imx_dsp_rproc *priv = container_of(work, struct imx_dsp_rproc,
+ rproc_work);
+
+ rproc_vq_interrupt(priv->rproc, 0);
+ rproc_vq_interrupt(priv->rproc, 1);
+}
+
+/**
+ * imx_dsp_rproc_rx_tx_callback() - inbound mailbox message handler
+ * @cl: mailbox client pointer used for requesting the mailbox channel
+ * @data: mailbox payload
+ *
+ * This handler is invoked by the mailbox driver whenever a mailbox
+ * message is received. The SUSPEND- and RESUME-related messages are
+ * handled here; other messages are passed on to the remoteproc core.
+ */
+static void imx_dsp_rproc_rx_tx_callback(struct mbox_client *cl, void *data)
+{
+ struct rproc *rproc = dev_get_drvdata(cl->dev);
+ struct imx_dsp_rproc *priv = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ u32 message = (u32)(*(u32 *)data);
+
+ dev_dbg(dev, "mbox msg: 0x%x\n", message);
+
+ switch (message) {
+ case RP_MBOX_SUSPEND_ACK:
+ complete(&priv->pm_comp);
+ break;
+ case RP_MBOX_RESUME_ACK:
+ complete(&priv->pm_comp);
+ break;
+ default:
+ schedule_work(&priv->rproc_work);
+ break;
+ }
+}
+
+/**
+ * imx_dsp_rproc_rxdb_callback() - inbound mailbox message handler
+ * @cl: mailbox client pointer used for requesting the mailbox channel
+ * @data: mailbox payload
+ *
+ * For the doorbell channel no message payload is specified; just set
+ * the REMOTE_IS_READY flag.
+ */
+static void imx_dsp_rproc_rxdb_callback(struct mbox_client *cl, void *data)
+{
+ struct rproc *rproc = dev_get_drvdata(cl->dev);
+ struct imx_dsp_rproc *priv = rproc->priv;
+
+ /* Remote is ready after firmware is loaded and running */
+ priv->flags |= REMOTE_IS_READY;
+}
+
+/**
+ * imx_dsp_rproc_mbox_init() - request mailbox channels
+ * @priv: private data pointer
+ *
+ * Request three mailbox channels (tx, rx, rxdb).
+ */
+static int imx_dsp_rproc_mbox_init(struct imx_dsp_rproc *priv)
+{
+ struct device *dev = priv->rproc->dev.parent;
+ struct mbox_client *cl;
+ int ret;
+
+ if (!of_get_property(dev->of_node, "mbox-names", NULL))
+ return 0;
+
+ cl = &priv->cl;
+ cl->dev = dev;
+ cl->tx_block = true;
+ cl->tx_tout = 100;
+ cl->knows_txdone = false;
+ cl->rx_callback = imx_dsp_rproc_rx_tx_callback;
+
+ /* Channel for sending message */
+ priv->tx_ch = mbox_request_channel_byname(cl, "tx");
+ if (IS_ERR(priv->tx_ch)) {
+ ret = PTR_ERR(priv->tx_ch);
+ dev_dbg(cl->dev, "failed to request tx mailbox channel: %d\n",
+ ret);
+ goto err_out;
+ }
+
+ /* Channel for receiving message */
+ priv->rx_ch = mbox_request_channel_byname(cl, "rx");
+ if (IS_ERR(priv->rx_ch)) {
+ ret = PTR_ERR(priv->rx_ch);
+ dev_dbg(cl->dev, "failed to request rx mailbox channel: %d\n",
+ ret);
+ goto err_out;
+ }
+
+ cl = &priv->cl_rxdb;
+ cl->dev = dev;
+ cl->rx_callback = imx_dsp_rproc_rxdb_callback;
+
+ /*
+ * The RX doorbell is used to receive the ready signal from the
+ * remote after the firmware is loaded.
+ */
+ priv->rxdb_ch = mbox_request_channel_byname(cl, "rxdb");
+ if (IS_ERR(priv->rxdb_ch)) {
+ ret = PTR_ERR(priv->rxdb_ch);
+ dev_dbg(cl->dev, "failed to request mbox chan rxdb, ret %d\n",
+ ret);
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ if (!IS_ERR(priv->tx_ch))
+ mbox_free_channel(priv->tx_ch);
+ if (!IS_ERR(priv->rx_ch))
+ mbox_free_channel(priv->rx_ch);
+ if (!IS_ERR(priv->rxdb_ch))
+ mbox_free_channel(priv->rxdb_ch);
+
+ return ret;
+}
+
+static void imx_dsp_rproc_free_mbox(struct imx_dsp_rproc *priv)
+{
+ mbox_free_channel(priv->tx_ch);
+ mbox_free_channel(priv->rx_ch);
+ mbox_free_channel(priv->rxdb_ch);
+}
+
+/**
+ * imx_dsp_rproc_add_carveout() - register reserved memory carveouts
+ * @priv: private data pointer
+ *
+ * This function registers the specified memory entries in the @rproc
+ * carveouts list. The carveouts help map memory addresses for the DSP.
+ */
+static int imx_dsp_rproc_add_carveout(struct imx_dsp_rproc *priv)
+{
+ const struct imx_dsp_rproc_dcfg *dsp_dcfg = priv->dsp_dcfg;
+ const struct imx_rproc_dcfg *dcfg = dsp_dcfg->dcfg;
+ struct rproc *rproc = priv->rproc;
+ struct device *dev = rproc->dev.parent;
+ struct device_node *np = dev->of_node;
+ struct of_phandle_iterator it;
+ struct rproc_mem_entry *mem;
+ struct reserved_mem *rmem;
+ void __iomem *cpu_addr;
+ int a;
+ u64 da;
+
+ /* Remap required addresses */
+ for (a = 0; a < dcfg->att_size; a++) {
+ const struct imx_rproc_att *att = &dcfg->att[a];
+
+ if (!(att->flags & ATT_OWN))
+ continue;
+
+ if (imx_dsp_rproc_sys_to_da(priv, att->sa, att->size, &da))
+ return -EINVAL;
+
+ cpu_addr = devm_ioremap_wc(dev, att->sa, att->size);
+ if (!cpu_addr) {
+ dev_err(dev, "failed to map memory %p\n", &att->sa);
+ return -ENOMEM;
+ }
+
+ /* Register memory region */
+ mem = rproc_mem_entry_init(dev, cpu_addr, (dma_addr_t)att->sa,
+ att->size, da, NULL, NULL, "dsp_mem");
+
+ if (mem)
+ rproc_coredump_add_segment(rproc, da, att->size);
+ else
+ return -ENOMEM;
+
+ rproc_add_carveout(rproc, mem);
+ }
+
+ of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
+ while (of_phandle_iterator_next(&it) == 0) {
+ /*
+ * Ignore the first memory region, which will be used for vdev buffers.
+ * No extra handling is needed; rproc_add_virtio_dev will handle it.
+ */
+ if (!strcmp(it.node->name, "vdev0buffer"))
+ continue;
+
+ rmem = of_reserved_mem_lookup(it.node);
+ if (!rmem) {
+ dev_err(dev, "unable to acquire memory-region\n");
+ return -EINVAL;
+ }
+
+ if (imx_dsp_rproc_sys_to_da(priv, rmem->base, rmem->size, &da))
+ return -EINVAL;
+
+ cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
+ if (!cpu_addr) {
+ dev_err(dev, "failed to map memory %p\n", &rmem->base);
+ return -ENOMEM;
+ }
+
+ /* Register memory region */
+ mem = rproc_mem_entry_init(dev, cpu_addr, (dma_addr_t)rmem->base,
+ rmem->size, da, NULL, NULL, it.node->name);
+
+ if (mem)
+ rproc_coredump_add_segment(rproc, da, rmem->size);
+ else
+ return -ENOMEM;
+
+ rproc_add_carveout(rproc, mem);
+ }
+
+ return 0;
+}
+
+/* Prepare function for rproc_ops */
+static int imx_dsp_rproc_prepare(struct rproc *rproc)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ struct rproc_mem_entry *carveout;
+ int ret;
+
+ ret = imx_dsp_rproc_add_carveout(priv);
+ if (ret) {
+ dev_err(dev, "failed on imx_dsp_rproc_add_carveout\n");
+ return ret;
+ }
+
+ pm_runtime_get_sync(dev);
+
+ /*
+ * Clear the buffers after the pm_runtime call, since the internal
+ * OCRAM is not accessible while power and clocks are disabled.
+ */
+ list_for_each_entry(carveout, &rproc->carveouts, node) {
+ if (carveout->va)
+ memset(carveout->va, 0, carveout->len);
+ }
+
+ return 0;
+}
+
+/* Unprepare function for rproc_ops */
+static int imx_dsp_rproc_unprepare(struct rproc *rproc)
+{
+ pm_runtime_put_sync(rproc->dev.parent);
+
+ return 0;
+}
+
+/* Kick function for rproc_ops */
+static void imx_dsp_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct imx_dsp_rproc *priv = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ int err;
+ __u32 mmsg;
+
+ if (!priv->tx_ch) {
+ dev_err(dev, "No initialized mbox tx channel\n");
+ return;
+ }
+
+ /*
+ * Send the index of the triggered virtqueue as the MU payload to
+ * let the remote processor know which virtqueue is used.
+ */
+ mmsg = vqid;
+
+ err = mbox_send_message(priv->tx_ch, (void *)&mmsg);
+ if (err < 0)
+ dev_err(dev, "%s: failed (%d, err:%d)\n", __func__, vqid, err);
+}
+
+static int imx_dsp_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ if (rproc_elf_load_rsc_table(rproc, fw))
+ dev_warn(&rproc->dev, "no resource table found for this firmware\n");
+
+ return 0;
+}
+
+static const struct rproc_ops imx_dsp_rproc_ops = {
+ .prepare = imx_dsp_rproc_prepare,
+ .unprepare = imx_dsp_rproc_unprepare,
+ .start = imx_dsp_rproc_start,
+ .stop = imx_dsp_rproc_stop,
+ .kick = imx_dsp_rproc_kick,
+ .load = rproc_elf_load_segments,
+ .parse_fw = imx_dsp_rproc_parse_fw,
+ .sanity_check = rproc_elf_sanity_check,
+ .get_boot_addr = rproc_elf_get_boot_addr,
+};
+
+/**
+ * imx_dsp_attach_pm_domains() - attach the power domains
+ * @priv: private data pointer
+ *
+ * On i.MX8QM and i.MX8QXP multiple power domains are required,
+ * so the devices need to be linked.
+ */
+static int imx_dsp_attach_pm_domains(struct imx_dsp_rproc *priv)
+{
+ struct device *dev = priv->rproc->dev.parent;
+ int ret, i;
+
+ priv->num_domains = of_count_phandle_with_args(dev->of_node,
+ "power-domains",
+ "#power-domain-cells");
+
+ /* If only one domain, then no need to link the device */
+ if (priv->num_domains <= 1)
+ return 0;
+
+ priv->pd_dev = devm_kmalloc_array(dev, priv->num_domains,
+ sizeof(*priv->pd_dev),
+ GFP_KERNEL);
+ if (!priv->pd_dev)
+ return -ENOMEM;
+
+ priv->pd_dev_link = devm_kmalloc_array(dev, priv->num_domains,
+ sizeof(*priv->pd_dev_link),
+ GFP_KERNEL);
+ if (!priv->pd_dev_link)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->num_domains; i++) {
+ priv->pd_dev[i] = dev_pm_domain_attach_by_id(dev, i);
+ if (IS_ERR(priv->pd_dev[i])) {
+ ret = PTR_ERR(priv->pd_dev[i]);
+ goto detach_pm;
+ }
+
+ /*
+ * device_link_add() checks priv->pd_dev[i]; if the link
+ * cannot be created it returns NULL.
+ */
+ priv->pd_dev_link[i] = device_link_add(dev,
+ priv->pd_dev[i],
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME);
+ if (!priv->pd_dev_link[i]) {
+ dev_pm_domain_detach(priv->pd_dev[i], false);
+ ret = -EINVAL;
+ goto detach_pm;
+ }
+ }
+
+ return 0;
+
+detach_pm:
+ while (--i >= 0) {
+ device_link_del(priv->pd_dev_link[i]);
+ dev_pm_domain_detach(priv->pd_dev[i], false);
+ }
+
+ return ret;
+}
+
+static int imx_dsp_detach_pm_domains(struct imx_dsp_rproc *priv)
+{
+ int i;
+
+ if (priv->num_domains <= 1)
+ return 0;
+
+ for (i = 0; i < priv->num_domains; i++) {
+ device_link_del(priv->pd_dev_link[i]);
+ dev_pm_domain_detach(priv->pd_dev[i], false);
+ }
+
+ return 0;
+}
+
+/**
+ * imx_dsp_rproc_detect_mode() - detect DSP control mode
+ * @priv: private data pointer
+ *
+ * Different platforms use different control methods for the DSP,
+ * depending on how the DSP is integrated in the platform.
+ *
+ * On i.MX8QXP and i.MX8QM, the DSP is started and stopped through the
+ * System Control Unit.
+ * On i.MX8MP and i.MX8ULP, the DSP is started and stopped through the
+ * system integration module.
+ */
+static int imx_dsp_rproc_detect_mode(struct imx_dsp_rproc *priv)
+{
+ const struct imx_dsp_rproc_dcfg *dsp_dcfg = priv->dsp_dcfg;
+ struct device *dev = priv->rproc->dev.parent;
+ struct regmap *regmap;
+ int ret = 0;
+
+ switch (dsp_dcfg->dcfg->method) {
+ case IMX_RPROC_SCU_API:
+ ret = imx_scu_get_handle(&priv->ipc_handle);
+ if (ret)
+ return ret;
+ break;
+ case IMX_RPROC_MMIO:
+ regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "fsl,dsp-ctrl");
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "failed to find syscon\n");
+ return PTR_ERR(regmap);
+ }
+
+ priv->regmap = regmap;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static const char *imx_dsp_clks_names[DSP_RPROC_CLK_MAX] = {
+ /* DSP clocks */
+ "core", "ocram", "debug", "ipg", "mu",
+};
+
+static int imx_dsp_rproc_clk_get(struct imx_dsp_rproc *priv)
+{
+ struct device *dev = priv->rproc->dev.parent;
+ struct clk_bulk_data *clks = priv->clks;
+ int i;
+
+ for (i = 0; i < DSP_RPROC_CLK_MAX; i++)
+ clks[i].id = imx_dsp_clks_names[i];
+
+ return devm_clk_bulk_get_optional(dev, DSP_RPROC_CLK_MAX, clks);
+}
+
+static int imx_dsp_rproc_probe(struct platform_device *pdev)
+{
+ const struct imx_dsp_rproc_dcfg *dsp_dcfg;
+ struct device *dev = &pdev->dev;
+ struct imx_dsp_rproc *priv;
+ struct rproc *rproc;
+ const char *fw_name;
+ int ret;
+
+ dsp_dcfg = of_device_get_match_data(dev);
+ if (!dsp_dcfg)
+ return -ENODEV;
+
+ ret = rproc_of_parse_firmware(dev, 0, &fw_name);
+ if (ret) {
+ dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ rproc = rproc_alloc(dev, "imx-dsp-rproc", &imx_dsp_rproc_ops, fw_name,
+ sizeof(*priv));
+ if (!rproc)
+ return -ENOMEM;
+
+ priv = rproc->priv;
+ priv->rproc = rproc;
+ priv->dsp_dcfg = dsp_dcfg;
+
+ dev_set_drvdata(dev, rproc);
+
+ INIT_WORK(&priv->rproc_work, imx_dsp_rproc_vq_work);
+
+ ret = imx_dsp_rproc_detect_mode(priv);
+ if (ret) {
+ dev_err(dev, "failed on imx_dsp_rproc_detect_mode\n");
+ goto err_put_rproc;
+ }
+
+ /* There are multiple power domains required by DSP on some platform */
+ ret = imx_dsp_attach_pm_domains(priv);
+ if (ret) {
+ dev_err(dev, "failed on imx_dsp_attach_pm_domains\n");
+ goto err_put_rproc;
+ }
+ /* Get clocks */
+ ret = imx_dsp_rproc_clk_get(priv);
+ if (ret) {
+ dev_err(dev, "failed on imx_dsp_rproc_clk_get\n");
+ goto err_detach_domains;
+ }
+
+ init_completion(&priv->pm_comp);
+ rproc->auto_boot = false;
+ ret = rproc_add(rproc);
+ if (ret) {
+ dev_err(dev, "rproc_add failed\n");
+ goto err_detach_domains;
+ }
+
+ pm_runtime_enable(dev);
+
+ return 0;
+
+err_detach_domains:
+ imx_dsp_detach_pm_domains(priv);
+err_put_rproc:
+ rproc_free(rproc);
+
+ return ret;
+}
+
+static int imx_dsp_rproc_remove(struct platform_device *pdev)
+{
+ struct rproc *rproc = platform_get_drvdata(pdev);
+ struct imx_dsp_rproc *priv = rproc->priv;
+
+ pm_runtime_disable(&pdev->dev);
+ rproc_del(rproc);
+ imx_dsp_detach_pm_domains(priv);
+ rproc_free(rproc);
+
+ return 0;
+}
+
+/* pm runtime functions */
+static int imx_dsp_runtime_resume(struct device *dev)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+ struct imx_dsp_rproc *priv = rproc->priv;
+ const struct imx_dsp_rproc_dcfg *dsp_dcfg = priv->dsp_dcfg;
+ int ret;
+
+ /*
+ * A power domain is attached to the mailbox. If the mailbox were
+ * set up in probe(), its power would stay enabled permanently and
+ * could never be saved.
+ * So the mailbox setup is moved to runtime resume.
+ */
+ ret = imx_dsp_rproc_mbox_init(priv);
+ if (ret) {
+ dev_err(dev, "failed on imx_dsp_rproc_mbox_init\n");
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(DSP_RPROC_CLK_MAX, priv->clks);
+ if (ret) {
+ dev_err(dev, "failed on clk_bulk_prepare_enable\n");
+ return ret;
+ }
+
+ /* Reset DSP if needed */
+ if (dsp_dcfg->reset)
+ dsp_dcfg->reset(priv);
+
+ return 0;
+}
+
+static int imx_dsp_runtime_suspend(struct device *dev)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+ struct imx_dsp_rproc *priv = rproc->priv;
+
+ clk_bulk_disable_unprepare(DSP_RPROC_CLK_MAX, priv->clks);
+
+ imx_dsp_rproc_free_mbox(priv);
+
+ return 0;
+}
+
+static void imx_dsp_load_firmware(const struct firmware *fw, void *context)
+{
+ struct rproc *rproc = context;
+ int ret;
+
+ /*
+ * Same flow as the start procedure:
+ * first load the ELF segments into memory.
+ */
+ ret = rproc_load_segments(rproc, fw);
+ if (ret)
+ goto out;
+
+ /* Start the remote processor */
+ ret = rproc->ops->start(rproc);
+ if (ret)
+ goto out;
+
+ rproc->ops->kick(rproc, 0);
+
+out:
+ release_firmware(fw);
+}
+
+static __maybe_unused int imx_dsp_suspend(struct device *dev)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+ struct imx_dsp_rproc *priv = rproc->priv;
+ __u32 mmsg = RP_MBOX_SUSPEND_SYSTEM;
+ int ret;
+
+ if (rproc->state != RPROC_RUNNING)
+ goto out;
+
+ reinit_completion(&priv->pm_comp);
+
+ /* Tell DSP that suspend is happening */
+ ret = mbox_send_message(priv->tx_ch, (void *)&mmsg);
+ if (ret < 0) {
+ dev_err(dev, "PM mbox_send_message failed: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * The DSP needs to save its context at suspend. Wait here for the
+ * DSP's response, after which power can be disabled.
+ */
+ if (!wait_for_completion_timeout(&priv->pm_comp, msecs_to_jiffies(100)))
+ return -EBUSY;
+
+out:
+ /*
+ * The DSP's power is disabled in suspend, so force runtime PM into
+ * the suspended state; power and clocks can then be re-enabled at
+ * the resume stage.
+ */
+ return pm_runtime_force_suspend(dev);
+}
+
+static __maybe_unused int imx_dsp_resume(struct device *dev)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ return ret;
+
+ if (rproc->state != RPROC_RUNNING)
+ return 0;
+
+ /*
+ * The DSP's power is disabled at suspend and its memory is reset,
+ * so the image segments are lost. The firmware must therefore be
+ * reloaded and the DSP restarted if it was in the running state.
+ */
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
+ rproc->firmware, dev, GFP_KERNEL,
+ rproc, imx_dsp_load_firmware);
+ if (ret < 0) {
+ dev_err(dev, "load firmware failed: %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ pm_runtime_force_suspend(dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops imx_dsp_rproc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(imx_dsp_suspend, imx_dsp_resume)
+ SET_RUNTIME_PM_OPS(imx_dsp_runtime_suspend,
+ imx_dsp_runtime_resume, NULL)
+};
+
+static const struct of_device_id imx_dsp_rproc_of_match[] = {
+ { .compatible = "fsl,imx8qxp-hifi4", .data = &imx_dsp_rproc_cfg_imx8qxp },
+ { .compatible = "fsl,imx8qm-hifi4", .data = &imx_dsp_rproc_cfg_imx8qm },
+ { .compatible = "fsl,imx8mp-hifi4", .data = &imx_dsp_rproc_cfg_imx8mp },
+ { .compatible = "fsl,imx8ulp-hifi4", .data = &imx_dsp_rproc_cfg_imx8ulp },
+ {},
+};
+MODULE_DEVICE_TABLE(of, imx_dsp_rproc_of_match);
+
+static struct platform_driver imx_dsp_rproc_driver = {
+ .probe = imx_dsp_rproc_probe,
+ .remove = imx_dsp_rproc_remove,
+ .driver = {
+ .name = "imx-dsp-rproc",
+ .of_match_table = imx_dsp_rproc_of_match,
+ .pm = &imx_dsp_rproc_pm_ops,
+ },
+};
+module_platform_driver(imx_dsp_rproc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("i.MX HiFi Core Remote Processor Control Driver");
+MODULE_AUTHOR("Shengjiu Wang <shengjiu.wang@nxp.com>");
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 3e72b6f38d4b..4a3352821b1d 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -3,17 +3,24 @@
* Copyright (c) 2017 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
*/
+#include <linux/arm-smccc.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
+#include <linux/workqueue.h>
+
+#include "imx_rproc.h"
+#include "remoteproc_internal.h"
#define IMX7D_SRC_SCR 0x0C
#define IMX7D_ENABLE_M4 BIT(3)
@@ -27,7 +34,8 @@
#define IMX7D_M4_START (IMX7D_ENABLE_M4 | IMX7D_SW_M4P_RST \
| IMX7D_SW_M4C_RST)
-#define IMX7D_M4_STOP IMX7D_SW_M4C_NON_SCLR_RST
+#define IMX7D_M4_STOP (IMX7D_ENABLE_M4 | IMX7D_SW_M4C_RST | \
+ IMX7D_SW_M4C_NON_SCLR_RST)
/* Address: 0x020D8000 */
#define IMX6SX_SRC_SCR 0x00
@@ -38,12 +46,18 @@
#define IMX6SX_M4_START (IMX6SX_ENABLE_M4 | IMX6SX_SW_M4P_RST \
| IMX6SX_SW_M4C_RST)
-#define IMX6SX_M4_STOP IMX6SX_SW_M4C_NON_SCLR_RST
+#define IMX6SX_M4_STOP (IMX6SX_ENABLE_M4 | IMX6SX_SW_M4C_RST | \
+ IMX6SX_SW_M4C_NON_SCLR_RST)
#define IMX6SX_M4_RST_MASK (IMX6SX_ENABLE_M4 | IMX6SX_SW_M4P_RST \
| IMX6SX_SW_M4C_NON_SCLR_RST \
| IMX6SX_SW_M4C_RST)
-#define IMX7D_RPROC_MEM_MAX 8
+#define IMX_RPROC_MEM_MAX 32
+
+#define IMX_SIP_RPROC 0xC2000005
+#define IMX_SIP_RPROC_START 0x00
+#define IMX_SIP_RPROC_STARTED 0x01
+#define IMX_SIP_RPROC_STOP 0x02
/**
* struct imx_rproc_mem - slim internal memory structure
@@ -60,31 +74,119 @@ struct imx_rproc_mem {
/* att flags */
/* M4 own area. Can be mapped at probe */
#define ATT_OWN BIT(1)
-
-/* address translation table */
-struct imx_rproc_att {
- u32 da; /* device address (From Cortex M4 view)*/
- u32 sa; /* system bus address */
- u32 size; /* size of reg range */
- int flags;
-};
-
-struct imx_rproc_dcfg {
- u32 src_reg;
- u32 src_mask;
- u32 src_start;
- u32 src_stop;
- const struct imx_rproc_att *att;
- size_t att_size;
-};
+#define ATT_IOMEM BIT(2)
struct imx_rproc {
struct device *dev;
struct regmap *regmap;
struct rproc *rproc;
const struct imx_rproc_dcfg *dcfg;
- struct imx_rproc_mem mem[IMX7D_RPROC_MEM_MAX];
+ struct imx_rproc_mem mem[IMX_RPROC_MEM_MAX];
struct clk *clk;
+ struct mbox_client cl;
+ struct mbox_chan *tx_ch;
+ struct mbox_chan *rx_ch;
+ struct work_struct rproc_work;
+ struct workqueue_struct *workqueue;
+ void __iomem *rsc_table;
+};
+
+static const struct imx_rproc_att imx_rproc_att_imx93[] = {
+ /* dev addr , sys addr , size , flags */
+ /* TCM CODE NON-SECURE */
+ { 0x0FFC0000, 0x201C0000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ { 0x0FFE0000, 0x201E0000, 0x00020000, ATT_OWN | ATT_IOMEM },
+
+ /* TCM CODE SECURE */
+ { 0x1FFC0000, 0x201C0000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ { 0x1FFE0000, 0x201E0000, 0x00020000, ATT_OWN | ATT_IOMEM },
+
+ /* TCM SYS NON-SECURE*/
+ { 0x20000000, 0x20200000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ { 0x20020000, 0x20220000, 0x00020000, ATT_OWN | ATT_IOMEM },
+
+ /* TCM SYS SECURE*/
+ { 0x30000000, 0x20200000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ { 0x30020000, 0x20220000, 0x00020000, ATT_OWN | ATT_IOMEM },
+
+ /* DDR */
+ { 0x80000000, 0x80000000, 0x10000000, 0 },
+ { 0x90000000, 0x80000000, 0x10000000, 0 },
+
+ { 0xC0000000, 0xa0000000, 0x10000000, 0 },
+ { 0xD0000000, 0xa0000000, 0x10000000, 0 },
+};
+
+static const struct imx_rproc_att imx_rproc_att_imx8mn[] = {
+ /* dev addr , sys addr , size , flags */
+ /* ITCM */
+ { 0x00000000, 0x007E0000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ /* OCRAM_S */
+ { 0x00180000, 0x00180000, 0x00009000, 0 },
+ /* OCRAM */
+ { 0x00900000, 0x00900000, 0x00020000, 0 },
+ /* OCRAM */
+ { 0x00920000, 0x00920000, 0x00020000, 0 },
+ /* OCRAM */
+ { 0x00940000, 0x00940000, 0x00050000, 0 },
+ /* QSPI Code - alias */
+ { 0x08000000, 0x08000000, 0x08000000, 0 },
+ /* DDR (Code) - alias */
+ { 0x10000000, 0x40000000, 0x0FFE0000, 0 },
+ /* DTCM */
+ { 0x20000000, 0x00800000, 0x00020000, ATT_OWN | ATT_IOMEM },
+ /* OCRAM_S - alias */
+ { 0x20180000, 0x00180000, 0x00008000, ATT_OWN },
+ /* OCRAM */
+ { 0x20200000, 0x00900000, 0x00020000, ATT_OWN },
+ /* OCRAM */
+ { 0x20220000, 0x00920000, 0x00020000, ATT_OWN },
+ /* OCRAM */
+ { 0x20240000, 0x00940000, 0x00040000, ATT_OWN },
+ /* DDR (Data) */
+ { 0x40000000, 0x40000000, 0x80000000, 0 },
+};
+
+static const struct imx_rproc_att imx_rproc_att_imx8mq[] = {
+ /* dev addr , sys addr , size , flags */
+ /* TCML - alias */
+ { 0x00000000, 0x007e0000, 0x00020000, ATT_IOMEM},
+ /* OCRAM_S */
+ { 0x00180000, 0x00180000, 0x00008000, 0 },
+ /* OCRAM */
+ { 0x00900000, 0x00900000, 0x00020000, 0 },
+ /* OCRAM */
+ { 0x00920000, 0x00920000, 0x00020000, 0 },
+ /* QSPI Code - alias */
+ { 0x08000000, 0x08000000, 0x08000000, 0 },
+ /* DDR (Code) - alias */
+ { 0x10000000, 0x80000000, 0x0FFE0000, 0 },
+ /* TCML */
+ { 0x1FFE0000, 0x007E0000, 0x00020000, ATT_OWN | ATT_IOMEM},
+ /* TCMU */
+ { 0x20000000, 0x00800000, 0x00020000, ATT_OWN | ATT_IOMEM},
+ /* OCRAM_S */
+ { 0x20180000, 0x00180000, 0x00008000, ATT_OWN },
+ /* OCRAM */
+ { 0x20200000, 0x00900000, 0x00020000, ATT_OWN },
+ /* OCRAM */
+ { 0x20220000, 0x00920000, 0x00020000, ATT_OWN },
+ /* DDR (Data) */
+ { 0x40000000, 0x40000000, 0x80000000, 0 },
+};
+
+static const struct imx_rproc_att imx_rproc_att_imx8ulp[] = {
+ {0x1FFC0000, 0x1FFC0000, 0xC0000, ATT_OWN},
+ {0x21000000, 0x21000000, 0x10000, ATT_OWN},
+ {0x80000000, 0x80000000, 0x60000000, 0}
+};
+
+static const struct imx_rproc_att imx_rproc_att_imx7ulp[] = {
+ {0x1FFD0000, 0x1FFD0000, 0x30000, ATT_OWN},
+ {0x20000000, 0x20000000, 0x10000, ATT_OWN},
+ {0x2F000000, 0x2F000000, 0x20000, ATT_OWN},
+ {0x2F020000, 0x2F020000, 0x20000, ATT_OWN},
+ {0x60000000, 0x60000000, 0x40000000, 0}
};
static const struct imx_rproc_att imx_rproc_att_imx7d[] = {
@@ -100,12 +202,12 @@ static const struct imx_rproc_att imx_rproc_att_imx7d[] = {
/* OCRAM_PXP (Code) - alias */
{ 0x00940000, 0x00940000, 0x00008000, 0 },
/* TCML (Code) */
- { 0x1FFF8000, 0x007F8000, 0x00008000, ATT_OWN },
+ { 0x1FFF8000, 0x007F8000, 0x00008000, ATT_OWN | ATT_IOMEM },
/* DDR (Code) - alias, first part of DDR (Data) */
{ 0x10000000, 0x80000000, 0x0FFF0000, 0 },
/* TCMU (Data) */
- { 0x20000000, 0x00800000, 0x00008000, ATT_OWN },
+ { 0x20000000, 0x00800000, 0x00008000, ATT_OWN | ATT_IOMEM },
/* OCRAM (Data) */
{ 0x20200000, 0x00900000, 0x00020000, 0 },
/* OCRAM_EPDC (Data) */
@@ -119,24 +221,52 @@ static const struct imx_rproc_att imx_rproc_att_imx7d[] = {
static const struct imx_rproc_att imx_rproc_att_imx6sx[] = {
/* dev addr , sys addr , size , flags */
/* TCML (M4 Boot Code) - alias */
- { 0x00000000, 0x007F8000, 0x00008000, 0 },
+ { 0x00000000, 0x007F8000, 0x00008000, ATT_IOMEM },
/* OCRAM_S (Code) */
{ 0x00180000, 0x008F8000, 0x00004000, 0 },
/* OCRAM_S (Code) - alias */
{ 0x00180000, 0x008FC000, 0x00004000, 0 },
/* TCML (Code) */
- { 0x1FFF8000, 0x007F8000, 0x00008000, ATT_OWN },
+ { 0x1FFF8000, 0x007F8000, 0x00008000, ATT_OWN | ATT_IOMEM },
/* DDR (Code) - alias, first part of DDR (Data) */
{ 0x10000000, 0x80000000, 0x0FFF8000, 0 },
/* TCMU (Data) */
- { 0x20000000, 0x00800000, 0x00008000, ATT_OWN },
+ { 0x20000000, 0x00800000, 0x00008000, ATT_OWN | ATT_IOMEM },
/* OCRAM_S (Data) - alias? */
{ 0x208F8000, 0x008F8000, 0x00004000, 0 },
/* DDR (Data) */
{ 0x80000000, 0x80000000, 0x60000000, 0 },
};
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mn = {
+ .att = imx_rproc_att_imx8mn,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx8mn),
+ .method = IMX_RPROC_SMC,
+};
+
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx8mq = {
+ .src_reg = IMX7D_SRC_SCR,
+ .src_mask = IMX7D_M4_RST_MASK,
+ .src_start = IMX7D_M4_START,
+ .src_stop = IMX7D_M4_STOP,
+ .att = imx_rproc_att_imx8mq,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx8mq),
+ .method = IMX_RPROC_MMIO,
+};
+
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx8ulp = {
+ .att = imx_rproc_att_imx8ulp,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx8ulp),
+ .method = IMX_RPROC_NONE,
+};
+
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx7ulp = {
+ .att = imx_rproc_att_imx7ulp,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx7ulp),
+ .method = IMX_RPROC_NONE,
+};
+
static const struct imx_rproc_dcfg imx_rproc_cfg_imx7d = {
.src_reg = IMX7D_SRC_SCR,
.src_mask = IMX7D_M4_RST_MASK,
@@ -144,6 +274,7 @@ static const struct imx_rproc_dcfg imx_rproc_cfg_imx7d = {
.src_stop = IMX7D_M4_STOP,
.att = imx_rproc_att_imx7d,
.att_size = ARRAY_SIZE(imx_rproc_att_imx7d),
+ .method = IMX_RPROC_MMIO,
};
static const struct imx_rproc_dcfg imx_rproc_cfg_imx6sx = {
@@ -153,6 +284,13 @@ static const struct imx_rproc_dcfg imx_rproc_cfg_imx6sx = {
.src_stop = IMX6SX_M4_STOP,
.att = imx_rproc_att_imx6sx,
.att_size = ARRAY_SIZE(imx_rproc_att_imx6sx),
+ .method = IMX_RPROC_MMIO,
+};
+
+static const struct imx_rproc_dcfg imx_rproc_cfg_imx93 = {
+ .att = imx_rproc_att_imx93,
+ .att_size = ARRAY_SIZE(imx_rproc_att_imx93),
+ .method = IMX_RPROC_SMC,
};
static int imx_rproc_start(struct rproc *rproc)
@@ -160,12 +298,24 @@ static int imx_rproc_start(struct rproc *rproc)
struct imx_rproc *priv = rproc->priv;
const struct imx_rproc_dcfg *dcfg = priv->dcfg;
struct device *dev = priv->dev;
+ struct arm_smccc_res res;
int ret;
- ret = regmap_update_bits(priv->regmap, dcfg->src_reg,
- dcfg->src_mask, dcfg->src_start);
+ switch (dcfg->method) {
+ case IMX_RPROC_MMIO:
+ ret = regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask,
+ dcfg->src_start);
+ break;
+ case IMX_RPROC_SMC:
+ arm_smccc_smc(IMX_SIP_RPROC, IMX_SIP_RPROC_START, 0, 0, 0, 0, 0, 0, &res);
+ ret = res.a0;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
if (ret)
- dev_err(dev, "Failed to enable M4!\n");
+ dev_err(dev, "Failed to enable remote core!\n");
return ret;
}
@@ -175,18 +325,32 @@ static int imx_rproc_stop(struct rproc *rproc)
struct imx_rproc *priv = rproc->priv;
const struct imx_rproc_dcfg *dcfg = priv->dcfg;
struct device *dev = priv->dev;
+ struct arm_smccc_res res;
int ret;
- ret = regmap_update_bits(priv->regmap, dcfg->src_reg,
- dcfg->src_mask, dcfg->src_stop);
+ switch (dcfg->method) {
+ case IMX_RPROC_MMIO:
+ ret = regmap_update_bits(priv->regmap, dcfg->src_reg, dcfg->src_mask,
+ dcfg->src_stop);
+ break;
+ case IMX_RPROC_SMC:
+ arm_smccc_smc(IMX_SIP_RPROC, IMX_SIP_RPROC_STOP, 0, 0, 0, 0, 0, 0, &res);
+ ret = res.a0;
+ if (res.a1)
+ dev_info(dev, "Not in wfi, force stopped\n");
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
if (ret)
- dev_err(dev, "Failed to stop M4!\n");
+ dev_err(dev, "Failed to stop remote core\n");
return ret;
}
static int imx_rproc_da_to_sys(struct imx_rproc *priv, u64 da,
- int len, u64 *sys)
+ size_t len, u64 *sys, bool *is_iomem)
{
const struct imx_rproc_dcfg *dcfg = priv->dcfg;
int i;
@@ -199,33 +363,35 @@ static int imx_rproc_da_to_sys(struct imx_rproc *priv, u64 da,
unsigned int offset = da - att->da;
*sys = att->sa + offset;
+ if (is_iomem)
+ *is_iomem = att->flags & ATT_IOMEM;
return 0;
}
}
- dev_warn(priv->dev, "Translation failed: da = 0x%llx len = 0x%x\n",
+ dev_warn(priv->dev, "Translation failed: da = 0x%llx len = 0x%zx\n",
da, len);
return -ENOENT;
}
-static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct imx_rproc *priv = rproc->priv;
void *va = NULL;
u64 sys;
int i;
- if (len <= 0)
+ if (len == 0)
return NULL;
/*
* On device side we have many aliases, so we need to convert device
* address (M4) to system bus address first.
*/
- if (imx_rproc_da_to_sys(priv, da, len, &sys))
+ if (imx_rproc_da_to_sys(priv, da, len, &sys, is_iomem))
return NULL;
- for (i = 0; i < IMX7D_RPROC_MEM_MAX; i++) {
+ for (i = 0; i < IMX_RPROC_MEM_MAX; i++) {
if (sys >= priv->mem[i].sys_addr && sys + len <
priv->mem[i].sys_addr + priv->mem[i].size) {
unsigned int offset = sys - priv->mem[i].sys_addr;
@@ -235,15 +401,152 @@ static void *imx_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
}
}
- dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%x va = 0x%p\n", da, len, va);
+ dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%zx va = 0x%p\n",
+ da, len, va);
return va;
}
+static int imx_rproc_mem_alloc(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ struct device *dev = rproc->dev.parent;
+ void *va;
+
+ dev_dbg(dev, "map memory: %p+%zx\n", &mem->dma, mem->len);
+ va = ioremap_wc(mem->dma, mem->len);
+ if (IS_ERR_OR_NULL(va)) {
+ dev_err(dev, "Unable to map memory region: %p+%zx\n",
+ &mem->dma, mem->len);
+ return -ENOMEM;
+ }
+
+ /* Update memory entry va */
+ mem->va = va;
+
+ return 0;
+}
+
+static int imx_rproc_mem_release(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ dev_dbg(rproc->dev.parent, "unmap memory: %pa\n", &mem->dma);
+ iounmap(mem->va);
+
+ return 0;
+}
+
+static int imx_rproc_prepare(struct rproc *rproc)
+{
+ struct imx_rproc *priv = rproc->priv;
+ struct device_node *np = priv->dev->of_node;
+ struct of_phandle_iterator it;
+ struct rproc_mem_entry *mem;
+ struct reserved_mem *rmem;
+ u32 da;
+
+ /* Register associated reserved memory regions */
+ of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
+ while (of_phandle_iterator_next(&it) == 0) {
+		/*
+		 * Ignore the first memory region, which will be used as the
+		 * vdev buffer. No extra handling is needed here;
+		 * rproc_add_virtio_dev will take care of it.
+		 */
+ if (!strcmp(it.node->name, "vdev0buffer"))
+ continue;
+
+ if (!strcmp(it.node->name, "rsc-table"))
+ continue;
+
+ rmem = of_reserved_mem_lookup(it.node);
+ if (!rmem) {
+ dev_err(priv->dev, "unable to acquire memory-region\n");
+ return -EINVAL;
+ }
+
+		/* No need to translate pa to da, i.MX uses the same map */
+ da = rmem->base;
+
+ /* Register memory region */
+ mem = rproc_mem_entry_init(priv->dev, NULL, (dma_addr_t)rmem->base, rmem->size, da,
+ imx_rproc_mem_alloc, imx_rproc_mem_release,
+ it.node->name);
+
+ if (mem)
+ rproc_coredump_add_segment(rproc, da, rmem->size);
+ else
+ return -ENOMEM;
+
+ rproc_add_carveout(rproc, mem);
+ }
+
+ return 0;
+}
+
+static int imx_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ int ret;
+
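+	/* the resource table is optional: firmware without one can still be booted */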
+ ret = rproc_elf_load_rsc_table(rproc, fw);
+ if (ret)
+ dev_info(&rproc->dev, "No resource table in elf\n");
+
+ return 0;
+}
+
+static void imx_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct imx_rproc *priv = rproc->priv;
+ int err;
+ __u32 mmsg;
+
+ if (!priv->tx_ch) {
+ dev_err(priv->dev, "No initialized mbox tx channel\n");
+ return;
+ }
+
+ /*
+	 * Send the index of the triggered virtqueue in the MU payload to
+	 * let the remote processor know which virtqueue is being used.
+ */
+ mmsg = vqid << 16;
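+	/* e.g. kicking vqid 1 puts 0x00010000 in the MU payload */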
+
+ err = mbox_send_message(priv->tx_ch, (void *)&mmsg);
+ if (err < 0)
+ dev_err(priv->dev, "%s: failed (%d, err:%d)\n",
+ __func__, vqid, err);
+}
+
+static int imx_rproc_attach(struct rproc *rproc)
+{
+ return 0;
+}
+
+static struct resource_table *imx_rproc_get_loaded_rsc_table(struct rproc *rproc, size_t *table_sz)
+{
+ struct imx_rproc *priv = rproc->priv;
+
+ /* The resource table has already been mapped in imx_rproc_addr_init */
+ if (!priv->rsc_table)
+ return NULL;
+
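+	/*
+	 * The actual table size is not reported by the remote side; 1 kB is
+	 * assumed to be large enough to cover the whole table.
+	 */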
+ *table_sz = SZ_1K;
+ return (struct resource_table *)priv->rsc_table;
+}
+
static const struct rproc_ops imx_rproc_ops = {
+ .prepare = imx_rproc_prepare,
+ .attach = imx_rproc_attach,
.start = imx_rproc_start,
.stop = imx_rproc_stop,
+ .kick = imx_rproc_kick,
.da_to_va = imx_rproc_da_to_va,
+ .load = rproc_elf_load_segments,
+ .parse_fw = imx_rproc_parse_fw,
+ .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
+ .get_loaded_rsc_table = imx_rproc_get_loaded_rsc_table,
+ .sanity_check = rproc_elf_sanity_check,
+ .get_boot_addr = rproc_elf_get_boot_addr,
};
static int imx_rproc_addr_init(struct imx_rproc *priv,
@@ -261,13 +564,17 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
if (!(att->flags & ATT_OWN))
continue;
- if (b >= IMX7D_RPROC_MEM_MAX)
+ if (b >= IMX_RPROC_MEM_MAX)
break;
- priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev,
- att->sa, att->size);
+ if (att->flags & ATT_IOMEM)
+ priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev,
+ att->sa, att->size);
+ else
+ priv->mem[b].cpu_addr = devm_ioremap_wc(&pdev->dev,
+ att->sa, att->size);
if (!priv->mem[b].cpu_addr) {
- dev_err(dev, "devm_ioremap_resource failed\n");
+ dev_err(dev, "failed to remap %#x bytes from %#x\n", att->size, att->sa);
return -ENOMEM;
}
priv->mem[b].sys_addr = att->sa;
@@ -286,47 +593,178 @@ static int imx_rproc_addr_init(struct imx_rproc *priv,
struct resource res;
node = of_parse_phandle(np, "memory-region", a);
+		/* Do not map the vdevbuffer and vdevring regions */
+ if (!strncmp(node->name, "vdev", strlen("vdev")))
+ continue;
err = of_address_to_resource(node, 0, &res);
if (err) {
dev_err(dev, "unable to resolve memory region\n");
return err;
}
- if (b >= IMX7D_RPROC_MEM_MAX)
+ of_node_put(node);
+
+ if (b >= IMX_RPROC_MEM_MAX)
break;
- priv->mem[b].cpu_addr = devm_ioremap_resource(&pdev->dev, &res);
- if (IS_ERR(priv->mem[b].cpu_addr)) {
- dev_err(dev, "devm_ioremap_resource failed\n");
- err = PTR_ERR(priv->mem[b].cpu_addr);
- return err;
+		/* Do not use the resource-managed variant, because the region might be shared */
+ priv->mem[b].cpu_addr = devm_ioremap_wc(&pdev->dev, res.start, resource_size(&res));
+ if (!priv->mem[b].cpu_addr) {
+ dev_err(dev, "failed to remap %pr\n", &res);
+ return -ENOMEM;
}
priv->mem[b].sys_addr = res.start;
priv->mem[b].size = resource_size(&res);
+ if (!strcmp(node->name, "rsc-table"))
+ priv->rsc_table = priv->mem[b].cpu_addr;
b++;
}
return 0;
}
-static int imx_rproc_probe(struct platform_device *pdev)
+static void imx_rproc_vq_work(struct work_struct *work)
+{
+ struct imx_rproc *priv = container_of(work, struct imx_rproc,
+ rproc_work);
+
+ rproc_vq_interrupt(priv->rproc, 0);
+ rproc_vq_interrupt(priv->rproc, 1);
+}
+
+static void imx_rproc_rx_callback(struct mbox_client *cl, void *msg)
+{
+ struct rproc *rproc = dev_get_drvdata(cl->dev);
+ struct imx_rproc *priv = rproc->priv;
+
+ queue_work(priv->workqueue, &priv->rproc_work);
+}
+
+static int imx_rproc_xtr_mbox_init(struct rproc *rproc)
+{
+ struct imx_rproc *priv = rproc->priv;
+ struct device *dev = priv->dev;
+ struct mbox_client *cl;
+ int ret;
+
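+	/* mailboxes are optional: without "mbox-names" there is no kick or rx path */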
+ if (!of_get_property(dev->of_node, "mbox-names", NULL))
+ return 0;
+
+ cl = &priv->cl;
+ cl->dev = dev;
+ cl->tx_block = true;
+ cl->tx_tout = 100;
+ cl->knows_txdone = false;
+ cl->rx_callback = imx_rproc_rx_callback;
+
+ priv->tx_ch = mbox_request_channel_byname(cl, "tx");
+ if (IS_ERR(priv->tx_ch)) {
+ ret = PTR_ERR(priv->tx_ch);
+ return dev_err_probe(cl->dev, ret,
+ "failed to request tx mailbox channel: %d\n", ret);
+ }
+
+ priv->rx_ch = mbox_request_channel_byname(cl, "rx");
+ if (IS_ERR(priv->rx_ch)) {
+ mbox_free_channel(priv->tx_ch);
+ ret = PTR_ERR(priv->rx_ch);
+ return dev_err_probe(cl->dev, ret,
+ "failed to request rx mailbox channel: %d\n", ret);
+ }
+
+ return 0;
+}
+
+static void imx_rproc_free_mbox(struct rproc *rproc)
+{
+ struct imx_rproc *priv = rproc->priv;
+
+ mbox_free_channel(priv->tx_ch);
+ mbox_free_channel(priv->rx_ch);
+}
+
+static int imx_rproc_detect_mode(struct imx_rproc *priv)
{
- struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
- struct imx_rproc *priv;
- struct rproc *rproc;
struct regmap_config config = { .name = "imx-rproc" };
- const struct imx_rproc_dcfg *dcfg;
+ const struct imx_rproc_dcfg *dcfg = priv->dcfg;
+ struct device *dev = priv->dev;
struct regmap *regmap;
+ struct arm_smccc_res res;
int ret;
+ u32 val;
- regmap = syscon_regmap_lookup_by_phandle(np, "syscon");
+ switch (dcfg->method) {
+ case IMX_RPROC_NONE:
+ priv->rproc->state = RPROC_DETACHED;
+ return 0;
+ case IMX_RPROC_SMC:
+ arm_smccc_smc(IMX_SIP_RPROC, IMX_SIP_RPROC_STARTED, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0)
+ priv->rproc->state = RPROC_DETACHED;
+ return 0;
+ default:
+ break;
+ }
+
+ regmap = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
if (IS_ERR(regmap)) {
dev_err(dev, "failed to find syscon\n");
return PTR_ERR(regmap);
}
+
+ priv->regmap = regmap;
regmap_attach_dev(dev, regmap, &config);
+ ret = regmap_read(regmap, dcfg->src_reg, &val);
+ if (ret) {
+ dev_err(dev, "Failed to read src\n");
+ return ret;
+ }
+
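+	/*
+	 * If the core is not held in its stop state it was already started,
+	 * e.g. by the bootloader, so attach to it instead of booting it.
+	 */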
+ if ((val & dcfg->src_mask) != dcfg->src_stop)
+ priv->rproc->state = RPROC_DETACHED;
+
+ return 0;
+}
+
+static int imx_rproc_clk_enable(struct imx_rproc *priv)
+{
+ const struct imx_rproc_dcfg *dcfg = priv->dcfg;
+ struct device *dev = priv->dev;
+ int ret;
+
+ /* Remote core is not under control of Linux */
+ if (dcfg->method == IMX_RPROC_NONE)
+ return 0;
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "Failed to get clock\n");
+ return PTR_ERR(priv->clk);
+ }
+
+ /*
+	 * Clock for the remote core block, including its memory. It must be
+	 * enabled before .start so the firmware can be transferred.
+ */
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable clock\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int imx_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct imx_rproc *priv;
+ struct rproc *rproc;
+ const struct imx_rproc_dcfg *dcfg;
+ int ret;
+
	/* set some name other than imx */
rproc = rproc_alloc(dev, "imx-rproc", &imx_rproc_ops,
NULL, sizeof(*priv));
@@ -341,34 +779,39 @@ static int imx_rproc_probe(struct platform_device *pdev)
priv = rproc->priv;
priv->rproc = rproc;
- priv->regmap = regmap;
priv->dcfg = dcfg;
priv->dev = dev;
dev_set_drvdata(dev, rproc);
+ priv->workqueue = create_workqueue(dev_name(dev));
+ if (!priv->workqueue) {
+ dev_err(dev, "cannot create workqueue\n");
+ ret = -ENOMEM;
+ goto err_put_rproc;
+ }
+
+ ret = imx_rproc_xtr_mbox_init(rproc);
+ if (ret)
+ goto err_put_wkq;
ret = imx_rproc_addr_init(priv, pdev);
if (ret) {
dev_err(dev, "failed on imx_rproc_addr_init\n");
- goto err_put_rproc;
+ goto err_put_mbox;
}
- priv->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(priv->clk)) {
- dev_err(dev, "Failed to get clock\n");
- ret = PTR_ERR(priv->clk);
- goto err_put_rproc;
- }
+ ret = imx_rproc_detect_mode(priv);
+ if (ret)
+ goto err_put_mbox;
- /*
- * clk for M4 block including memory. Should be
- * enabled before .start for FW transfer.
- */
- ret = clk_prepare_enable(priv->clk);
- if (ret) {
- dev_err(&rproc->dev, "Failed to enable clock\n");
- goto err_put_rproc;
- }
+ ret = imx_rproc_clk_enable(priv);
+ if (ret)
+ goto err_put_mbox;
+
+ INIT_WORK(&priv->rproc_work, imx_rproc_vq_work);
+
+ if (rproc->state != RPROC_DETACHED)
+ rproc->auto_boot = of_property_read_bool(np, "fsl,auto-boot");
ret = rproc_add(rproc);
if (ret) {
@@ -380,6 +823,10 @@ static int imx_rproc_probe(struct platform_device *pdev)
err_put_clk:
clk_disable_unprepare(priv->clk);
+err_put_mbox:
+ imx_rproc_free_mbox(rproc);
+err_put_wkq:
+ destroy_workqueue(priv->workqueue);
err_put_rproc:
rproc_free(rproc);
@@ -393,14 +840,23 @@ static int imx_rproc_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->clk);
rproc_del(rproc);
+ imx_rproc_free_mbox(rproc);
+ destroy_workqueue(priv->workqueue);
rproc_free(rproc);
return 0;
}
static const struct of_device_id imx_rproc_of_match[] = {
+ { .compatible = "fsl,imx7ulp-cm4", .data = &imx_rproc_cfg_imx7ulp },
{ .compatible = "fsl,imx7d-cm4", .data = &imx_rproc_cfg_imx7d },
{ .compatible = "fsl,imx6sx-cm4", .data = &imx_rproc_cfg_imx6sx },
+ { .compatible = "fsl,imx8mq-cm4", .data = &imx_rproc_cfg_imx8mq },
+ { .compatible = "fsl,imx8mm-cm4", .data = &imx_rproc_cfg_imx8mq },
+ { .compatible = "fsl,imx8mn-cm7", .data = &imx_rproc_cfg_imx8mn },
+ { .compatible = "fsl,imx8mp-cm7", .data = &imx_rproc_cfg_imx8mn },
+ { .compatible = "fsl,imx8ulp-cm33", .data = &imx_rproc_cfg_imx8ulp },
+ { .compatible = "fsl,imx93-cm33", .data = &imx_rproc_cfg_imx93 },
{},
};
MODULE_DEVICE_TABLE(of, imx_rproc_of_match);
@@ -417,5 +873,5 @@ static struct platform_driver imx_rproc_driver = {
module_platform_driver(imx_rproc_driver);
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("IMX6SX/7D remote processor control driver");
+MODULE_DESCRIPTION("i.MX remote processor control driver");
MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
diff --git a/drivers/remoteproc/imx_rproc.h b/drivers/remoteproc/imx_rproc.h
new file mode 100644
index 000000000000..1c7e2127c758
--- /dev/null
+++ b/drivers/remoteproc/imx_rproc.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+ * Copyright 2021 NXP
+ */
+
+#ifndef _IMX_RPROC_H
+#define _IMX_RPROC_H
+
+/* address translation table */
+struct imx_rproc_att {
+	u32 da;	/* device address (from the Cortex-M4 view) */
+ u32 sa; /* system bus address */
+ u32 size; /* size of reg range */
+ int flags;
+};
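+
+/*
+ * An entry such as { .da = 0x1fff8000, .sa = 0x007f8000, .size = 0x8000,
+ * .flags = ATT_OWN } (illustrative values only) would map 32 kB of the
+ * remote core's TCM into the system bus address space, with the driver's
+ * ATT_OWN flag marking the range as owned by the remote core.
+ */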
+
+/* Remote core start/stop method */
+enum imx_rproc_method {
+ IMX_RPROC_NONE,
+ /* Through syscon regmap */
+ IMX_RPROC_MMIO,
+ /* Through ARM SMCCC */
+ IMX_RPROC_SMC,
+ /* Through System Control Unit API */
+ IMX_RPROC_SCU_API,
+};
+
+struct imx_rproc_dcfg {
+ u32 src_reg;
+ u32 src_mask;
+ u32 src_start;
+ u32 src_stop;
+ const struct imx_rproc_att *att;
+ size_t att_size;
+ enum imx_rproc_method method;
+};
+
+#endif /* _IMX_RPROC_H */
diff --git a/drivers/remoteproc/ingenic_rproc.c b/drivers/remoteproc/ingenic_rproc.c
new file mode 100644
index 000000000000..9902cce28692
--- /dev/null
+++ b/drivers/remoteproc/ingenic_rproc.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Ingenic JZ47xx remoteproc driver
+ * Copyright 2019, Paul Cercueil <paul@crapouillou.net>
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+
+#include "remoteproc_internal.h"
+
+#define REG_AUX_CTRL 0x0
+#define REG_AUX_MSG_ACK 0x10
+#define REG_AUX_MSG 0x14
+#define REG_CORE_MSG_ACK 0x18
+#define REG_CORE_MSG 0x1C
+
+#define AUX_CTRL_SLEEP BIT(31)
+#define AUX_CTRL_MSG_IRQ_EN BIT(3)
+#define AUX_CTRL_NMI_RESETS BIT(2)
+#define AUX_CTRL_NMI BIT(1)
+#define AUX_CTRL_SW_RESET BIT(0)
+
+static bool auto_boot;
+module_param(auto_boot, bool, 0400);
+MODULE_PARM_DESC(auto_boot,
+ "Auto-boot the remote processor [default=false]");
+
+struct vpu_mem_map {
+ const char *name;
+ unsigned int da;
+};
+
+struct vpu_mem_info {
+ const struct vpu_mem_map *map;
+ unsigned long len;
+ void __iomem *base;
+};
+
+static const struct vpu_mem_map vpu_mem_map[] = {
+ { "tcsm0", 0x132b0000 },
+ { "tcsm1", 0xf4000000 },
+ { "sram", 0x132f0000 },
+};
+
+/**
+ * struct vpu - Ingenic VPU remoteproc private structure
+ * @irq: interrupt number
+ * @clks: pointers to the VPU and AUX clocks
+ * @aux_base: raw pointer to the AUX interface registers
+ * @mem_info: array of struct vpu_mem_info, which contains the mapping info
+ *            for each of the external memories
+ * @dev: private pointer to the device
+ */
+struct vpu {
+ int irq;
+ struct clk_bulk_data clks[2];
+ void __iomem *aux_base;
+ struct vpu_mem_info mem_info[ARRAY_SIZE(vpu_mem_map)];
+ struct device *dev;
+};
+
+static int ingenic_rproc_prepare(struct rproc *rproc)
+{
+ struct vpu *vpu = rproc->priv;
+ int ret;
+
+	/* The clocks must be enabled for the firmware to be loaded into the TCSM */
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(vpu->clks), vpu->clks);
+ if (ret)
+ dev_err(vpu->dev, "Unable to start clocks: %d\n", ret);
+
+ return ret;
+}
+
+static int ingenic_rproc_unprepare(struct rproc *rproc)
+{
+ struct vpu *vpu = rproc->priv;
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(vpu->clks), vpu->clks);
+
+ return 0;
+}
+
+static int ingenic_rproc_start(struct rproc *rproc)
+{
+ struct vpu *vpu = rproc->priv;
+ u32 ctrl;
+
+ enable_irq(vpu->irq);
+
+ /* Reset the AUX and enable message IRQ */
+ ctrl = AUX_CTRL_NMI_RESETS | AUX_CTRL_NMI | AUX_CTRL_MSG_IRQ_EN;
+ writel(ctrl, vpu->aux_base + REG_AUX_CTRL);
+
+ return 0;
+}
+
+static int ingenic_rproc_stop(struct rproc *rproc)
+{
+ struct vpu *vpu = rproc->priv;
+
+ disable_irq(vpu->irq);
+
+ /* Keep AUX in reset mode */
+ writel(AUX_CTRL_SW_RESET, vpu->aux_base + REG_AUX_CTRL);
+
+ return 0;
+}
+
+static void ingenic_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct vpu *vpu = rproc->priv;
+
+ writel(vqid, vpu->aux_base + REG_CORE_MSG);
+}
+
+static void *ingenic_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct vpu *vpu = rproc->priv;
+ void __iomem *va = NULL;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vpu_mem_map); i++) {
+ const struct vpu_mem_info *info = &vpu->mem_info[i];
+ const struct vpu_mem_map *map = info->map;
+
+ if (da >= map->da && (da + len) < (map->da + info->len)) {
+ va = info->base + (da - map->da);
+ break;
+ }
+ }
+
+ return (__force void *)va;
+}
+
+static const struct rproc_ops ingenic_rproc_ops = {
+ .prepare = ingenic_rproc_prepare,
+ .unprepare = ingenic_rproc_unprepare,
+ .start = ingenic_rproc_start,
+ .stop = ingenic_rproc_stop,
+ .kick = ingenic_rproc_kick,
+ .da_to_va = ingenic_rproc_da_to_va,
+};
+
+static irqreturn_t vpu_interrupt(int irq, void *data)
+{
+ struct rproc *rproc = data;
+ struct vpu *vpu = rproc->priv;
+ u32 vring;
+
+ vring = readl(vpu->aux_base + REG_AUX_MSG);
+
+ /* Ack the interrupt */
+ writel(0, vpu->aux_base + REG_AUX_MSG_ACK);
+
+ return rproc_vq_interrupt(rproc, vring);
+}
+
+static int ingenic_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *mem;
+ struct rproc *rproc;
+ struct vpu *vpu;
+ unsigned int i;
+ int ret;
+
+ rproc = devm_rproc_alloc(dev, "ingenic-vpu",
+ &ingenic_rproc_ops, NULL, sizeof(*vpu));
+ if (!rproc)
+ return -ENOMEM;
+
+ rproc->auto_boot = auto_boot;
+
+ vpu = rproc->priv;
+ vpu->dev = &pdev->dev;
+ platform_set_drvdata(pdev, vpu);
+
+ mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "aux");
+ vpu->aux_base = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(vpu->aux_base)) {
+ dev_err(dev, "Failed to ioremap\n");
+ return PTR_ERR(vpu->aux_base);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(vpu_mem_map); i++) {
+ mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ vpu_mem_map[i].name);
+
+ vpu->mem_info[i].base = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(vpu->mem_info[i].base)) {
+ ret = PTR_ERR(vpu->mem_info[i].base);
+ dev_err(dev, "Failed to ioremap\n");
+ return ret;
+ }
+
+ vpu->mem_info[i].len = resource_size(mem);
+ vpu->mem_info[i].map = &vpu_mem_map[i];
+ }
+
+ vpu->clks[0].id = "vpu";
+ vpu->clks[1].id = "aux";
+
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(vpu->clks), vpu->clks);
+ if (ret) {
+ dev_err(dev, "Failed to get clocks\n");
+ return ret;
+ }
+
+ vpu->irq = platform_get_irq(pdev, 0);
+ if (vpu->irq < 0)
+ return vpu->irq;
+
+ ret = devm_request_irq(dev, vpu->irq, vpu_interrupt, IRQF_NO_AUTOEN,
+ "VPU", rproc);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request IRQ\n");
+ return ret;
+ }
+
+ ret = devm_rproc_add(dev, rproc);
+ if (ret) {
+ dev_err(dev, "Failed to register remote processor\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id ingenic_rproc_of_matches[] = {
+ { .compatible = "ingenic,jz4770-vpu-rproc", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ingenic_rproc_of_matches);
+
+static struct platform_driver ingenic_rproc_driver = {
+ .probe = ingenic_rproc_probe,
+ .driver = {
+ .name = "ingenic-vpu",
+ .of_match_table = ingenic_rproc_of_matches,
+ },
+};
+module_platform_driver(ingenic_rproc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_DESCRIPTION("Ingenic JZ47xx Remote Processor control driver");
diff --git a/drivers/remoteproc/keystone_remoteproc.c b/drivers/remoteproc/keystone_remoteproc.c
index 5c4658f00b3d..54781f553f4e 100644
--- a/drivers/remoteproc/keystone_remoteproc.c
+++ b/drivers/remoteproc/keystone_remoteproc.c
@@ -246,7 +246,7 @@ static void keystone_rproc_kick(struct rproc *rproc, int vqid)
* can be used either by the remoteproc core for loading (when using kernel
* remoteproc loader), or by any rpmsg bus drivers.
*/
-static void *keystone_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *keystone_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct keystone_rproc *ksproc = rproc->priv;
void __iomem *va = NULL;
@@ -255,7 +255,7 @@ static void *keystone_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
size_t size;
int i;
- if (len <= 0)
+ if (len == 0)
return NULL;
for (i = 0; i < ksproc->num_mems; i++) {
diff --git a/drivers/remoteproc/meson_mx_ao_arc.c b/drivers/remoteproc/meson_mx_ao_arc.c
new file mode 100644
index 000000000000..462cddab6518
--- /dev/null
+++ b/drivers/remoteproc/meson_mx_ao_arc.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2020 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+#include <linux/sizes.h>
+
+#include "remoteproc_internal.h"
+
+#define AO_REMAP_REG0 0x0
+#define AO_REMAP_REG0_REMAP_AHB_SRAM_BITS_17_14_FOR_ARM_CPU GENMASK(3, 0)
+
+#define AO_REMAP_REG1 0x4
+#define AO_REMAP_REG1_MOVE_AHB_SRAM_TO_0X0_INSTEAD_OF_DDR BIT(4)
+#define AO_REMAP_REG1_REMAP_AHB_SRAM_BITS_17_14_FOR_MEDIA_CPU GENMASK(3, 0)
+
+#define AO_CPU_CNTL 0x0
+#define AO_CPU_CNTL_AHB_SRAM_BITS_31_20 GENMASK(28, 16)
+#define AO_CPU_CNTL_HALT BIT(9)
+#define AO_CPU_CNTL_UNKNOWN			BIT(8)
+#define AO_CPU_CNTL_RUN BIT(0)
+
+#define AO_CPU_STAT 0x4
+
+#define AO_SECURE_REG0 0x0
+#define AO_SECURE_REG0_AHB_SRAM_BITS_19_12 GENMASK(15, 8)
+
+/* Only bits [31:20] and [17:14] are usable, all other bits must be zero */
+#define MESON_AO_RPROC_SRAM_USABLE_BITS 0xfff3c000ULL
+
+#define MESON_AO_RPROC_MEMORY_OFFSET 0x10000000
+
+struct meson_mx_ao_arc_rproc_priv {
+ void __iomem *remap_base;
+ void __iomem *cpu_base;
+ unsigned long sram_va;
+ phys_addr_t sram_pa;
+ size_t sram_size;
+ struct gen_pool *sram_pool;
+ struct reset_control *arc_reset;
+ struct clk *arc_pclk;
+ struct regmap *secbus2_regmap;
+};
+
+static int meson_mx_ao_arc_rproc_start(struct rproc *rproc)
+{
+ struct meson_mx_ao_arc_rproc_priv *priv = rproc->priv;
+ phys_addr_t translated_sram_addr;
+ u32 tmp;
+ int ret;
+
+ ret = clk_prepare_enable(priv->arc_pclk);
+ if (ret)
+ return ret;
+
+ tmp = FIELD_PREP(AO_REMAP_REG0_REMAP_AHB_SRAM_BITS_17_14_FOR_ARM_CPU,
+ priv->sram_pa >> 14);
+ writel(tmp, priv->remap_base + AO_REMAP_REG0);
+
+ /*
+ * The SRAM content as seen by the ARC core always starts at 0x0
+ * regardless of the value given here (this was discovered by trial and
+ * error). For SoCs older than Meson6 we probably have to set
+ * AO_REMAP_REG1_MOVE_AHB_SRAM_TO_0X0_INSTEAD_OF_DDR to achieve the
+ * same. (At least) For Meson8 and newer that bit must not be set.
+ */
+ writel(0x0, priv->remap_base + AO_REMAP_REG1);
+
+ regmap_update_bits(priv->secbus2_regmap, AO_SECURE_REG0,
+ AO_SECURE_REG0_AHB_SRAM_BITS_19_12,
+ FIELD_PREP(AO_SECURE_REG0_AHB_SRAM_BITS_19_12,
+ priv->sram_pa >> 12));
+
+ ret = reset_control_reset(priv->arc_reset);
+ if (ret) {
+ clk_disable_unprepare(priv->arc_pclk);
+ return ret;
+ }
+
+ usleep_range(10, 100);
+
+ /*
+ * Convert from 0xd9000000 to 0xc9000000 as the vendor driver does.
+ * This only seems to be relevant for the AO_CPU_CNTL register. It is
+ * unknown why this is needed.
+ */
+ translated_sram_addr = priv->sram_pa - MESON_AO_RPROC_MEMORY_OFFSET;
+
+ tmp = FIELD_PREP(AO_CPU_CNTL_AHB_SRAM_BITS_31_20,
+ translated_sram_addr >> 20);
+	tmp |= AO_CPU_CNTL_UNKNOWN | AO_CPU_CNTL_RUN;
+ writel(tmp, priv->cpu_base + AO_CPU_CNTL);
+
+ usleep_range(20, 200);
+
+ return 0;
+}
+
+static int meson_mx_ao_arc_rproc_stop(struct rproc *rproc)
+{
+ struct meson_mx_ao_arc_rproc_priv *priv = rproc->priv;
+
+ writel(AO_CPU_CNTL_HALT, priv->cpu_base + AO_CPU_CNTL);
+
+ clk_disable_unprepare(priv->arc_pclk);
+
+ return 0;
+}
+
+static void *meson_mx_ao_arc_rproc_da_to_va(struct rproc *rproc, u64 da,
+ size_t len, bool *is_iomem)
+{
+ struct meson_mx_ao_arc_rproc_priv *priv = rproc->priv;
+
+ /* The memory from the ARC core's perspective always starts at 0x0. */
+ if ((da + len) > priv->sram_size)
+ return NULL;
+
+ return (void *)priv->sram_va + da;
+}
+
+static struct rproc_ops meson_mx_ao_arc_rproc_ops = {
+ .start = meson_mx_ao_arc_rproc_start,
+ .stop = meson_mx_ao_arc_rproc_stop,
+ .da_to_va = meson_mx_ao_arc_rproc_da_to_va,
+ .get_boot_addr = rproc_elf_get_boot_addr,
+ .load = rproc_elf_load_segments,
+ .sanity_check = rproc_elf_sanity_check,
+};
+
+static int meson_mx_ao_arc_rproc_probe(struct platform_device *pdev)
+{
+ struct meson_mx_ao_arc_rproc_priv *priv;
+ struct device *dev = &pdev->dev;
+ const char *fw_name = NULL;
+ struct rproc *rproc;
+ int ret;
+
+ device_property_read_string(dev, "firmware-name", &fw_name);
+
+ rproc = devm_rproc_alloc(dev, "meson-mx-ao-arc",
+ &meson_mx_ao_arc_rproc_ops, fw_name,
+ sizeof(*priv));
+ if (!rproc)
+ return -ENOMEM;
+
+ rproc->has_iommu = false;
+ priv = rproc->priv;
+
+ priv->sram_pool = of_gen_pool_get(dev->of_node, "sram", 0);
+ if (!priv->sram_pool) {
+ dev_err(dev, "Could not get SRAM pool\n");
+ return -ENODEV;
+ }
+
+ priv->sram_size = gen_pool_avail(priv->sram_pool);
+
+ priv->sram_va = gen_pool_alloc(priv->sram_pool, priv->sram_size);
+ if (!priv->sram_va) {
+ dev_err(dev, "Could not alloc memory in SRAM pool\n");
+ return -ENOMEM;
+ }
+
+ priv->sram_pa = gen_pool_virt_to_phys(priv->sram_pool, priv->sram_va);
+ if (priv->sram_pa & ~MESON_AO_RPROC_SRAM_USABLE_BITS) {
+ dev_err(dev, "SRAM address contains unusable bits\n");
+ ret = -EINVAL;
+ goto err_free_genpool;
+ }
+
+ priv->secbus2_regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "amlogic,secbus2");
+ if (IS_ERR(priv->secbus2_regmap)) {
+ dev_err(dev, "Failed to find SECBUS2 regmap\n");
+ ret = PTR_ERR(priv->secbus2_regmap);
+ goto err_free_genpool;
+ }
+
+ priv->remap_base = devm_platform_ioremap_resource_byname(pdev, "remap");
+ if (IS_ERR(priv->remap_base)) {
+ ret = PTR_ERR(priv->remap_base);
+ goto err_free_genpool;
+ }
+
+ priv->cpu_base = devm_platform_ioremap_resource_byname(pdev, "cpu");
+ if (IS_ERR(priv->cpu_base)) {
+ ret = PTR_ERR(priv->cpu_base);
+ goto err_free_genpool;
+ }
+
+ priv->arc_reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(priv->arc_reset)) {
+ dev_err(dev, "Failed to get ARC reset\n");
+ ret = PTR_ERR(priv->arc_reset);
+ goto err_free_genpool;
+ }
+
+ priv->arc_pclk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->arc_pclk)) {
+ dev_err(dev, "Failed to get the ARC PCLK\n");
+ ret = PTR_ERR(priv->arc_pclk);
+ goto err_free_genpool;
+ }
+
+ platform_set_drvdata(pdev, rproc);
+
+ ret = rproc_add(rproc);
+ if (ret)
+ goto err_free_genpool;
+
+ return 0;
+
+err_free_genpool:
+ gen_pool_free(priv->sram_pool, priv->sram_va, priv->sram_size);
+ return ret;
+}
+
+static int meson_mx_ao_arc_rproc_remove(struct platform_device *pdev)
+{
+ struct rproc *rproc = platform_get_drvdata(pdev);
+ struct meson_mx_ao_arc_rproc_priv *priv = rproc->priv;
+
+ rproc_del(rproc);
+ gen_pool_free(priv->sram_pool, priv->sram_va, priv->sram_size);
+
+ return 0;
+}
+
+static const struct of_device_id meson_mx_ao_arc_rproc_match[] = {
+ { .compatible = "amlogic,meson8-ao-arc" },
+ { .compatible = "amlogic,meson8b-ao-arc" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, meson_mx_ao_arc_rproc_match);
+
+static struct platform_driver meson_mx_ao_arc_rproc_driver = {
+ .probe = meson_mx_ao_arc_rproc_probe,
+ .remove = meson_mx_ao_arc_rproc_remove,
+ .driver = {
+ .name = "meson-mx-ao-arc-rproc",
+ .of_match_table = meson_mx_ao_arc_rproc_match,
+ },
+};
+module_platform_driver(meson_mx_ao_arc_rproc_driver);
+
+MODULE_DESCRIPTION("Amlogic Meson6/8/8b/8m2 AO ARC remote processor driver");
+MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/mtk_common.h b/drivers/remoteproc/mtk_common.h
new file mode 100644
index 000000000000..ea6fa1100a00
--- /dev/null
+++ b/drivers/remoteproc/mtk_common.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ */
+
+#ifndef __RPROC_MTK_COMMON_H
+#define __RPROC_MTK_COMMON_H
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/remoteproc/mtk_scp.h>
+
+#define MT8183_SW_RSTN 0x0
+#define MT8183_SW_RSTN_BIT BIT(0)
+#define MT8183_SCP_TO_HOST 0x1C
+#define MT8183_SCP_IPC_INT_BIT BIT(0)
+#define MT8183_SCP_WDT_INT_BIT BIT(8)
+#define MT8183_HOST_TO_SCP 0x28
+#define MT8183_HOST_IPC_INT_BIT BIT(0)
+#define MT8183_WDT_CFG 0x84
+#define MT8183_SCP_CLK_SW_SEL 0x4000
+#define MT8183_SCP_CLK_DIV_SEL 0x4024
+#define MT8183_SCP_SRAM_PDN 0x402C
+#define MT8183_SCP_L1_SRAM_PD 0x4080
+#define MT8183_SCP_TCM_TAIL_SRAM_PD 0x4094
+
+#define MT8183_SCP_CACHE_SEL(x) (0x14000 + (x) * 0x3000)
+#define MT8183_SCP_CACHE_CON MT8183_SCP_CACHE_SEL(0)
+#define MT8183_SCP_DCACHE_CON MT8183_SCP_CACHE_SEL(1)
+#define MT8183_SCP_CACHESIZE_8KB BIT(8)
+#define MT8183_SCP_CACHE_CON_WAYEN BIT(10)
+
+#define MT8186_SCP_L1_SRAM_PD_P1 0x40B0
+#define MT8186_SCP_L1_SRAM_PD_P2		0x40B4
+
+#define MT8192_L2TCM_SRAM_PD_0 0x10C0
+#define MT8192_L2TCM_SRAM_PD_1 0x10C4
+#define MT8192_L2TCM_SRAM_PD_2 0x10C8
+#define MT8192_L1TCM_SRAM_PDN 0x102C
+#define MT8192_CPU0_SRAM_PD 0x1080
+
+#define MT8192_SCP2APMCU_IPC_SET 0x4080
+#define MT8192_SCP2APMCU_IPC_CLR 0x4084
+#define MT8192_SCP_IPC_INT_BIT BIT(0)
+#define MT8192_SCP2SPM_IPC_CLR 0x4094
+#define MT8192_GIPC_IN_SET 0x4098
+#define MT8192_HOST_IPC_INT_BIT BIT(0)
+
+#define MT8192_CORE0_SW_RSTN_CLR 0x10000
+#define MT8192_CORE0_SW_RSTN_SET 0x10004
+#define MT8192_CORE0_MEM_ATT_PREDEF 0x10008
+#define MT8192_CORE0_WDT_IRQ 0x10030
+#define MT8192_CORE0_WDT_CFG 0x10034
+
+#define MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS GENMASK(7, 4)
+
+#define SCP_FW_VER_LEN 32
+#define SCP_SHARE_BUFFER_SIZE 288
+
+struct scp_run {
+ u32 signaled;
+ s8 fw_ver[SCP_FW_VER_LEN];
+ u32 dec_capability;
+ u32 enc_capability;
+ wait_queue_head_t wq;
+};
+
+struct scp_ipi_desc {
+ /* For protecting handler. */
+ struct mutex lock;
+ scp_ipi_handler_t handler;
+ void *priv;
+};
+
+struct mtk_scp;
+
+struct mtk_scp_of_data {
+ int (*scp_clk_get)(struct mtk_scp *scp);
+ int (*scp_before_load)(struct mtk_scp *scp);
+ void (*scp_irq_handler)(struct mtk_scp *scp);
+ void (*scp_reset_assert)(struct mtk_scp *scp);
+ void (*scp_reset_deassert)(struct mtk_scp *scp);
+ void (*scp_stop)(struct mtk_scp *scp);
+ void *(*scp_da_to_va)(struct mtk_scp *scp, u64 da, size_t len);
+
+ u32 host_to_scp_reg;
+ u32 host_to_scp_int_bit;
+
+ size_t ipi_buf_offset;
+};
+
+struct mtk_scp {
+ struct device *dev;
+ struct rproc *rproc;
+ struct clk *clk;
+ void __iomem *reg_base;
+ void __iomem *sram_base;
+ size_t sram_size;
+ phys_addr_t sram_phys;
+ void __iomem *l1tcm_base;
+ size_t l1tcm_size;
+ phys_addr_t l1tcm_phys;
+
+ const struct mtk_scp_of_data *data;
+
+ struct mtk_share_obj __iomem *recv_buf;
+ struct mtk_share_obj __iomem *send_buf;
+ struct scp_run run;
+	/* To prevent multiple ipi_send calls from running concurrently. */
+ struct mutex send_lock;
+ struct scp_ipi_desc ipi_desc[SCP_IPI_MAX];
+ bool ipi_id_ack[SCP_IPI_MAX];
+ wait_queue_head_t ack_wq;
+
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+ size_t dram_size;
+
+ struct rproc_subdev *rpmsg_subdev;
+};
+
+/**
+ * struct mtk_share_obj - SRAM buffer shared between the AP and SCP
+ *
+ * @id: IPI id
+ * @len: share buffer length
+ * @share_buf: share buffer data
+ */
+struct mtk_share_obj {
+ u32 id;
+ u32 len;
+ u8 share_buf[SCP_SHARE_BUFFER_SIZE];
+};
+
+void scp_memcpy_aligned(void __iomem *dst, const void *src, unsigned int len);
+void scp_ipi_lock(struct mtk_scp *scp, u32 id);
+void scp_ipi_unlock(struct mtk_scp *scp, u32 id);
+
+#endif
diff --git a/drivers/remoteproc/mtk_scp.c b/drivers/remoteproc/mtk_scp.c
new file mode 100644
index 000000000000..47b2a40e1b4a
--- /dev/null
+++ b/drivers/remoteproc/mtk_scp.c
@@ -0,0 +1,994 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2019 MediaTek Inc.
+
+#include <asm/barrier.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/remoteproc/mtk_scp.h>
+#include <linux/rpmsg/mtk_rpmsg.h>
+
+#include "mtk_common.h"
+#include "remoteproc_internal.h"
+
+#define MAX_CODE_SIZE 0x500000
+#define SECTION_NAME_IPI_BUFFER ".ipi_buffer"
+
+/**
+ * scp_get() - get a reference to SCP.
+ *
+ * @pdev: the platform device of the module requesting the SCP platform
+ *        device in order to use the SCP API.
+ *
+ * Return: NULL on failure, otherwise a reference to SCP.
+ **/
+struct mtk_scp *scp_get(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *scp_node;
+ struct platform_device *scp_pdev;
+
+ scp_node = of_parse_phandle(dev->of_node, "mediatek,scp", 0);
+ if (!scp_node) {
+ dev_err(dev, "can't get SCP node\n");
+ return NULL;
+ }
+
+ scp_pdev = of_find_device_by_node(scp_node);
+ of_node_put(scp_node);
+
+ if (WARN_ON(!scp_pdev)) {
+ dev_err(dev, "SCP pdev failed\n");
+ return NULL;
+ }
+
+ return platform_get_drvdata(scp_pdev);
+}
+EXPORT_SYMBOL_GPL(scp_get);
+
+/**
+ * scp_put() - "free" the SCP
+ *
+ * @scp: mtk_scp structure from scp_get().
+ **/
+void scp_put(struct mtk_scp *scp)
+{
+ put_device(scp->dev);
+}
+EXPORT_SYMBOL_GPL(scp_put);
+
+static void scp_wdt_handler(struct mtk_scp *scp, u32 scp_to_host)
+{
+ dev_err(scp->dev, "SCP watchdog timeout! 0x%x", scp_to_host);
+ rproc_report_crash(scp->rproc, RPROC_WATCHDOG);
+}
+
+static void scp_init_ipi_handler(void *data, unsigned int len, void *priv)
+{
+ struct mtk_scp *scp = (struct mtk_scp *)priv;
+ struct scp_run *run = (struct scp_run *)data;
+
+ scp->run.signaled = run->signaled;
+ strscpy(scp->run.fw_ver, run->fw_ver, SCP_FW_VER_LEN);
+ scp->run.dec_capability = run->dec_capability;
+ scp->run.enc_capability = run->enc_capability;
+ wake_up_interruptible(&scp->run.wq);
+}
+
+static void scp_ipi_handler(struct mtk_scp *scp)
+{
+ struct mtk_share_obj __iomem *rcv_obj = scp->recv_buf;
+ struct scp_ipi_desc *ipi_desc = scp->ipi_desc;
+ u8 tmp_data[SCP_SHARE_BUFFER_SIZE];
+ scp_ipi_handler_t handler;
+ u32 id = readl(&rcv_obj->id);
+ u32 len = readl(&rcv_obj->len);
+
+ if (len > SCP_SHARE_BUFFER_SIZE) {
+ dev_err(scp->dev, "ipi message too long (len %d, max %d)", len,
+ SCP_SHARE_BUFFER_SIZE);
+ return;
+ }
+ if (id >= SCP_IPI_MAX) {
+ dev_err(scp->dev, "No such ipi id = %d\n", id);
+ return;
+ }
+
+ scp_ipi_lock(scp, id);
+ handler = ipi_desc[id].handler;
+ if (!handler) {
+ dev_err(scp->dev, "No such ipi id = %d\n", id);
+ scp_ipi_unlock(scp, id);
+ return;
+ }
+
+ memcpy_fromio(tmp_data, &rcv_obj->share_buf, len);
+ handler(tmp_data, len, ipi_desc[id].priv);
+ scp_ipi_unlock(scp, id);
+
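+	/* flag this IPI ID as acked so a sender blocked in scp_ipi_send() can wake */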
+ scp->ipi_id_ack[id] = true;
+ wake_up(&scp->ack_wq);
+}
+
+static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
+ const struct firmware *fw,
+ size_t *offset);
+
+static int scp_ipi_init(struct mtk_scp *scp, const struct firmware *fw)
+{
+ int ret;
+ size_t offset;
+
+ /* read the ipi buf addr from FW itself first */
+ ret = scp_elf_read_ipi_buf_addr(scp, fw, &offset);
+ if (ret) {
+ /* use default ipi buf addr if the FW doesn't have it */
+ offset = scp->data->ipi_buf_offset;
+ if (!offset)
+ return ret;
+ }
+ dev_info(scp->dev, "IPI buf addr %#010zx\n", offset);
+
+ scp->recv_buf = (struct mtk_share_obj __iomem *)
+ (scp->sram_base + offset);
+ scp->send_buf = (struct mtk_share_obj __iomem *)
+ (scp->sram_base + offset + sizeof(*scp->recv_buf));
+ memset_io(scp->recv_buf, 0, sizeof(*scp->recv_buf));
+ memset_io(scp->send_buf, 0, sizeof(*scp->send_buf));
+
+ return 0;
+}
+
+static void mt8183_scp_reset_assert(struct mtk_scp *scp)
+{
+ u32 val;
+
+ val = readl(scp->reg_base + MT8183_SW_RSTN);
+ val &= ~MT8183_SW_RSTN_BIT;
+ writel(val, scp->reg_base + MT8183_SW_RSTN);
+}
+
+static void mt8183_scp_reset_deassert(struct mtk_scp *scp)
+{
+ u32 val;
+
+ val = readl(scp->reg_base + MT8183_SW_RSTN);
+ val |= MT8183_SW_RSTN_BIT;
+ writel(val, scp->reg_base + MT8183_SW_RSTN);
+}
+
+static void mt8192_scp_reset_assert(struct mtk_scp *scp)
+{
+ writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET);
+}
+
+static void mt8192_scp_reset_deassert(struct mtk_scp *scp)
+{
+ writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_CLR);
+}
+
+static void mt8183_scp_irq_handler(struct mtk_scp *scp)
+{
+ u32 scp_to_host;
+
+ scp_to_host = readl(scp->reg_base + MT8183_SCP_TO_HOST);
+ if (scp_to_host & MT8183_SCP_IPC_INT_BIT)
+ scp_ipi_handler(scp);
+ else
+ scp_wdt_handler(scp, scp_to_host);
+
+ /* SCP won't send another interrupt until we set SCP_TO_HOST to 0. */
+ writel(MT8183_SCP_IPC_INT_BIT | MT8183_SCP_WDT_INT_BIT,
+ scp->reg_base + MT8183_SCP_TO_HOST);
+}
+
+static void mt8192_scp_irq_handler(struct mtk_scp *scp)
+{
+ u32 scp_to_host;
+
+ scp_to_host = readl(scp->reg_base + MT8192_SCP2APMCU_IPC_SET);
+
+ if (scp_to_host & MT8192_SCP_IPC_INT_BIT) {
+ scp_ipi_handler(scp);
+
+ /*
+ * SCP won't send another interrupt until we clear
+ * MT8192_SCP2APMCU_IPC.
+ */
+ writel(MT8192_SCP_IPC_INT_BIT,
+ scp->reg_base + MT8192_SCP2APMCU_IPC_CLR);
+ } else {
+ scp_wdt_handler(scp, scp_to_host);
+ writel(1, scp->reg_base + MT8192_CORE0_WDT_IRQ);
+ }
+}
+
+static irqreturn_t scp_irq_handler(int irq, void *priv)
+{
+ struct mtk_scp *scp = priv;
+ int ret;
+
+ ret = clk_prepare_enable(scp->clk);
+ if (ret) {
+ dev_err(scp->dev, "failed to enable clocks\n");
+ return IRQ_NONE;
+ }
+
+ scp->data->scp_irq_handler(scp);
+
+ clk_disable_unprepare(scp->clk);
+
+ return IRQ_HANDLED;
+}
+
+static int scp_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
+{
+ struct device *dev = &rproc->dev;
+ struct elf32_hdr *ehdr;
+ struct elf32_phdr *phdr;
+ int i, ret = 0;
+ const u8 *elf_data = fw->data;
+
+ ehdr = (struct elf32_hdr *)elf_data;
+ phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
+
+ /* go through the available ELF segments */
+ for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
+ u32 da = phdr->p_paddr;
+ u32 memsz = phdr->p_memsz;
+ u32 filesz = phdr->p_filesz;
+ u32 offset = phdr->p_offset;
+ void __iomem *ptr;
+
+ dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
+ phdr->p_type, da, memsz, filesz);
+
+ if (phdr->p_type != PT_LOAD)
+ continue;
+ if (!filesz)
+ continue;
+
+ if (filesz > memsz) {
+ dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
+ filesz, memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (offset + filesz > fw->size) {
+ dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
+ offset + filesz, fw->size);
+ ret = -EINVAL;
+ break;
+ }
+
+ /* grab the kernel address for this device address */
+ ptr = (void __iomem *)rproc_da_to_va(rproc, da, memsz, NULL);
+ if (!ptr) {
+ dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ /* put the segment where the remote processor expects it */
+ scp_memcpy_aligned(ptr, elf_data + phdr->p_offset, filesz);
+ }
+
+ return ret;
+}
+
+static int scp_elf_read_ipi_buf_addr(struct mtk_scp *scp,
+ const struct firmware *fw,
+ size_t *offset)
+{
+ struct elf32_hdr *ehdr;
+ struct elf32_shdr *shdr, *shdr_strtab;
+ int i;
+ const u8 *elf_data = fw->data;
+ const char *strtab;
+
+ ehdr = (struct elf32_hdr *)elf_data;
+ shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
+ shdr_strtab = shdr + ehdr->e_shstrndx;
+ strtab = (const char *)(elf_data + shdr_strtab->sh_offset);
+
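+	/* scan the section headers for the ".ipi_buffer" section */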
+ for (i = 0; i < ehdr->e_shnum; i++, shdr++) {
+ if (strcmp(strtab + shdr->sh_name,
+ SECTION_NAME_IPI_BUFFER) == 0) {
+ *offset = shdr->sh_addr;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+static int mt8183_scp_clk_get(struct mtk_scp *scp)
+{
+ struct device *dev = scp->dev;
+ int ret = 0;
+
+ scp->clk = devm_clk_get(dev, "main");
+ if (IS_ERR(scp->clk)) {
+ dev_err(dev, "Failed to get clock\n");
+ ret = PTR_ERR(scp->clk);
+ }
+
+ return ret;
+}
+
+static int mt8192_scp_clk_get(struct mtk_scp *scp)
+{
+ return mt8183_scp_clk_get(scp);
+}
+
+static int mt8195_scp_clk_get(struct mtk_scp *scp)
+{
+ scp->clk = NULL;
+
+ return 0;
+}
+
+static int mt8183_scp_before_load(struct mtk_scp *scp)
+{
+ /* Clear SCP to host interrupt */
+ writel(MT8183_SCP_IPC_INT_BIT, scp->reg_base + MT8183_SCP_TO_HOST);
+
+ /* Reset clocks before loading FW */
+ writel(0x0, scp->reg_base + MT8183_SCP_CLK_SW_SEL);
+ writel(0x0, scp->reg_base + MT8183_SCP_CLK_DIV_SEL);
+
+ /* Initialize TCM before loading FW. */
+ writel(0x0, scp->reg_base + MT8183_SCP_L1_SRAM_PD);
+ writel(0x0, scp->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
+
+ /* Turn on the power of SCP's SRAM before using it. */
+ writel(0x0, scp->reg_base + MT8183_SCP_SRAM_PDN);
+
+ /*
+ * Set I-cache and D-cache size before loading SCP FW.
+ * SCP SRAM logical address may change when cache size setting differs.
+ */
+ writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
+ scp->reg_base + MT8183_SCP_CACHE_CON);
+ writel(MT8183_SCP_CACHESIZE_8KB, scp->reg_base + MT8183_SCP_DCACHE_CON);
+
+ return 0;
+}
+
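+/*
+ * Clear the power-down bits one at a time, from bit 31 down to bit 0,
+ * presumably so the SRAM banks ramp up gradually rather than all at once.
+ * Bits set in @reserved_mask are never touched. scp_sram_power_off()
+ * mirrors this sequence in the opposite direction.
+ */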
+static void scp_sram_power_on(void __iomem *addr, u32 reserved_mask)
+{
+ int i;
+
+ for (i = 31; i >= 0; i--)
+ writel(GENMASK(i, 0) & ~reserved_mask, addr);
+ writel(0, addr);
+}
+
+static void scp_sram_power_off(void __iomem *addr, u32 reserved_mask)
+{
+ int i;
+
+ writel(0, addr);
+ for (i = 0; i < 32; i++)
+ writel(GENMASK(i, 0) & ~reserved_mask, addr);
+}
+
+static int mt8186_scp_before_load(struct mtk_scp *scp)
+{
+ /* Clear SCP to host interrupt */
+ writel(MT8183_SCP_IPC_INT_BIT, scp->reg_base + MT8183_SCP_TO_HOST);
+
+ /* Reset clocks before loading FW */
+ writel(0x0, scp->reg_base + MT8183_SCP_CLK_SW_SEL);
+ writel(0x0, scp->reg_base + MT8183_SCP_CLK_DIV_SEL);
+
+	/* Turn on the power of SCP's SRAM before using it. Enable one block at a time. */
+ scp_sram_power_on(scp->reg_base + MT8183_SCP_SRAM_PDN, 0);
+
+ /* Initialize TCM before loading FW. */
+ writel(0x0, scp->reg_base + MT8183_SCP_L1_SRAM_PD);
+ writel(0x0, scp->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
+ writel(0x0, scp->reg_base + MT8186_SCP_L1_SRAM_PD_P1);
+	writel(0x0, scp->reg_base + MT8186_SCP_L1_SRAM_PD_P2);
+
+ return 0;
+}
+
+static int mt8192_scp_before_load(struct mtk_scp *scp)
+{
+ /* clear SPM interrupt, SCP2SPM_IPC_CLR */
+ writel(0xff, scp->reg_base + MT8192_SCP2SPM_IPC_CLR);
+
+ writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET);
+
+ /* enable SRAM clock */
+ scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+ scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+ scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+ scp_sram_power_on(scp->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
+ scp_sram_power_on(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
+
+ /* enable MPU for all memory regions */
+ writel(0xff, scp->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
+
+ return 0;
+}
+
+static int mt8195_scp_before_load(struct mtk_scp *scp)
+{
+ /* clear SPM interrupt, SCP2SPM_IPC_CLR */
+ writel(0xff, scp->reg_base + MT8192_SCP2SPM_IPC_CLR);
+
+ writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET);
+
+ /* enable SRAM clock */
+ scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+ scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+ scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+ scp_sram_power_on(scp->reg_base + MT8192_L1TCM_SRAM_PDN,
+ MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
+ scp_sram_power_on(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
+
+ /* enable MPU for all memory regions */
+ writel(0xff, scp->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
+
+ return 0;
+}
+
+static int scp_load(struct rproc *rproc, const struct firmware *fw)
+{
+ struct mtk_scp *scp = rproc->priv;
+ struct device *dev = scp->dev;
+ int ret;
+
+ ret = clk_prepare_enable(scp->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clocks\n");
+ return ret;
+ }
+
+ /* Hold SCP in reset while loading FW. */
+ scp->data->scp_reset_assert(scp);
+
+ ret = scp->data->scp_before_load(scp);
+ if (ret < 0)
+ goto leave;
+
+ ret = scp_elf_load_segments(rproc, fw);
+leave:
+ clk_disable_unprepare(scp->clk);
+
+ return ret;
+}
+
+static int scp_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ struct mtk_scp *scp = rproc->priv;
+ struct device *dev = scp->dev;
+ int ret;
+
+ ret = clk_prepare_enable(scp->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clocks\n");
+ return ret;
+ }
+
+ ret = scp_ipi_init(scp, fw);
+ clk_disable_unprepare(scp->clk);
+ return ret;
+}
+
+static int scp_start(struct rproc *rproc)
+{
+ struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
+ struct device *dev = scp->dev;
+ struct scp_run *run = &scp->run;
+ int ret;
+
+ ret = clk_prepare_enable(scp->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clocks\n");
+ return ret;
+ }
+
+ run->signaled = false;
+
+ scp->data->scp_reset_deassert(scp);
+
+ ret = wait_event_interruptible_timeout(
+ run->wq,
+ run->signaled,
+ msecs_to_jiffies(2000));
+
+ if (ret == 0) {
+ dev_err(dev, "wait SCP initialization timeout!\n");
+ ret = -ETIME;
+ goto stop;
+ }
+ if (ret == -ERESTARTSYS) {
+ dev_err(dev, "wait SCP interrupted by a signal!\n");
+ goto stop;
+ }
+
+ clk_disable_unprepare(scp->clk);
+ dev_info(dev, "SCP is ready. FW version %s\n", run->fw_ver);
+
+ return 0;
+
+stop:
+ scp->data->scp_reset_assert(scp);
+ clk_disable_unprepare(scp->clk);
+ return ret;
+}
+
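+/*
+ * On MT8183 the SCP sees its SRAM at device address 0; any other device
+ * address is checked against the DRAM region DMA-allocated at probe time,
+ * whose device address equals its DMA address.
+ */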
+static void *mt8183_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
+{
+ int offset;
+
+ if (da < scp->sram_size) {
+ offset = da;
+ if (offset >= 0 && (offset + len) <= scp->sram_size)
+ return (void __force *)scp->sram_base + offset;
+ } else if (scp->dram_size) {
+ offset = da - scp->dma_addr;
+ if (offset >= 0 && (offset + len) <= scp->dram_size)
+ return scp->cpu_addr + offset;
+ }
+
+ return NULL;
+}
+
+static void *mt8192_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
+{
+ int offset;
+
+ if (da >= scp->sram_phys &&
+ (da + len) <= scp->sram_phys + scp->sram_size) {
+ offset = da - scp->sram_phys;
+ return (void __force *)scp->sram_base + offset;
+ }
+
+ /* optional memory region */
+ if (scp->l1tcm_size &&
+ da >= scp->l1tcm_phys &&
+ (da + len) <= scp->l1tcm_phys + scp->l1tcm_size) {
+ offset = da - scp->l1tcm_phys;
+ return (void __force *)scp->l1tcm_base + offset;
+ }
+
+ /* optional memory region */
+ if (scp->dram_size &&
+ da >= scp->dma_addr &&
+ (da + len) <= scp->dma_addr + scp->dram_size) {
+ offset = da - scp->dma_addr;
+ return scp->cpu_addr + offset;
+ }
+
+ return NULL;
+}
+
+static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
+
+ return scp->data->scp_da_to_va(scp, da, len);
+}
+
+static void mt8183_scp_stop(struct mtk_scp *scp)
+{
+ /* Disable SCP watchdog */
+ writel(0, scp->reg_base + MT8183_WDT_CFG);
+}
+
+static void mt8192_scp_stop(struct mtk_scp *scp)
+{
+ /* Disable SRAM clock */
+ scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+ scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+ scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+ scp_sram_power_off(scp->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
+ scp_sram_power_off(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
+
+ /* Disable SCP watchdog */
+ writel(0, scp->reg_base + MT8192_CORE0_WDT_CFG);
+}
+
+static void mt8195_scp_stop(struct mtk_scp *scp)
+{
+ /* Disable SRAM clock */
+ scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+ scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+ scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+ scp_sram_power_off(scp->reg_base + MT8192_L1TCM_SRAM_PDN,
+ MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
+ scp_sram_power_off(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
+
+ /* Disable SCP watchdog */
+ writel(0, scp->reg_base + MT8192_CORE0_WDT_CFG);
+}
+
+static int scp_stop(struct rproc *rproc)
+{
+ struct mtk_scp *scp = (struct mtk_scp *)rproc->priv;
+ int ret;
+
+ ret = clk_prepare_enable(scp->clk);
+ if (ret) {
+ dev_err(scp->dev, "failed to enable clocks\n");
+ return ret;
+ }
+
+ scp->data->scp_reset_assert(scp);
+ scp->data->scp_stop(scp);
+ clk_disable_unprepare(scp->clk);
+
+ return 0;
+}
+
+static const struct rproc_ops scp_ops = {
+ .start = scp_start,
+ .stop = scp_stop,
+ .load = scp_load,
+ .da_to_va = scp_da_to_va,
+ .parse_fw = scp_parse_fw,
+};
+
+/**
+ * scp_get_device() - get device struct of SCP
+ *
+ * @scp: mtk_scp structure
+ **/
+struct device *scp_get_device(struct mtk_scp *scp)
+{
+ return scp->dev;
+}
+EXPORT_SYMBOL_GPL(scp_get_device);
+
+/**
+ * scp_get_rproc() - get rproc struct of SCP
+ *
+ * @scp: mtk_scp structure
+ **/
+struct rproc *scp_get_rproc(struct mtk_scp *scp)
+{
+ return scp->rproc;
+}
+EXPORT_SYMBOL_GPL(scp_get_rproc);
+
+/**
+ * scp_get_vdec_hw_capa() - get video decoder hardware capability
+ *
+ * @scp: mtk_scp structure
+ *
+ * Return: video decoder hardware capability
+ **/
+unsigned int scp_get_vdec_hw_capa(struct mtk_scp *scp)
+{
+ return scp->run.dec_capability;
+}
+EXPORT_SYMBOL_GPL(scp_get_vdec_hw_capa);
+
+/**
+ * scp_get_venc_hw_capa() - get video encoder hardware capability
+ *
+ * @scp: mtk_scp structure
+ *
+ * Return: video encoder hardware capability
+ **/
+unsigned int scp_get_venc_hw_capa(struct mtk_scp *scp)
+{
+ return scp->run.enc_capability;
+}
+EXPORT_SYMBOL_GPL(scp_get_venc_hw_capa);
+
+/**
+ * scp_mapping_dm_addr() - Mapping SRAM/DRAM to kernel virtual address
+ *
+ * @scp: mtk_scp structure
+ * @mem_addr: memory address as seen by the SCP
+ *
+ * Map the SCP's SRAM address, DMEM (Data Extended Memory) address, or
+ * working buffer address to a kernel virtual address.
+ *
+ * Return: ERR_PTR(-EINVAL) if the mapping failed,
+ *	   otherwise the mapped kernel virtual address
+ **/
+void *scp_mapping_dm_addr(struct mtk_scp *scp, u32 mem_addr)
+{
+ void *ptr;
+
+ ptr = scp_da_to_va(scp->rproc, mem_addr, 0, NULL);
+ if (!ptr)
+ return ERR_PTR(-EINVAL);
+
+ return ptr;
+}
+EXPORT_SYMBOL_GPL(scp_mapping_dm_addr);
+
+static int scp_map_memory_region(struct mtk_scp *scp)
+{
+ int ret;
+
+ ret = of_reserved_mem_device_init(scp->dev);
+
+ /* reserved memory is optional. */
+ if (ret == -ENODEV) {
+ dev_info(scp->dev, "skipping reserved memory initialization.");
+ return 0;
+ }
+
+ if (ret) {
+ dev_err(scp->dev, "failed to assign memory-region: %d\n", ret);
+ return -ENOMEM;
+ }
+
+ /* Reserved SCP code size */
+ scp->dram_size = MAX_CODE_SIZE;
+ scp->cpu_addr = dma_alloc_coherent(scp->dev, scp->dram_size,
+ &scp->dma_addr, GFP_KERNEL);
+ if (!scp->cpu_addr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void scp_unmap_memory_region(struct mtk_scp *scp)
+{
+ if (scp->dram_size == 0)
+ return;
+
+ dma_free_coherent(scp->dev, scp->dram_size, scp->cpu_addr,
+ scp->dma_addr);
+ of_reserved_mem_device_release(scp->dev);
+}
+
+static int scp_register_ipi(struct platform_device *pdev, u32 id,
+ ipi_handler_t handler, void *priv)
+{
+ struct mtk_scp *scp = platform_get_drvdata(pdev);
+
+ return scp_ipi_register(scp, id, handler, priv);
+}
+
+static void scp_unregister_ipi(struct platform_device *pdev, u32 id)
+{
+ struct mtk_scp *scp = platform_get_drvdata(pdev);
+
+ scp_ipi_unregister(scp, id);
+}
+
+static int scp_send_ipi(struct platform_device *pdev, u32 id, void *buf,
+ unsigned int len, unsigned int wait)
+{
+ struct mtk_scp *scp = platform_get_drvdata(pdev);
+
+ return scp_ipi_send(scp, id, buf, len, wait);
+}
+
+static struct mtk_rpmsg_info mtk_scp_rpmsg_info = {
+ .send_ipi = scp_send_ipi,
+ .register_ipi = scp_register_ipi,
+ .unregister_ipi = scp_unregister_ipi,
+ .ns_ipi_id = SCP_IPI_NS_SERVICE,
+};
+
+static void scp_add_rpmsg_subdev(struct mtk_scp *scp)
+{
+ scp->rpmsg_subdev =
+ mtk_rpmsg_create_rproc_subdev(to_platform_device(scp->dev),
+ &mtk_scp_rpmsg_info);
+ if (scp->rpmsg_subdev)
+ rproc_add_subdev(scp->rproc, scp->rpmsg_subdev);
+}
+
+static void scp_remove_rpmsg_subdev(struct mtk_scp *scp)
+{
+ if (scp->rpmsg_subdev) {
+ rproc_remove_subdev(scp->rproc, scp->rpmsg_subdev);
+ mtk_rpmsg_destroy_rproc_subdev(scp->rpmsg_subdev);
+ scp->rpmsg_subdev = NULL;
+ }
+}
+
+static int scp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct mtk_scp *scp;
+ struct rproc *rproc;
+ struct resource *res;
+ const char *fw_name = "scp.img";
+ int ret, i;
+
+ ret = rproc_of_parse_firmware(dev, 0, &fw_name);
+ if (ret < 0 && ret != -EINVAL)
+ return ret;
+
+ rproc = devm_rproc_alloc(dev, np->name, &scp_ops, fw_name, sizeof(*scp));
+ if (!rproc)
+ return dev_err_probe(dev, -ENOMEM, "unable to allocate remoteproc\n");
+
+ scp = (struct mtk_scp *)rproc->priv;
+ scp->rproc = rproc;
+ scp->dev = dev;
+ scp->data = of_device_get_match_data(dev);
+ platform_set_drvdata(pdev, scp);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
+ scp->sram_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(scp->sram_base))
+ return dev_err_probe(dev, PTR_ERR(scp->sram_base),
+ "Failed to parse and map sram memory\n");
+
+ scp->sram_size = resource_size(res);
+ scp->sram_phys = res->start;
+
+ /* l1tcm is an optional memory region */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l1tcm");
+ scp->l1tcm_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(scp->l1tcm_base)) {
+ ret = PTR_ERR(scp->l1tcm_base);
+ if (ret != -EINVAL) {
+ return dev_err_probe(dev, ret, "Failed to map l1tcm memory\n");
+ }
+ } else {
+ scp->l1tcm_size = resource_size(res);
+ scp->l1tcm_phys = res->start;
+ }
+
+ scp->reg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(scp->reg_base))
+ return dev_err_probe(dev, PTR_ERR(scp->reg_base),
+ "Failed to parse and map cfg memory\n");
+
+ ret = scp->data->scp_clk_get(scp);
+ if (ret)
+ return ret;
+
+ ret = scp_map_memory_region(scp);
+ if (ret)
+ return ret;
+
+ mutex_init(&scp->send_lock);
+ for (i = 0; i < SCP_IPI_MAX; i++)
+ mutex_init(&scp->ipi_desc[i].lock);
+
+ /* register SCP initialization IPI */
+ ret = scp_ipi_register(scp, SCP_IPI_INIT, scp_init_ipi_handler, scp);
+ if (ret) {
+ dev_err(dev, "Failed to register IPI_SCP_INIT\n");
+ goto release_dev_mem;
+ }
+
+ init_waitqueue_head(&scp->run.wq);
+ init_waitqueue_head(&scp->ack_wq);
+
+ scp_add_rpmsg_subdev(scp);
+
+ ret = devm_request_threaded_irq(dev, platform_get_irq(pdev, 0), NULL,
+ scp_irq_handler, IRQF_ONESHOT,
+ pdev->name, scp);
+
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ goto remove_subdev;
+ }
+
+ ret = rproc_add(rproc);
+ if (ret)
+ goto remove_subdev;
+
+ return 0;
+
+remove_subdev:
+ scp_remove_rpmsg_subdev(scp);
+ scp_ipi_unregister(scp, SCP_IPI_INIT);
+release_dev_mem:
+ scp_unmap_memory_region(scp);
+ for (i = 0; i < SCP_IPI_MAX; i++)
+ mutex_destroy(&scp->ipi_desc[i].lock);
+ mutex_destroy(&scp->send_lock);
+
+ return ret;
+}
+
+static int scp_remove(struct platform_device *pdev)
+{
+ struct mtk_scp *scp = platform_get_drvdata(pdev);
+ int i;
+
+ rproc_del(scp->rproc);
+ scp_remove_rpmsg_subdev(scp);
+ scp_ipi_unregister(scp, SCP_IPI_INIT);
+ scp_unmap_memory_region(scp);
+ for (i = 0; i < SCP_IPI_MAX; i++)
+ mutex_destroy(&scp->ipi_desc[i].lock);
+ mutex_destroy(&scp->send_lock);
+
+ return 0;
+}
+
+static const struct mtk_scp_of_data mt8183_of_data = {
+ .scp_clk_get = mt8183_scp_clk_get,
+ .scp_before_load = mt8183_scp_before_load,
+ .scp_irq_handler = mt8183_scp_irq_handler,
+ .scp_reset_assert = mt8183_scp_reset_assert,
+ .scp_reset_deassert = mt8183_scp_reset_deassert,
+ .scp_stop = mt8183_scp_stop,
+ .scp_da_to_va = mt8183_scp_da_to_va,
+ .host_to_scp_reg = MT8183_HOST_TO_SCP,
+ .host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
+ .ipi_buf_offset = 0x7bdb0,
+};
+
+static const struct mtk_scp_of_data mt8186_of_data = {
+ .scp_clk_get = mt8195_scp_clk_get,
+ .scp_before_load = mt8186_scp_before_load,
+ .scp_irq_handler = mt8183_scp_irq_handler,
+ .scp_reset_assert = mt8183_scp_reset_assert,
+ .scp_reset_deassert = mt8183_scp_reset_deassert,
+ .scp_stop = mt8183_scp_stop,
+ .scp_da_to_va = mt8183_scp_da_to_va,
+ .host_to_scp_reg = MT8183_HOST_TO_SCP,
+ .host_to_scp_int_bit = MT8183_HOST_IPC_INT_BIT,
+ .ipi_buf_offset = 0x7bdb0,
+};
+
+static const struct mtk_scp_of_data mt8192_of_data = {
+ .scp_clk_get = mt8192_scp_clk_get,
+ .scp_before_load = mt8192_scp_before_load,
+ .scp_irq_handler = mt8192_scp_irq_handler,
+ .scp_reset_assert = mt8192_scp_reset_assert,
+ .scp_reset_deassert = mt8192_scp_reset_deassert,
+ .scp_stop = mt8192_scp_stop,
+ .scp_da_to_va = mt8192_scp_da_to_va,
+ .host_to_scp_reg = MT8192_GIPC_IN_SET,
+ .host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
+};
+
+static const struct mtk_scp_of_data mt8195_of_data = {
+ .scp_clk_get = mt8195_scp_clk_get,
+ .scp_before_load = mt8195_scp_before_load,
+ .scp_irq_handler = mt8192_scp_irq_handler,
+ .scp_reset_assert = mt8192_scp_reset_assert,
+ .scp_reset_deassert = mt8192_scp_reset_deassert,
+ .scp_stop = mt8195_scp_stop,
+ .scp_da_to_va = mt8192_scp_da_to_va,
+ .host_to_scp_reg = MT8192_GIPC_IN_SET,
+ .host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
+};
+
+static const struct of_device_id mtk_scp_of_match[] = {
+ { .compatible = "mediatek,mt8183-scp", .data = &mt8183_of_data },
+ { .compatible = "mediatek,mt8186-scp", .data = &mt8186_of_data },
+ { .compatible = "mediatek,mt8192-scp", .data = &mt8192_of_data },
+ { .compatible = "mediatek,mt8195-scp", .data = &mt8195_of_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mtk_scp_of_match);
+
+static struct platform_driver mtk_scp_driver = {
+ .probe = scp_probe,
+ .remove = scp_remove,
+ .driver = {
+ .name = "mtk-scp",
+ .of_match_table = mtk_scp_of_match,
+ },
+};
+
+module_platform_driver(mtk_scp_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek SCP control driver");
diff --git a/drivers/remoteproc/mtk_scp_ipi.c b/drivers/remoteproc/mtk_scp_ipi.c
new file mode 100644
index 000000000000..00f041ebcde6
--- /dev/null
+++ b/drivers/remoteproc/mtk_scp_ipi.c
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2019 MediaTek Inc.
+
+#include <asm/barrier.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc/mtk_scp.h>
+
+#include "mtk_common.h"
+
+/**
+ * scp_ipi_register() - register an ipi function
+ *
+ * @scp: mtk_scp structure
+ * @id: IPI ID
+ * @handler: IPI handler
+ * @priv: private data for IPI handler
+ *
+ * Register an IPI handler to receive IPI interrupts from SCP.
+ *
+ * Return: 0 on successful registration, a negative errno on failure.
+ */
+int scp_ipi_register(struct mtk_scp *scp,
+ u32 id,
+ scp_ipi_handler_t handler,
+ void *priv)
+{
+ if (!scp)
+ return -EPROBE_DEFER;
+
+ if (WARN_ON(id >= SCP_IPI_MAX) || WARN_ON(handler == NULL))
+ return -EINVAL;
+
+ scp_ipi_lock(scp, id);
+ scp->ipi_desc[id].handler = handler;
+ scp->ipi_desc[id].priv = priv;
+ scp_ipi_unlock(scp, id);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(scp_ipi_register);
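/*
 * Illustrative client-side sketch (not part of this patch): a driver that
 * holds an scp handle can hook an IPI handler as below. The IPI ID, the
 * handler name and the context structure are assumed names for the example;
 * registered handlers are invoked in interrupt context.
 */
static void example_ipi_handler(void *data, unsigned int len, void *priv)
{
	struct example_ctx *ctx = priv;	/* priv passed at registration time */

	/* consume the payload received from SCP */
	memcpy(&ctx->last_msg, data, min_t(unsigned int, len,
					   sizeof(ctx->last_msg)));
}

/* in the client's probe path; returns -EPROBE_DEFER when scp is NULL */
ret = scp_ipi_register(scp, EXAMPLE_IPI_ID, example_ipi_handler, ctx);
if (ret)
	return ret;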
+
+/**
+ * scp_ipi_unregister() - unregister an ipi function
+ *
+ * @scp: mtk_scp structure
+ * @id: IPI ID
+ *
+ * Unregister an IPI handler so it no longer receives IPI interrupts from SCP.
+ */
+void scp_ipi_unregister(struct mtk_scp *scp, u32 id)
+{
+ if (!scp)
+ return;
+
+ if (WARN_ON(id >= SCP_IPI_MAX))
+ return;
+
+ scp_ipi_lock(scp, id);
+ scp->ipi_desc[id].handler = NULL;
+ scp->ipi_desc[id].priv = NULL;
+ scp_ipi_unlock(scp, id);
+}
+EXPORT_SYMBOL_GPL(scp_ipi_unregister);
+
+/*
+ * scp_memcpy_aligned() - Copy src to dst, where dst is in SCP SRAM region.
+ *
+ * @dst: Pointer to the destination buffer, should be in SCP SRAM region.
+ * @src: Pointer to the source buffer.
+ * @len: Length of the source buffer to be copied.
+ *
+ * Since AP accesses to SCP SRAM don't support byte writes, this always
+ * writes a full word at a time, which may cause some extra bytes to be
+ * written at the beginning and end of dst.
+ */
+void scp_memcpy_aligned(void __iomem *dst, const void *src, unsigned int len)
+{
+ void __iomem *ptr;
+ u32 val;
+ unsigned int i = 0, remain;
+
+ if (!IS_ALIGNED((unsigned long)dst, 4)) {
+ ptr = (void __iomem *)ALIGN_DOWN((unsigned long)dst, 4);
+ i = 4 - (dst - ptr);
+ val = readl_relaxed(ptr);
+ memcpy((u8 *)&val + (4 - i), src, i);
+ writel_relaxed(val, ptr);
+ }
+
+ __iowrite32_copy(dst + i, src + i, (len - i) / 4);
+ remain = (len - i) % 4;
+
+ if (remain > 0) {
+ val = readl_relaxed(dst + len - remain);
+ memcpy(&val, src + len - remain, remain);
+ writel_relaxed(val, dst + len - remain);
+ }
+}
+EXPORT_SYMBOL_GPL(scp_memcpy_aligned);
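/*
 * Worked example of the alignment handling above (annotation, illustrative
 * values): scp_memcpy_aligned(sram + 0x1002, src, 7) resolves as
 *  - head: ptr = sram + 0x1000, i = 2; the word at 0x1000 is read back, the
 *    two bytes covering 0x1002-0x1003 are replaced with src[0..1], and the
 *    word is rewritten;
 *  - body: __iowrite32_copy(sram + 0x1004, src + 2, 1) moves one full word
 *    (bytes 2..5 of src);
 *  - tail: remain = 1; the word at sram + 0x1008 is read, its byte at
 *    0x1008 is replaced with src[6], and the word is rewritten.
 * The surrounding bytes in the head and tail words are rewritten with the
 * values just read back, which is the "extra bytes" effect noted above.
 */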
+
+/**
+ * scp_ipi_lock() - Lock before operations of an IPI ID
+ *
+ * @scp: mtk_scp structure
+ * @id: IPI ID
+ *
+ * Note: This should not be used by drivers other than mtk_scp.
+ */
+void scp_ipi_lock(struct mtk_scp *scp, u32 id)
+{
+ if (WARN_ON(id >= SCP_IPI_MAX))
+ return;
+ mutex_lock(&scp->ipi_desc[id].lock);
+}
+EXPORT_SYMBOL_GPL(scp_ipi_lock);
+
+/**
+ * scp_ipi_unlock() - Unlock after operations of an IPI ID
+ *
+ * @scp: mtk_scp structure
+ * @id: IPI ID
+ *
+ * Note: This should not be used by drivers other than mtk_scp.
+ */
+void scp_ipi_unlock(struct mtk_scp *scp, u32 id)
+{
+ if (WARN_ON(id >= SCP_IPI_MAX))
+ return;
+ mutex_unlock(&scp->ipi_desc[id].lock);
+}
+EXPORT_SYMBOL_GPL(scp_ipi_unlock);
+
+/**
+ * scp_ipi_send() - send data from AP to scp.
+ *
+ * @scp: mtk_scp structure
+ * @id: IPI ID
+ * @buf: the data buffer
+ * @len: the data buffer length
+ * @wait: number of msecs to wait for ack. 0 to skip waiting.
+ *
+ * This function is thread-safe. When this function returns,
+ * SCP has received the data and started processing it.
+ * When the processing completes, the IPI handler registered
+ * by scp_ipi_register() will be called in interrupt context.
+ *
+ * Return: 0 if the data is sent successfully, a negative errno on failure.
+ */
+int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
+ unsigned int wait)
+{
+ struct mtk_share_obj __iomem *send_obj = scp->send_buf;
+ unsigned long timeout;
+ int ret;
+
+ if (WARN_ON(id <= SCP_IPI_INIT) || WARN_ON(id >= SCP_IPI_MAX) ||
+ WARN_ON(id == SCP_IPI_NS_SERVICE) ||
+ WARN_ON(len > sizeof(send_obj->share_buf)) || WARN_ON(!buf))
+ return -EINVAL;
+
+ mutex_lock(&scp->send_lock);
+
+ ret = clk_prepare_enable(scp->clk);
+ if (ret) {
+ dev_err(scp->dev, "failed to enable clock\n");
+ goto unlock_mutex;
+ }
+
+ /* Wait until SCP receives the last command */
+ timeout = jiffies + msecs_to_jiffies(2000);
+ do {
+ if (time_after(jiffies, timeout)) {
+ dev_err(scp->dev, "%s: IPI timeout!\n", __func__);
+ ret = -ETIMEDOUT;
+ goto clock_disable;
+ }
+ } while (readl(scp->reg_base + scp->data->host_to_scp_reg));
+
+ scp_memcpy_aligned(send_obj->share_buf, buf, len);
+
+ writel(len, &send_obj->len);
+ writel(id, &send_obj->id);
+
+ scp->ipi_id_ack[id] = false;
+ /* send the command to SCP */
+ writel(scp->data->host_to_scp_int_bit,
+ scp->reg_base + scp->data->host_to_scp_reg);
+
+ if (wait) {
+ /* wait for SCP's ACK */
+ timeout = msecs_to_jiffies(wait);
+ ret = wait_event_timeout(scp->ack_wq,
+ scp->ipi_id_ack[id],
+ timeout);
+ scp->ipi_id_ack[id] = false;
+ if (WARN(!ret, "scp ipi %d ack timed out!", id))
+ ret = -EIO;
+ else
+ ret = 0;
+ }
+
+clock_disable:
+ clk_disable_unprepare(scp->clk);
+unlock_mutex:
+ mutex_unlock(&scp->send_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(scp_ipi_send);
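/*
 * Illustrative send sketch (not part of this patch): the message layout and
 * IPI ID are assumed names. A wait of 0 would skip waiting for the SCP ack.
 */
struct example_msg msg = { .cmd = EXAMPLE_CMD_START };
int ret;

/* block for up to 100 ms waiting for the SCP to ack this message */
ret = scp_ipi_send(scp, EXAMPLE_IPI_ID, &msg, sizeof(msg), 100);
if (ret)
	dev_err(dev, "scp_ipi_send failed: %d\n", ret);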
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek scp IPI interface");
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 6398194075aa..32a588fefbdc 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -2,7 +2,7 @@
/*
* OMAP Remote Processor driver
*
- * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011-2020 Texas Instruments Incorporated - http://www.ti.com/
* Copyright (C) 2011 Google, Inc.
*
* Ohad Ben-Cohen <ohad@wizery.com>
@@ -15,31 +15,466 @@
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/clk/ti.h>
#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
#include <linux/remoteproc.h>
#include <linux/mailbox_client.h>
+#include <linux/omap-iommu.h>
#include <linux/omap-mailbox.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/reset.h>
+#include <clocksource/timer-ti-dm.h>
-#include <linux/platform_data/remoteproc-omap.h>
+#include <linux/platform_data/dmtimer-omap.h>
#include "omap_remoteproc.h"
#include "remoteproc_internal.h"
+/* default auto-suspend delay (ms) */
+#define DEFAULT_AUTOSUSPEND_DELAY 10000
+
+/**
+ * struct omap_rproc_boot_data - boot data structure for the OMAP DSP rprocs
+ * @syscon: regmap handle for the system control configuration module
+ * @boot_reg: boot register offset within the @syscon regmap
+ * @boot_reg_shift: bit-field shift required for the boot address value in
+ * @boot_reg
+ */
+struct omap_rproc_boot_data {
+ struct regmap *syscon;
+ unsigned int boot_reg;
+ unsigned int boot_reg_shift;
+};
+
+/**
+ * struct omap_rproc_mem - internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: bus address used to access the memory region
+ * @dev_addr: device address of the memory region from DSP view
+ * @size: size of the memory region
+ */
+struct omap_rproc_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ u32 dev_addr;
+ size_t size;
+};
+
+/**
+ * struct omap_rproc_timer - data structure for a timer used by an OMAP rproc
+ * @odt: timer pointer
+ * @timer_ops: OMAP dmtimer ops for @odt timer
+ * @irq: timer irq
+ */
+struct omap_rproc_timer {
+ struct omap_dm_timer *odt;
+ const struct omap_dm_timer_ops *timer_ops;
+ int irq;
+};
+
/**
* struct omap_rproc - omap remote processor state
* @mbox: mailbox channel handle
* @client: mailbox client to request the mailbox channel
+ * @boot_data: boot data structure for setting processor boot address
+ * @mem: internal memory regions data
+ * @num_mems: number of internal memory regions
+ * @num_timers: number of rproc timer(s)
+ * @num_wd_timers: number of rproc watchdog timers
+ * @timers: timer(s) info used by rproc
+ * @autosuspend_delay: auto-suspend delay value to be used for runtime pm
+ * @need_resume: if true a resume is needed in the system resume callback
* @rproc: rproc handle
+ * @reset: reset handle
+ * @pm_comp: completion primitive to sync for suspend response
+ * @fck: functional clock for the remoteproc
+ * @suspend_acked: state machine flag to store the suspend request ack
*/
struct omap_rproc {
struct mbox_chan *mbox;
struct mbox_client client;
+ struct omap_rproc_boot_data *boot_data;
+ struct omap_rproc_mem *mem;
+ int num_mems;
+ int num_timers;
+ int num_wd_timers;
+ struct omap_rproc_timer *timers;
+ int autosuspend_delay;
+ bool need_resume;
struct rproc *rproc;
+ struct reset_control *reset;
+ struct completion pm_comp;
+ struct clk *fck;
+ bool suspend_acked;
};
/**
+ * struct omap_rproc_mem_data - memory definitions for an omap remote processor
+ * @name: name for this memory entry
+ * @dev_addr: device address for the memory entry
+ */
+struct omap_rproc_mem_data {
+ const char *name;
+ const u32 dev_addr;
+};
+
+/**
+ * struct omap_rproc_dev_data - device data for the omap remote processor
+ * @device_name: device name of the remote processor
+ * @mems: memory definitions for this remote processor
+ */
+struct omap_rproc_dev_data {
+ const char *device_name;
+ const struct omap_rproc_mem_data *mems;
+};
+
+/**
+ * omap_rproc_request_timer() - request a timer for a remoteproc
+ * @dev: device requesting the timer
+ * @np: device node pointer to the desired timer
+ * @timer: handle to a struct omap_rproc_timer to return the timer handle
+ *
+ * This helper function is used primarily to request a timer associated with
+ * a remoteproc. The returned handle is stored in the .odt field of the
+ * @timer structure passed in, and is used to invoke other timer-specific
+ * ops (like starting a timer either during device initialization or during
+ * a resume operation, or for stopping/freeing a timer).
+ *
+ * Return: 0 on success, otherwise an appropriate failure
+ */
+static int omap_rproc_request_timer(struct device *dev, struct device_node *np,
+ struct omap_rproc_timer *timer)
+{
+ int ret;
+
+ timer->odt = timer->timer_ops->request_by_node(np);
+ if (!timer->odt) {
+ dev_err(dev, "request for timer node %p failed\n", np);
+ return -EBUSY;
+ }
+
+ ret = timer->timer_ops->set_source(timer->odt, OMAP_TIMER_SRC_SYS_CLK);
+ if (ret) {
+ dev_err(dev, "error setting OMAP_TIMER_SRC_SYS_CLK as source for timer node %p\n",
+ np);
+ timer->timer_ops->free(timer->odt);
+ return ret;
+ }
+
+ /* clean counter, remoteproc code will set the value */
+ timer->timer_ops->set_load(timer->odt, 0);
+
+ return 0;
+}
+
+/**
+ * omap_rproc_start_timer() - start a timer for a remoteproc
+ * @timer: handle to an OMAP rproc timer
+ *
+ * This helper function is used to start a timer associated with a remoteproc,
+ * obtained using the request_timer ops. The helper function needs to be
+ * invoked by the driver to start the timer (during device initialization)
+ * or to just resume the timer.
+ *
+ * Return: 0 on success, otherwise a failure as returned by DMTimer ops
+ */
+static inline int omap_rproc_start_timer(struct omap_rproc_timer *timer)
+{
+ return timer->timer_ops->start(timer->odt);
+}
+
+/**
+ * omap_rproc_stop_timer() - stop a timer for a remoteproc
+ * @timer: handle to an OMAP rproc timer
+ *
+ * This helper function is used to disable a timer associated with a
+ * remoteproc, and needs to be called either during a device shutdown
+ * or suspend operation. The separate helper function allows the driver
+ * to just stop a timer without having to release the timer during a
+ * suspend operation.
+ *
+ * Return: 0 on success, otherwise a failure as returned by DMTimer ops
+ */
+static inline int omap_rproc_stop_timer(struct omap_rproc_timer *timer)
+{
+ return timer->timer_ops->stop(timer->odt);
+}
+
+/**
+ * omap_rproc_release_timer() - release a timer for a remoteproc
+ * @timer: handle to an OMAP rproc timer
+ *
+ * This helper function is used primarily to release a timer associated
+ * with a remoteproc. The dmtimer will be available for other clients to
+ * use once released.
+ *
+ * Return: 0 on success, otherwise a failure as returned by DMTimer ops
+ */
+static inline int omap_rproc_release_timer(struct omap_rproc_timer *timer)
+{
+ return timer->timer_ops->free(timer->odt);
+}
+
+/**
+ * omap_rproc_get_timer_irq() - get the irq for a timer
+ * @timer: handle to an OMAP rproc timer
+ *
+ * This function is used to get the irq associated with a watchdog timer. The
+ * function is called by the OMAP remoteproc driver to register an interrupt
+ * handler to handle watchdog events on the remote processor.
+ *
+ * Return: irq id on success, otherwise a failure as returned by DMTimer ops
+ */
+static inline int omap_rproc_get_timer_irq(struct omap_rproc_timer *timer)
+{
+ return timer->timer_ops->get_irq(timer->odt);
+}
+
+/**
+ * omap_rproc_ack_timer_irq() - acknowledge a timer irq
+ * @timer: handle to an OMAP rproc timer
+ *
+ * This function is used to clear the irq associated with a watchdog timer.
+ * It is called by the OMAP remoteproc driver upon a watchdog event on the
+ * remote processor to clear the interrupt status of the watchdog timer.
+ */
+static inline void omap_rproc_ack_timer_irq(struct omap_rproc_timer *timer)
+{
+ timer->timer_ops->write_status(timer->odt, OMAP_TIMER_INT_OVERFLOW);
+}
+
+/**
+ * omap_rproc_watchdog_isr() - Watchdog ISR handler for remoteproc device
+ * @irq: IRQ number associated with a watchdog timer
+ * @data: IRQ handler data
+ *
+ * This ISR routine executes the necessary low-level code to
+ * acknowledge a watchdog timer interrupt. There can be multiple watchdog
+ * timers associated with a rproc (like IPUs which have 2 watchdog timers,
+ * one per Cortex M3/M4 core), so a lookup has to be performed to identify
+ * the timer to acknowledge its interrupt.
+ *
+ * The function also invokes rproc_report_crash to report the watchdog event
+ * to the remoteproc driver core, to trigger a recovery.
+ *
+ * Return: IRQ_HANDLED on success, otherwise IRQ_NONE
+ */
+static irqreturn_t omap_rproc_watchdog_isr(int irq, void *data)
+{
+ struct rproc *rproc = data;
+ struct omap_rproc *oproc = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ struct omap_rproc_timer *timers = oproc->timers;
+ struct omap_rproc_timer *wd_timer = NULL;
+ int num_timers = oproc->num_timers + oproc->num_wd_timers;
+ int i;
+
+ for (i = oproc->num_timers; i < num_timers; i++) {
+ if (timers[i].irq > 0 && irq == timers[i].irq) {
+ wd_timer = &timers[i];
+ break;
+ }
+ }
+
+ if (!wd_timer) {
+ dev_err(dev, "invalid timer\n");
+ return IRQ_NONE;
+ }
+
+ omap_rproc_ack_timer_irq(wd_timer);
+
+ rproc_report_crash(rproc, RPROC_WATCHDOG);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * omap_rproc_enable_timers() - enable the timers for a remoteproc
+ * @rproc: handle of a remote processor
+ * @configure: boolean flag used to acquire and configure the timer handle
+ *
+ * This function is used primarily to enable the timers associated with
+ * a remoteproc. The configure flag is provided to allow the driver
+ * to either acquire and start a timer (during device initialization) or
+ * to just start a timer (during a resume operation).
+ *
+ * Return: 0 on success, otherwise an appropriate failure
+ */
+static int omap_rproc_enable_timers(struct rproc *rproc, bool configure)
+{
+ int i;
+ int ret = 0;
+ struct platform_device *tpdev;
+ struct dmtimer_platform_data *tpdata;
+ const struct omap_dm_timer_ops *timer_ops;
+ struct omap_rproc *oproc = rproc->priv;
+ struct omap_rproc_timer *timers = oproc->timers;
+ struct device *dev = rproc->dev.parent;
+ struct device_node *np = NULL;
+ int num_timers = oproc->num_timers + oproc->num_wd_timers;
+
+ if (!num_timers)
+ return 0;
+
+ if (!configure)
+ goto start_timers;
+
+ for (i = 0; i < num_timers; i++) {
+ if (i < oproc->num_timers)
+ np = of_parse_phandle(dev->of_node, "ti,timers", i);
+ else
+ np = of_parse_phandle(dev->of_node,
+ "ti,watchdog-timers",
+ (i - oproc->num_timers));
+ if (!np) {
+ ret = -ENXIO;
+ dev_err(dev, "device node lookup for timer at index %d failed: %d\n",
+ i < oproc->num_timers ? i :
+ i - oproc->num_timers, ret);
+ goto free_timers;
+ }
+
+ tpdev = of_find_device_by_node(np);
+ if (!tpdev) {
+ ret = -ENODEV;
+ dev_err(dev, "could not get timer platform device\n");
+ goto put_node;
+ }
+
+ tpdata = dev_get_platdata(&tpdev->dev);
+ put_device(&tpdev->dev);
+ if (!tpdata) {
+ ret = -EINVAL;
+ dev_err(dev, "dmtimer pdata structure NULL\n");
+ goto put_node;
+ }
+
+ timer_ops = tpdata->timer_ops;
+ if (!timer_ops || !timer_ops->request_by_node ||
+ !timer_ops->set_source || !timer_ops->set_load ||
+ !timer_ops->free || !timer_ops->start ||
+ !timer_ops->stop || !timer_ops->get_irq ||
+ !timer_ops->write_status) {
+ ret = -EINVAL;
+ dev_err(dev, "device does not have required timer ops\n");
+ goto put_node;
+ }
+
+ timers[i].irq = -1;
+ timers[i].timer_ops = timer_ops;
+ ret = omap_rproc_request_timer(dev, np, &timers[i]);
+ if (ret) {
+ dev_err(dev, "request for timer %p failed: %d\n", np,
+ ret);
+ goto put_node;
+ }
+ of_node_put(np);
+
+ if (i >= oproc->num_timers) {
+ timers[i].irq = omap_rproc_get_timer_irq(&timers[i]);
+ if (timers[i].irq < 0) {
+ dev_err(dev, "get_irq for timer %p failed: %d\n",
+ np, timers[i].irq);
+ ret = -EBUSY;
+ goto free_timers;
+ }
+
+ ret = request_irq(timers[i].irq,
+ omap_rproc_watchdog_isr, IRQF_SHARED,
+ "rproc-wdt", rproc);
+ if (ret) {
+ dev_err(dev, "error requesting irq for timer %p\n",
+ np);
+ omap_rproc_release_timer(&timers[i]);
+ timers[i].odt = NULL;
+ timers[i].timer_ops = NULL;
+ timers[i].irq = -1;
+ goto free_timers;
+ }
+ }
+ }
+
+start_timers:
+ for (i = 0; i < num_timers; i++) {
+ ret = omap_rproc_start_timer(&timers[i]);
+ if (ret) {
+ dev_err(dev, "start timer %p failed failed: %d\n", np,
+ ret);
+ break;
+ }
+ }
+ if (ret) {
+ while (i >= 0) {
+ omap_rproc_stop_timer(&timers[i]);
+ i--;
+ }
+ goto put_node;
+ }
+ return 0;
+
+put_node:
+ if (configure)
+ of_node_put(np);
+free_timers:
+ while (i--) {
+ if (i >= oproc->num_timers)
+ free_irq(timers[i].irq, rproc);
+ omap_rproc_release_timer(&timers[i]);
+ timers[i].odt = NULL;
+ timers[i].timer_ops = NULL;
+ timers[i].irq = -1;
+ }
+
+ return ret;
+}
+
+/**
+ * omap_rproc_disable_timers() - disable the timers for a remoteproc
+ * @rproc: handle of a remote processor
+ * @configure: boolean flag used to release the timer handle
+ *
+ * This function is used primarily to disable the timers associated with
+ * a remoteproc. The configure flag is provided to allow the driver to
+ * to either stop and release a timer (during device shutdown) or to just
+ * stop a timer (during a suspend operation).
+ *
+ * Return: 0 on success or no timers
+ */
+static int omap_rproc_disable_timers(struct rproc *rproc, bool configure)
+{
+ int i;
+ struct omap_rproc *oproc = rproc->priv;
+ struct omap_rproc_timer *timers = oproc->timers;
+ int num_timers = oproc->num_timers + oproc->num_wd_timers;
+
+ if (!num_timers)
+ return 0;
+
+ for (i = 0; i < num_timers; i++) {
+ omap_rproc_stop_timer(&timers[i]);
+ if (configure) {
+ if (i >= oproc->num_timers)
+ free_irq(timers[i].irq, rproc);
+ omap_rproc_release_timer(&timers[i]);
+ timers[i].odt = NULL;
+ timers[i].timer_ops = NULL;
+ timers[i].irq = -1;
+ }
+ }
+
+ return 0;
+}
+
+/**
* omap_rproc_mbox_callback() - inbound mailbox message handler
* @client: mailbox client pointer used for requesting the mailbox channel
* @data: mailbox payload
@@ -65,13 +500,28 @@ static void omap_rproc_mbox_callback(struct mbox_client *client, void *data)
switch (msg) {
case RP_MBOX_CRASH:
- /* just log this for now. later, we'll also do recovery */
+ /*
+ * remoteproc detected an exception, notify the rproc core.
+ * The remoteproc core will handle the recovery.
+ */
dev_err(dev, "omap rproc %s crashed\n", name);
+ rproc_report_crash(oproc->rproc, RPROC_FATAL_ERROR);
break;
case RP_MBOX_ECHO_REPLY:
dev_info(dev, "received echo reply from %s\n", name);
break;
+ case RP_MBOX_SUSPEND_ACK:
+ case RP_MBOX_SUSPEND_CANCEL:
+ oproc->suspend_acked = msg == RP_MBOX_SUSPEND_ACK;
+ complete(&oproc->pm_comp);
+ break;
default:
+ if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
+ return;
+ if (msg > oproc->rproc->max_notifyid) {
+ dev_dbg(dev, "dropping unknown message 0x%x", msg);
+ return;
+ }
/* msg contains the index of the triggered vring */
if (rproc_vq_interrupt(oproc->rproc, msg) == IRQ_NONE)
dev_dbg(dev, "no message was found in vqid %d\n", msg);
@@ -85,11 +535,52 @@ static void omap_rproc_kick(struct rproc *rproc, int vqid)
struct device *dev = rproc->dev.parent;
int ret;
+ /* wake up the rproc before kicking it */
+ ret = pm_runtime_get_sync(dev);
+ if (WARN_ON(ret < 0)) {
+ dev_err(dev, "pm_runtime_get_sync() failed during kick, ret = %d\n",
+ ret);
+ pm_runtime_put_noidle(dev);
+ return;
+ }
+
/* send the index of the triggered virtqueue in the mailbox payload */
ret = mbox_send_message(oproc->mbox, (void *)vqid);
if (ret < 0)
dev_err(dev, "failed to send mailbox message, status = %d\n",
ret);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+}
+
+/**
+ * omap_rproc_write_dsp_boot_addr() - set boot address for DSP remote processor
+ * @rproc: handle of a remote processor
+ *
+ * Set boot address for a supported DSP remote processor.
+ *
+ * Return: 0 on success, or -EINVAL if boot address is not aligned properly
+ */
+static int omap_rproc_write_dsp_boot_addr(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev.parent;
+ struct omap_rproc *oproc = rproc->priv;
+ struct omap_rproc_boot_data *bdata = oproc->boot_data;
+ u32 offset = bdata->boot_reg;
+ u32 value;
+ u32 mask;
+
+ if (rproc->bootaddr & (SZ_1K - 1)) {
+ dev_err(dev, "invalid boot address 0x%llx, must be aligned on a 1KB boundary\n",
+ rproc->bootaddr);
+ return -EINVAL;
+ }
+
+ value = rproc->bootaddr >> bdata->boot_reg_shift;
+ mask = ~(SZ_1K - 1) >> bdata->boot_reg_shift;
+
+ return regmap_update_bits(bdata->syscon, offset, mask, value);
}
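/*
 * Worked example for the bit-field math above (annotation, illustrative
 * values): with rproc->bootaddr = 0x8a000000 and boot_reg_shift = 10,
 * value = 0x8a000000 >> 10 = 0x228000 and mask = 0xfffffc00 >> 10 =
 * 0x3fffff, so regmap_update_bits() touches only the boot-address
 * bit-field and preserves the remaining bits of the boot register. With
 * a shift of 0, the 1KB-aligned address is written as-is into bits 31:10.
 */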
/*
@@ -103,13 +594,14 @@ static int omap_rproc_start(struct rproc *rproc)
{
struct omap_rproc *oproc = rproc->priv;
struct device *dev = rproc->dev.parent;
- struct platform_device *pdev = to_platform_device(dev);
- struct omap_rproc_pdata *pdata = pdev->dev.platform_data;
int ret;
struct mbox_client *client = &oproc->client;
- if (pdata->set_bootaddr)
- pdata->set_bootaddr(rproc->bootaddr);
+ if (oproc->boot_data) {
+ ret = omap_rproc_write_dsp_boot_addr(rproc);
+ if (ret)
+ return ret;
+ }
client->dev = dev;
client->tx_done = NULL;
@@ -117,7 +609,7 @@ static int omap_rproc_start(struct rproc *rproc)
client->tx_block = false;
client->knows_txdone = false;
- oproc->mbox = omap_mbox_request_channel(client, pdata->mbox_name);
+ oproc->mbox = mbox_request_channel(client, 0);
if (IS_ERR(oproc->mbox)) {
ret = -EBUSY;
dev_err(dev, "mbox_request_channel failed: %ld\n",
@@ -138,14 +630,34 @@ static int omap_rproc_start(struct rproc *rproc)
goto put_mbox;
}
- ret = pdata->device_enable(pdev);
+ ret = omap_rproc_enable_timers(rproc, true);
if (ret) {
- dev_err(dev, "omap_device_enable failed: %d\n", ret);
+ dev_err(dev, "omap_rproc_enable_timers failed: %d\n", ret);
goto put_mbox;
}
+ ret = reset_control_deassert(oproc->reset);
+ if (ret) {
+ dev_err(dev, "reset control deassert failed: %d\n", ret);
+ goto disable_timers;
+ }
+
+ /*
+ * remote processor is up, so update the runtime pm status and
+ * enable auto-suspend. The device usage count is incremented
+ * manually so that the pm_runtime_put_autosuspend() below balances it
+ */
+ pm_runtime_set_active(dev);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_get_noresume(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
return 0;
+disable_timers:
+ omap_rproc_disable_timers(rproc, true);
put_mbox:
mbox_free_channel(oproc->mbox);
return ret;
@@ -155,32 +667,636 @@ put_mbox:
static int omap_rproc_stop(struct rproc *rproc)
{
struct device *dev = rproc->dev.parent;
- struct platform_device *pdev = to_platform_device(dev);
- struct omap_rproc_pdata *pdata = pdev->dev.platform_data;
struct omap_rproc *oproc = rproc->priv;
int ret;
- ret = pdata->device_shutdown(pdev);
- if (ret)
+ /*
+ * cancel any possible scheduled runtime suspend by incrementing
+ * the device usage count, and resuming the device. The remoteproc
+ * also needs to be woken up if suspended, so that the remoteproc
+ * OS does not continue to rely on any context it has saved, which
+ * avoids potential issues in misidentifying a subsequent device
+ * reboot as a power-restore boot
+ */
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
return ret;
+ }
+
+ ret = reset_control_assert(oproc->reset);
+ if (ret)
+ goto out;
+
+ ret = omap_rproc_disable_timers(rproc, true);
+ if (ret)
+ goto enable_device;
mbox_free_channel(oproc->mbox);
+ /*
+ * update the runtime pm states and status now that the remoteproc
+ * has stopped
+ */
+ pm_runtime_disable(dev);
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_set_suspended(dev);
+
return 0;
+
+enable_device:
+ reset_control_deassert(oproc->reset);
+out:
+ /* schedule the next auto-suspend */
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ return ret;
+}
+
+/**
+ * omap_rproc_da_to_va() - internal memory translation helper
+ * @rproc: remote processor to apply the address translation for
+ * @da: device address to translate
+ * @len: length of the memory buffer
+ * @is_iomem: optional pointer filled in to indicate if @da is iomapped memory
+ *
+ * Custom function implementing the rproc .da_to_va ops to provide address
+ * translation (device address to kernel virtual address) for internal RAMs
+ * present in a DSP or IPU device). The translated addresses can be used
+ * either by the remoteproc core for loading, or by any rpmsg bus drivers.
+ *
+ * Return: translated virtual address in kernel memory space on success,
+ * or NULL on failure.
+ */
+static void *omap_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct omap_rproc *oproc = rproc->priv;
+ int i;
+ u32 offset;
+
+ if (len <= 0)
+ return NULL;
+
+ if (!oproc->num_mems)
+ return NULL;
+
+ for (i = 0; i < oproc->num_mems; i++) {
+ if (da >= oproc->mem[i].dev_addr && da + len <=
+ oproc->mem[i].dev_addr + oproc->mem[i].size) {
+ offset = da - oproc->mem[i].dev_addr;
+ /* __force to make sparse happy with type conversion */
+ return (__force void *)(oproc->mem[i].cpu_addr +
+ offset);
+ }
+ }
+
+ return NULL;
}
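/*
 * Worked example (annotation, assuming the region is large enough): on a
 * DRA7 DSP the "l2ram" entry has dev_addr = 0x800000, so a lookup with
 * da = 0x800100 and len = 0x40 matches that entry and returns
 * oproc->mem[i].cpu_addr + 0x100. Device addresses outside every internal
 * memory return NULL here, and the remoteproc core then falls back to its
 * other translations (e.g. carveouts mapped through the IOMMU).
 */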
static const struct rproc_ops omap_rproc_ops = {
.start = omap_rproc_start,
.stop = omap_rproc_stop,
.kick = omap_rproc_kick,
+ .da_to_va = omap_rproc_da_to_va,
+};
+
+#ifdef CONFIG_PM
+static bool _is_rproc_in_standby(struct omap_rproc *oproc)
+{
+ return ti_clk_is_in_standby(oproc->fck);
+}
+
+/* 1 sec is long enough time to let the remoteproc side suspend the device */
+#define DEF_SUSPEND_TIMEOUT 1000
+static int _omap_rproc_suspend(struct rproc *rproc, bool auto_suspend)
+{
+ struct device *dev = rproc->dev.parent;
+ struct omap_rproc *oproc = rproc->priv;
+ unsigned long to = msecs_to_jiffies(DEF_SUSPEND_TIMEOUT);
+ unsigned long ta = jiffies + to;
+ u32 suspend_msg = auto_suspend ?
+ RP_MBOX_SUSPEND_AUTO : RP_MBOX_SUSPEND_SYSTEM;
+ int ret;
+
+ reinit_completion(&oproc->pm_comp);
+ oproc->suspend_acked = false;
+ ret = mbox_send_message(oproc->mbox, (void *)suspend_msg);
+ if (ret < 0) {
+ dev_err(dev, "PM mbox_send_message failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&oproc->pm_comp, to);
+ if (!oproc->suspend_acked)
+ return -EBUSY;
+
+ /*
+ * The remoteproc side returns the ACK message before saving the
+ * context, because the context saving is performed within a SYS/BIOS
+ * function and cannot have any dependencies on the IPC layer. Also,
+ * as SYS/BIOS needs to properly preserve the processor register set,
+ * sending this ACK or signalling the completion of the
+ * context save through a shared memory variable can never be the
+ * absolute last thing to be executed on the remoteproc side, and the
+ * MPU cannot use the ACK message as a sync point to put the remoteproc
+ * into reset. The only way to ensure that the remote processor has
+ * completed saving the context is to check that the module has reached
+ * STANDBY state (after saving the context, the SYS/BIOS executes the
+ * appropriate target-specific WFI instruction causing the module to
+ * enter STANDBY).
+ */
+ while (!_is_rproc_in_standby(oproc)) {
+ if (time_after(jiffies, ta))
+ return -ETIME;
+ schedule();
+ }
+
+ ret = reset_control_assert(oproc->reset);
+ if (ret) {
+ dev_err(dev, "reset assert during suspend failed %d\n", ret);
+ return ret;
+ }
+
+ ret = omap_rproc_disable_timers(rproc, false);
+ if (ret) {
+ dev_err(dev, "disabling timers during suspend failed %d\n",
+ ret);
+ goto enable_device;
+ }
+
+ /*
+ * IOMMUs would have to be disabled specifically for runtime suspend.
+ * They are handled automatically through System PM callbacks for
+ * regular system suspend
+ */
+ if (auto_suspend) {
+ ret = omap_iommu_domain_deactivate(rproc->domain);
+ if (ret) {
+ dev_err(dev, "iommu domain deactivate failed %d\n",
+ ret);
+ goto enable_timers;
+ }
+ }
+
+ return 0;
+
+enable_timers:
+ /* ignore errors on re-enabling code */
+ omap_rproc_enable_timers(rproc, false);
+enable_device:
+ reset_control_deassert(oproc->reset);
+ return ret;
+}
+
+static int _omap_rproc_resume(struct rproc *rproc, bool auto_suspend)
+{
+ struct device *dev = rproc->dev.parent;
+ struct omap_rproc *oproc = rproc->priv;
+ int ret;
+
+ /*
+ * IOMMUs would have to be enabled specifically for runtime resume.
+ * They would have been already enabled automatically through System
+ * PM callbacks for regular system resume
+ */
+ if (auto_suspend) {
+ ret = omap_iommu_domain_activate(rproc->domain);
+ if (ret) {
+ dev_err(dev, "omap_iommu activate failed %d\n", ret);
+ goto out;
+ }
+ }
+
+ /* boot address could be lost after suspend, so restore it */
+ if (oproc->boot_data) {
+ ret = omap_rproc_write_dsp_boot_addr(rproc);
+ if (ret) {
+ dev_err(dev, "boot address restore failed %d\n", ret);
+ goto suspend_iommu;
+ }
+ }
+
+ ret = omap_rproc_enable_timers(rproc, false);
+ if (ret) {
+ dev_err(dev, "enabling timers during resume failed %d\n", ret);
+ goto suspend_iommu;
+ }
+
+ ret = reset_control_deassert(oproc->reset);
+ if (ret) {
+ dev_err(dev, "reset deassert during resume failed %d\n", ret);
+ goto disable_timers;
+ }
+
+ return 0;
+
+disable_timers:
+ omap_rproc_disable_timers(rproc, false);
+suspend_iommu:
+ if (auto_suspend)
+ omap_iommu_domain_deactivate(rproc->domain);
+out:
+ return ret;
+}
+
+static int __maybe_unused omap_rproc_suspend(struct device *dev)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+ struct omap_rproc *oproc = rproc->priv;
+ int ret = 0;
+
+ mutex_lock(&rproc->lock);
+ if (rproc->state == RPROC_OFFLINE)
+ goto out;
+
+ if (rproc->state == RPROC_SUSPENDED)
+ goto out;
+
+ if (rproc->state != RPROC_RUNNING) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = _omap_rproc_suspend(rproc, false);
+ if (ret) {
+ dev_err(dev, "suspend failed %d\n", ret);
+ goto out;
+ }
+
+ /*
+ * the remoteproc is running at the time of system suspend, so remember
+ * this so it can be woken up during system resume
+ */
+ oproc->need_resume = true;
+ rproc->state = RPROC_SUSPENDED;
+
+out:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+
+static int __maybe_unused omap_rproc_resume(struct device *dev)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+ struct omap_rproc *oproc = rproc->priv;
+ int ret = 0;
+
+ mutex_lock(&rproc->lock);
+ if (rproc->state == RPROC_OFFLINE)
+ goto out;
+
+ if (rproc->state != RPROC_SUSPENDED) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * remoteproc was auto-suspended at the time of system suspend,
+ * so no need to wake-up the processor (leave it in suspended
+ * state, will be woken up during a subsequent runtime_resume)
+ */
+ if (!oproc->need_resume)
+ goto out;
+
+ ret = _omap_rproc_resume(rproc, false);
+ if (ret) {
+ dev_err(dev, "resume failed %d\n", ret);
+ goto out;
+ }
+
+ oproc->need_resume = false;
+ rproc->state = RPROC_RUNNING;
+
+ pm_runtime_mark_last_busy(dev);
+out:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+
+static int omap_rproc_runtime_suspend(struct device *dev)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+ struct omap_rproc *oproc = rproc->priv;
+ int ret;
+
+ mutex_lock(&rproc->lock);
+ if (rproc->state == RPROC_CRASHED) {
+ dev_dbg(dev, "rproc cannot be runtime suspended when crashed!\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (WARN_ON(rproc->state != RPROC_RUNNING)) {
+ dev_err(dev, "rproc cannot be runtime suspended when not running!\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * do not even attempt suspend if the remote processor is not
+ * idled for runtime auto-suspend
+ */
+ if (!_is_rproc_in_standby(oproc)) {
+ ret = -EBUSY;
+ goto abort;
+ }
+
+ ret = _omap_rproc_suspend(rproc, true);
+ if (ret)
+ goto abort;
+
+ rproc->state = RPROC_SUSPENDED;
+ mutex_unlock(&rproc->lock);
+ return 0;
+
+abort:
+ pm_runtime_mark_last_busy(dev);
+out:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+
+static int omap_rproc_runtime_resume(struct device *dev)
+{
+ struct rproc *rproc = dev_get_drvdata(dev);
+ int ret;
+
+ mutex_lock(&rproc->lock);
+ if (WARN_ON(rproc->state != RPROC_SUSPENDED)) {
+ dev_err(dev, "rproc cannot be runtime resumed if not suspended! state=%d\n",
+ rproc->state);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = _omap_rproc_resume(rproc, true);
+ if (ret) {
+ dev_err(dev, "runtime resume failed %d\n", ret);
+ goto out;
+ }
+
+ rproc->state = RPROC_RUNNING;
+out:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+#endif /* CONFIG_PM */
+
+static const struct omap_rproc_mem_data ipu_mems[] = {
+ { .name = "l2ram", .dev_addr = 0x20000000 },
+ { },
+};
+
+static const struct omap_rproc_mem_data dra7_dsp_mems[] = {
+ { .name = "l2ram", .dev_addr = 0x800000 },
+ { .name = "l1pram", .dev_addr = 0xe00000 },
+ { .name = "l1dram", .dev_addr = 0xf00000 },
+ { },
+};
+
+static const struct omap_rproc_dev_data omap4_dsp_dev_data = {
+ .device_name = "dsp",
+};
+
+static const struct omap_rproc_dev_data omap4_ipu_dev_data = {
+ .device_name = "ipu",
+ .mems = ipu_mems,
+};
+
+static const struct omap_rproc_dev_data omap5_dsp_dev_data = {
+ .device_name = "dsp",
+};
+
+static const struct omap_rproc_dev_data omap5_ipu_dev_data = {
+ .device_name = "ipu",
+ .mems = ipu_mems,
+};
+
+static const struct omap_rproc_dev_data dra7_dsp_dev_data = {
+ .device_name = "dsp",
+ .mems = dra7_dsp_mems,
+};
+
+static const struct omap_rproc_dev_data dra7_ipu_dev_data = {
+ .device_name = "ipu",
+ .mems = ipu_mems,
};
+static const struct of_device_id omap_rproc_of_match[] = {
+ {
+ .compatible = "ti,omap4-dsp",
+ .data = &omap4_dsp_dev_data,
+ },
+ {
+ .compatible = "ti,omap4-ipu",
+ .data = &omap4_ipu_dev_data,
+ },
+ {
+ .compatible = "ti,omap5-dsp",
+ .data = &omap5_dsp_dev_data,
+ },
+ {
+ .compatible = "ti,omap5-ipu",
+ .data = &omap5_ipu_dev_data,
+ },
+ {
+ .compatible = "ti,dra7-dsp",
+ .data = &dra7_dsp_dev_data,
+ },
+ {
+ .compatible = "ti,dra7-ipu",
+ .data = &dra7_ipu_dev_data,
+ },
+ {
+ /* end */
+ },
+};
+MODULE_DEVICE_TABLE(of, omap_rproc_of_match);
+
+static const char *omap_rproc_get_firmware(struct platform_device *pdev)
+{
+ const char *fw_name;
+ int ret;
+
+ ret = of_property_read_string(pdev->dev.of_node, "firmware-name",
+ &fw_name);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return fw_name;
+}
+
+static int omap_rproc_get_boot_data(struct platform_device *pdev,
+ struct rproc *rproc)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct omap_rproc *oproc = rproc->priv;
+ const struct omap_rproc_dev_data *data;
+ int ret;
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
+ return -ENODEV;
+
+ if (!of_property_read_bool(np, "ti,bootreg"))
+ return 0;
+
+ oproc->boot_data = devm_kzalloc(&pdev->dev, sizeof(*oproc->boot_data),
+ GFP_KERNEL);
+ if (!oproc->boot_data)
+ return -ENOMEM;
+
+ oproc->boot_data->syscon =
+ syscon_regmap_lookup_by_phandle(np, "ti,bootreg");
+ if (IS_ERR(oproc->boot_data->syscon)) {
+ ret = PTR_ERR(oproc->boot_data->syscon);
+ return ret;
+ }
+
+ if (of_property_read_u32_index(np, "ti,bootreg", 1,
+ &oproc->boot_data->boot_reg)) {
+ dev_err(&pdev->dev, "couldn't get the boot register\n");
+ return -EINVAL;
+ }
+
+ of_property_read_u32_index(np, "ti,bootreg", 2,
+ &oproc->boot_data->boot_reg_shift);
+
+ return 0;
+}
+
+static int omap_rproc_of_get_internal_memories(struct platform_device *pdev,
+ struct rproc *rproc)
+{
+ struct omap_rproc *oproc = rproc->priv;
+ struct device *dev = &pdev->dev;
+ const struct omap_rproc_dev_data *data;
+ struct resource *res;
+ int num_mems;
+ int i;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ if (!data->mems)
+ return 0;
+
+ num_mems = of_property_count_elems_of_size(dev->of_node, "reg",
+ sizeof(u32)) / 2;
+
+ oproc->mem = devm_kcalloc(dev, num_mems, sizeof(*oproc->mem),
+ GFP_KERNEL);
+ if (!oproc->mem)
+ return -ENOMEM;
+
+ for (i = 0; data->mems[i].name; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ data->mems[i].name);
+ if (!res) {
+ dev_err(dev, "no memory defined for %s\n",
+ data->mems[i].name);
+ return -ENOMEM;
+ }
+ oproc->mem[i].cpu_addr = devm_ioremap_resource(dev, res);
+ if (IS_ERR(oproc->mem[i].cpu_addr)) {
+ dev_err(dev, "failed to parse and map %s memory\n",
+ data->mems[i].name);
+ return PTR_ERR(oproc->mem[i].cpu_addr);
+ }
+ oproc->mem[i].bus_addr = res->start;
+ oproc->mem[i].dev_addr = data->mems[i].dev_addr;
+ oproc->mem[i].size = resource_size(res);
+
+ dev_dbg(dev, "memory %8s: bus addr %pa size 0x%x va %pK da 0x%x\n",
+ data->mems[i].name, &oproc->mem[i].bus_addr,
+ oproc->mem[i].size, oproc->mem[i].cpu_addr,
+ oproc->mem[i].dev_addr);
+ }
+ oproc->num_mems = num_mems;
+
+ return 0;
+}
+
+#ifdef CONFIG_OMAP_REMOTEPROC_WATCHDOG
+static int omap_rproc_count_wdog_timers(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ ret = of_count_phandle_with_args(np, "ti,watchdog-timers", NULL);
+ if (ret <= 0) {
+ dev_dbg(dev, "device does not have watchdog timers, status = %d\n",
+ ret);
+ ret = 0;
+ }
+
+ return ret;
+}
+#else
+static int omap_rproc_count_wdog_timers(struct device *dev)
+{
+ return 0;
+}
+#endif
+
+static int omap_rproc_of_get_timers(struct platform_device *pdev,
+ struct rproc *rproc)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct omap_rproc *oproc = rproc->priv;
+ struct device *dev = &pdev->dev;
+ int num_timers;
+
+ /*
+ * Timer nodes are used in client nodes as plain phandle lists, so
+ * retrieve the count by counting phandles without argument cells
+ */
+ oproc->num_timers = of_count_phandle_with_args(np, "ti,timers", NULL);
+ if (oproc->num_timers <= 0) {
+ dev_dbg(dev, "device does not have timers, status = %d\n",
+ oproc->num_timers);
+ oproc->num_timers = 0;
+ }
+
+ oproc->num_wd_timers = omap_rproc_count_wdog_timers(dev);
+
+ num_timers = oproc->num_timers + oproc->num_wd_timers;
+ if (num_timers) {
+ oproc->timers = devm_kcalloc(dev, num_timers,
+ sizeof(*oproc->timers),
+ GFP_KERNEL);
+ if (!oproc->timers)
+ return -ENOMEM;
+
+ dev_dbg(dev, "device has %d tick timers and %d watchdog timers\n",
+ oproc->num_timers, oproc->num_wd_timers);
+ }
+
+ return 0;
+}
+
static int omap_rproc_probe(struct platform_device *pdev)
{
- struct omap_rproc_pdata *pdata = pdev->dev.platform_data;
+ struct device_node *np = pdev->dev.of_node;
struct omap_rproc *oproc;
struct rproc *rproc;
+ const char *firmware;
int ret;
+ struct reset_control *reset;
+
+ if (!np) {
+ dev_err(&pdev->dev, "only DT-based devices are supported\n");
+ return -ENODEV;
+ }
+
+ reset = devm_reset_control_array_get_exclusive(&pdev->dev);
+ if (IS_ERR(reset))
+ return PTR_ERR(reset);
+
+ firmware = omap_rproc_get_firmware(pdev);
+ if (IS_ERR(firmware))
+ return PTR_ERR(firmware);
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
@@ -188,24 +1304,60 @@ static int omap_rproc_probe(struct platform_device *pdev)
return ret;
}
- rproc = rproc_alloc(&pdev->dev, pdata->name, &omap_rproc_ops,
- pdata->firmware, sizeof(*oproc));
+ rproc = rproc_alloc(&pdev->dev, dev_name(&pdev->dev), &omap_rproc_ops,
+ firmware, sizeof(*oproc));
if (!rproc)
return -ENOMEM;
oproc = rproc->priv;
oproc->rproc = rproc;
+ oproc->reset = reset;
/* All existing OMAP IPU and DSP processors have an MMU */
rproc->has_iommu = true;
+ ret = omap_rproc_of_get_internal_memories(pdev, rproc);
+ if (ret)
+ goto free_rproc;
+
+ ret = omap_rproc_get_boot_data(pdev, rproc);
+ if (ret)
+ goto free_rproc;
+
+ ret = omap_rproc_of_get_timers(pdev, rproc);
+ if (ret)
+ goto free_rproc;
+
+ init_completion(&oproc->pm_comp);
+ oproc->autosuspend_delay = DEFAULT_AUTOSUSPEND_DELAY;
+
+ of_property_read_u32(pdev->dev.of_node, "ti,autosuspend-delay-ms",
+ &oproc->autosuspend_delay);
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, oproc->autosuspend_delay);
+
+ oproc->fck = devm_clk_get(&pdev->dev, 0);
+ if (IS_ERR(oproc->fck)) {
+ ret = PTR_ERR(oproc->fck);
+ goto free_rproc;
+ }
+
+ ret = of_reserved_mem_device_init(&pdev->dev);
+ if (ret) {
+ dev_warn(&pdev->dev, "device does not have specific CMA pool.\n");
+ dev_warn(&pdev->dev, "Typically this should be provided,\n");
+ dev_warn(&pdev->dev, "only omit if you know what you are doing.\n");
+ }
+
platform_set_drvdata(pdev, rproc);
ret = rproc_add(rproc);
if (ret)
- goto free_rproc;
+ goto release_mem;
return 0;
+release_mem:
+ of_reserved_mem_device_release(&pdev->dev);
free_rproc:
rproc_free(rproc);
return ret;
@@ -217,15 +1369,24 @@ static int omap_rproc_remove(struct platform_device *pdev)
rproc_del(rproc);
rproc_free(rproc);
+ of_reserved_mem_device_release(&pdev->dev);
return 0;
}
+static const struct dev_pm_ops omap_rproc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(omap_rproc_suspend, omap_rproc_resume)
+ SET_RUNTIME_PM_OPS(omap_rproc_runtime_suspend,
+ omap_rproc_runtime_resume, NULL)
+};
+
static struct platform_driver omap_rproc_driver = {
.probe = omap_rproc_probe,
.remove = omap_rproc_remove,
.driver = {
.name = "omap-rproc",
+ .pm = &omap_rproc_pm_ops,
+ .of_match_table = omap_rproc_of_match,
},
};
diff --git a/drivers/remoteproc/omap_remoteproc.h b/drivers/remoteproc/omap_remoteproc.h
index f6d2036d383d..828e13256c02 100644
--- a/drivers/remoteproc/omap_remoteproc.h
+++ b/drivers/remoteproc/omap_remoteproc.h
@@ -1,35 +1,10 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
/*
* Remote processor messaging
*
- * Copyright (C) 2011 Texas Instruments, Inc.
+ * Copyright (C) 2011-2020 Texas Instruments, Inc.
* Copyright (C) 2011 Google, Inc.
* All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Texas Instruments nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _OMAP_RPMSG_H
@@ -56,6 +31,22 @@
*
* @RP_MBOX_ABORT_REQUEST: a "please crash" request, used for testing the
* recovery mechanism (to some extent).
+ *
+ * @RP_MBOX_SUSPEND_AUTO: auto suspend request for the remote processor
+ *
+ * @RP_MBOX_SUSPEND_SYSTEM: system suspend request for the remote processor
+ *
+ * @RP_MBOX_SUSPEND_ACK: successful response from remote processor for a
+ * suspend request
+ *
+ * @RP_MBOX_SUSPEND_CANCEL: a cancel suspend response from a remote processor
+ * on a suspend request
+ *
+ * Introduce any new message definitions here.
+ *
+ * @RP_MBOX_END_MSG: Indicates end of known/defined messages from remote core.
+ * This should be the last definition.
+ *
*/
enum omap_rp_mbox_messages {
RP_MBOX_READY = 0xFFFFFF00,
@@ -64,6 +55,11 @@ enum omap_rp_mbox_messages {
RP_MBOX_ECHO_REQUEST = 0xFFFFFF03,
RP_MBOX_ECHO_REPLY = 0xFFFFFF04,
RP_MBOX_ABORT_REQUEST = 0xFFFFFF05,
+ RP_MBOX_SUSPEND_AUTO = 0xFFFFFF10,
+ RP_MBOX_SUSPEND_SYSTEM = 0xFFFFFF11,
+ RP_MBOX_SUSPEND_ACK = 0xFFFFFF12,
+ RP_MBOX_SUSPEND_CANCEL = 0xFFFFFF13,
+ RP_MBOX_END_MSG = 0xFFFFFF14,
};
#endif /* _OMAP_RPMSG_H */
diff --git a/drivers/remoteproc/pru_rproc.c b/drivers/remoteproc/pru_rproc.c
new file mode 100644
index 000000000000..1777a01fa84e
--- /dev/null
+++ b/drivers/remoteproc/pru_rproc.c
@@ -0,0 +1,919 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PRU-ICSS remoteproc driver for various TI SoCs
+ *
+ * Copyright (C) 2014-2020 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ * Author(s):
+ * Suman Anna <s-anna@ti.com>
+ * Andrew F. Davis <afd@ti.com>
+ * Grzegorz Jaszczyk <grzegorz.jaszczyk@linaro.org> for Texas Instruments
+ */
+
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/pruss_driver.h>
+#include <linux/remoteproc.h>
+
+#include "remoteproc_internal.h"
+#include "remoteproc_elf_helpers.h"
+#include "pru_rproc.h"
+
+/* PRU_ICSS_PRU_CTRL registers */
+#define PRU_CTRL_CTRL 0x0000
+#define PRU_CTRL_STS 0x0004
+#define PRU_CTRL_WAKEUP_EN 0x0008
+#define PRU_CTRL_CYCLE 0x000C
+#define PRU_CTRL_STALL 0x0010
+#define PRU_CTRL_CTBIR0 0x0020
+#define PRU_CTRL_CTBIR1 0x0024
+#define PRU_CTRL_CTPPR0 0x0028
+#define PRU_CTRL_CTPPR1 0x002C
+
+/* CTRL register bit-fields */
+#define CTRL_CTRL_SOFT_RST_N BIT(0)
+#define CTRL_CTRL_EN BIT(1)
+#define CTRL_CTRL_SLEEPING BIT(2)
+#define CTRL_CTRL_CTR_EN BIT(3)
+#define CTRL_CTRL_SINGLE_STEP BIT(8)
+#define CTRL_CTRL_RUNSTATE BIT(15)
+
+/* PRU_ICSS_PRU_DEBUG registers */
+#define PRU_DEBUG_GPREG(x) (0x0000 + (x) * 4)
+#define PRU_DEBUG_CT_REG(x) (0x0080 + (x) * 4)
+
+/* PRU/RTU/Tx_PRU Core IRAM address masks */
+#define PRU_IRAM_ADDR_MASK 0x3ffff
+#define PRU0_IRAM_ADDR_MASK 0x34000
+#define PRU1_IRAM_ADDR_MASK 0x38000
+#define RTU0_IRAM_ADDR_MASK 0x4000
+#define RTU1_IRAM_ADDR_MASK 0x6000
+#define TX_PRU0_IRAM_ADDR_MASK 0xa000
+#define TX_PRU1_IRAM_ADDR_MASK 0xc000
+
+/* PRU device addresses for various type of PRU RAMs */
+#define PRU_IRAM_DA 0 /* Instruction RAM */
+#define PRU_PDRAM_DA 0 /* Primary Data RAM */
+#define PRU_SDRAM_DA 0x2000 /* Secondary Data RAM */
+#define PRU_SHRDRAM_DA 0x10000 /* Shared Data RAM */
+
+#define MAX_PRU_SYS_EVENTS 160
+
+/**
+ * enum pru_iomem - PRU core memory/register range identifiers
+ *
+ * @PRU_IOMEM_IRAM: PRU Instruction RAM range
+ * @PRU_IOMEM_CTRL: PRU Control register range
+ * @PRU_IOMEM_DEBUG: PRU Debug register range
+ * @PRU_IOMEM_MAX: just keep this one at the end
+ */
+enum pru_iomem {
+ PRU_IOMEM_IRAM = 0,
+ PRU_IOMEM_CTRL,
+ PRU_IOMEM_DEBUG,
+ PRU_IOMEM_MAX,
+};
+
+/**
+ * enum pru_type - PRU core type identifier
+ *
+ * @PRU_TYPE_PRU: Programmable Real-time Unit
+ * @PRU_TYPE_RTU: Auxiliary Programmable Real-Time Unit
+ * @PRU_TYPE_TX_PRU: Transmit Programmable Real-Time Unit
+ * @PRU_TYPE_MAX: just keep this one at the end
+ */
+enum pru_type {
+ PRU_TYPE_PRU = 0,
+ PRU_TYPE_RTU,
+ PRU_TYPE_TX_PRU,
+ PRU_TYPE_MAX,
+};
+
+/**
+ * struct pru_private_data - device data for a PRU core
+ * @type: type of the PRU core (PRU, RTU, Tx_PRU)
+ * @is_k3: flag used to identify the need for special load handling
+ */
+struct pru_private_data {
+ enum pru_type type;
+ unsigned int is_k3 : 1;
+};
+
+/**
+ * struct pru_rproc - PRU remoteproc structure
+ * @id: id of the PRU core within the PRUSS
+ * @dev: PRU core device pointer
+ * @pruss: back-reference to parent PRUSS structure
+ * @rproc: remoteproc pointer for this PRU core
+ * @data: PRU core specific data
+ * @mem_regions: data for each of the PRU memory regions
+ * @fw_name: name of firmware image used during loading
+ * @mapped_irq: virtual interrupt numbers of created fw specific mapping
+ * @pru_interrupt_map: pointer to interrupt mapping description (firmware)
+ * @pru_interrupt_map_sz: pru_interrupt_map size
+ * @dbg_single_step: debug state variable to set PRU into single step mode
+ * @dbg_continuous: debug state variable to restore PRU execution mode
+ * @evt_count: number of mapped events
+ */
+struct pru_rproc {
+ int id;
+ struct device *dev;
+ struct pruss *pruss;
+ struct rproc *rproc;
+ const struct pru_private_data *data;
+ struct pruss_mem_region mem_regions[PRU_IOMEM_MAX];
+ const char *fw_name;
+ unsigned int *mapped_irq;
+ struct pru_irq_rsc *pru_interrupt_map;
+ size_t pru_interrupt_map_sz;
+ u32 dbg_single_step;
+ u32 dbg_continuous;
+ u8 evt_count;
+};
+
+static inline u32 pru_control_read_reg(struct pru_rproc *pru, unsigned int reg)
+{
+ return readl_relaxed(pru->mem_regions[PRU_IOMEM_CTRL].va + reg);
+}
+
+static inline
+void pru_control_write_reg(struct pru_rproc *pru, unsigned int reg, u32 val)
+{
+ writel_relaxed(val, pru->mem_regions[PRU_IOMEM_CTRL].va + reg);
+}
+
+static inline u32 pru_debug_read_reg(struct pru_rproc *pru, unsigned int reg)
+{
+ return readl_relaxed(pru->mem_regions[PRU_IOMEM_DEBUG].va + reg);
+}
+
+static int regs_show(struct seq_file *s, void *data)
+{
+ struct rproc *rproc = s->private;
+ struct pru_rproc *pru = rproc->priv;
+ int i, nregs = 32;
+ u32 pru_sts;
+ int pru_is_running;
+
+ seq_puts(s, "============== Control Registers ==============\n");
+ seq_printf(s, "CTRL := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_CTRL));
+ pru_sts = pru_control_read_reg(pru, PRU_CTRL_STS);
+ seq_printf(s, "STS (PC) := 0x%08x (0x%08x)\n", pru_sts, pru_sts << 2);
+ seq_printf(s, "WAKEUP_EN := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_WAKEUP_EN));
+ seq_printf(s, "CYCLE := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_CYCLE));
+ seq_printf(s, "STALL := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_STALL));
+ seq_printf(s, "CTBIR0 := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_CTBIR0));
+ seq_printf(s, "CTBIR1 := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_CTBIR1));
+ seq_printf(s, "CTPPR0 := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_CTPPR0));
+ seq_printf(s, "CTPPR1 := 0x%08x\n",
+ pru_control_read_reg(pru, PRU_CTRL_CTPPR1));
+
+ seq_puts(s, "=============== Debug Registers ===============\n");
+ pru_is_running = pru_control_read_reg(pru, PRU_CTRL_CTRL) &
+ CTRL_CTRL_RUNSTATE;
+ if (pru_is_running) {
+ seq_puts(s, "PRU is executing, cannot print/access debug registers.\n");
+ return 0;
+ }
+
+ for (i = 0; i < nregs; i++) {
+ seq_printf(s, "GPREG%-2d := 0x%08x\tCT_REG%-2d := 0x%08x\n",
+ i, pru_debug_read_reg(pru, PRU_DEBUG_GPREG(i)),
+ i, pru_debug_read_reg(pru, PRU_DEBUG_CT_REG(i)));
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(regs);
+
+/*
+ * Control PRU single-step mode
+ *
+ * This is a debug helper function used for controlling the single-step
+ * mode of the PRU. The PRU Debug registers are not accessible when the
+ * PRU is in RUNNING state.
+ *
+ * Writing a non-zero value sets the PRU into single-step mode irrespective
+ * of its previous state. The PRU mode is saved only on the first set into
+ * a single-step mode. Writing a zero value will restore the PRU into its
+ * original mode.
+ */
+static int pru_rproc_debug_ss_set(void *data, u64 val)
+{
+ struct rproc *rproc = data;
+ struct pru_rproc *pru = rproc->priv;
+ u32 reg_val;
+
+ val = val ? 1 : 0;
+ if (!val && !pru->dbg_single_step)
+ return 0;
+
+ reg_val = pru_control_read_reg(pru, PRU_CTRL_CTRL);
+
+ if (val && !pru->dbg_single_step)
+ pru->dbg_continuous = reg_val;
+
+ if (val)
+ reg_val |= CTRL_CTRL_SINGLE_STEP | CTRL_CTRL_EN;
+ else
+ reg_val = pru->dbg_continuous;
+
+ pru->dbg_single_step = val;
+ pru_control_write_reg(pru, PRU_CTRL_CTRL, reg_val);
+
+ return 0;
+}
+
+static int pru_rproc_debug_ss_get(void *data, u64 *val)
+{
+ struct rproc *rproc = data;
+ struct pru_rproc *pru = rproc->priv;
+
+ *val = pru->dbg_single_step;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(pru_rproc_debug_ss_fops, pru_rproc_debug_ss_get,
+ pru_rproc_debug_ss_set, "%llu\n");
+
+/*
+ * Create PRU-specific debugfs entries
+ *
+ * The entries are created only if the parent remoteproc debugfs directory
+ * exists, and will be cleaned up by the remoteproc core.
+ */
+static void pru_rproc_create_debug_entries(struct rproc *rproc)
+{
+ if (!rproc->dbg_dir)
+ return;
+
+ debugfs_create_file("regs", 0400, rproc->dbg_dir,
+ rproc, &regs_fops);
+ debugfs_create_file("single_step", 0600, rproc->dbg_dir,
+ rproc, &pru_rproc_debug_ss_fops);
+}
+
+static void pru_dispose_irq_mapping(struct pru_rproc *pru)
+{
+ if (!pru->mapped_irq)
+ return;
+
+ while (pru->evt_count) {
+ pru->evt_count--;
+ if (pru->mapped_irq[pru->evt_count] > 0)
+ irq_dispose_mapping(pru->mapped_irq[pru->evt_count]);
+ }
+
+ kfree(pru->mapped_irq);
+ pru->mapped_irq = NULL;
+}
+
+/*
+ * Parse the custom PRU interrupt map resource and configure the INTC
+ * appropriately.
+ */
+static int pru_handle_intrmap(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev.parent;
+ struct pru_rproc *pru = rproc->priv;
+ struct pru_irq_rsc *rsc = pru->pru_interrupt_map;
+ struct irq_fwspec fwspec;
+ struct device_node *parent, *irq_parent;
+ int i, ret = 0;
+
+ /* not having pru_interrupt_map is not an error */
+ if (!rsc)
+ return 0;
+
+ /* currently supporting only type 0 */
+ if (rsc->type != 0) {
+ dev_err(dev, "unsupported rsc type: %d\n", rsc->type);
+ return -EINVAL;
+ }
+
+ if (rsc->num_evts > MAX_PRU_SYS_EVENTS)
+ return -EINVAL;
+
+ if (sizeof(*rsc) + rsc->num_evts * sizeof(struct pruss_int_map) !=
+ pru->pru_interrupt_map_sz)
+ return -EINVAL;
+
+ pru->evt_count = rsc->num_evts;
+ pru->mapped_irq = kcalloc(pru->evt_count, sizeof(unsigned int),
+ GFP_KERNEL);
+ if (!pru->mapped_irq) {
+ pru->evt_count = 0;
+ return -ENOMEM;
+ }
+
+ /*
+ * parse and fill in system event to interrupt channel and
+ * channel-to-host mapping. The interrupt controller to be used
+ * for these mappings for a given PRU remoteproc is always its
+ * corresponding sibling PRUSS INTC node.
+ */
+ parent = of_get_parent(dev_of_node(pru->dev));
+ if (!parent) {
+ kfree(pru->mapped_irq);
+ pru->mapped_irq = NULL;
+ pru->evt_count = 0;
+ return -ENODEV;
+ }
+
+ irq_parent = of_get_child_by_name(parent, "interrupt-controller");
+ of_node_put(parent);
+ if (!irq_parent) {
+ kfree(pru->mapped_irq);
+ pru->mapped_irq = NULL;
+ pru->evt_count = 0;
+ return -ENODEV;
+ }
+
+ fwspec.fwnode = of_node_to_fwnode(irq_parent);
+ fwspec.param_count = 3;
+ for (i = 0; i < pru->evt_count; i++) {
+ fwspec.param[0] = rsc->pru_intc_map[i].event;
+ fwspec.param[1] = rsc->pru_intc_map[i].chnl;
+ fwspec.param[2] = rsc->pru_intc_map[i].host;
+
+ dev_dbg(dev, "mapping%d: event %d, chnl %d, host %d\n",
+ i, fwspec.param[0], fwspec.param[1], fwspec.param[2]);
+
+ pru->mapped_irq[i] = irq_create_fwspec_mapping(&fwspec);
+ if (!pru->mapped_irq[i]) {
+ dev_err(dev, "failed to get virq for fw mapping %d: event %d chnl %d host %d\n",
+ i, fwspec.param[0], fwspec.param[1],
+ fwspec.param[2]);
+ ret = -EINVAL;
+ goto map_fail;
+ }
+ }
+ of_node_put(irq_parent);
+
+ return ret;
+
+map_fail:
+ pru_dispose_irq_mapping(pru);
+ of_node_put(irq_parent);
+
+ return ret;
+}
+
+static int pru_rproc_start(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ struct pru_rproc *pru = rproc->priv;
+ const char *names[PRU_TYPE_MAX] = { "PRU", "RTU", "Tx_PRU" };
+ u32 val;
+ int ret;
+
+ dev_dbg(dev, "starting %s%d: entry-point = 0x%llx\n",
+ names[pru->data->type], pru->id, (rproc->bootaddr >> 2));
+
+ ret = pru_handle_intrmap(rproc);
+	/*
+	 * Reset references to the PRU interrupt map - they will stop being
+	 * valid after rproc_start returns
+	 */
+ pru->pru_interrupt_map = NULL;
+ pru->pru_interrupt_map_sz = 0;
+ if (ret)
+ return ret;
+
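+	/*
+	 * Illustrative note (values assumed): the control register takes a
+	 * 16-bit instruction word address in its upper half, so a byte
+	 * entry-point of e.g. 0x100 becomes word address 0x40 in
+	 * CTRL[31:16] below.
+	 */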
+ val = CTRL_CTRL_EN | ((rproc->bootaddr >> 2) << 16);
+ pru_control_write_reg(pru, PRU_CTRL_CTRL, val);
+
+ return 0;
+}
+
+static int pru_rproc_stop(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ struct pru_rproc *pru = rproc->priv;
+ const char *names[PRU_TYPE_MAX] = { "PRU", "RTU", "Tx_PRU" };
+ u32 val;
+
+ dev_dbg(dev, "stopping %s%d\n", names[pru->data->type], pru->id);
+
+ val = pru_control_read_reg(pru, PRU_CTRL_CTRL);
+ val &= ~CTRL_CTRL_EN;
+ pru_control_write_reg(pru, PRU_CTRL_CTRL, val);
+
+	/* dispose of the IRQ mapping - new firmware can provide a new mapping */
+ pru_dispose_irq_mapping(pru);
+
+ return 0;
+}
+
+/*
+ * Convert PRU device address (data spaces only) to kernel virtual address.
+ *
+ * Each PRU has access to all data memories within the PRUSS, accessible at
+ * different ranges. So, look through both its primary and secondary Data
+ * RAMs as well as any shared Data RAM to convert a PRU device address to
+ * kernel virtual address. Data RAM0 is primary Data RAM for PRU0 and Data
+ * RAM1 is primary Data RAM for PRU1.
+ */
+static void *pru_d_da_to_va(struct pru_rproc *pru, u32 da, size_t len)
+{
+ struct pruss_mem_region dram0, dram1, shrd_ram;
+ struct pruss *pruss = pru->pruss;
+ u32 offset;
+ void *va = NULL;
+
+ if (len == 0)
+ return NULL;
+
+ dram0 = pruss->mem_regions[PRUSS_MEM_DRAM0];
+ dram1 = pruss->mem_regions[PRUSS_MEM_DRAM1];
+ /* PRU1 has its local RAM addresses reversed */
+ if (pru->id == 1)
+ swap(dram0, dram1);
+ shrd_ram = pruss->mem_regions[PRUSS_MEM_SHRD_RAM2];
+
+ if (da >= PRU_PDRAM_DA && da + len <= PRU_PDRAM_DA + dram0.size) {
+ offset = da - PRU_PDRAM_DA;
+ va = (__force void *)(dram0.va + offset);
+ } else if (da >= PRU_SDRAM_DA &&
+ da + len <= PRU_SDRAM_DA + dram1.size) {
+ offset = da - PRU_SDRAM_DA;
+ va = (__force void *)(dram1.va + offset);
+ } else if (da >= PRU_SHRDRAM_DA &&
+ da + len <= PRU_SHRDRAM_DA + shrd_ram.size) {
+ offset = da - PRU_SHRDRAM_DA;
+ va = (__force void *)(shrd_ram.va + offset);
+ }
+
+ return va;
+}
+
+/*
+ * Convert PRU device address (instruction space) to kernel virtual address.
+ *
+ * A PRU does not have a unified address space. Each PRU has its own
+ * private Instruction RAM, whose device address is identical to that
+ * of its primary Data RAM.
+ */
+static void *pru_i_da_to_va(struct pru_rproc *pru, u32 da, size_t len)
+{
+ u32 offset;
+ void *va = NULL;
+
+ if (len == 0)
+ return NULL;
+
+ /*
+ * GNU binutils do not support multiple address spaces. The GNU
+ * linker's default linker script places IRAM at an arbitrary high
+ * offset, in order to differentiate it from DRAM. Hence we need to
+ * strip the artificial offset in the IRAM addresses coming from the
+ * ELF file.
+ *
+ * The TI proprietary linker would never set those higher IRAM address
+ * bits anyway. PRU architecture limits the program counter to 16-bit
+ * word-address range. This in turn corresponds to 18-bit IRAM
+ * byte-address range for ELF.
+ *
+ * Two more bits are added just in case to make the final 20-bit mask.
+	 * The idea is to have a safeguard in case TI decides to add banking
+ * in future SoCs.
+ */
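+	/*
+	 * Illustrative example (addresses assumed, not from any TRM): a
+	 * GNU-linked ELF may carry an IRAM address such as 0x20000100;
+	 * masking with 0xfffff yields device address 0x00100 for the
+	 * range check below.
+	 */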
+ da &= 0xfffff;
+
+ if (da >= PRU_IRAM_DA &&
+ da + len <= PRU_IRAM_DA + pru->mem_regions[PRU_IOMEM_IRAM].size) {
+ offset = da - PRU_IRAM_DA;
+ va = (__force void *)(pru->mem_regions[PRU_IOMEM_IRAM].va +
+ offset);
+ }
+
+ return va;
+}
+
+/*
+ * Provide address translations only for the PRU Data RAMs through the
+ * remoteproc core for any PRU client drivers. Access to the PRU Instruction
+ * RAM is restricted to the PRU loader code alone.
+ */
+static void *pru_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct pru_rproc *pru = rproc->priv;
+
+ return pru_d_da_to_va(pru, da, len);
+}
+
+/* PRU-specific address translator used by PRU loader. */
+static void *pru_da_to_va(struct rproc *rproc, u64 da, size_t len, bool is_iram)
+{
+ struct pru_rproc *pru = rproc->priv;
+ void *va;
+
+ if (is_iram)
+ va = pru_i_da_to_va(pru, da, len);
+ else
+ va = pru_d_da_to_va(pru, da, len);
+
+ return va;
+}
+
+static struct rproc_ops pru_rproc_ops = {
+ .start = pru_rproc_start,
+ .stop = pru_rproc_stop,
+ .da_to_va = pru_rproc_da_to_va,
+};
+
+/*
+ * Custom memory copy implementation for ICSSG PRU/RTU/Tx_PRU cores
+ *
+ * The ICSSG PRU/RTU/Tx_PRU cores have a memory copying issue with IRAM
+ * memories that is not seen on previous-generation SoCs. The data is
+ * reflected properly in the IRAM memories only for integer (4-byte) copies;
+ * any unaligned copy zeroes out the other pre-existing bytes within that
+ * 4-byte boundary, resulting in wrong text/code in the IRAMs. Also, the
+ * IRAM memory port interface does not allow any 8-byte copies (as commonly
+ * used by the ARM64 memcpy implementation) and throws an exception. The
+ * DRAM memory ports do not show this behavior.
+ */
+static int pru_rproc_memcpy(void *dest, const void *src, size_t count)
+{
+ const u32 *s = src;
+ u32 *d = dest;
+ size_t size = count / 4;
+ u32 *tmp_src = NULL;
+
+ /*
+ * TODO: relax limitation of 4-byte aligned dest addresses and copy
+ * sizes
+ */
+ if ((long)dest % 4 || count % 4)
+ return -EINVAL;
+
+ /* src offsets in ELF firmware image can be non-aligned */
+ if ((long)src % 4) {
+ tmp_src = kmemdup(src, count, GFP_KERNEL);
+ if (!tmp_src)
+ return -ENOMEM;
+ s = tmp_src;
+ }
+
+ while (size--)
+ *d++ = *s++;
+
+ kfree(tmp_src);
+
+ return 0;
+}
+
+static int
+pru_rproc_load_elf_segments(struct rproc *rproc, const struct firmware *fw)
+{
+ struct pru_rproc *pru = rproc->priv;
+ struct device *dev = &rproc->dev;
+ struct elf32_hdr *ehdr;
+ struct elf32_phdr *phdr;
+ int i, ret = 0;
+ const u8 *elf_data = fw->data;
+
+ ehdr = (struct elf32_hdr *)elf_data;
+ phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
+
+ /* go through the available ELF segments */
+ for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
+ u32 da = phdr->p_paddr;
+ u32 memsz = phdr->p_memsz;
+ u32 filesz = phdr->p_filesz;
+ u32 offset = phdr->p_offset;
+ bool is_iram;
+ void *ptr;
+
+ if (phdr->p_type != PT_LOAD || !filesz)
+ continue;
+
+ dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
+ phdr->p_type, da, memsz, filesz);
+
+ if (filesz > memsz) {
+ dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
+ filesz, memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (offset + filesz > fw->size) {
+ dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
+ offset + filesz, fw->size);
+ ret = -EINVAL;
+ break;
+ }
+
+ /* grab the kernel address for this device address */
+ is_iram = phdr->p_flags & PF_X;
+ ptr = pru_da_to_va(rproc, da, memsz, is_iram);
+ if (!ptr) {
+ dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (pru->data->is_k3) {
+ ret = pru_rproc_memcpy(ptr, elf_data + phdr->p_offset,
+ filesz);
+ if (ret) {
+ dev_err(dev, "PRU memory copy failed for da 0x%x memsz 0x%x\n",
+ da, memsz);
+ break;
+ }
+ } else {
+ memcpy(ptr, elf_data + phdr->p_offset, filesz);
+ }
+
+		/* skip the memzero logic performed by the remoteproc ELF loader */
+ }
+
+ return ret;
+}
+
+static const void *
+pru_rproc_find_interrupt_map(struct device *dev, const struct firmware *fw)
+{
+ struct elf32_shdr *shdr, *name_table_shdr;
+ const char *name_table;
+ const u8 *elf_data = fw->data;
+ struct elf32_hdr *ehdr = (struct elf32_hdr *)elf_data;
+ u16 shnum = ehdr->e_shnum;
+ u16 shstrndx = ehdr->e_shstrndx;
+ int i;
+
+ /* first, get the section header */
+ shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
+ /* compute name table section header entry in shdr array */
+ name_table_shdr = shdr + shstrndx;
+ /* finally, compute the name table section address in elf */
+ name_table = elf_data + name_table_shdr->sh_offset;
+
+ for (i = 0; i < shnum; i++, shdr++) {
+ u32 size = shdr->sh_size;
+ u32 offset = shdr->sh_offset;
+ u32 name = shdr->sh_name;
+
+ if (strcmp(name_table + name, ".pru_irq_map"))
+ continue;
+
+ /* make sure we have the entire irq map */
+ if (offset + size > fw->size || offset + size < size) {
+ dev_err(dev, ".pru_irq_map section truncated\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* make sure irq map has at least the header */
+ if (sizeof(struct pru_irq_rsc) > size) {
+ dev_err(dev, "header-less .pru_irq_map section\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return shdr;
+ }
+
+ dev_dbg(dev, "no .pru_irq_map section found for this fw\n");
+
+ return NULL;
+}
+
+/*
+ * Use a custom parse_fw callback function for dealing with PRU firmware
+ * specific sections.
+ *
+ * The firmware blob can contain two optional ELF sections: .resource_table
+ * and .pru_irq_map. The latter contains the PRUSS interrupt mapping
+ * description, which needs to be set up before powering on the PRU core. To
+ * avoid wasting PRU RAM, this ELF section is not mapped to any ELF segment
+ * (by the firmware linker) and therefore is not loaded into PRU memory.
+ */
+static int pru_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ struct device *dev = &rproc->dev;
+ struct pru_rproc *pru = rproc->priv;
+ const u8 *elf_data = fw->data;
+ const void *shdr;
+ u8 class = fw_elf_get_class(fw);
+ u64 sh_offset;
+ int ret;
+
+ /* load optional rsc table */
+ ret = rproc_elf_load_rsc_table(rproc, fw);
+ if (ret == -EINVAL)
+ dev_dbg(&rproc->dev, "no resource table found for this fw\n");
+ else if (ret)
+ return ret;
+
+	/* find the optional .pru_irq_map section; not having it is not an error */
+ shdr = pru_rproc_find_interrupt_map(dev, fw);
+ if (IS_ERR(shdr))
+ return PTR_ERR(shdr);
+
+ if (!shdr)
+ return 0;
+
+	/* preserve pointer to the PRU interrupt map together with its size */
+ sh_offset = elf_shdr_get_sh_offset(class, shdr);
+ pru->pru_interrupt_map = (struct pru_irq_rsc *)(elf_data + sh_offset);
+ pru->pru_interrupt_map_sz = elf_shdr_get_sh_size(class, shdr);
+
+ return 0;
+}
+
+/*
+ * Compute PRU id based on the IRAM addresses. The PRU IRAMs are
+ * always at a particular offset within the PRUSS address space.
+ */
+static int pru_rproc_set_id(struct pru_rproc *pru)
+{
+ int ret = 0;
+
+ switch (pru->mem_regions[PRU_IOMEM_IRAM].pa & PRU_IRAM_ADDR_MASK) {
+ case TX_PRU0_IRAM_ADDR_MASK:
+ fallthrough;
+ case RTU0_IRAM_ADDR_MASK:
+ fallthrough;
+ case PRU0_IRAM_ADDR_MASK:
+ pru->id = 0;
+ break;
+ case TX_PRU1_IRAM_ADDR_MASK:
+ fallthrough;
+ case RTU1_IRAM_ADDR_MASK:
+ fallthrough;
+ case PRU1_IRAM_ADDR_MASK:
+ pru->id = 1;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int pru_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct platform_device *ppdev = to_platform_device(dev->parent);
+ struct pru_rproc *pru;
+ const char *fw_name;
+ struct rproc *rproc = NULL;
+ struct resource *res;
+ int i, ret;
+ const struct pru_private_data *data;
+ const char *mem_names[PRU_IOMEM_MAX] = { "iram", "control", "debug" };
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
+ return -ENODEV;
+
+ ret = of_property_read_string(np, "firmware-name", &fw_name);
+ if (ret) {
+ dev_err(dev, "unable to retrieve firmware-name %d\n", ret);
+ return ret;
+ }
+
+ rproc = devm_rproc_alloc(dev, pdev->name, &pru_rproc_ops, fw_name,
+ sizeof(*pru));
+ if (!rproc) {
+ dev_err(dev, "rproc_alloc failed\n");
+ return -ENOMEM;
+ }
+ /* use a custom load function to deal with PRU-specific quirks */
+ rproc->ops->load = pru_rproc_load_elf_segments;
+
+ /* use a custom parse function to deal with PRU-specific resources */
+ rproc->ops->parse_fw = pru_rproc_parse_fw;
+
+ /* error recovery is not supported for PRUs */
+ rproc->recovery_disabled = true;
+
+ /*
+ * rproc_add will auto-boot the processor normally, but this is not
+	 * desired with the PRU client-driven boot-flow methodology. A PRU
+ * application/client driver will boot the corresponding PRU
+ * remote-processor as part of its state machine either through the
+ * remoteproc sysfs interface or through the equivalent kernel API.
+ */
+ rproc->auto_boot = false;
+
+ pru = rproc->priv;
+ pru->dev = dev;
+ pru->data = data;
+ pru->pruss = platform_get_drvdata(ppdev);
+ pru->rproc = rproc;
+ pru->fw_name = fw_name;
+
+ for (i = 0; i < ARRAY_SIZE(mem_names); i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ mem_names[i]);
+ pru->mem_regions[i].va = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pru->mem_regions[i].va)) {
+ dev_err(dev, "failed to parse and map memory resource %d %s\n",
+ i, mem_names[i]);
+ ret = PTR_ERR(pru->mem_regions[i].va);
+ return ret;
+ }
+ pru->mem_regions[i].pa = res->start;
+ pru->mem_regions[i].size = resource_size(res);
+
+ dev_dbg(dev, "memory %8s: pa %pa size 0x%zx va %pK\n",
+ mem_names[i], &pru->mem_regions[i].pa,
+ pru->mem_regions[i].size, pru->mem_regions[i].va);
+ }
+
+ ret = pru_rproc_set_id(pru);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, rproc);
+
+ ret = devm_rproc_add(dev, pru->rproc);
+ if (ret) {
+ dev_err(dev, "rproc_add failed: %d\n", ret);
+ return ret;
+ }
+
+ pru_rproc_create_debug_entries(rproc);
+
+ dev_dbg(dev, "PRU rproc node %pOF probed successfully\n", np);
+
+ return 0;
+}
+
+static int pru_rproc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rproc *rproc = platform_get_drvdata(pdev);
+
+ dev_dbg(dev, "%s: removing rproc %s\n", __func__, rproc->name);
+
+ return 0;
+}
+
+static const struct pru_private_data pru_data = {
+ .type = PRU_TYPE_PRU,
+};
+
+static const struct pru_private_data k3_pru_data = {
+ .type = PRU_TYPE_PRU,
+ .is_k3 = 1,
+};
+
+static const struct pru_private_data k3_rtu_data = {
+ .type = PRU_TYPE_RTU,
+ .is_k3 = 1,
+};
+
+static const struct pru_private_data k3_tx_pru_data = {
+ .type = PRU_TYPE_TX_PRU,
+ .is_k3 = 1,
+};
+
+static const struct of_device_id pru_rproc_match[] = {
+ { .compatible = "ti,am3356-pru", .data = &pru_data },
+ { .compatible = "ti,am4376-pru", .data = &pru_data },
+ { .compatible = "ti,am5728-pru", .data = &pru_data },
+ { .compatible = "ti,am642-pru", .data = &k3_pru_data },
+ { .compatible = "ti,am642-rtu", .data = &k3_rtu_data },
+ { .compatible = "ti,am642-tx-pru", .data = &k3_tx_pru_data },
+ { .compatible = "ti,k2g-pru", .data = &pru_data },
+ { .compatible = "ti,am654-pru", .data = &k3_pru_data },
+ { .compatible = "ti,am654-rtu", .data = &k3_rtu_data },
+ { .compatible = "ti,am654-tx-pru", .data = &k3_tx_pru_data },
+ { .compatible = "ti,j721e-pru", .data = &k3_pru_data },
+ { .compatible = "ti,j721e-rtu", .data = &k3_rtu_data },
+ { .compatible = "ti,j721e-tx-pru", .data = &k3_tx_pru_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pru_rproc_match);
+
+static struct platform_driver pru_rproc_driver = {
+ .driver = {
+ .name = "pru-rproc",
+ .of_match_table = pru_rproc_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = pru_rproc_probe,
+ .remove = pru_rproc_remove,
+};
+module_platform_driver(pru_rproc_driver);
+
+MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_AUTHOR("Grzegorz Jaszczyk <grzegorz.jaszczyk@linaro.org>");
+MODULE_DESCRIPTION("PRU-ICSS Remote Processor Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/pru_rproc.h b/drivers/remoteproc/pru_rproc.h
new file mode 100644
index 000000000000..8ee9c3171610
--- /dev/null
+++ b/drivers/remoteproc/pru_rproc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * PRUSS Remote Processor specific types
+ *
+ * Copyright (C) 2014-2020 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#ifndef _PRU_RPROC_H_
+#define _PRU_RPROC_H_
+
+/**
+ * struct pruss_int_map - PRU system events _to_ channel and host mapping
+ * @event: number of the system event
+ * @chnl: channel number assigned to a given @event
+ * @host: host number assigned to a given @chnl
+ *
+ * PRU system events are mapped to channels, and these channels are mapped
+ * to host interrupts. Events can be mapped to channels in a one-to-one or
+ * many-to-one ratio (multiple events per channel), and channels can be
+ * mapped to host interrupts in a one-to-one or many-to-one ratio (multiple
+ * channels per interrupt).
+ */
+struct pruss_int_map {
+ u8 event;
+ u8 chnl;
+ u8 host;
+};
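+
+/*
+ * Illustrative mapping (hypothetical values): a firmware could route
+ * system events 16 and 17 to channel 2, and channel 2 to host interrupt
+ * 2, yielding the two entries { 16, 2, 2 } and { 17, 2, 2 }.
+ */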
+
+/**
+ * struct pru_irq_rsc - PRU firmware section header for IRQ data
+ * @type: resource type
+ * @num_evts: number of described events
+ * @pru_intc_map: PRU interrupt routing description
+ *
+ * The PRU firmware blob can contain an optional .pru_irq_map ELF section,
+ * which provides the PRUSS interrupt mapping description. The pru_irq_rsc
+ * struct describes the resource entry format.
+ */
+struct pru_irq_rsc {
+ u8 type;
+ u8 num_evts;
+ struct pruss_int_map pru_intc_map[];
+} __packed;
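+
+/*
+ * A well-formed .pru_irq_map section is thus exactly
+ * sizeof(struct pru_irq_rsc) + num_evts * sizeof(struct pruss_int_map)
+ * bytes; pru_handle_intrmap() verifies this before using the map.
+ */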
+
+#endif /* _PRU_RPROC_H_ */
diff --git a/drivers/remoteproc/qcom_common.c b/drivers/remoteproc/qcom_common.c
index 60650bcc8c67..4b91e3c9eafa 100644
--- a/drivers/remoteproc/qcom_common.c
+++ b/drivers/remoteproc/qcom_common.c
@@ -12,9 +12,12 @@
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/remoteproc.h>
+#include <linux/remoteproc/qcom_rproc.h>
#include <linux/rpmsg/qcom_glink.h>
#include <linux/rpmsg/qcom_smd.h>
+#include <linux/slab.h>
#include <linux/soc/qcom/mdt_loader.h>
+#include <linux/soc/qcom/smem.h>
#include "remoteproc_internal.h"
#include "qcom_common.h"
@@ -23,7 +26,159 @@
#define to_smd_subdev(d) container_of(d, struct qcom_rproc_subdev, subdev)
#define to_ssr_subdev(d) container_of(d, struct qcom_rproc_ssr, subdev)
-static BLOCKING_NOTIFIER_HEAD(ssr_notifiers);
+#define MAX_NUM_OF_SS 10
+#define MAX_REGION_NAME_LENGTH 16
+#define SBL_MINIDUMP_SMEM_ID 602
+#define MD_REGION_VALID ('V' << 24 | 'A' << 16 | 'L' << 8 | 'I' << 0)
+#define MD_SS_ENCR_DONE ('D' << 24 | 'O' << 16 | 'N' << 8 | 'E' << 0)
+#define MD_SS_ENABLED ('E' << 24 | 'N' << 16 | 'B' << 8 | 'L' << 0)
+
+/**
+ * struct minidump_region - Minidump region
+ * @name : Name of the region to be dumped
+ * @seq_num : Used to differentiate regions with the same name
+ * @valid : Indicates whether this entry is to be dumped (if set to 1)
+ * @address : Physical address of region to be dumped
+ * @size : Size of the region
+ */
+struct minidump_region {
+ char name[MAX_REGION_NAME_LENGTH];
+ __le32 seq_num;
+ __le32 valid;
+ __le64 address;
+ __le64 size;
+};
+
+/**
+ * struct minidump_subsystem - Subsystem's SMEM Table of Contents
+ * @status : Subsystem ToC init status
+ * @enabled : If set to 1, this subsystem's regions are copied during coredump
+ * @encryption_status : Encryption status for this subsystem
+ * @encryption_required : Indicates whether the subsystem regions are to be encrypted
+ * @region_count : Number of regions added in this subsystem ToC
+ * @regions_baseptr : Base pointer of the subsystem's regions
+ */
+struct minidump_subsystem {
+ __le32 status;
+ __le32 enabled;
+ __le32 encryption_status;
+ __le32 encryption_required;
+ __le32 region_count;
+ __le64 regions_baseptr;
+};
+
+/**
+ * struct minidump_global_toc - Global Table of Contents
+ * @status : Global minidump init status
+ * @md_revision : Minidump revision
+ * @enabled : Minidump enable status
+ * @subsystems : Array of subsystem ToCs
+ */
+struct minidump_global_toc {
+ __le32 status;
+ __le32 md_revision;
+ __le32 enabled;
+ struct minidump_subsystem subsystems[MAX_NUM_OF_SS];
+};
+
+struct qcom_ssr_subsystem {
+ const char *name;
+ struct srcu_notifier_head notifier_list;
+ struct list_head list;
+};
+
+static LIST_HEAD(qcom_ssr_subsystem_list);
+static DEFINE_MUTEX(qcom_ssr_subsys_lock);
+
+static void qcom_minidump_cleanup(struct rproc *rproc)
+{
+ struct rproc_dump_segment *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &rproc->dump_segments, node) {
+ list_del(&entry->node);
+ kfree(entry->priv);
+ kfree(entry);
+ }
+}
+
+static int qcom_add_minidump_segments(struct rproc *rproc, struct minidump_subsystem *subsystem)
+{
+ struct minidump_region __iomem *ptr;
+ struct minidump_region region;
+ int seg_cnt, i;
+ dma_addr_t da;
+ size_t size;
+ char *name;
+
+ if (WARN_ON(!list_empty(&rproc->dump_segments))) {
+ dev_err(&rproc->dev, "dump segment list already populated\n");
+ return -EUCLEAN;
+ }
+
+ seg_cnt = le32_to_cpu(subsystem->region_count);
+ ptr = ioremap((unsigned long)le64_to_cpu(subsystem->regions_baseptr),
+ seg_cnt * sizeof(struct minidump_region));
+ if (!ptr)
+ return -EFAULT;
+
+ for (i = 0; i < seg_cnt; i++) {
+ memcpy_fromio(&region, ptr + i, sizeof(region));
+ if (region.valid == MD_REGION_VALID) {
+ name = kstrdup(region.name, GFP_KERNEL);
+ if (!name) {
+ iounmap(ptr);
+ return -ENOMEM;
+ }
+ da = le64_to_cpu(region.address);
+ size = le32_to_cpu(region.size);
+ rproc_coredump_add_custom_segment(rproc, da, size, NULL, name);
+ }
+ }
+
+ iounmap(ptr);
+ return 0;
+}
+
+void qcom_minidump(struct rproc *rproc, unsigned int minidump_id)
+{
+ int ret;
+ struct minidump_subsystem *subsystem;
+ struct minidump_global_toc *toc;
+
+	/* Get the global minidump ToC */
+ toc = qcom_smem_get(QCOM_SMEM_HOST_ANY, SBL_MINIDUMP_SMEM_ID, NULL);
+
+ /* check if global table pointer exists and init is set */
+ if (IS_ERR(toc) || !toc->status) {
+ dev_err(&rproc->dev, "Minidump TOC not found in SMEM\n");
+ return;
+ }
+
+ /* Get subsystem table of contents using the minidump id */
+ subsystem = &toc->subsystems[minidump_id];
+
+	/*
+	 * Collect a minidump only if the subsystem's ToC is valid, its
+	 * segment table has been initialized in memory, and its encryption
+	 * status indicates completion.
+	 */
+ if (subsystem->regions_baseptr == 0 ||
+ le32_to_cpu(subsystem->status) != 1 ||
+ le32_to_cpu(subsystem->enabled) != MD_SS_ENABLED ||
+ le32_to_cpu(subsystem->encryption_status) != MD_SS_ENCR_DONE) {
+ dev_err(&rproc->dev, "Minidump not ready, skipping\n");
+ return;
+ }
+
+ ret = qcom_add_minidump_segments(rproc, subsystem);
+ if (ret) {
+ dev_err(&rproc->dev, "Failed with error: %d while adding minidump entries\n", ret);
+ goto clean_minidump;
+ }
+ rproc_coredump_using_sections(rproc);
+clean_minidump:
+ qcom_minidump_cleanup(rproc);
+}
+EXPORT_SYMBOL_GPL(qcom_minidump);
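+
+/*
+ * Illustrative call site (hypothetical driver code, names assumed): a
+ * Qualcomm remoteproc driver could invoke this from its coredump path,
+ * passing its subsystem's minidump id, e.g.:
+ *
+ *	static void my_coredump(struct rproc *rproc)
+ *	{
+ *		qcom_minidump(rproc, MY_MINIDUMP_ID);
+ *	}
+ */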
static int glink_subdev_start(struct rproc_subdev *subdev)
{
@@ -42,12 +197,21 @@ static void glink_subdev_stop(struct rproc_subdev *subdev, bool crashed)
glink->edge = NULL;
}
+static void glink_subdev_unprepare(struct rproc_subdev *subdev)
+{
+ struct qcom_rproc_glink *glink = to_glink_subdev(subdev);
+
+ qcom_glink_ssr_notify(glink->ssr_name);
+}
+
/**
* qcom_add_glink_subdev() - try to add a GLINK subdevice to rproc
* @rproc: rproc handle to parent the subdevice
* @glink: reference to a GLINK subdev context
+ * @ssr_name: identifier of the associated remoteproc for ssr notifications
*/
-void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink)
+void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink,
+ const char *ssr_name)
{
struct device *dev = &rproc->dev;
@@ -55,9 +219,14 @@ void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink)
if (!glink->node)
return;
+ glink->ssr_name = kstrdup_const(ssr_name, GFP_KERNEL);
+ if (!glink->ssr_name)
+ return;
+
glink->dev = dev;
glink->subdev.start = glink_subdev_start;
glink->subdev.stop = glink_subdev_stop;
+ glink->subdev.unprepare = glink_subdev_unprepare;
rproc_add_subdev(rproc, &glink->subdev);
}
@@ -74,6 +243,7 @@ void qcom_remove_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glin
return;
rproc_remove_subdev(rproc, &glink->subdev);
+ kfree_const(glink->ssr_name);
of_node_put(glink->node);
}
EXPORT_SYMBOL_GPL(qcom_remove_glink_subdev);
@@ -174,37 +344,122 @@ void qcom_remove_smd_subdev(struct rproc *rproc, struct qcom_rproc_subdev *smd)
}
EXPORT_SYMBOL_GPL(qcom_remove_smd_subdev);
+static struct qcom_ssr_subsystem *qcom_ssr_get_subsys(const char *name)
+{
+ struct qcom_ssr_subsystem *info;
+
+ mutex_lock(&qcom_ssr_subsys_lock);
+	/* Match the name against the global qcom_ssr_subsystem_list */
+ list_for_each_entry(info, &qcom_ssr_subsystem_list, list)
+ if (!strcmp(info->name, name))
+ goto out;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ info = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+ info->name = kstrdup_const(name, GFP_KERNEL);
+ srcu_init_notifier_head(&info->notifier_list);
+
+ /* Add to global notification list */
+ list_add_tail(&info->list, &qcom_ssr_subsystem_list);
+
+out:
+ mutex_unlock(&qcom_ssr_subsys_lock);
+ return info;
+}
+
/**
* qcom_register_ssr_notifier() - register SSR notification handler
- * @nb: notifier_block to notify for restart notifications
+ * @name: Subsystem's SSR name
+ * @nb: notifier_block to be invoked upon subsystem's state change
*
- * Returns 0 on success, negative errno on failure.
+ * This registers the @nb notifier block as part of the notifier chain for a
+ * remoteproc associated with @name. The notifier block's callback
+ * will be invoked when the remote processor's SSR events occur
+ * (pre/post startup and pre/post shutdown).
*
- * This register the @notify function as handler for restart notifications. As
- * remote processors are stopped this function will be called, with the SSR
- * name passed as a parameter.
+ * Return: a subsystem cookie on success, ERR_PTR on failure.
*/
-int qcom_register_ssr_notifier(struct notifier_block *nb)
+void *qcom_register_ssr_notifier(const char *name, struct notifier_block *nb)
{
- return blocking_notifier_chain_register(&ssr_notifiers, nb);
+ struct qcom_ssr_subsystem *info;
+
+ info = qcom_ssr_get_subsys(name);
+ if (IS_ERR(info))
+ return info;
+
+ srcu_notifier_chain_register(&info->notifier_list, nb);
+
+ return &info->notifier_list;
}
EXPORT_SYMBOL_GPL(qcom_register_ssr_notifier);
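+
+/*
+ * Illustrative client usage (hypothetical code, assuming the "mpss" SSR
+ * name):
+ *
+ *	static int my_ssr_cb(struct notifier_block *nb, unsigned long event,
+ *			     void *data)
+ *	{
+ *		struct qcom_ssr_notify_data *notify = data;
+ *
+ *		pr_info("%s %s\n", notify->name,
+ *			notify->crashed ? "crashed" : "stopping");
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block my_nb = { .notifier_call = my_ssr_cb };
+ *
+ *	cookie = qcom_register_ssr_notifier("mpss", &my_nb);
+ *	...
+ *	qcom_unregister_ssr_notifier(cookie, &my_nb);
+ */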
/**
* qcom_unregister_ssr_notifier() - unregister SSR notification handler
+ * @notify: subsystem cookie returned from qcom_register_ssr_notifier
* @nb: notifier_block to unregister
+ *
+ * This function will unregister the notifier from the particular notifier
+ * chain.
+ *
+ * Return: 0 on success, -ENOENT otherwise.
*/
-void qcom_unregister_ssr_notifier(struct notifier_block *nb)
+int qcom_unregister_ssr_notifier(void *notify, struct notifier_block *nb)
{
- blocking_notifier_chain_unregister(&ssr_notifiers, nb);
+ return srcu_notifier_chain_unregister(notify, nb);
}
EXPORT_SYMBOL_GPL(qcom_unregister_ssr_notifier);
+static int ssr_notify_prepare(struct rproc_subdev *subdev)
+{
+ struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
+ struct qcom_ssr_notify_data data = {
+ .name = ssr->info->name,
+ .crashed = false,
+ };
+
+ srcu_notifier_call_chain(&ssr->info->notifier_list,
+ QCOM_SSR_BEFORE_POWERUP, &data);
+ return 0;
+}
+
+static int ssr_notify_start(struct rproc_subdev *subdev)
+{
+ struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
+ struct qcom_ssr_notify_data data = {
+ .name = ssr->info->name,
+ .crashed = false,
+ };
+
+ srcu_notifier_call_chain(&ssr->info->notifier_list,
+ QCOM_SSR_AFTER_POWERUP, &data);
+ return 0;
+}
+
+static void ssr_notify_stop(struct rproc_subdev *subdev, bool crashed)
+{
+ struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
+ struct qcom_ssr_notify_data data = {
+ .name = ssr->info->name,
+ .crashed = crashed,
+ };
+
+ srcu_notifier_call_chain(&ssr->info->notifier_list,
+ QCOM_SSR_BEFORE_SHUTDOWN, &data);
+}
+
static void ssr_notify_unprepare(struct rproc_subdev *subdev)
{
struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev);
+ struct qcom_ssr_notify_data data = {
+ .name = ssr->info->name,
+ .crashed = false,
+ };
- blocking_notifier_call_chain(&ssr_notifiers, 0, (void *)ssr->name);
+ srcu_notifier_call_chain(&ssr->info->notifier_list,
+ QCOM_SSR_AFTER_SHUTDOWN, &data);
}
/**
@@ -214,12 +469,24 @@ static void ssr_notify_unprepare(struct rproc_subdev *subdev)
* @ssr_name: identifier to use for notifications originating from @rproc
*
* As the @ssr is registered with the @rproc SSR events will be sent to all
- * registered listeners in the system as the remoteproc is shut down.
+ * registered listeners for the remoteproc when its SSR events occur
+ * (pre/post startup and pre/post shutdown).
*/
void qcom_add_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr,
const char *ssr_name)
{
- ssr->name = ssr_name;
+ struct qcom_ssr_subsystem *info;
+
+ info = qcom_ssr_get_subsys(ssr_name);
+ if (IS_ERR(info)) {
+ dev_err(&rproc->dev, "Failed to add ssr subdevice\n");
+ return;
+ }
+
+ ssr->info = info;
+ ssr->subdev.prepare = ssr_notify_prepare;
+ ssr->subdev.start = ssr_notify_start;
+ ssr->subdev.stop = ssr_notify_stop;
ssr->subdev.unprepare = ssr_notify_unprepare;
rproc_add_subdev(rproc, &ssr->subdev);
@@ -234,6 +501,7 @@ EXPORT_SYMBOL_GPL(qcom_add_ssr_subdev);
void qcom_remove_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr)
{
rproc_remove_subdev(rproc, &ssr->subdev);
+ ssr->info = NULL;
}
EXPORT_SYMBOL_GPL(qcom_remove_ssr_subdev);
diff --git a/drivers/remoteproc/qcom_common.h b/drivers/remoteproc/qcom_common.h
index 58de71e4781c..c35adf730be0 100644
--- a/drivers/remoteproc/qcom_common.h
+++ b/drivers/remoteproc/qcom_common.h
@@ -11,6 +11,8 @@ struct qcom_sysmon;
struct qcom_rproc_glink {
struct rproc_subdev subdev;
+ const char *ssr_name;
+
struct device *dev;
struct device_node *node;
struct qcom_glink *edge;
@@ -24,13 +26,17 @@ struct qcom_rproc_subdev {
struct qcom_smd_edge *edge;
};
+struct qcom_ssr_subsystem;
+
struct qcom_rproc_ssr {
struct rproc_subdev subdev;
-
- const char *name;
+ struct qcom_ssr_subsystem *info;
};
-void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink);
+void qcom_minidump(struct rproc *rproc, unsigned int minidump_id);
+
+void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink,
+ const char *ssr_name);
void qcom_remove_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink);
int qcom_register_dump_segments(struct rproc *rproc, const struct firmware *fw);
@@ -47,6 +53,7 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
const char *name,
int ssctl_instance);
void qcom_remove_sysmon_subdev(struct qcom_sysmon *sysmon);
+bool qcom_sysmon_shutdown_acked(struct qcom_sysmon *sysmon);
#else
static inline struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
const char *name,
@@ -58,6 +65,11 @@ static inline struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
static inline void qcom_remove_sysmon_subdev(struct qcom_sysmon *sysmon)
{
}
+
+static inline bool qcom_sysmon_shutdown_acked(struct qcom_sysmon *sysmon)
+{
+ return false;
+}
#endif
#endif
diff --git a/drivers/remoteproc/qcom_pil_info.c b/drivers/remoteproc/qcom_pil_info.c
new file mode 100644
index 000000000000..aca21560e20b
--- /dev/null
+++ b/drivers/remoteproc/qcom_pil_info.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019-2020 Linaro Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_address.h>
+#include "qcom_pil_info.h"
+
+/*
+ * The PIL relocation information region is used to communicate memory regions
+ * occupied by co-processor firmware for post mortem crash analysis.
+ *
+ * It consists of an array of entries with an 8 byte textual identifier of the
+ * region followed by a 64 bit base address and 32 bit size, both little
+ * endian.
+ */
+#define PIL_RELOC_NAME_LEN 8
+#define PIL_RELOC_ENTRY_SIZE (PIL_RELOC_NAME_LEN + sizeof(__le64) + sizeof(__le32))
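+
+/*
+ * Illustrative layout of one 20-byte entry (little endian):
+ *
+ *	byte 0        8                        16        20
+ *	     +--------+------------------------+---------+
+ *	     | name   | base (__le64)          | size    |
+ *	     +--------+------------------------+---------+
+ */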
+
+struct pil_reloc {
+ void __iomem *base;
+ size_t num_entries;
+};
+
+static struct pil_reloc _reloc __read_mostly;
+static DEFINE_MUTEX(pil_reloc_lock);
+
+static int qcom_pil_info_init(void)
+{
+ struct device_node *np;
+ struct resource imem;
+ void __iomem *base;
+ int ret;
+
+ /* Already initialized? */
+ if (_reloc.base)
+ return 0;
+
+ np = of_find_compatible_node(NULL, NULL, "qcom,pil-reloc-info");
+ if (!np)
+ return -ENOENT;
+
+ ret = of_address_to_resource(np, 0, &imem);
+ of_node_put(np);
+ if (ret < 0)
+ return ret;
+
+ base = ioremap(imem.start, resource_size(&imem));
+ if (!base) {
+ pr_err("failed to map PIL relocation info region\n");
+ return -ENOMEM;
+ }
+
+ memset_io(base, 0, resource_size(&imem));
+
+ _reloc.base = base;
+ _reloc.num_entries = (u32)resource_size(&imem) / PIL_RELOC_ENTRY_SIZE;
+
+ return 0;
+}
+
+/**
+ * qcom_pil_info_store() - store PIL information of image in IMEM
+ * @image: name of the image
+ * @base: base address of the loaded image
+ * @size: size of the loaded image
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+int qcom_pil_info_store(const char *image, phys_addr_t base, size_t size)
+{
+ char buf[PIL_RELOC_NAME_LEN];
+ void __iomem *entry;
+ int ret;
+ int i;
+
+ mutex_lock(&pil_reloc_lock);
+ ret = qcom_pil_info_init();
+ if (ret < 0) {
+ mutex_unlock(&pil_reloc_lock);
+ return ret;
+ }
+
+ for (i = 0; i < _reloc.num_entries; i++) {
+ entry = _reloc.base + i * PIL_RELOC_ENTRY_SIZE;
+
+ memcpy_fromio(buf, entry, PIL_RELOC_NAME_LEN);
+
+ /*
+ * An empty record means we didn't find it, given that the
+ * records are packed.
+ */
+ if (!buf[0])
+ goto found_unused;
+
+ if (!strncmp(buf, image, PIL_RELOC_NAME_LEN))
+ goto found_existing;
+ }
+
+ pr_warn("insufficient PIL info slots\n");
+ mutex_unlock(&pil_reloc_lock);
+ return -ENOMEM;
+
+found_unused:
+ memcpy_toio(entry, image, strnlen(image, PIL_RELOC_NAME_LEN));
+found_existing:
+ /* Use two writel() as base is only aligned to 4 bytes on odd entries */
+ writel(base, entry + PIL_RELOC_NAME_LEN);
+ writel((u64)base >> 32, entry + PIL_RELOC_NAME_LEN + 4);
+ writel(size, entry + PIL_RELOC_NAME_LEN + sizeof(__le64));
+ mutex_unlock(&pil_reloc_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_pil_info_store);
+
+static void __exit pil_reloc_exit(void)
+{
+ mutex_lock(&pil_reloc_lock);
+ iounmap(_reloc.base);
+ _reloc.base = NULL;
+ mutex_unlock(&pil_reloc_lock);
+}
+module_exit(pil_reloc_exit);
+
+MODULE_DESCRIPTION("Qualcomm PIL relocation info");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/remoteproc/qcom_pil_info.h b/drivers/remoteproc/qcom_pil_info.h
new file mode 100644
index 000000000000..0dce6142935e
--- /dev/null
+++ b/drivers/remoteproc/qcom_pil_info.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __QCOM_PIL_INFO_H__
+#define __QCOM_PIL_INFO_H__
+
+#include <linux/types.h>
+
+int qcom_pil_info_store(const char *image, phys_addr_t base, size_t size);
+
+#endif
diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
index cb0f4a0be032..5280ec9b5449 100644
--- a/drivers/remoteproc/qcom_q6v5.c
+++ b/drivers/remoteproc/qcom_q6v5.c
@@ -8,13 +8,40 @@
*/
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/soc/qcom/qcom_aoss.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/remoteproc.h>
+#include "qcom_common.h"
#include "qcom_q6v5.h"
+#define Q6V5_LOAD_STATE_MSG_LEN 64
+#define Q6V5_PANIC_DELAY_MS 200
+
+static int q6v5_load_state_toggle(struct qcom_q6v5 *q6v5, bool enable)
+{
+ char buf[Q6V5_LOAD_STATE_MSG_LEN];
+ int ret;
+
+ if (!q6v5->qmp)
+ return 0;
+
+ ret = snprintf(buf, sizeof(buf),
+ "{class: image, res: load_state, name: %s, val: %s}",
+ q6v5->load_state, enable ? "on" : "off");
+
+ WARN_ON(ret >= Q6V5_LOAD_STATE_MSG_LEN);
+
+ ret = qmp_send(q6v5->qmp, buf, sizeof(buf));
+ if (ret)
+ dev_err(q6v5->dev, "failed to toggle load state\n");
+
+ return ret;
+}
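+
+/*
+ * For example (illustrative): enabling the "modem" load state would send
+ * "{class: image, res: load_state, name: modem, val: on}" to the AOSS
+ * over QMP.
+ */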
+
/**
* qcom_q6v5_prepare() - reinitialize the qcom_q6v5 context before start
* @q6v5: reference to qcom_q6v5 context to be reinitialized
@@ -23,6 +50,20 @@
*/
int qcom_q6v5_prepare(struct qcom_q6v5 *q6v5)
{
+ int ret;
+
+ ret = icc_set_bw(q6v5->path, 0, UINT_MAX);
+ if (ret < 0) {
+ dev_err(q6v5->dev, "failed to set bandwidth request\n");
+ return ret;
+ }
+
+ ret = q6v5_load_state_toggle(q6v5, true);
+ if (ret) {
+ icc_set_bw(q6v5->path, 0, 0);
+ return ret;
+ }
+
reinit_completion(&q6v5->start_done);
reinit_completion(&q6v5->stop_done);
@@ -44,6 +85,10 @@ EXPORT_SYMBOL_GPL(qcom_q6v5_prepare);
int qcom_q6v5_unprepare(struct qcom_q6v5 *q6v5)
{
disable_irq(q6v5->handover_irq);
+ q6v5_load_state_toggle(q6v5, false);
+
+ /* Disable interconnect vote, in case handover never happened */
+ icc_set_bw(q6v5->path, 0, 0);
return !q6v5->handover_issued;
}
@@ -127,6 +172,8 @@ static irqreturn_t q6v5_handover_interrupt(int irq, void *data)
if (q6v5->handover)
q6v5->handover(q6v5);
+ icc_set_bw(q6v5->path, 0, 0);
+
q6v5->handover_issued = true;
return IRQ_HANDLED;
@@ -144,13 +191,20 @@ static irqreturn_t q6v5_stop_interrupt(int irq, void *data)
/**
* qcom_q6v5_request_stop() - request the remote processor to stop
* @q6v5: reference to qcom_q6v5 context
+ * @sysmon: reference to the remote's sysmon instance, or NULL
*
* Return: 0 on success, negative errno on failure
*/
-int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5)
+int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5, struct qcom_sysmon *sysmon)
{
int ret;
+ q6v5->running = false;
+
+ /* Don't perform SMP2P dance if sysmon already shut down the remote */
+ if (qcom_sysmon_shutdown_acked(sysmon))
+ return 0;
+
qcom_smem_state_update_bits(q6v5->state,
BIT(q6v5->stop_bit), BIT(q6v5->stop_bit));
@@ -163,17 +217,36 @@ int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5)
EXPORT_SYMBOL_GPL(qcom_q6v5_request_stop);
/**
+ * qcom_q6v5_panic() - panic handler to invoke a stop on the remote
+ * @q6v5: reference to qcom_q6v5 context
+ *
+ * Set the stop bit and sleep in order to allow the remote processor to flush
+ * its caches, etc., for post-mortem debugging.
+ *
+ * Return: 200ms
+ */
+unsigned long qcom_q6v5_panic(struct qcom_q6v5 *q6v5)
+{
+ qcom_smem_state_update_bits(q6v5->state,
+ BIT(q6v5->stop_bit), BIT(q6v5->stop_bit));
+
+ return Q6V5_PANIC_DELAY_MS;
+}
+EXPORT_SYMBOL_GPL(qcom_q6v5_panic);
+
+/**
* qcom_q6v5_init() - initializer of the q6v5 common struct
* @q6v5: handle to be initialized
* @pdev: platform_device reference for acquiring resources
* @rproc: associated remoteproc instance
* @crash_reason: SMEM id for crash reason string, or 0 if none
+ * @load_state: load state resource string
* @handover: function to be called when proxy resources should be released
*
* Return: 0 on success, negative errno on failure
*/
int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
- struct rproc *rproc, int crash_reason,
+ struct rproc *rproc, int crash_reason, const char *load_state,
void (*handover)(struct qcom_q6v5 *q6v5))
{
int ret;
@@ -252,15 +325,45 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
return ret;
}
- q6v5->state = qcom_smem_state_get(&pdev->dev, "stop", &q6v5->stop_bit);
+ q6v5->state = devm_qcom_smem_state_get(&pdev->dev, "stop", &q6v5->stop_bit);
if (IS_ERR(q6v5->state)) {
dev_err(&pdev->dev, "failed to acquire stop state\n");
return PTR_ERR(q6v5->state);
}
+ q6v5->load_state = devm_kstrdup_const(&pdev->dev, load_state, GFP_KERNEL);
+ q6v5->qmp = qmp_get(&pdev->dev);
+ if (IS_ERR(q6v5->qmp)) {
+ if (PTR_ERR(q6v5->qmp) != -ENODEV)
+ return dev_err_probe(&pdev->dev, PTR_ERR(q6v5->qmp),
+ "failed to acquire load state\n");
+ q6v5->qmp = NULL;
+ } else if (!q6v5->load_state) {
+ if (!load_state)
+ dev_err(&pdev->dev, "load state resource string empty\n");
+
+ qmp_put(q6v5->qmp);
+ return load_state ? -ENOMEM : -EINVAL;
+ }
+
+ q6v5->path = devm_of_icc_get(&pdev->dev, NULL);
+ if (IS_ERR(q6v5->path))
+ return dev_err_probe(&pdev->dev, PTR_ERR(q6v5->path),
+ "failed to acquire interconnect path\n");
+
return 0;
}
EXPORT_SYMBOL_GPL(qcom_q6v5_init);
+/**
+ * qcom_q6v5_deinit() - deinitialize the q6v5 common struct
+ * @q6v5: reference to qcom_q6v5 context to be deinitialized
+ */
+void qcom_q6v5_deinit(struct qcom_q6v5 *q6v5)
+{
+ qmp_put(q6v5->qmp);
+}
+EXPORT_SYMBOL_GPL(qcom_q6v5_deinit);
+
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm Peripheral Image Loader for Q6V5");
diff --git a/drivers/remoteproc/qcom_q6v5.h b/drivers/remoteproc/qcom_q6v5.h
index 7ac92c1e0f49..5a859c41896e 100644
--- a/drivers/remoteproc/qcom_q6v5.h
+++ b/drivers/remoteproc/qcom_q6v5.h
@@ -5,15 +5,22 @@
#include <linux/kernel.h>
#include <linux/completion.h>
+#include <linux/soc/qcom/qcom_aoss.h>
+struct icc_path;
struct rproc;
struct qcom_smem_state;
+struct qcom_sysmon;
struct qcom_q6v5 {
struct device *dev;
struct rproc *rproc;
struct qcom_smem_state *state;
+ struct qmp *qmp;
+
+ struct icc_path *path;
+
unsigned stop_bit;
int wdog_irq;
@@ -31,16 +38,19 @@ struct qcom_q6v5 {
bool running;
+ const char *load_state;
void (*handover)(struct qcom_q6v5 *q6v5);
};
int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
- struct rproc *rproc, int crash_reason,
+ struct rproc *rproc, int crash_reason, const char *load_state,
void (*handover)(struct qcom_q6v5 *q6v5));
+void qcom_q6v5_deinit(struct qcom_q6v5 *q6v5);
int qcom_q6v5_prepare(struct qcom_q6v5 *q6v5);
int qcom_q6v5_unprepare(struct qcom_q6v5 *q6v5);
-int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5);
+int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5, struct qcom_sysmon *sysmon);
int qcom_q6v5_wait_for_start(struct qcom_q6v5 *q6v5, int timeout);
+unsigned long qcom_q6v5_panic(struct qcom_q6v5 *q6v5);
#endif
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index e953886b2eb7..2f3b9f54251e 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -26,11 +26,13 @@
#include <linux/soc/qcom/smem_state.h>
#include "qcom_common.h"
+#include "qcom_pil_info.h"
#include "qcom_q6v5.h"
#include "remoteproc_internal.h"
/* time out value */
#define ACK_TIMEOUT 1000
+#define ACK_TIMEOUT_US 1000000
#define BOOT_FSM_TIMEOUT 10000
/* mask values */
#define EVB_MASK GENMASK(27, 4)
@@ -50,6 +52,8 @@
#define QDSP6SS_CORE_CBCR 0x20
#define QDSP6SS_SLEEP_CBCR 0x3c
+#define QCOM_Q6V5_RPROC_PROXY_PD_MAX 3
+
struct adsp_pil_data {
int crash_reason_smem;
const char *firmware_name;
@@ -57,9 +61,13 @@ struct adsp_pil_data {
const char *ssr_name;
const char *sysmon_name;
int ssctl_id;
+ bool is_wpss;
+ bool auto_boot;
const char **clk_ids;
int num_clks;
+ const char **proxy_pd_names;
+ const char *load_state;
};
struct qcom_adsp {
@@ -82,6 +90,7 @@ struct qcom_adsp {
unsigned int halt_lpass;
int crash_reason_smem;
+ const char *info_name;
struct completion start_done;
struct completion stop_done;
@@ -91,11 +100,151 @@ struct qcom_adsp {
void *mem_region;
size_t mem_size;
+ struct device *proxy_pds[QCOM_Q6V5_RPROC_PROXY_PD_MAX];
+ size_t proxy_pd_count;
+
struct qcom_rproc_glink glink_subdev;
struct qcom_rproc_ssr ssr_subdev;
struct qcom_sysmon *sysmon;
+
+ int (*shutdown)(struct qcom_adsp *adsp);
};
+static int qcom_rproc_pds_attach(struct device *dev, struct qcom_adsp *adsp,
+ const char **pd_names)
+{
+ struct device **devs = adsp->proxy_pds;
+ size_t num_pds = 0;
+ int ret;
+ int i;
+
+ if (!pd_names)
+ return 0;
+
+ /* Handle single power domain */
+ if (dev->pm_domain) {
+ devs[0] = dev;
+ pm_runtime_enable(dev);
+ return 1;
+ }
+
+ while (pd_names[num_pds])
+ num_pds++;
+
+ if (num_pds > ARRAY_SIZE(adsp->proxy_pds))
+ return -E2BIG;
+
+ for (i = 0; i < num_pds; i++) {
+ devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
+ if (IS_ERR_OR_NULL(devs[i])) {
+ ret = PTR_ERR(devs[i]) ? : -ENODATA;
+ goto unroll_attach;
+ }
+ }
+
+ return num_pds;
+
+unroll_attach:
+ for (i--; i >= 0; i--)
+ dev_pm_domain_detach(devs[i], false);
+
+ return ret;
+}
+
+static void qcom_rproc_pds_detach(struct qcom_adsp *adsp, struct device **pds,
+ size_t pd_count)
+{
+ struct device *dev = adsp->dev;
+ int i;
+
+ /* Handle single power domain */
+ if (dev->pm_domain && pd_count) {
+ pm_runtime_disable(dev);
+ return;
+ }
+
+ for (i = 0; i < pd_count; i++)
+ dev_pm_domain_detach(pds[i], false);
+}
+
+static int qcom_rproc_pds_enable(struct qcom_adsp *adsp, struct device **pds,
+ size_t pd_count)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < pd_count; i++) {
+ dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
+ ret = pm_runtime_get_sync(pds[i]);
+ if (ret < 0) {
+ pm_runtime_put_noidle(pds[i]);
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ goto unroll_pd_votes;
+ }
+ }
+
+ return 0;
+
+unroll_pd_votes:
+ for (i--; i >= 0; i--) {
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ pm_runtime_put(pds[i]);
+ }
+
+ return ret;
+}
+
+static void qcom_rproc_pds_disable(struct qcom_adsp *adsp, struct device **pds,
+ size_t pd_count)
+{
+ int i;
+
+ for (i = 0; i < pd_count; i++) {
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ pm_runtime_put(pds[i]);
+ }
+}
+
+static int qcom_wpss_shutdown(struct qcom_adsp *adsp)
+{
+ unsigned int val;
+
+ regmap_write(adsp->halt_map, adsp->halt_lpass + LPASS_HALTREQ_REG, 1);
+
+ /* Wait for halt ACK from QDSP6 */
+ regmap_read_poll_timeout(adsp->halt_map,
+ adsp->halt_lpass + LPASS_HALTACK_REG, val,
+ val, 1000, ACK_TIMEOUT_US);
+
+ /* Assert the WPSS PDC Reset */
+ reset_control_assert(adsp->pdc_sync_reset);
+
+ /* Place the WPSS processor into reset */
+ reset_control_assert(adsp->restart);
+
+ /* wait after asserting subsystem restart from AOSS */
+ usleep_range(200, 205);
+
+ /* Remove the WPSS reset */
+ reset_control_deassert(adsp->restart);
+
+ /* De-assert the WPSS PDC Reset */
+ reset_control_deassert(adsp->pdc_sync_reset);
+
+ usleep_range(100, 105);
+
+ clk_bulk_disable_unprepare(adsp->num_clks, adsp->clks);
+
+ regmap_write(adsp->halt_map, adsp->halt_lpass + LPASS_HALTREQ_REG, 0);
+
+ /* Wait for halt ACK from QDSP6 */
+ regmap_read_poll_timeout(adsp->halt_map,
+ adsp->halt_lpass + LPASS_HALTACK_REG, val,
+ !val, 1000, ACK_TIMEOUT_US);
+
+ return 0;
+}
+
static int qcom_adsp_shutdown(struct qcom_adsp *adsp)
{
unsigned long timeout;
@@ -164,10 +313,17 @@ reset:
static int adsp_load(struct rproc *rproc, const struct firmware *fw)
{
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+ int ret;
- return qcom_mdt_load_no_init(adsp->dev, fw, rproc->firmware, 0,
- adsp->mem_region, adsp->mem_phys, adsp->mem_size,
- &adsp->mem_reloc);
+ ret = qcom_mdt_load_no_init(adsp->dev, fw, rproc->firmware, 0,
+ adsp->mem_region, adsp->mem_phys,
+ adsp->mem_size, &adsp->mem_reloc);
+ if (ret)
+ return ret;
+
+ qcom_pil_info_store(adsp->info_name, adsp->mem_phys, adsp->mem_size);
+
+ return 0;
}
static int adsp_start(struct rproc *rproc)
@@ -176,15 +332,17 @@ static int adsp_start(struct rproc *rproc)
int ret;
unsigned int val;
- qcom_q6v5_prepare(&adsp->q6v5);
+ ret = qcom_q6v5_prepare(&adsp->q6v5);
+ if (ret)
+ return ret;
ret = clk_prepare_enable(adsp->xo);
if (ret)
goto disable_irqs;
- dev_pm_genpd_set_performance_state(adsp->dev, INT_MAX);
- ret = pm_runtime_get_sync(adsp->dev);
- if (ret)
+ ret = qcom_rproc_pds_enable(adsp, adsp->proxy_pds,
+ adsp->proxy_pd_count);
+ if (ret < 0)
goto disable_xo_clk;
ret = clk_bulk_prepare_enable(adsp->num_clks, adsp->clks);
@@ -230,8 +388,7 @@ static int adsp_start(struct rproc *rproc)
disable_adsp_clks:
clk_bulk_disable_unprepare(adsp->num_clks, adsp->clks);
disable_power_domain:
- dev_pm_genpd_set_performance_state(adsp->dev, 0);
- pm_runtime_put(adsp->dev);
+ qcom_rproc_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
disable_xo_clk:
clk_disable_unprepare(adsp->xo);
disable_irqs:
@@ -245,8 +402,7 @@ static void qcom_adsp_pil_handover(struct qcom_q6v5 *q6v5)
struct qcom_adsp *adsp = container_of(q6v5, struct qcom_adsp, q6v5);
clk_disable_unprepare(adsp->xo);
- dev_pm_genpd_set_performance_state(adsp->dev, 0);
- pm_runtime_put(adsp->dev);
+ qcom_rproc_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
}
static int adsp_stop(struct rproc *rproc)
@@ -255,11 +411,11 @@ static int adsp_stop(struct rproc *rproc)
int handover;
int ret;
- ret = qcom_q6v5_request_stop(&adsp->q6v5);
+ ret = qcom_q6v5_request_stop(&adsp->q6v5, adsp->sysmon);
if (ret == -ETIMEDOUT)
dev_err(adsp->dev, "timed out on wait\n");
- ret = qcom_adsp_shutdown(adsp);
+ ret = adsp->shutdown(adsp);
if (ret)
dev_err(adsp->dev, "failed to shutdown: %d\n", ret);
@@ -270,7 +426,7 @@ static int adsp_stop(struct rproc *rproc)
return ret;
}
-static void *adsp_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
int offset;
@@ -282,12 +438,20 @@ static void *adsp_da_to_va(struct rproc *rproc, u64 da, int len)
return adsp->mem_region + offset;
}
+static unsigned long adsp_panic(struct rproc *rproc)
+{
+ struct qcom_adsp *adsp = rproc->priv;
+
+ return qcom_q6v5_panic(&adsp->q6v5);
+}
+
static const struct rproc_ops adsp_ops = {
.start = adsp_start,
.stop = adsp_stop,
.da_to_va = adsp_da_to_va,
.parse_fw = qcom_register_dump_segments,
.load = adsp_load,
+ .panic = adsp_panic,
};
static int adsp_init_clock(struct qcom_adsp *adsp, const char **clk_ids)
@@ -345,15 +509,12 @@ static int adsp_init_mmio(struct qcom_adsp *adsp,
struct platform_device *pdev)
{
struct device_node *syscon;
- struct resource *res;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- adsp->qdsp6ss_base = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!adsp->qdsp6ss_base) {
+ adsp->qdsp6ss_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(adsp->qdsp6ss_base)) {
dev_err(adsp->dev, "failed to map QDSP6SS registers\n");
- return -ENOMEM;
+ return PTR_ERR(adsp->qdsp6ss_base);
}
syscon = of_parse_phandle(pdev->dev.of_node, "qcom,halt-regs", 0);
@@ -390,6 +551,7 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
}
ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
if (ret)
return ret;
@@ -409,6 +571,7 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
static int adsp_probe(struct platform_device *pdev)
{
const struct adsp_pil_data *desc;
+ const char *firmware_name;
struct qcom_adsp *adsp;
struct rproc *rproc;
int ret;
@@ -417,18 +580,35 @@ static int adsp_probe(struct platform_device *pdev)
if (!desc)
return -EINVAL;
+ firmware_name = desc->firmware_name;
+ ret = of_property_read_string(pdev->dev.of_node, "firmware-name",
+ &firmware_name);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(&pdev->dev, "unable to read firmware-name\n");
+ return ret;
+ }
+
rproc = rproc_alloc(&pdev->dev, pdev->name, &adsp_ops,
- desc->firmware_name, sizeof(*adsp));
+ firmware_name, sizeof(*adsp));
if (!rproc) {
dev_err(&pdev->dev, "unable to allocate remoteproc\n");
return -ENOMEM;
}
+ rproc->auto_boot = desc->auto_boot;
+ rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
+
adsp = (struct qcom_adsp *)rproc->priv;
adsp->dev = &pdev->dev;
adsp->rproc = rproc;
+ adsp->info_name = desc->sysmon_name;
platform_set_drvdata(pdev, adsp);
+ if (desc->is_wpss)
+ adsp->shutdown = qcom_wpss_shutdown;
+ else
+ adsp->shutdown = qcom_adsp_shutdown;
+
ret = adsp_alloc_memory_region(adsp);
if (ret)
goto free_rproc;
@@ -437,7 +617,13 @@ static int adsp_probe(struct platform_device *pdev)
if (ret)
goto free_rproc;
- pm_runtime_enable(adsp->dev);
+ ret = qcom_rproc_pds_attach(adsp->dev, adsp,
+ desc->proxy_pd_names);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to attach proxy power domains\n");
+ goto free_rproc;
+ }
+ adsp->proxy_pd_count = ret;
ret = adsp_init_reset(adsp);
if (ret)
@@ -448,11 +634,11 @@ static int adsp_probe(struct platform_device *pdev)
goto disable_pm;
ret = qcom_q6v5_init(&adsp->q6v5, pdev, rproc, desc->crash_reason_smem,
- qcom_adsp_pil_handover);
+ desc->load_state, qcom_adsp_pil_handover);
if (ret)
goto disable_pm;
- qcom_add_glink_subdev(rproc, &adsp->glink_subdev);
+ qcom_add_glink_subdev(rproc, &adsp->glink_subdev, desc->ssr_name);
qcom_add_ssr_subdev(rproc, &adsp->ssr_subdev, desc->ssr_name);
adsp->sysmon = qcom_add_sysmon_subdev(rproc,
desc->sysmon_name,
@@ -469,7 +655,8 @@ static int adsp_probe(struct platform_device *pdev)
return 0;
disable_pm:
- pm_runtime_disable(adsp->dev);
+ qcom_rproc_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+
free_rproc:
rproc_free(rproc);
@@ -482,10 +669,11 @@ static int adsp_remove(struct platform_device *pdev)
rproc_del(adsp->rproc);
+ qcom_q6v5_deinit(&adsp->q6v5);
qcom_remove_glink_subdev(adsp->rproc, &adsp->glink_subdev);
qcom_remove_sysmon_subdev(adsp->sysmon);
qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev);
- pm_runtime_disable(adsp->dev);
+ qcom_rproc_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
rproc_free(adsp->rproc);
return 0;
@@ -497,11 +685,16 @@ static const struct adsp_pil_data adsp_resource_init = {
.ssr_name = "lpass",
.sysmon_name = "adsp",
.ssctl_id = 0x14,
+ .is_wpss = false,
+ .auto_boot = true,
.clk_ids = (const char*[]) {
"sway_cbcr", "lpass_ahbs_aon_cbcr", "lpass_ahbm_aon_cbcr",
"qdsp6ss_xo", "qdsp6ss_sleep", "qdsp6ss_core", NULL
},
.num_clks = 7,
+ .proxy_pd_names = (const char*[]) {
+ "cx", NULL
+ },
};
static const struct adsp_pil_data cdsp_resource_init = {
@@ -510,15 +703,39 @@ static const struct adsp_pil_data cdsp_resource_init = {
.ssr_name = "cdsp",
.sysmon_name = "cdsp",
.ssctl_id = 0x17,
+ .is_wpss = false,
+ .auto_boot = true,
.clk_ids = (const char*[]) {
"sway", "tbu", "bimc", "ahb_aon", "q6ss_slave", "q6ss_master",
"q6_axim", NULL
},
.num_clks = 7,
+ .proxy_pd_names = (const char*[]) {
+ "cx", NULL
+ },
+};
+
+static const struct adsp_pil_data wpss_resource_init = {
+ .crash_reason_smem = 626,
+ .firmware_name = "wpss.mdt",
+ .ssr_name = "wpss",
+ .sysmon_name = "wpss",
+ .ssctl_id = 0x19,
+ .is_wpss = true,
+ .auto_boot = false,
+ .load_state = "wpss",
+ .clk_ids = (const char*[]) {
+ "ahb_bdg", "ahb", "rscp", NULL
+ },
+ .num_clks = 3,
+ .proxy_pd_names = (const char*[]) {
+ "cx", "mx", NULL
+ },
};
static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,qcs404-cdsp-pil", .data = &cdsp_resource_init },
+ { .compatible = "qcom,sc7280-wpss-pil", .data = &wpss_resource_init },
{ .compatible = "qcom,sdm845-adsp-pil", .data = &adsp_resource_init },
{ },
};
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index 471128a2e723..af217de75e4d 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/devcoredump.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -25,15 +26,19 @@
#include <linux/reset.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/iopoll.h>
+#include <linux/slab.h>
#include "remoteproc_internal.h"
#include "qcom_common.h"
+#include "qcom_pil_info.h"
#include "qcom_q6v5.h"
#include <linux/qcom_scm.h>
#define MPSS_CRASH_REASON_SMEM 421
+#define MBA_LOG_SIZE SZ_4K
+
/* RMB Status Register Values */
#define RMB_PBL_SUCCESS 0x1
@@ -68,14 +73,28 @@
#define AXI_HALTREQ_REG 0x0
#define AXI_HALTACK_REG 0x4
#define AXI_IDLE_REG 0x8
+#define AXI_GATING_VALID_OVERRIDE BIT(0)
+
+#define HALT_ACK_TIMEOUT_US 100000
+
+/* QACCEPT Register Offsets */
+#define QACCEPT_ACCEPT_REG 0x0
+#define QACCEPT_ACTIVE_REG 0x4
+#define QACCEPT_DENY_REG 0x8
+#define QACCEPT_REQ_REG 0xC
-#define HALT_ACK_TIMEOUT_MS 100
+#define QACCEPT_TIMEOUT_US 50
/* QDSP6SS_RESET */
#define Q6SS_STOP_CORE BIT(0)
#define Q6SS_CORE_ARES BIT(1)
#define Q6SS_BUS_ARES_ENABLE BIT(2)
+/* QDSP6SS CBCR */
+#define Q6SS_CBCR_CLKEN BIT(0)
+#define Q6SS_CBCR_CLKOFF BIT(31)
+#define Q6SS_CBCR_TIMEOUT_US 200
+
/* QDSP6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE BIT(1)
@@ -96,15 +115,14 @@
#define QDSP6v56_BHS_ON BIT(24)
#define QDSP6v56_CLAMP_WL BIT(21)
#define QDSP6v56_CLAMP_QMC_MEM BIT(22)
-#define HALT_CHECK_MAX_LOOPS 200
#define QDSP6SS_XO_CBCR 0x0038
#define QDSP6SS_ACC_OVERRIDE_VAL 0x20
/* QDSP6v65 parameters */
+#define QDSP6SS_CORE_CBCR 0x20
#define QDSP6SS_SLEEP 0x3C
#define QDSP6SS_BOOT_CORE_START 0x400
#define QDSP6SS_BOOT_CMD 0x404
-#define SLEEP_CHECK_MAX_LOOPS 200
#define BOOT_FSM_TIMEOUT 10000
struct reg_info {
@@ -122,15 +140,20 @@ struct qcom_mss_reg_res {
struct rproc_hexagon_res {
const char *hexagon_mba_image;
struct qcom_mss_reg_res *proxy_supply;
+ struct qcom_mss_reg_res *fallback_proxy_supply;
struct qcom_mss_reg_res *active_supply;
char **proxy_clk_names;
char **reset_clk_names;
char **active_clk_names;
- char **active_pd_names;
char **proxy_pd_names;
int version;
bool need_mem_protection;
bool has_alt_reset;
+ bool has_mba_logs;
+ bool has_spare_reg;
+ bool has_qaccept_regs;
+ bool has_ext_cntl_regs;
+ bool has_vq6;
};
struct q6v5 {
@@ -141,9 +164,22 @@ struct q6v5 {
void __iomem *rmb_base;
struct regmap *halt_map;
+ struct regmap *conn_map;
+
u32 halt_q6;
u32 halt_modem;
u32 halt_nc;
+ u32 halt_vq6;
+ u32 conn_box;
+
+ u32 qaccept_mdm;
+ u32 qaccept_cx;
+ u32 qaccept_axi;
+
+ u32 axim1_clk_off;
+ u32 crypto_clk_off;
+ u32 force_clk_on;
+ u32 rscc_disable;
struct reset_control *mss_restart;
struct reset_control *pdc_reset;
@@ -153,40 +189,43 @@ struct q6v5 {
struct clk *active_clks[8];
struct clk *reset_clks[4];
struct clk *proxy_clks[4];
- struct device *active_pds[1];
struct device *proxy_pds[3];
int active_clk_count;
int reset_clk_count;
int proxy_clk_count;
- int active_pd_count;
int proxy_pd_count;
struct reg_info active_regs[1];
- struct reg_info proxy_regs[3];
+ struct reg_info proxy_regs[1];
+ struct reg_info fallback_proxy_regs[2];
int active_reg_count;
int proxy_reg_count;
-
- bool running;
+ int fallback_proxy_reg_count;
bool dump_mba_loaded;
- unsigned long dump_segment_mask;
- unsigned long dump_complete_mask;
+ size_t current_dump_size;
+ size_t total_dump_size;
phys_addr_t mba_phys;
- void *mba_region;
size_t mba_size;
+ size_t dp_size;
phys_addr_t mpss_phys;
phys_addr_t mpss_reloc;
- void *mpss_region;
size_t mpss_size;
struct qcom_rproc_glink glink_subdev;
struct qcom_rproc_subdev smd_subdev;
struct qcom_rproc_ssr ssr_subdev;
struct qcom_sysmon *sysmon;
+ struct platform_device *bam_dmux;
bool need_mem_protection;
bool has_alt_reset;
+ bool has_mba_logs;
+ bool has_spare_reg;
+ bool has_qaccept_regs;
+ bool has_ext_cntl_regs;
+ bool has_vq6;
int mpss_perm;
int mba_perm;
const char *hexagon_mdt_image;
@@ -198,6 +237,8 @@ enum {
MSS_MSM8974,
MSS_MSM8996,
MSS_MSM8998,
+ MSS_SC7180,
+ MSS_SC7280,
MSS_SDM845,
};
@@ -333,8 +374,11 @@ static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
for (i = 0; i < pd_count; i++) {
dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
ret = pm_runtime_get_sync(pds[i]);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(pds[i]);
+ dev_pm_genpd_set_performance_state(pds[i], 0);
goto unroll_pd_votes;
+ }
}
return 0;
@@ -346,7 +390,7 @@ unroll_pd_votes:
}
return ret;
-};
+}
static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
size_t pd_count)
@@ -360,30 +404,71 @@ static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
}
static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
- bool remote_owner, phys_addr_t addr,
+ bool local, bool remote, phys_addr_t addr,
size_t size)
{
- struct qcom_scm_vmperm next;
+ struct qcom_scm_vmperm next[2];
+ int perms = 0;
if (!qproc->need_mem_protection)
return 0;
- if (remote_owner && *current_perm == BIT(QCOM_SCM_VMID_MSS_MSA))
- return 0;
- if (!remote_owner && *current_perm == BIT(QCOM_SCM_VMID_HLOS))
+
+ if (local == !!(*current_perm & BIT(QCOM_SCM_VMID_HLOS)) &&
+ remote == !!(*current_perm & BIT(QCOM_SCM_VMID_MSS_MSA)))
return 0;
- next.vmid = remote_owner ? QCOM_SCM_VMID_MSS_MSA : QCOM_SCM_VMID_HLOS;
- next.perm = remote_owner ? QCOM_SCM_PERM_RW : QCOM_SCM_PERM_RWX;
+ if (local) {
+ next[perms].vmid = QCOM_SCM_VMID_HLOS;
+ next[perms].perm = QCOM_SCM_PERM_RWX;
+ perms++;
+ }
+
+ if (remote) {
+ next[perms].vmid = QCOM_SCM_VMID_MSS_MSA;
+ next[perms].perm = QCOM_SCM_PERM_RW;
+ perms++;
+ }
return qcom_scm_assign_mem(addr, ALIGN(size, SZ_4K),
- current_perm, &next, 1);
+ current_perm, next, perms);
+}
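/*
 * Illustrative usage sketch (not part of the patch; pa/sz are
 * placeholders): the new (local, remote) pair selects between three
 * ownership states for a region, replacing the old single
 * remote_owner flag.
 *
 *   q6v5_xfer_mem_ownership(qproc, &perm, true,  false, pa, sz);  // HLOS only, RWX
 *   q6v5_xfer_mem_ownership(qproc, &perm, true,  true,  pa, sz);  // shared during load
 *   q6v5_xfer_mem_ownership(qproc, &perm, false, true,  pa, sz);  // MSS only, RW
 */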
+
+static void q6v5_debug_policy_load(struct q6v5 *qproc, void *mba_region)
+{
+ const struct firmware *dp_fw;
+
+ if (request_firmware_direct(&dp_fw, "msadp", qproc->dev))
+ return;
+
+ if (SZ_1M + dp_fw->size <= qproc->mba_size) {
+ memcpy(mba_region + SZ_1M, dp_fw->data, dp_fw->size);
+ qproc->dp_size = dp_fw->size;
+ }
+
+ release_firmware(dp_fw);
}
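/*
 * Illustrative layout sketch (inferred from the code above and the RMB
 * programming in q6v5_mba_load() below): with a debug policy present,
 * the MBA region holds the MBA image at offset 0 (capped at SZ_1M by
 * q6v5_load()) and the optional "msadp" blob at offset SZ_1M, whose
 * size is recorded in qproc->dp_size and later written to
 * RMB_PMI_CODE_START_REG/RMB_PMI_CODE_LENGTH_REG.
 */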
static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
{
struct q6v5 *qproc = rproc->priv;
+ void *mba_region;
+
+ /* MBA is restricted to a maximum size of 1M */
+ if (fw->size > qproc->mba_size || fw->size > SZ_1M) {
+ dev_err(qproc->dev, "MBA firmware load failed\n");
+ return -EINVAL;
+ }
+
+ mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC);
+ if (!mba_region) {
+ dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
+ &qproc->mba_phys, qproc->mba_size);
+ return -EBUSY;
+ }
- memcpy(qproc->mba_region, fw->data, fw->size);
+ memcpy(mba_region, fw->data, fw->size);
+ q6v5_debug_policy_load(qproc, mba_region);
+ memunmap(mba_region);
return 0;
}
@@ -396,6 +481,30 @@ static int q6v5_reset_assert(struct q6v5 *qproc)
reset_control_assert(qproc->pdc_reset);
ret = reset_control_reset(qproc->mss_restart);
reset_control_deassert(qproc->pdc_reset);
+ } else if (qproc->has_spare_reg) {
+ /*
+ * When the AXI pipeline is being reset while the Q6 modem is partly
+ * operational, the AXI valid signal can glitch, leading to spurious
+ * transactions and Q6 hangs. As a workaround, the
+ * AXI_GATING_VALID_OVERRIDE bit is asserted before triggering the Q6
+ * MSS reset and withdrawn between the MSS assert and the final MSS
+ * deassert.
+ */
+ reset_control_assert(qproc->pdc_reset);
+ regmap_update_bits(qproc->conn_map, qproc->conn_box,
+ AXI_GATING_VALID_OVERRIDE, 1);
+ reset_control_assert(qproc->mss_restart);
+ reset_control_deassert(qproc->pdc_reset);
+ regmap_update_bits(qproc->conn_map, qproc->conn_box,
+ AXI_GATING_VALID_OVERRIDE, 0);
+ ret = reset_control_deassert(qproc->mss_restart);
+ } else if (qproc->has_ext_cntl_regs) {
+ regmap_write(qproc->conn_map, qproc->rscc_disable, 0);
+ reset_control_assert(qproc->pdc_reset);
+ reset_control_assert(qproc->mss_restart);
+ reset_control_deassert(qproc->pdc_reset);
+ ret = reset_control_deassert(qproc->mss_restart);
} else {
ret = reset_control_assert(qproc->mss_restart);
}
@@ -413,6 +522,8 @@ static int q6v5_reset_deassert(struct q6v5 *qproc)
ret = reset_control_reset(qproc->mss_restart);
writel(0, qproc->rmb_base + RMB_MBA_ALT_RESET);
reset_control_deassert(qproc->pdc_reset);
+ } else if (qproc->has_spare_reg || qproc->has_ext_cntl_regs) {
+ ret = reset_control_reset(qproc->mss_restart);
} else {
ret = reset_control_deassert(qproc->mss_restart);
}
@@ -466,6 +577,31 @@ static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms)
return val;
}
+static void q6v5_dump_mba_logs(struct q6v5 *qproc)
+{
+ struct rproc *rproc = qproc->rproc;
+ void *data;
+ void *mba_region;
+
+ if (!qproc->has_mba_logs)
+ return;
+
+ if (q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, qproc->mba_phys,
+ qproc->mba_size))
+ return;
+
+ mba_region = memremap(qproc->mba_phys, qproc->mba_size, MEMREMAP_WC);
+ if (!mba_region)
+ return;
+
+ data = vmalloc(MBA_LOG_SIZE);
+ if (data) {
+ memcpy(data, mba_region, MBA_LOG_SIZE);
+ dev_coredumpv(&rproc->dev, data, MBA_LOG_SIZE, GFP_KERNEL);
+ }
+ memunmap(mba_region);
+}
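/*
 * Usage note: dev_coredumpv() hands the buffer to the devcoredump
 * framework, so the MBA log appears in userspace under
 * /sys/class/devcoredump/devcd*/data; writing to that file releases
 * the dump. The framework also frees the vmalloc'd buffer, which is
 * why there is no vfree() on the success path above.
 */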
+
static int q6v5proc_reset(struct q6v5 *qproc)
{
u32 val;
@@ -474,12 +610,12 @@ static int q6v5proc_reset(struct q6v5 *qproc)
if (qproc->version == MSS_SDM845) {
val = readl(qproc->reg_base + QDSP6SS_SLEEP);
- val |= 0x1;
+ val |= Q6SS_CBCR_CLKEN;
writel(val, qproc->reg_base + QDSP6SS_SLEEP);
ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
- val, !(val & BIT(31)), 1,
- SLEEP_CHECK_MAX_LOOPS);
+ val, !(val & Q6SS_CBCR_CLKOFF), 1,
+ Q6SS_CBCR_TIMEOUT_US);
if (ret) {
dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
return -ETIMEDOUT;
@@ -500,6 +636,56 @@ static int q6v5proc_reset(struct q6v5 *qproc)
}
goto pbl_wait;
+ } else if (qproc->version == MSS_SC7180 || qproc->version == MSS_SC7280) {
+ val = readl(qproc->reg_base + QDSP6SS_SLEEP);
+ val |= Q6SS_CBCR_CLKEN;
+ writel(val, qproc->reg_base + QDSP6SS_SLEEP);
+
+ ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_SLEEP,
+ val, !(val & Q6SS_CBCR_CLKOFF), 1,
+ Q6SS_CBCR_TIMEOUT_US);
+ if (ret) {
+ dev_err(qproc->dev, "QDSP6SS Sleep clock timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Turn on the XO clock needed for PLL setup */
+ val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
+ val |= Q6SS_CBCR_CLKEN;
+ writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
+
+ ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
+ val, !(val & Q6SS_CBCR_CLKOFF), 1,
+ Q6SS_CBCR_TIMEOUT_US);
+ if (ret) {
+ dev_err(qproc->dev, "QDSP6SS XO clock timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Configure Q6 core CBCR to auto-enable after reset sequence */
+ val = readl(qproc->reg_base + QDSP6SS_CORE_CBCR);
+ val |= Q6SS_CBCR_CLKEN;
+ writel(val, qproc->reg_base + QDSP6SS_CORE_CBCR);
+
+ /* De-assert the Q6 stop core signal */
+ writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START);
+
+ /* Wait for 10 us for any staggering logic to settle */
+ usleep_range(10, 20);
+
+ /* Trigger the boot FSM to start the Q6 out-of-reset sequence */
+ writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD);
+
+ /* Poll the MSS_STATUS for FSM completion */
+ ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS,
+ val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT);
+ if (ret) {
+ dev_err(qproc->dev, "Boot FSM failed to complete.\n");
+ /* Reset the modem so that boot FSM is in reset state */
+ q6v5_reset_deassert(qproc);
+ return ret;
+ }
+ goto pbl_wait;
} else if (qproc->version == MSS_MSM8996 ||
qproc->version == MSS_MSM8998) {
int mem_pwr_ctl;
@@ -515,13 +701,13 @@ static int q6v5proc_reset(struct q6v5 *qproc)
/* BHS require xo cbcr to be enabled */
val = readl(qproc->reg_base + QDSP6SS_XO_CBCR);
- val |= 0x1;
+ val |= Q6SS_CBCR_CLKEN;
writel(val, qproc->reg_base + QDSP6SS_XO_CBCR);
/* Read CLKOFF bit to go low indicating CLK is enabled */
ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_XO_CBCR,
- val, !(val & BIT(31)), 1,
- HALT_CHECK_MAX_LOOPS);
+ val, !(val & Q6SS_CBCR_CLKOFF), 1,
+ Q6SS_CBCR_TIMEOUT_US);
if (ret) {
dev_err(qproc->dev,
"xo cbcr enabling timed out (rc:%d)\n", ret);
@@ -633,11 +819,93 @@ pbl_wait:
return ret;
}
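/*
 * A minimal sketch (not in the patch) of the CBCR enable pattern used
 * repeatedly above, factored into a helper; the Q6SS_CBCR_* values are
 * the definitions added at the top of this file:
 */
static int q6v5_cbcr_enable(struct q6v5 *qproc, u32 reg)
{
	u32 val;

	/* Set CLKEN, then wait for CLKOFF to deassert (clock running) */
	val = readl(qproc->reg_base + reg);
	val |= Q6SS_CBCR_CLKEN;
	writel(val, qproc->reg_base + reg);

	return readl_poll_timeout(qproc->reg_base + reg, val,
				  !(val & Q6SS_CBCR_CLKOFF), 1,
				  Q6SS_CBCR_TIMEOUT_US);
}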
+static int q6v5proc_enable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset)
+{
+ unsigned int val;
+ int ret;
+
+ if (!qproc->has_qaccept_regs)
+ return 0;
+
+ if (qproc->has_ext_cntl_regs) {
+ regmap_write(qproc->conn_map, qproc->rscc_disable, 0);
+ regmap_write(qproc->conn_map, qproc->force_clk_on, 1);
+
+ ret = regmap_read_poll_timeout(qproc->halt_map, qproc->axim1_clk_off, val,
+ !val, 1, Q6SS_CBCR_TIMEOUT_US);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable axim1 clock\n");
+ return -ETIMEDOUT;
+ }
+ }
+
+ regmap_write(map, offset + QACCEPT_REQ_REG, 1);
+
+ /* Wait for accept */
+ ret = regmap_read_poll_timeout(map, offset + QACCEPT_ACCEPT_REG, val, val, 5,
+ QACCEPT_TIMEOUT_US);
+ if (ret) {
+ dev_err(qproc->dev, "qchannel enable failed\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void q6v5proc_disable_qchannel(struct q6v5 *qproc, struct regmap *map, u32 offset)
+{
+ int ret;
+ unsigned int val, retry;
+ unsigned int nretry = 10;
+ bool takedown_complete = false;
+
+ if (!qproc->has_qaccept_regs)
+ return;
+
+ while (!takedown_complete && nretry) {
+ nretry--;
+
+ /* Wait for active transactions to complete */
+ regmap_read_poll_timeout(map, offset + QACCEPT_ACTIVE_REG, val, !val, 5,
+ QACCEPT_TIMEOUT_US);
+
+ /* Request Q-channel transaction takedown */
+ regmap_write(map, offset + QACCEPT_REQ_REG, 0);
+
+ /*
+ * If the request is denied, reset the Q-channel takedown request,
+ * wait for active transactions to complete and retry takedown.
+ */
+ retry = 10;
+ while (retry) {
+ usleep_range(5, 10);
+ retry--;
+ ret = regmap_read(map, offset + QACCEPT_DENY_REG, &val);
+ if (!ret && val) {
+ regmap_write(map, offset + QACCEPT_REQ_REG, 1);
+ break;
+ }
+
+ ret = regmap_read(map, offset + QACCEPT_ACCEPT_REG, &val);
+ if (!ret && !val) {
+ takedown_complete = true;
+ break;
+ }
+ }
+
+ if (!retry)
+ break;
+ }
+
+ /* Rely on mss_restart to clear out pending transactions on takedown failure */
+ if (!takedown_complete)
+ dev_err(qproc->dev, "qchannel takedown failed\n");
+}
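/*
 * Q-channel handshake summary (illustrative, inferred from the two
 * helpers above):
 *
 *   enable:  REQ=1 --> poll ACCEPT==1
 *   disable: wait ACTIVE==0, REQ=0 --> poll ACCEPT==0 (done)
 *                                  \-> DENY==1: REQ=1, retry
 *
 * A takedown that still fails after the retries is left for the
 * subsequent mss_restart to clean up.
 */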
+
static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
struct regmap *halt_map,
u32 offset)
{
- unsigned long timeout;
unsigned int val;
int ret;
@@ -650,14 +918,8 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
regmap_write(halt_map, offset + AXI_HALTREQ_REG, 1);
/* Wait for halt */
- timeout = jiffies + msecs_to_jiffies(HALT_ACK_TIMEOUT_MS);
- for (;;) {
- ret = regmap_read(halt_map, offset + AXI_HALTACK_REG, &val);
- if (ret || val || time_after(jiffies, timeout))
- break;
-
- msleep(1);
- }
+ regmap_read_poll_timeout(halt_map, offset + AXI_HALTACK_REG, val,
+ val, 1000, HALT_ACK_TIMEOUT_US);
ret = regmap_read(halt_map, offset + AXI_IDLE_REG, &val);
if (ret || !val)
@@ -667,7 +929,8 @@ static void q6v5proc_halt_axi_port(struct q6v5 *qproc,
regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}
-static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
+static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw,
+ const char *fw_name)
{
unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
dma_addr_t phys;
@@ -678,7 +941,7 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
void *ptr;
int ret;
- metadata = qcom_mdt_read_metadata(fw, &size);
+ metadata = qcom_mdt_read_metadata(fw, &size, fw_name, qproc->dev);
if (IS_ERR(metadata))
return PTR_ERR(metadata);
@@ -693,7 +956,8 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
/* Hypervisor mapping to access metadata by modem */
mdata_perm = BIT(QCOM_SCM_VMID_HLOS);
- ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, phys, size);
+ ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, true,
+ phys, size);
if (ret) {
dev_err(qproc->dev,
"assigning Q6 access to metadata failed: %d\n", ret);
@@ -711,7 +975,8 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);
/* Metadata authentication done, remove modem access */
- xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, false, phys, size);
+ xferop_ret = q6v5_xfer_mem_ownership(qproc, &mdata_perm, true, false,
+ phys, size);
if (xferop_ret)
dev_warn(qproc->dev,
"mdt buffer not reclaimed system may become unstable\n");
@@ -741,26 +1006,30 @@ static int q6v5_mba_load(struct q6v5 *qproc)
{
int ret;
int xfermemop_ret;
+ bool mba_load_err = false;
- qcom_q6v5_prepare(&qproc->q6v5);
+ ret = qcom_q6v5_prepare(&qproc->q6v5);
+ if (ret)
+ return ret;
- ret = q6v5_pds_enable(qproc, qproc->active_pds, qproc->active_pd_count);
+ ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
if (ret < 0) {
- dev_err(qproc->dev, "failed to enable active power domains\n");
+ dev_err(qproc->dev, "failed to enable proxy power domains\n");
goto disable_irqs;
}
- ret = q6v5_pds_enable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
- if (ret < 0) {
- dev_err(qproc->dev, "failed to enable proxy power domains\n");
- goto disable_active_pds;
+ ret = q6v5_regulator_enable(qproc, qproc->fallback_proxy_regs,
+ qproc->fallback_proxy_reg_count);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable fallback proxy supplies\n");
+ goto disable_proxy_pds;
}
ret = q6v5_regulator_enable(qproc, qproc->proxy_regs,
qproc->proxy_reg_count);
if (ret) {
dev_err(qproc->dev, "failed to enable proxy supplies\n");
- goto disable_proxy_pds;
+ goto disable_fallback_proxy_reg;
}
ret = q6v5_clk_enable(qproc->dev, qproc->proxy_clks,
@@ -797,8 +1066,25 @@ static int q6v5_mba_load(struct q6v5 *qproc)
goto assert_reset;
}
+ ret = q6v5proc_enable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
+ if (ret) {
+ dev_err(qproc->dev, "failed to enable axi bridge\n");
+ goto disable_active_clks;
+ }
+
+ /*
+ * Some versions of the MBA firmware wipe the MPSS region on boot as
+ * well, so grant the Q6 access to this region.
+ */
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
+ qproc->mpss_phys, qproc->mpss_size);
+ if (ret) {
+ dev_err(qproc->dev, "assigning Q6 access to mpss memory failed: %d\n", ret);
+ goto disable_active_clks;
+ }
+
/* Assign MBA image access in DDR to q6 */
- ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false, true,
qproc->mba_phys, qproc->mba_size);
if (ret) {
dev_err(qproc->dev,
@@ -807,6 +1093,10 @@ static int q6v5_mba_load(struct q6v5 *qproc)
}
writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG);
+ if (qproc->dp_size) {
+ writel(qproc->mba_phys + SZ_1M, qproc->rmb_base + RMB_PMI_CODE_START_REG);
+ writel(qproc->dp_size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
+ }
ret = q6v5proc_reset(qproc);
if (ret)
@@ -828,16 +1118,23 @@ static int q6v5_mba_load(struct q6v5 *qproc)
halt_axi_ports:
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
+ if (qproc->has_vq6)
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_vq6);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
-
+ q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm);
+ q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx);
+ q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
+ mba_load_err = true;
reclaim_mba:
- xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
- qproc->mba_phys,
+ xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
+ false, qproc->mba_phys,
qproc->mba_size);
if (xfermemop_ret) {
dev_err(qproc->dev,
"Failed to reclaim mba buffer, system may become unstable\n");
+ } else if (mba_load_err) {
+ q6v5_dump_mba_logs(qproc);
}
disable_active_clks:
@@ -857,10 +1154,11 @@ disable_proxy_clk:
disable_proxy_reg:
q6v5_regulator_disable(qproc, qproc->proxy_regs,
qproc->proxy_reg_count);
+disable_fallback_proxy_reg:
+ q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
+ qproc->fallback_proxy_reg_count);
disable_proxy_pds:
q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
-disable_active_pds:
- q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
disable_irqs:
qcom_q6v5_unprepare(&qproc->q6v5);
@@ -873,8 +1171,11 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
u32 val;
qproc->dump_mba_loaded = false;
+ qproc->dp_size = 0;
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6);
+ if (qproc->has_vq6)
+ q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_vq6);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem);
q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc);
if (qproc->version == MSS_MSM8996) {
@@ -887,10 +1188,23 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
writel(val, qproc->reg_base + QDSP6SS_PWR_CTL_REG);
}
- ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
- false, qproc->mpss_phys,
- qproc->mpss_size);
- WARN_ON(ret);
+ if (qproc->has_ext_cntl_regs) {
+ regmap_write(qproc->conn_map, qproc->rscc_disable, 1);
+
+ ret = regmap_read_poll_timeout(qproc->halt_map, qproc->axim1_clk_off, val,
+ !val, 1, Q6SS_CBCR_TIMEOUT_US);
+ if (ret)
+ dev_err(qproc->dev, "failed to enable axim1 clock\n");
+
+ ret = regmap_read_poll_timeout(qproc->halt_map, qproc->crypto_clk_off, val,
+ !val, 1, Q6SS_CBCR_TIMEOUT_US);
+ if (ret)
+ dev_err(qproc->dev, "failed to enable crypto clock\n");
+ }
+
+ q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_mdm);
+ q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_cx);
+ q6v5proc_disable_qchannel(qproc, qproc->halt_map, qproc->qaccept_axi);
q6v5_reset_assert(qproc);
@@ -900,12 +1214,11 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
qproc->active_clk_count);
q6v5_regulator_disable(qproc, qproc->active_regs,
qproc->active_reg_count);
- q6v5_pds_disable(qproc, qproc->active_pds, qproc->active_pd_count);
/* In case of a failure or coredump scenario where reclaiming MBA
 * memory could not happen, reclaim it here.
 */
- ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false,
qproc->mba_phys,
qproc->mba_size);
WARN_ON(ret);
@@ -916,11 +1229,30 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc)
qproc->proxy_pd_count);
q6v5_clk_disable(qproc->dev, qproc->proxy_clks,
qproc->proxy_clk_count);
+ q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
+ qproc->fallback_proxy_reg_count);
q6v5_regulator_disable(qproc, qproc->proxy_regs,
qproc->proxy_reg_count);
}
}
+static int q6v5_reload_mba(struct rproc *rproc)
+{
+ struct q6v5 *qproc = rproc->priv;
+ const struct firmware *fw;
+ int ret;
+
+ ret = request_firmware(&fw, rproc->firmware, qproc->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = q6v5_load(rproc, fw);
+ if (!ret)
+ ret = q6v5_mba_load(qproc);
+ release_firmware(fw);
+
+ return ret;
+}
+
static int q6v5_mpss_load(struct q6v5 *qproc)
{
const struct elf32_phdr *phdrs;
@@ -932,6 +1264,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
phys_addr_t boot_addr;
phys_addr_t min_addr = PHYS_ADDR_MAX;
phys_addr_t max_addr = 0;
+ u32 code_length;
bool relocate = false;
char *fw_name;
size_t fw_name_len;
@@ -958,7 +1291,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
/* Initialize the RMB validator */
writel(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
- ret = q6v5_mpss_init_image(qproc, fw);
+ ret = q6v5_mpss_init_image(qproc, fw, qproc->hexagon_mdt_image);
if (ret)
goto release_firmware;
@@ -981,6 +1314,23 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
max_addr = ALIGN(phdr->p_paddr + phdr->p_memsz, SZ_4K);
}
+ /*
+ * In case of a modem subsystem restart on secure devices, the modem
+ * memory can be reclaimed only after the MBA is loaded.
+ */
+ q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, false,
+ qproc->mpss_phys, qproc->mpss_size);
+
+ /* Share ownership between Linux and MSS, during segment loading */
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true, true,
+ qproc->mpss_phys, qproc->mpss_size);
+ if (ret) {
+ dev_err(qproc->dev,
+ "assigning Q6 access to mpss memory failed: %d\n", ret);
+ ret = -EAGAIN;
+ goto release_firmware;
+ }
+
mpss_reloc = relocate ? min_addr : qproc->mpss_phys;
qproc->mpss_reloc = mpss_reloc;
/* Load firmware segments */
@@ -997,7 +1347,21 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
goto release_firmware;
}
- ptr = qproc->mpss_region + offset;
+ if (phdr->p_filesz > phdr->p_memsz) {
+ dev_err(qproc->dev,
+ "refusing to load segment %d with p_filesz > p_memsz\n",
+ i);
+ ret = -EINVAL;
+ goto release_firmware;
+ }
+
+ ptr = memremap(qproc->mpss_phys + offset, phdr->p_memsz, MEMREMAP_WC);
+ if (!ptr) {
+ dev_err(qproc->dev,
+ "unable to map memory region: %pa+%zx-%x\n",
+ &qproc->mpss_phys, offset, phdr->p_memsz);
+ ret = -EBUSY;
+ goto release_firmware;
+ }
if (phdr->p_filesz && phdr->p_offset < fw->size) {
/* Firmware is large enough to be non-split */
@@ -1006,6 +1370,7 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
"failed to load segment %d from truncated file %s\n",
i, fw_name);
ret = -EINVAL;
+ memunmap(ptr);
goto release_firmware;
}
@@ -1013,13 +1378,23 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
} else if (phdr->p_filesz) {
/* Replace "xxx.xxx" with "xxx.bxx" */
sprintf(fw_name + fw_name_len - 3, "b%02d", i);
- ret = request_firmware(&seg_fw, fw_name, qproc->dev);
+ ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev,
+ ptr, phdr->p_filesz);
if (ret) {
dev_err(qproc->dev, "failed to load %s\n", fw_name);
+ memunmap(ptr);
goto release_firmware;
}
- memcpy(ptr, seg_fw->data, seg_fw->size);
+ if (seg_fw->size != phdr->p_filesz) {
+ dev_err(qproc->dev,
+ "failed to load segment %d from truncated file %s\n",
+ i, fw_name);
+ ret = -EINVAL;
+ release_firmware(seg_fw);
+ memunmap(ptr);
+ goto release_firmware;
+ }
release_firmware(seg_fw);
}
@@ -1028,11 +1403,27 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
memset(ptr + phdr->p_filesz, 0,
phdr->p_memsz - phdr->p_filesz);
}
+ memunmap(ptr);
size += phdr->p_memsz;
+
+ code_length = readl(qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
+ if (!code_length) {
+ boot_addr = relocate ? qproc->mpss_phys : min_addr;
+ writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
+ writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
+ }
+ writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
+
+ ret = readl(qproc->rmb_base + RMB_MBA_STATUS_REG);
+ if (ret < 0) {
+ dev_err(qproc->dev, "MPSS authentication failed: %d\n",
+ ret);
+ goto release_firmware;
+ }
}
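/*
 * Illustrative summary of the incremental RMB hand-off above: on the
 * first segment (code_length == 0) the MBA is told where the image
 * starts and that loading has begun; every segment then extends
 * RMB_PMI_CODE_LENGTH_REG so the MBA can authenticate the region as it
 * grows, and a negative RMB_MBA_STATUS_REG aborts the load early.
 */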
/* Transfer ownership of modem ddr region to q6 */
- ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, true,
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, false, true,
qproc->mpss_phys, qproc->mpss_size);
if (ret) {
dev_err(qproc->dev,
@@ -1041,17 +1432,14 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
goto release_firmware;
}
- boot_addr = relocate ? qproc->mpss_phys : min_addr;
- writel(boot_addr, qproc->rmb_base + RMB_PMI_CODE_START_REG);
- writel(RMB_CMD_LOAD_READY, qproc->rmb_base + RMB_MBA_COMMAND_REG);
- writel(size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG);
-
ret = q6v5_rmb_mba_wait(qproc, RMB_MBA_AUTH_COMPLETE, 10000);
if (ret == -ETIMEDOUT)
dev_err(qproc->dev, "MPSS authentication timed out\n");
else if (ret < 0)
dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret);
+ qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size);
+
release_firmware:
release_firmware(fw);
out:
@@ -1062,28 +1450,47 @@ out:
static void qcom_q6v5_dump_segment(struct rproc *rproc,
struct rproc_dump_segment *segment,
- void *dest)
+ void *dest, size_t cp_offset, size_t size)
{
int ret = 0;
struct q6v5 *qproc = rproc->priv;
- unsigned long mask = BIT((unsigned long)segment->priv);
- void *ptr = rproc_da_to_va(rproc, segment->da, segment->size);
+ int offset = segment->da - qproc->mpss_reloc;
+ void *ptr = NULL;
/* Unlock mba before copying segments */
- if (!qproc->dump_mba_loaded)
- ret = q6v5_mba_load(qproc);
+ if (!qproc->dump_mba_loaded) {
+ ret = q6v5_reload_mba(rproc);
+ if (!ret) {
+ /* Reset ownership back to Linux to copy segments */
+ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
+ true, false,
+ qproc->mpss_phys,
+ qproc->mpss_size);
+ }
+ }
+
+ if (!ret)
+ ptr = memremap(qproc->mpss_phys + offset + cp_offset, size, MEMREMAP_WC);
- if (!ptr || ret)
- memset(dest, 0xff, segment->size);
- else
- memcpy(dest, ptr, segment->size);
+ if (ptr) {
+ memcpy(dest, ptr, size);
+ memunmap(ptr);
+ } else {
+ memset(dest, 0xff, size);
+ }
- qproc->dump_segment_mask |= mask;
+ qproc->current_dump_size += size;
/* Reclaim mba after copying segments */
- if (qproc->dump_segment_mask == qproc->dump_complete_mask) {
- if (qproc->dump_mba_loaded)
+ if (qproc->current_dump_size == qproc->total_dump_size) {
+ if (qproc->dump_mba_loaded) {
+ /* Try to reset ownership back to Q6 */
+ q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
+ false, true,
+ qproc->mpss_phys,
+ qproc->mpss_size);
q6v5_mba_reclaim(qproc);
+ }
}
}
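/*
 * Coredump flow implied above: reload the MBA to unlock the MPSS
 * region, take ownership back to Linux, memremap and copy each
 * segment, and once current_dump_size reaches total_dump_size hand the
 * region back to the Q6 and reclaim the MBA.
 */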
@@ -1097,7 +1504,8 @@ static int q6v5_start(struct rproc *rproc)
if (ret)
return ret;
- dev_info(qproc->dev, "MBA booted, loading mpss\n");
+ dev_info(qproc->dev, "MBA booted with%s debug policy, loading mpss\n",
+ qproc->dp_size ? "" : "out");
ret = q6v5_mpss_load(qproc);
if (ret)
@@ -1109,25 +1517,21 @@ static int q6v5_start(struct rproc *rproc)
goto reclaim_mpss;
}
- xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, false,
- qproc->mba_phys,
+ xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
+ false, qproc->mba_phys,
qproc->mba_size);
if (xfermemop_ret)
dev_err(qproc->dev,
"Failed to reclaim mba buffer system may become unstable\n");
/* Reset Dump Segment Mask */
- qproc->dump_segment_mask = 0;
- qproc->running = true;
+ qproc->current_dump_size = 0;
return 0;
reclaim_mpss:
- xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm,
- false, qproc->mpss_phys,
- qproc->mpss_size);
- WARN_ON(xfermemop_ret);
q6v5_mba_reclaim(qproc);
+ q6v5_dump_mba_logs(qproc);
return ret;
}
@@ -1137,9 +1541,7 @@ static int q6v5_stop(struct rproc *rproc)
struct q6v5 *qproc = (struct q6v5 *)rproc->priv;
int ret;
- qproc->running = false;
-
- ret = qcom_q6v5_request_stop(&qproc->q6v5);
+ ret = qcom_q6v5_request_stop(&qproc->q6v5, qproc->sysmon);
if (ret == -ETIMEDOUT)
dev_err(qproc->dev, "timed out on wait\n");
@@ -1148,18 +1550,6 @@ static int q6v5_stop(struct rproc *rproc)
return 0;
}
-static void *q6v5_da_to_va(struct rproc *rproc, u64 da, int len)
-{
- struct q6v5 *qproc = rproc->priv;
- int offset;
-
- offset = da - qproc->mpss_reloc;
- if (offset < 0 || offset + len > qproc->mpss_size)
- return NULL;
-
- return qproc->mpss_region + offset;
-}
-
static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
const struct firmware *mba_fw)
{
@@ -1178,9 +1568,11 @@ static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
return ret;
}
+ rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
+
ehdr = (struct elf32_hdr *)fw->data;
phdrs = (struct elf32_phdr *)(ehdr + 1);
- qproc->dump_complete_mask = 0;
+ qproc->total_dump_size = 0;
for (i = 0; i < ehdr->e_phnum; i++) {
phdr = &phdrs[i];
@@ -1191,11 +1583,11 @@ static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr,
phdr->p_memsz,
qcom_q6v5_dump_segment,
- (void *)i);
+ NULL);
if (ret)
break;
- qproc->dump_complete_mask |= BIT(i);
+ qproc->total_dump_size += phdr->p_memsz;
}
release_firmware(fw);
@@ -1205,7 +1597,6 @@ static int qcom_q6v5_register_dump_segments(struct rproc *rproc,
static const struct rproc_ops q6v5_ops = {
.start = q6v5_start,
.stop = q6v5_stop,
- .da_to_va = q6v5_da_to_va,
.parse_fw = qcom_q6v5_register_dump_segments,
.load = q6v5_load,
};
@@ -1218,27 +1609,30 @@ static void qcom_msa_handover(struct qcom_q6v5 *q6v5)
qproc->proxy_clk_count);
q6v5_regulator_disable(qproc, qproc->proxy_regs,
qproc->proxy_reg_count);
+ q6v5_regulator_disable(qproc, qproc->fallback_proxy_regs,
+ qproc->fallback_proxy_reg_count);
q6v5_pds_disable(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
}
static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
{
struct of_phandle_args args;
- struct resource *res;
+ int halt_cell_cnt = 3;
int ret;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
- qproc->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ qproc->reg_base = devm_platform_ioremap_resource_byname(pdev, "qdsp6");
if (IS_ERR(qproc->reg_base))
return PTR_ERR(qproc->reg_base);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
- qproc->rmb_base = devm_ioremap_resource(&pdev->dev, res);
+ qproc->rmb_base = devm_platform_ioremap_resource_byname(pdev, "rmb");
if (IS_ERR(qproc->rmb_base))
return PTR_ERR(qproc->rmb_base);
+ if (qproc->has_vq6)
+ halt_cell_cnt++;
+
ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
- "qcom,halt-regs", 3, 0, &args);
+ "qcom,halt-regs", halt_cell_cnt, 0, &args);
if (ret < 0) {
dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
return -EINVAL;
@@ -1253,6 +1647,69 @@ static int q6v5_init_mem(struct q6v5 *qproc, struct platform_device *pdev)
qproc->halt_modem = args.args[1];
qproc->halt_nc = args.args[2];
+ if (qproc->has_vq6)
+ qproc->halt_vq6 = args.args[3];
+
+ if (qproc->has_qaccept_regs) {
+ ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
+ "qcom,qaccept-regs",
+ 3, 0, &args);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to parse qaccept-regs\n");
+ return -EINVAL;
+ }
+
+ qproc->qaccept_mdm = args.args[0];
+ qproc->qaccept_cx = args.args[1];
+ qproc->qaccept_axi = args.args[2];
+ }
+
+ if (qproc->has_ext_cntl_regs) {
+ ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
+ "qcom,ext-regs",
+ 2, 0, &args);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to parse ext-regs index 0\n");
+ return -EINVAL;
+ }
+
+ qproc->conn_map = syscon_node_to_regmap(args.np);
+ of_node_put(args.np);
+ if (IS_ERR(qproc->conn_map))
+ return PTR_ERR(qproc->conn_map);
+
+ qproc->force_clk_on = args.args[0];
+ qproc->rscc_disable = args.args[1];
+
+ ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
+ "qcom,ext-regs",
+ 2, 1, &args);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to parse ext-regs index 1\n");
+ return -EINVAL;
+ }
+
+ qproc->axim1_clk_off = args.args[0];
+ qproc->crypto_clk_off = args.args[1];
+ }
+
+ if (qproc->has_spare_reg) {
+ ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
+ "qcom,spare-regs",
+ 1, 0, &args);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to parse spare-regs\n");
+ return -EINVAL;
+ }
+
+ qproc->conn_map = syscon_node_to_regmap(args.np);
+ of_node_put(args.np);
+ if (IS_ERR(qproc->conn_map))
+ return PTR_ERR(qproc->conn_map);
+
+ qproc->conn_box = args.args[0];
+ }
+
return 0;
}
@@ -1307,7 +1764,7 @@ unroll_attach:
dev_pm_domain_detach(devs[i], false);
return ret;
-};
+}
static void q6v5_pds_detach(struct q6v5 *qproc, struct device **pds,
size_t pd_count)
@@ -1327,7 +1784,7 @@ static int q6v5_init_reset(struct q6v5 *qproc)
return PTR_ERR(qproc->mss_restart);
}
- if (qproc->has_alt_reset) {
+ if (qproc->has_alt_reset || qproc->has_spare_reg || qproc->has_ext_cntl_regs) {
qproc->pdc_reset = devm_reset_control_get_exclusive(qproc->dev,
"pdc_reset");
if (IS_ERR(qproc->pdc_reset)) {
@@ -1346,41 +1803,47 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
struct resource r;
int ret;
+ /*
+ * In the absence of mba/mpss sub-nodes, extract the mba and mpss
+ * reserved memory regions from the device's memory-region property.
+ */
child = of_get_child_by_name(qproc->dev->of_node, "mba");
- node = of_parse_phandle(child, "memory-region", 0);
+ if (!child) {
+ node = of_parse_phandle(qproc->dev->of_node,
+ "memory-region", 0);
+ } else {
+ node = of_parse_phandle(child, "memory-region", 0);
+ of_node_put(child);
+ }
+
ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
if (ret) {
dev_err(qproc->dev, "unable to resolve mba region\n");
return ret;
}
- of_node_put(node);
qproc->mba_phys = r.start;
qproc->mba_size = resource_size(&r);
- qproc->mba_region = devm_ioremap_wc(qproc->dev, qproc->mba_phys, qproc->mba_size);
- if (!qproc->mba_region) {
- dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
- &r.start, qproc->mba_size);
- return -EBUSY;
+
+ if (!child) {
+ node = of_parse_phandle(qproc->dev->of_node,
+ "memory-region", 1);
+ } else {
+ child = of_get_child_by_name(qproc->dev->of_node, "mpss");
+ node = of_parse_phandle(child, "memory-region", 0);
+ of_node_put(child);
}
- child = of_get_child_by_name(qproc->dev->of_node, "mpss");
- node = of_parse_phandle(child, "memory-region", 0);
ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
if (ret) {
dev_err(qproc->dev, "unable to resolve mpss region\n");
return ret;
}
- of_node_put(node);
qproc->mpss_phys = qproc->mpss_reloc = r.start;
qproc->mpss_size = resource_size(&r);
- qproc->mpss_region = devm_ioremap_wc(qproc->dev, qproc->mpss_phys, qproc->mpss_size);
- if (!qproc->mpss_region) {
- dev_err(qproc->dev, "unable to map memory region: %pa+%zx\n",
- &r.start, qproc->mpss_size);
- return -EBUSY;
- }
return 0;
}
@@ -1388,6 +1851,7 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc)
static int q6v5_probe(struct platform_device *pdev)
{
const struct rproc_hexagon_res *desc;
+ struct device_node *node;
struct q6v5 *qproc;
struct rproc *rproc;
const char *mba_image;
@@ -1403,8 +1867,10 @@ static int q6v5_probe(struct platform_device *pdev)
mba_image = desc->hexagon_mba_image;
ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
0, &mba_image);
- if (ret < 0 && ret != -EINVAL)
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(&pdev->dev, "unable to read mba firmware-name\n");
return ret;
+ }
rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
mba_image, sizeof(*qproc));
@@ -1414,6 +1880,7 @@ static int q6v5_probe(struct platform_device *pdev)
}
rproc->auto_boot = false;
+ rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
qproc = (struct q6v5 *)rproc->priv;
qproc->dev = &pdev->dev;
@@ -1421,11 +1888,17 @@ static int q6v5_probe(struct platform_device *pdev)
qproc->hexagon_mdt_image = "modem.mdt";
ret = of_property_read_string_index(pdev->dev.of_node, "firmware-name",
1, &qproc->hexagon_mdt_image);
- if (ret < 0 && ret != -EINVAL)
- return ret;
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(&pdev->dev, "unable to read mpss firmware-name\n");
+ goto free_rproc;
+ }
platform_set_drvdata(pdev, qproc);
+ qproc->has_qaccept_regs = desc->has_qaccept_regs;
+ qproc->has_ext_cntl_regs = desc->has_ext_cntl_regs;
+ qproc->has_vq6 = desc->has_vq6;
+ qproc->has_spare_reg = desc->has_spare_reg;
ret = q6v5_init_mem(qproc, pdev);
if (ret)
goto free_rproc;
@@ -1474,21 +1947,24 @@ static int q6v5_probe(struct platform_device *pdev)
}
qproc->active_reg_count = ret;
- ret = q6v5_pds_attach(&pdev->dev, qproc->active_pds,
- desc->active_pd_names);
- if (ret < 0) {
- dev_err(&pdev->dev, "Failed to attach active power domains\n");
- goto free_rproc;
- }
- qproc->active_pd_count = ret;
-
ret = q6v5_pds_attach(&pdev->dev, qproc->proxy_pds,
desc->proxy_pd_names);
- if (ret < 0) {
+ /* Fall back to regulators for old device trees */
+ if (ret == -ENODATA && desc->fallback_proxy_supply) {
+ ret = q6v5_regulator_init(&pdev->dev,
+ qproc->fallback_proxy_regs,
+ desc->fallback_proxy_supply);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get fallback proxy regulators.\n");
+ goto free_rproc;
+ }
+ qproc->fallback_proxy_reg_count = ret;
+ } else if (ret < 0) {
dev_err(&pdev->dev, "Failed to init power domains\n");
- goto detach_active_pds;
+ goto free_rproc;
+ } else {
+ qproc->proxy_pd_count = ret;
}
- qproc->proxy_pd_count = ret;
qproc->has_alt_reset = desc->has_alt_reset;
ret = q6v5_init_reset(qproc);
@@ -1497,33 +1973,42 @@ static int q6v5_probe(struct platform_device *pdev)
qproc->version = desc->version;
qproc->need_mem_protection = desc->need_mem_protection;
+ qproc->has_mba_logs = desc->has_mba_logs;
- ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM,
+ ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM, "modem",
qcom_msa_handover);
if (ret)
goto detach_proxy_pds;
qproc->mpss_perm = BIT(QCOM_SCM_VMID_HLOS);
qproc->mba_perm = BIT(QCOM_SCM_VMID_HLOS);
- qcom_add_glink_subdev(rproc, &qproc->glink_subdev);
+ qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss");
qcom_add_smd_subdev(rproc, &qproc->smd_subdev);
qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss");
qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12);
if (IS_ERR(qproc->sysmon)) {
ret = PTR_ERR(qproc->sysmon);
- goto detach_proxy_pds;
+ goto remove_subdevs;
}
ret = rproc_add(rproc);
if (ret)
- goto detach_proxy_pds;
+ goto remove_sysmon_subdev;
+
+ node = of_get_compatible_child(pdev->dev.of_node, "qcom,bam-dmux");
+ qproc->bam_dmux = of_platform_device_create(node, NULL, &pdev->dev);
+ of_node_put(node);
return 0;
+remove_sysmon_subdev:
+ qcom_remove_sysmon_subdev(qproc->sysmon);
+remove_subdevs:
+ qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
+ qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
+ qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
detach_proxy_pds:
q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
-detach_active_pds:
- q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
free_rproc:
rproc_free(rproc);
@@ -1533,22 +2018,86 @@ free_rproc:
static int q6v5_remove(struct platform_device *pdev)
{
struct q6v5 *qproc = platform_get_drvdata(pdev);
+ struct rproc *rproc = qproc->rproc;
- rproc_del(qproc->rproc);
+ if (qproc->bam_dmux)
+ of_platform_device_destroy(&qproc->bam_dmux->dev, NULL);
+ rproc_del(rproc);
+ qcom_q6v5_deinit(&qproc->q6v5);
qcom_remove_sysmon_subdev(qproc->sysmon);
- qcom_remove_glink_subdev(qproc->rproc, &qproc->glink_subdev);
- qcom_remove_smd_subdev(qproc->rproc, &qproc->smd_subdev);
- qcom_remove_ssr_subdev(qproc->rproc, &qproc->ssr_subdev);
+ qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev);
+ qcom_remove_smd_subdev(rproc, &qproc->smd_subdev);
+ qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
- q6v5_pds_detach(qproc, qproc->active_pds, qproc->active_pd_count);
q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
- rproc_free(qproc->rproc);
+ rproc_free(rproc);
return 0;
}
+static const struct rproc_hexagon_res sc7180_mss = {
+ .hexagon_mba_image = "mba.mbn",
+ .proxy_clk_names = (char*[]){
+ "xo",
+ NULL
+ },
+ .reset_clk_names = (char*[]){
+ "iface",
+ "bus",
+ "snoc_axi",
+ NULL
+ },
+ .active_clk_names = (char*[]){
+ "mnoc_axi",
+ "nav",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mx",
+ "mss",
+ NULL
+ },
+ .need_mem_protection = true,
+ .has_alt_reset = false,
+ .has_mba_logs = true,
+ .has_spare_reg = true,
+ .has_qaccept_regs = false,
+ .has_ext_cntl_regs = false,
+ .has_vq6 = false,
+ .version = MSS_SC7180,
+};
+
+static const struct rproc_hexagon_res sc7280_mss = {
+ .hexagon_mba_image = "mba.mbn",
+ .proxy_clk_names = (char*[]){
+ "xo",
+ "pka",
+ NULL
+ },
+ .active_clk_names = (char*[]){
+ "iface",
+ "offline",
+ "snoc_axi",
+ NULL
+ },
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mss",
+ NULL
+ },
+ .need_mem_protection = true,
+ .has_alt_reset = false,
+ .has_mba_logs = true,
+ .has_spare_reg = false,
+ .has_qaccept_regs = true,
+ .has_ext_cntl_regs = true,
+ .has_vq6 = true,
+ .version = MSS_SC7280,
+};
+
static const struct rproc_hexagon_res sdm845_mss = {
.hexagon_mba_image = "mba.mbn",
.proxy_clk_names = (char*[]){
@@ -1568,10 +2117,6 @@ static const struct rproc_hexagon_res sdm845_mss = {
"mnoc_axi",
NULL
},
- .active_pd_names = (char*[]){
- "load_state",
- NULL
- },
.proxy_pd_names = (char*[]){
"cx",
"mx",
@@ -1580,6 +2125,11 @@ static const struct rproc_hexagon_res sdm845_mss = {
},
.need_mem_protection = true,
.has_alt_reset = true,
+ .has_mba_logs = false,
+ .has_spare_reg = false,
+ .has_qaccept_regs = false,
+ .has_ext_cntl_regs = false,
+ .has_vq6 = false,
.version = MSS_SDM845,
};
@@ -1594,7 +2144,6 @@ static const struct rproc_hexagon_res msm8998_mss = {
.active_clk_names = (char*[]){
"iface",
"bus",
- "mem",
"gpll0_mss",
"mnoc_axi",
"snoc_axi",
@@ -1607,6 +2156,11 @@ static const struct rproc_hexagon_res msm8998_mss = {
},
.need_mem_protection = true,
.has_alt_reset = false,
+ .has_mba_logs = false,
+ .has_spare_reg = false,
+ .has_qaccept_regs = false,
+ .has_ext_cntl_regs = false,
+ .has_vq6 = false,
.version = MSS_MSM8998,
};
@@ -1636,6 +2190,11 @@ static const struct rproc_hexagon_res msm8996_mss = {
},
.need_mem_protection = true,
.has_alt_reset = false,
+ .has_mba_logs = false,
+ .has_spare_reg = false,
+ .has_qaccept_regs = false,
+ .has_ext_cntl_regs = false,
+ .has_vq6 = false,
.version = MSS_MSM8996,
};
@@ -1643,6 +2202,13 @@ static const struct rproc_hexagon_res msm8916_mss = {
.hexagon_mba_image = "mba.mbn",
.proxy_supply = (struct qcom_mss_reg_res[]) {
{
+ .supply = "pll",
+ .uA = 100000,
+ },
+ {}
+ },
+ .fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
+ {
.supply = "mx",
.uV = 1050000,
},
@@ -1650,10 +2216,6 @@ static const struct rproc_hexagon_res msm8916_mss = {
.supply = "cx",
.uA = 100000,
},
- {
- .supply = "pll",
- .uA = 100000,
- },
{}
},
.proxy_clk_names = (char*[]){
@@ -1666,8 +2228,18 @@ static const struct rproc_hexagon_res msm8916_mss = {
"mem",
NULL
},
+ .proxy_pd_names = (char*[]){
+ "mx",
+ "cx",
+ NULL
+ },
.need_mem_protection = false,
.has_alt_reset = false,
+ .has_mba_logs = false,
+ .has_spare_reg = false,
+ .has_qaccept_regs = false,
+ .has_ext_cntl_regs = false,
+ .has_vq6 = false,
.version = MSS_MSM8916,
};
@@ -1675,6 +2247,13 @@ static const struct rproc_hexagon_res msm8974_mss = {
.hexagon_mba_image = "mba.b00",
.proxy_supply = (struct qcom_mss_reg_res[]) {
{
+ .supply = "pll",
+ .uA = 100000,
+ },
+ {}
+ },
+ .fallback_proxy_supply = (struct qcom_mss_reg_res[]) {
+ {
.supply = "mx",
.uV = 1050000,
},
@@ -1682,10 +2261,6 @@ static const struct rproc_hexagon_res msm8974_mss = {
.supply = "cx",
.uA = 100000,
},
- {
- .supply = "pll",
- .uA = 100000,
- },
{}
},
.active_supply = (struct qcom_mss_reg_res[]) {
@@ -1706,8 +2281,18 @@ static const struct rproc_hexagon_res msm8974_mss = {
"mem",
NULL
},
+ .proxy_pd_names = (char*[]){
+ "mx",
+ "cx",
+ NULL
+ },
.need_mem_protection = false,
.has_alt_reset = false,
+ .has_mba_logs = false,
+ .has_spare_reg = false,
+ .has_qaccept_regs = false,
+ .has_ext_cntl_regs = false,
+ .has_vq6 = false,
.version = MSS_MSM8974,
};
@@ -1717,6 +2302,8 @@ static const struct of_device_id q6v5_of_match[] = {
{ .compatible = "qcom,msm8974-mss-pil", .data = &msm8974_mss},
{ .compatible = "qcom,msm8996-mss-pil", .data = &msm8996_mss},
{ .compatible = "qcom,msm8998-mss-pil", .data = &msm8998_mss},
+ { .compatible = "qcom,sc7180-mss-pil", .data = &sc7180_mss},
+ { .compatible = "qcom,sc7280-mss-pil", .data = &sc7280_mss},
{ .compatible = "qcom,sdm845-mss-pil", .data = &sdm845_mss},
{ },
};
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index db4b3c4bacd7..6ae39c5653b1 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -15,6 +15,8 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
#include <linux/qcom_scm.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
@@ -23,6 +25,7 @@
#include <linux/soc/qcom/smem_state.h>
#include "qcom_common.h"
+#include "qcom_pil_info.h"
#include "qcom_q6v5.h"
#include "remoteproc_internal.h"
@@ -30,8 +33,13 @@ struct adsp_data {
int crash_reason_smem;
const char *firmware_name;
int pas_id;
+ unsigned int minidump_id;
bool has_aggre2_clk;
+ bool auto_boot;
+ char **proxy_pd_names;
+
+ const char *load_state;
const char *ssr_name;
const char *sysmon_name;
int ssctl_id;
@@ -49,9 +57,15 @@ struct qcom_adsp {
struct regulator *cx_supply;
struct regulator *px_supply;
+ struct device *proxy_pds[3];
+
+ int proxy_pd_count;
+
int pas_id;
+ unsigned int minidump_id;
int crash_reason_smem;
bool has_aggre2_clk;
+ const char *info_name;
struct completion start_done;
struct completion stop_done;
@@ -65,16 +79,89 @@ struct qcom_adsp {
struct qcom_rproc_subdev smd_subdev;
struct qcom_rproc_ssr ssr_subdev;
struct qcom_sysmon *sysmon;
+
+ struct qcom_scm_pas_metadata pas_metadata;
};
+static void adsp_minidump(struct rproc *rproc)
+{
+ struct qcom_adsp *adsp = rproc->priv;
+
+ qcom_minidump(rproc, adsp->minidump_id);
+}
+
+static int adsp_pds_enable(struct qcom_adsp *adsp, struct device **pds,
+ size_t pd_count)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < pd_count; i++) {
+ dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
+ ret = pm_runtime_get_sync(pds[i]);
+ if (ret < 0) {
+ pm_runtime_put_noidle(pds[i]);
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ goto unroll_pd_votes;
+ }
+ }
+
+ return 0;
+
+unroll_pd_votes:
+ for (i--; i >= 0; i--) {
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ pm_runtime_put(pds[i]);
+ }
+
+ return ret;
+}
+
+static void adsp_pds_disable(struct qcom_adsp *adsp, struct device **pds,
+ size_t pd_count)
+{
+ int i;
+
+ for (i = 0; i < pd_count; i++) {
+ dev_pm_genpd_set_performance_state(pds[i], 0);
+ pm_runtime_put(pds[i]);
+ }
+}
+
+static int adsp_unprepare(struct rproc *rproc)
+{
+ struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+
+ /*
+ * adsp_load() passed pas_metadata to the SCM driver for storing
+ * the metadata context. It may already have been released if
+ * auth_and_reset() was successful; in all other cases, clean it
+ * up here.
+ */
+ qcom_scm_pas_metadata_release(&adsp->pas_metadata);
+
+ return 0;
+}
+
static int adsp_load(struct rproc *rproc, const struct firmware *fw)
{
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+ int ret;
+
+ ret = qcom_mdt_pas_init(adsp->dev, fw, rproc->firmware, adsp->pas_id,
+ adsp->mem_phys, &adsp->pas_metadata);
+ if (ret)
+ return ret;
- return qcom_mdt_load(adsp->dev, fw, rproc->firmware, adsp->pas_id,
- adsp->mem_region, adsp->mem_phys, adsp->mem_size,
- &adsp->mem_reloc);
+ ret = qcom_mdt_load_no_init(adsp->dev, fw, rproc->firmware, adsp->pas_id,
+ adsp->mem_region, adsp->mem_phys, adsp->mem_size,
+ &adsp->mem_reloc);
+ if (ret)
+ return ret;
+ qcom_pil_info_store(adsp->info_name, adsp->mem_phys, adsp->mem_size);
+
+ return 0;
}
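/*
 * Illustrative ordering note: qcom_mdt_pas_init() sends only the
 * signed metadata to the secure world and keeps its context in
 * adsp->pas_metadata; qcom_mdt_load_no_init() then copies the segments
 * without re-triggering PAS init. The context is released in
 * adsp_start() after auth_and_reset(), or in adsp_unprepare() on the
 * failure paths.
 */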
static int adsp_start(struct rproc *rproc)
@@ -82,11 +169,17 @@ static int adsp_start(struct rproc *rproc)
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
int ret;
- qcom_q6v5_prepare(&adsp->q6v5);
+ ret = qcom_q6v5_prepare(&adsp->q6v5);
+ if (ret)
+ return ret;
+
+ ret = adsp_pds_enable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+ if (ret < 0)
+ goto disable_irqs;
ret = clk_prepare_enable(adsp->xo);
if (ret)
- return ret;
+ goto disable_proxy_pds;
ret = clk_prepare_enable(adsp->aggre2_clk);
if (ret)
@@ -114,6 +207,8 @@ static int adsp_start(struct rproc *rproc)
goto disable_px_supply;
}
+ qcom_scm_pas_metadata_release(&adsp->pas_metadata);
+
return 0;
disable_px_supply:
@@ -124,6 +219,10 @@ disable_aggre2_clk:
clk_disable_unprepare(adsp->aggre2_clk);
disable_xo_clk:
clk_disable_unprepare(adsp->xo);
+disable_proxy_pds:
+ adsp_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
+disable_irqs:
+ qcom_q6v5_unprepare(&adsp->q6v5);
return ret;
}
@@ -136,6 +235,7 @@ static void qcom_pas_handover(struct qcom_q6v5 *q6v5)
regulator_disable(adsp->cx_supply);
clk_disable_unprepare(adsp->aggre2_clk);
clk_disable_unprepare(adsp->xo);
+ adsp_pds_disable(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
}
static int adsp_stop(struct rproc *rproc)
@@ -144,7 +244,7 @@ static int adsp_stop(struct rproc *rproc)
int handover;
int ret;
- ret = qcom_q6v5_request_stop(&adsp->q6v5);
+ ret = qcom_q6v5_request_stop(&adsp->q6v5, adsp->sysmon);
if (ret == -ETIMEDOUT)
dev_err(adsp->dev, "timed out on wait\n");
@@ -159,7 +259,7 @@ static int adsp_stop(struct rproc *rproc)
return ret;
}
-static void *adsp_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *adsp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
int offset;
@@ -171,12 +271,31 @@ static void *adsp_da_to_va(struct rproc *rproc, u64 da, int len)
return adsp->mem_region + offset;
}
+static unsigned long adsp_panic(struct rproc *rproc)
+{
+ struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv;
+
+ return qcom_q6v5_panic(&adsp->q6v5);
+}
+
static const struct rproc_ops adsp_ops = {
+ .unprepare = adsp_unprepare,
.start = adsp_start,
.stop = adsp_stop,
.da_to_va = adsp_da_to_va,
.parse_fw = qcom_register_dump_segments,
.load = adsp_load,
+ .panic = adsp_panic,
+};
+
+static const struct rproc_ops adsp_minidump_ops = {
+ .unprepare = adsp_unprepare,
+ .start = adsp_start,
+ .stop = adsp_stop,
+ .da_to_va = adsp_da_to_va,
+ .load = adsp_load,
+ .panic = adsp_panic,
+ .coredump = adsp_minidump,
};
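/*
 * Note: unlike adsp_ops, the minidump variant deliberately omits
 * .parse_fw, as the coredump contents are expected to come from the
 * SMEM minidump table via qcom_minidump() rather than from ELF program
 * headers.
 */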
static int adsp_init_clock(struct qcom_adsp *adsp)
@@ -217,6 +336,59 @@ static int adsp_init_regulator(struct qcom_adsp *adsp)
return PTR_ERR_OR_ZERO(adsp->px_supply);
}
+static int adsp_pds_attach(struct device *dev, struct device **devs,
+ char **pd_names)
+{
+ size_t num_pds = 0;
+ int ret;
+ int i;
+
+ if (!pd_names)
+ return 0;
+
+ /* Handle single power domain */
+ if (dev->pm_domain) {
+ devs[0] = dev;
+ pm_runtime_enable(dev);
+ return 1;
+ }
+
+ while (pd_names[num_pds])
+ num_pds++;
+
+ for (i = 0; i < num_pds; i++) {
+ devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
+ if (IS_ERR_OR_NULL(devs[i])) {
+ ret = PTR_ERR(devs[i]) ? : -ENODATA;
+ goto unroll_attach;
+ }
+ }
+
+ return num_pds;
+
+unroll_attach:
+ for (i--; i >= 0; i--)
+ dev_pm_domain_detach(devs[i], false);
+
+ return ret;
+}
+
+static void adsp_pds_detach(struct qcom_adsp *adsp, struct device **pds,
+ size_t pd_count)
+{
+ struct device *dev = adsp->dev;
+ int i;
+
+ /* Handle single power domain */
+ if (dev->pm_domain && pd_count) {
+ pm_runtime_disable(dev);
+ return;
+ }
+
+ for (i = 0; i < pd_count; i++)
+ dev_pm_domain_detach(pds[i], false);
+}
+
static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
{
struct device_node *node;
@@ -251,6 +423,7 @@ static int adsp_probe(struct platform_device *pdev)
struct qcom_adsp *adsp;
struct rproc *rproc;
const char *fw_name;
+ const struct rproc_ops *ops = &adsp_ops;
int ret;
desc = of_device_get_match_data(&pdev->dev);
@@ -266,20 +439,30 @@ static int adsp_probe(struct platform_device *pdev)
if (ret < 0 && ret != -EINVAL)
return ret;
- rproc = rproc_alloc(&pdev->dev, pdev->name, &adsp_ops,
- fw_name, sizeof(*adsp));
+ if (desc->minidump_id)
+ ops = &adsp_minidump_ops;
+
+ rproc = rproc_alloc(&pdev->dev, pdev->name, ops, fw_name, sizeof(*adsp));
+
if (!rproc) {
dev_err(&pdev->dev, "unable to allocate remoteproc\n");
return -ENOMEM;
}
+ rproc->auto_boot = desc->auto_boot;
+ rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
+
adsp = (struct qcom_adsp *)rproc->priv;
adsp->dev = &pdev->dev;
adsp->rproc = rproc;
+ adsp->minidump_id = desc->minidump_id;
adsp->pas_id = desc->pas_id;
adsp->has_aggre2_clk = desc->has_aggre2_clk;
+ adsp->info_name = desc->sysmon_name;
platform_set_drvdata(pdev, adsp);
+ device_wakeup_enable(adsp->dev);
+
ret = adsp_alloc_memory_region(adsp);
if (ret)
goto free_rproc;
@@ -292,12 +475,18 @@ static int adsp_probe(struct platform_device *pdev)
if (ret)
goto free_rproc;
- ret = qcom_q6v5_init(&adsp->q6v5, pdev, rproc, desc->crash_reason_smem,
+ ret = adsp_pds_attach(&pdev->dev, adsp->proxy_pds,
+ desc->proxy_pd_names);
+ if (ret < 0)
+ goto free_rproc;
+ adsp->proxy_pd_count = ret;
+
+ ret = qcom_q6v5_init(&adsp->q6v5, pdev, rproc, desc->crash_reason_smem, desc->load_state,
qcom_pas_handover);
if (ret)
- goto free_rproc;
+ goto detach_proxy_pds;
- qcom_add_glink_subdev(rproc, &adsp->glink_subdev);
+ qcom_add_glink_subdev(rproc, &adsp->glink_subdev, desc->ssr_name);
qcom_add_smd_subdev(rproc, &adsp->smd_subdev);
qcom_add_ssr_subdev(rproc, &adsp->ssr_subdev, desc->ssr_name);
adsp->sysmon = qcom_add_sysmon_subdev(rproc,
@@ -305,15 +494,17 @@ static int adsp_probe(struct platform_device *pdev)
desc->ssctl_id);
if (IS_ERR(adsp->sysmon)) {
ret = PTR_ERR(adsp->sysmon);
- goto free_rproc;
+ goto detach_proxy_pds;
}
ret = rproc_add(rproc);
if (ret)
- goto free_rproc;
+ goto detach_proxy_pds;
return 0;
+detach_proxy_pds:
+ adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
free_rproc:
rproc_free(rproc);
@@ -326,6 +517,7 @@ static int adsp_remove(struct platform_device *pdev)
rproc_del(adsp->rproc);
+ qcom_q6v5_deinit(&adsp->q6v5);
qcom_remove_glink_subdev(adsp->rproc, &adsp->glink_subdev);
qcom_remove_sysmon_subdev(adsp->sysmon);
qcom_remove_smd_subdev(adsp->rproc, &adsp->smd_subdev);
@@ -340,6 +532,101 @@ static const struct adsp_data adsp_resource_init = {
.firmware_name = "adsp.mdt",
.pas_id = 1,
.has_aggre2_clk = false,
+ .auto_boot = true,
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
+};
+
+static const struct adsp_data sdm845_adsp_resource_init = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .load_state = "adsp",
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
+};
+
+static const struct adsp_data sm6350_adsp_resource = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "lcx",
+ "lmx",
+ NULL
+ },
+ .load_state = "adsp",
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
+};
+
+static const struct adsp_data sm8150_adsp_resource = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ NULL
+ },
+ .load_state = "adsp",
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
+};
+
+static const struct adsp_data sm8250_adsp_resource = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "lcx",
+ "lmx",
+ NULL
+ },
+ .load_state = "adsp",
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
+};
+
+static const struct adsp_data sm8350_adsp_resource = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "lcx",
+ "lmx",
+ NULL
+ },
+ .load_state = "adsp",
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
+};
+
+static const struct adsp_data msm8996_adsp_resource = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ NULL
+ },
.ssr_name = "lpass",
.sysmon_name = "adsp",
.ssctl_id = 0x14,
@@ -350,39 +637,286 @@ static const struct adsp_data cdsp_resource_init = {
.firmware_name = "cdsp.mdt",
.pas_id = 18,
.has_aggre2_clk = false,
+ .auto_boot = true,
+ .ssr_name = "cdsp",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+};
+
+static const struct adsp_data sdm845_cdsp_resource_init = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mdt",
+ .pas_id = 18,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .load_state = "cdsp",
+ .ssr_name = "cdsp",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+};
+
+static const struct adsp_data sm6350_cdsp_resource = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mdt",
+ .pas_id = 18,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mx",
+ NULL
+ },
+ .load_state = "cdsp",
+ .ssr_name = "cdsp",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+};
+
+static const struct adsp_data sm8150_cdsp_resource = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mdt",
+ .pas_id = 18,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ NULL
+ },
+ .load_state = "cdsp",
+ .ssr_name = "cdsp",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+};
+
+static const struct adsp_data sm8250_cdsp_resource = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mdt",
+ .pas_id = 18,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ NULL
+ },
+ .load_state = "cdsp",
+ .ssr_name = "cdsp",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+};
+
+static const struct adsp_data sc8280xp_nsp0_resource = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mdt",
+ .pas_id = 18,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "nsp",
+ NULL
+ },
+ .ssr_name = "cdsp0",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+};
+
+static const struct adsp_data sc8280xp_nsp1_resource = {
+ .crash_reason_smem = 633,
+ .firmware_name = "cdsp.mdt",
+ .pas_id = 30,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "nsp",
+ NULL
+ },
+ .ssr_name = "cdsp1",
+ .sysmon_name = "cdsp1",
+ .ssctl_id = 0x20,
+};
+
+static const struct adsp_data sm8350_cdsp_resource = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mdt",
+ .pas_id = 18,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mxc",
+ NULL
+ },
+ .load_state = "cdsp",
.ssr_name = "cdsp",
.sysmon_name = "cdsp",
.ssctl_id = 0x17,
};
+
+static const struct adsp_data mpss_resource_init = {
+ .crash_reason_smem = 421,
+ .firmware_name = "modem.mdt",
+ .pas_id = 4,
+ .minidump_id = 3,
+ .has_aggre2_clk = false,
+ .auto_boot = false,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mss",
+ NULL
+ },
+ .load_state = "modem",
+ .ssr_name = "mpss",
+ .sysmon_name = "modem",
+ .ssctl_id = 0x12,
+};
+
+static const struct adsp_data sc8180x_mpss_resource = {
+ .crash_reason_smem = 421,
+ .firmware_name = "modem.mdt",
+ .pas_id = 4,
+ .has_aggre2_clk = false,
+ .auto_boot = false,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ NULL
+ },
+ .load_state = "modem",
+ .ssr_name = "mpss",
+ .sysmon_name = "modem",
+ .ssctl_id = 0x12,
+};
+
static const struct adsp_data slpi_resource_init = {
.crash_reason_smem = 424,
.firmware_name = "slpi.mdt",
.pas_id = 12,
.has_aggre2_clk = true,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "ssc_cx",
+ NULL
+ },
+ .ssr_name = "dsps",
+ .sysmon_name = "slpi",
+ .ssctl_id = 0x16,
+};
+
+static const struct adsp_data sm8150_slpi_resource = {
+ .crash_reason_smem = 424,
+ .firmware_name = "slpi.mdt",
+ .pas_id = 12,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "lcx",
+ "lmx",
+ NULL
+ },
+ .load_state = "slpi",
.ssr_name = "dsps",
.sysmon_name = "slpi",
.ssctl_id = 0x16,
};
+
+static const struct adsp_data sm8250_slpi_resource = {
+ .crash_reason_smem = 424,
+ .firmware_name = "slpi.mdt",
+ .pas_id = 12,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "lcx",
+ "lmx",
+ NULL
+ },
+ .load_state = "slpi",
+ .ssr_name = "dsps",
+ .sysmon_name = "slpi",
+ .ssctl_id = 0x16,
+};
+
+static const struct adsp_data sm8350_slpi_resource = {
+ .crash_reason_smem = 424,
+ .firmware_name = "slpi.mdt",
+ .pas_id = 12,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "lcx",
+ "lmx",
+ NULL
+ },
+ .load_state = "slpi",
+ .ssr_name = "dsps",
+ .sysmon_name = "slpi",
+ .ssctl_id = 0x16,
+};
+
static const struct adsp_data wcss_resource_init = {
.crash_reason_smem = 421,
.firmware_name = "wcnss.mdt",
.pas_id = 6,
+ .auto_boot = true,
.ssr_name = "mpss",
.sysmon_name = "wcnss",
.ssctl_id = 0x12,
};
+
+static const struct adsp_data sdx55_mpss_resource = {
+ .crash_reason_smem = 421,
+ .firmware_name = "modem.mdt",
+ .pas_id = 4,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mss",
+ NULL
+ },
+ .ssr_name = "mpss",
+ .sysmon_name = "modem",
+ .ssctl_id = 0x22,
+};
+
static const struct of_device_id adsp_of_match[] = {
+ { .compatible = "qcom,msm8226-adsp-pil", .data = &adsp_resource_init},
{ .compatible = "qcom,msm8974-adsp-pil", .data = &adsp_resource_init},
- { .compatible = "qcom,msm8996-adsp-pil", .data = &adsp_resource_init},
+ { .compatible = "qcom,msm8996-adsp-pil", .data = &msm8996_adsp_resource},
{ .compatible = "qcom,msm8996-slpi-pil", .data = &slpi_resource_init},
+ { .compatible = "qcom,msm8998-adsp-pas", .data = &msm8996_adsp_resource},
+ { .compatible = "qcom,msm8998-slpi-pas", .data = &slpi_resource_init},
{ .compatible = "qcom,qcs404-adsp-pas", .data = &adsp_resource_init },
{ .compatible = "qcom,qcs404-cdsp-pas", .data = &cdsp_resource_init },
{ .compatible = "qcom,qcs404-wcss-pas", .data = &wcss_resource_init },
- { .compatible = "qcom,sdm845-adsp-pas", .data = &adsp_resource_init},
- { .compatible = "qcom,sdm845-cdsp-pas", .data = &cdsp_resource_init},
+ { .compatible = "qcom,sc7180-mpss-pas", .data = &mpss_resource_init},
+ { .compatible = "qcom,sc7280-mpss-pas", .data = &mpss_resource_init},
+ { .compatible = "qcom,sc8180x-adsp-pas", .data = &sm8150_adsp_resource},
+ { .compatible = "qcom,sc8180x-cdsp-pas", .data = &sm8150_cdsp_resource},
+ { .compatible = "qcom,sc8180x-mpss-pas", .data = &sc8180x_mpss_resource},
+ { .compatible = "qcom,sc8280xp-adsp-pas", .data = &sm8250_adsp_resource},
+ { .compatible = "qcom,sc8280xp-nsp0-pas", .data = &sc8280xp_nsp0_resource},
+ { .compatible = "qcom,sc8280xp-nsp1-pas", .data = &sc8280xp_nsp1_resource},
+ { .compatible = "qcom,sdm660-adsp-pas", .data = &adsp_resource_init},
+ { .compatible = "qcom,sdm845-adsp-pas", .data = &sdm845_adsp_resource_init},
+ { .compatible = "qcom,sdm845-cdsp-pas", .data = &sdm845_cdsp_resource_init},
+ { .compatible = "qcom,sdx55-mpss-pas", .data = &sdx55_mpss_resource},
+ { .compatible = "qcom,sm6350-adsp-pas", .data = &sm6350_adsp_resource},
+ { .compatible = "qcom,sm6350-cdsp-pas", .data = &sm6350_cdsp_resource},
+ { .compatible = "qcom,sm6350-mpss-pas", .data = &mpss_resource_init},
+ { .compatible = "qcom,sm8150-adsp-pas", .data = &sm8150_adsp_resource},
+ { .compatible = "qcom,sm8150-cdsp-pas", .data = &sm8150_cdsp_resource},
+ { .compatible = "qcom,sm8150-mpss-pas", .data = &mpss_resource_init},
+ { .compatible = "qcom,sm8150-slpi-pas", .data = &sm8150_slpi_resource},
+ { .compatible = "qcom,sm8250-adsp-pas", .data = &sm8250_adsp_resource},
+ { .compatible = "qcom,sm8250-cdsp-pas", .data = &sm8250_cdsp_resource},
+ { .compatible = "qcom,sm8250-slpi-pas", .data = &sm8250_slpi_resource},
+ { .compatible = "qcom,sm8350-adsp-pas", .data = &sm8350_adsp_resource},
+ { .compatible = "qcom,sm8350-cdsp-pas", .data = &sm8350_cdsp_resource},
+ { .compatible = "qcom,sm8350-slpi-pas", .data = &sm8350_slpi_resource},
+ { .compatible = "qcom,sm8350-mpss-pas", .data = &mpss_resource_init},
+ { .compatible = "qcom,sm8450-adsp-pas", .data = &sm8350_adsp_resource},
+ { .compatible = "qcom,sm8450-cdsp-pas", .data = &sm8350_cdsp_resource},
+ { .compatible = "qcom,sm8450-slpi-pas", .data = &sm8350_slpi_resource},
+ { .compatible = "qcom,sm8450-mpss-pas", .data = &mpss_resource_init},
{ },
};
MODULE_DEVICE_TABLE(of, adsp_of_match);
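The probe path above leans on adsp_pds_attach()/adsp_pds_detach() to bring up the proxy power domains named in each adsp_data entry; the helpers themselves fall outside these hunks. A minimal sketch of the usual genpd multi-domain pattern they follow, assuming only the proxy_pds array and proxy_pd_names seen above:

/*
 * Sketch only: attach each named proxy power domain in order,
 * unrolling on failure; returns the number of domains attached.
 */
static int adsp_pds_attach(struct device *dev, struct device **devs,
			   char **pd_names)
{
	int num_pds = 0;
	int ret;
	int i;

	if (!pd_names)
		return 0;

	while (pd_names[num_pds])
		num_pds++;

	for (i = 0; i < num_pds; i++) {
		devs[i] = dev_pm_domain_attach_by_name(dev, pd_names[i]);
		if (IS_ERR_OR_NULL(devs[i])) {
			ret = PTR_ERR(devs[i]) ? : -ENODATA;
			goto unroll_attach;
		}
	}

	return num_pds;

unroll_attach:
	for (i--; i >= 0; i--)
		dev_pm_domain_detach(devs[i], false);

	return ret;
}

adsp_pds_detach() would then simply walk the same array calling dev_pm_domain_detach(), which is what the detach_proxy_pds error label in the probe hunk relies on.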
diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c
index f93e1e4a1cc0..bb0947f7770e 100644
--- a/drivers/remoteproc/qcom_q6v5_wcss.c
+++ b/drivers/remoteproc/qcom_q6v5_wcss.c
@@ -4,16 +4,22 @@
* Copyright (C) 2014 Sony Mobile Communications AB
* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*/
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/soc/qcom/mdt_loader.h>
#include "qcom_common.h"
+#include "qcom_pil_info.h"
#include "qcom_q6v5.h"
#define WCSS_CRASH_REASON 421
@@ -23,6 +29,9 @@
#define Q6SS_GFMUX_CTL_REG 0x020
#define Q6SS_PWR_CTL_REG 0x030
#define Q6SS_MEM_PWR_CTL 0x0B0
+#define Q6SS_STRAP_ACC 0x110
+#define Q6SS_CGC_OVERRIDE 0x034
+#define Q6SS_BCR_REG 0x6000
/* AXI Halt Register Offsets */
#define AXI_HALTREQ_REG 0x0
@@ -36,14 +45,19 @@
#define Q6SS_CORE_ARES BIT(1)
#define Q6SS_BUS_ARES_ENABLE BIT(2)
+/* Q6SS_BRC_RESET */
+#define Q6SS_BRC_BLK_ARES BIT(0)
+
/* Q6SS_GFMUX_CTL */
#define Q6SS_CLK_ENABLE BIT(1)
+#define Q6SS_SWITCH_CLK_SRC BIT(8)
/* Q6SS_PWR_CTL */
#define Q6SS_L2DATA_STBY_N BIT(18)
#define Q6SS_SLP_RET_N BIT(19)
#define Q6SS_CLAMP_IO BIT(20)
#define QDSS_BHS_ON BIT(21)
+#define QDSS_Q6_MEMORIES GENMASK(15, 0)
/* Q6SS parameters */
#define Q6SS_LDO_BYP BIT(25)
@@ -52,6 +66,7 @@
#define Q6SS_CLAMP_QMC_MEM BIT(22)
#define HALT_CHECK_MAX_LOOPS 200
#define Q6SS_XO_CBCR GENMASK(5, 3)
+#define Q6SS_SLEEP_CBCR GENMASK(5, 2)
/* Q6SS config/status registers */
#define TCSR_GLOBAL_CFG0 0x0
@@ -70,6 +85,25 @@
#define TCSR_WCSS_CLK_MASK 0x1F
#define TCSR_WCSS_CLK_ENABLE 0x14
+#define MAX_HALT_REG 3
+
+enum {
+ WCSS_IPQ8074,
+ WCSS_QCS404,
+};
+
+struct wcss_data {
+ const char *firmware_name;
+ unsigned int crash_reason_smem;
+ u32 version;
+ bool aon_reset_required;
+ bool wcss_q6_reset_required;
+ const char *ssr_name;
+ const char *sysmon_name;
+ int ssctl_id;
+ const struct rproc_ops *ops;
+ bool requires_force_stop;
+};
+
struct q6v5_wcss {
struct device *dev;
@@ -81,9 +115,26 @@ struct q6v5_wcss {
u32 halt_wcss;
u32 halt_nc;
+ struct clk *xo;
+ struct clk *ahbfabric_cbcr_clk;
+ struct clk *gcc_abhs_cbcr;
+ struct clk *gcc_axim_cbcr;
+ struct clk *lcc_csr_cbcr;
+ struct clk *ahbs_cbcr;
+ struct clk *tcm_slave_cbcr;
+ struct clk *qdsp6ss_abhm_cbcr;
+ struct clk *qdsp6ss_sleep_cbcr;
+ struct clk *qdsp6ss_axim_cbcr;
+ struct clk *qdsp6ss_xo_cbcr;
+ struct clk *qdsp6ss_core_gfmux;
+ struct clk *lcc_bcr_sleep;
+ struct regulator *cx_supply;
+ struct qcom_sysmon *sysmon;
+
struct reset_control *wcss_aon_reset;
struct reset_control *wcss_reset;
struct reset_control *wcss_q6_reset;
+ struct reset_control *wcss_q6_bcr_reset;
struct qcom_q6v5 q6v5;
@@ -91,6 +142,13 @@ struct q6v5_wcss {
phys_addr_t mem_reloc;
void *mem_region;
size_t mem_size;
+
+ unsigned int crash_reason_smem;
+ u32 version;
+ bool requires_force_stop;
+
+ struct qcom_rproc_glink glink_subdev;
+ struct qcom_rproc_ssr ssr_subdev;
};
static int q6v5_wcss_reset(struct q6v5_wcss *wcss)
@@ -233,6 +291,207 @@ wcss_reset:
return ret;
}
+static int q6v5_wcss_qcs404_power_on(struct q6v5_wcss *wcss)
+{
+ unsigned long val;
+ int ret, idx;
+
+ /* Toggle the restart */
+ reset_control_assert(wcss->wcss_reset);
+ usleep_range(200, 300);
+ reset_control_deassert(wcss->wcss_reset);
+ usleep_range(200, 300);
+
+ /* Enable GCC_WDSP_Q6SS_AHBS_CBCR clock */
+ ret = clk_prepare_enable(wcss->gcc_abhs_cbcr);
+ if (ret)
+ return ret;
+
+ /* Remove reset to the WCNSS QDSP6SS */
+ reset_control_deassert(wcss->wcss_q6_bcr_reset);
+
+ /* Enable Q6SSTOP_AHBFABRIC_CBCR clock */
+ ret = clk_prepare_enable(wcss->ahbfabric_cbcr_clk);
+ if (ret)
+ goto disable_gcc_abhs_cbcr_clk;
+
+ /* Enable the LCCCSR CBC clock, Q6SSTOP_Q6SSTOP_LCC_CSR_CBCR clock */
+ ret = clk_prepare_enable(wcss->lcc_csr_cbcr);
+ if (ret)
+ goto disable_ahbfabric_cbcr_clk;
+
+ /* Enable the Q6AHBS CBC, Q6SSTOP_Q6SS_AHBS_CBCR clock */
+ ret = clk_prepare_enable(wcss->ahbs_cbcr);
+ if (ret)
+ goto disable_csr_cbcr_clk;
+
+ /* Enable the TCM slave CBC, Q6SSTOP_Q6SS_TCM_SLAVE_CBCR clock */
+ ret = clk_prepare_enable(wcss->tcm_slave_cbcr);
+ if (ret)
+ goto disable_ahbs_cbcr_clk;
+
+ /* Enable the Q6SS AHB master CBC, Q6SSTOP_Q6SS_AHBM_CBCR clock */
+ ret = clk_prepare_enable(wcss->qdsp6ss_abhm_cbcr);
+ if (ret)
+ goto disable_tcm_slave_cbcr_clk;
+
+ /* Enable the Q6SS AXI master CBC, Q6SSTOP_Q6SS_AXIM_CBCR clock */
+ ret = clk_prepare_enable(wcss->qdsp6ss_axim_cbcr);
+ if (ret)
+ goto disable_abhm_cbcr_clk;
+
+ /* Enable the Q6SS XO CBC */
+ val = readl(wcss->reg_base + Q6SS_XO_CBCR);
+ val |= BIT(0);
+ writel(val, wcss->reg_base + Q6SS_XO_CBCR);
+	/* Wait for the CLKOFF bit to go low, indicating the clock is enabled */
+ ret = readl_poll_timeout(wcss->reg_base + Q6SS_XO_CBCR,
+ val, !(val & BIT(31)), 1,
+ HALT_CHECK_MAX_LOOPS);
+ if (ret) {
+ dev_err(wcss->dev,
+ "xo cbcr enabling timed out (rc:%d)\n", ret);
+ return ret;
+ }
+
+ writel(0, wcss->reg_base + Q6SS_CGC_OVERRIDE);
+
+	/* Enable QDSP6 sleep clock */
+ val = readl(wcss->reg_base + Q6SS_SLEEP_CBCR);
+ val |= BIT(0);
+ writel(val, wcss->reg_base + Q6SS_SLEEP_CBCR);
+
+	/* Enable the Q6 AXI clock, GCC_WDSP_Q6SS_AXIM_CBCR */
+ ret = clk_prepare_enable(wcss->gcc_axim_cbcr);
+ if (ret)
+ goto disable_sleep_cbcr_clk;
+
+ /* Assert resets, stop core */
+ val = readl(wcss->reg_base + Q6SS_RESET_REG);
+ val |= Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENABLE | Q6SS_STOP_CORE;
+ writel(val, wcss->reg_base + Q6SS_RESET_REG);
+
+ /* Program the QDSP6SS PWR_CTL register */
+ writel(0x01700000, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ writel(0x03700000, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ writel(0x03300000, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ writel(0x033C0000, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ /*
+ * Enable memories by turning on the QDSP6 memory foot/head switch, one
+ * bank at a time to avoid in-rush current
+ */
+ for (idx = 28; idx >= 0; idx--) {
+ writel((readl(wcss->reg_base + Q6SS_MEM_PWR_CTL) |
+ (1 << idx)), wcss->reg_base + Q6SS_MEM_PWR_CTL);
+ }
+
+ writel(0x031C0000, wcss->reg_base + Q6SS_PWR_CTL_REG);
+ writel(0x030C0000, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ val = readl(wcss->reg_base + Q6SS_RESET_REG);
+ val &= ~Q6SS_CORE_ARES;
+ writel(val, wcss->reg_base + Q6SS_RESET_REG);
+
+ /* Enable the Q6 core clock at the GFM, Q6SSTOP_QDSP6SS_GFMUX_CTL */
+ val = readl(wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+ val |= Q6SS_CLK_ENABLE | Q6SS_SWITCH_CLK_SRC;
+ writel(val, wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+
+ /* Enable sleep clock branch needed for BCR circuit */
+ ret = clk_prepare_enable(wcss->lcc_bcr_sleep);
+ if (ret)
+ goto disable_core_gfmux_clk;
+
+ return 0;
+
+disable_core_gfmux_clk:
+ val = readl(wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+ val &= ~(Q6SS_CLK_ENABLE | Q6SS_SWITCH_CLK_SRC);
+ writel(val, wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+ clk_disable_unprepare(wcss->gcc_axim_cbcr);
+disable_sleep_cbcr_clk:
+ val = readl(wcss->reg_base + Q6SS_SLEEP_CBCR);
+ val &= ~Q6SS_CLK_ENABLE;
+ writel(val, wcss->reg_base + Q6SS_SLEEP_CBCR);
+ val = readl(wcss->reg_base + Q6SS_XO_CBCR);
+ val &= ~Q6SS_CLK_ENABLE;
+ writel(val, wcss->reg_base + Q6SS_XO_CBCR);
+ clk_disable_unprepare(wcss->qdsp6ss_axim_cbcr);
+disable_abhm_cbcr_clk:
+ clk_disable_unprepare(wcss->qdsp6ss_abhm_cbcr);
+disable_tcm_slave_cbcr_clk:
+ clk_disable_unprepare(wcss->tcm_slave_cbcr);
+disable_ahbs_cbcr_clk:
+ clk_disable_unprepare(wcss->ahbs_cbcr);
+disable_csr_cbcr_clk:
+ clk_disable_unprepare(wcss->lcc_csr_cbcr);
+disable_ahbfabric_cbcr_clk:
+ clk_disable_unprepare(wcss->ahbfabric_cbcr_clk);
+disable_gcc_abhs_cbcr_clk:
+ clk_disable_unprepare(wcss->gcc_abhs_cbcr);
+
+ return ret;
+}
+
+static inline int q6v5_wcss_qcs404_reset(struct q6v5_wcss *wcss)
+{
+ unsigned long val;
+
+ writel(0x80800000, wcss->reg_base + Q6SS_STRAP_ACC);
+
+ /* Start core execution */
+ val = readl(wcss->reg_base + Q6SS_RESET_REG);
+ val &= ~Q6SS_STOP_CORE;
+ writel(val, wcss->reg_base + Q6SS_RESET_REG);
+
+ return 0;
+}
+
+static int q6v5_qcs404_wcss_start(struct rproc *rproc)
+{
+ struct q6v5_wcss *wcss = rproc->priv;
+ int ret;
+
+ ret = clk_prepare_enable(wcss->xo);
+ if (ret)
+ return ret;
+
+ ret = regulator_enable(wcss->cx_supply);
+ if (ret)
+ goto disable_xo_clk;
+
+ qcom_q6v5_prepare(&wcss->q6v5);
+
+ ret = q6v5_wcss_qcs404_power_on(wcss);
+ if (ret) {
+ dev_err(wcss->dev, "wcss clk_enable failed\n");
+ goto disable_cx_supply;
+ }
+
+ writel(rproc->bootaddr >> 4, wcss->reg_base + Q6SS_RST_EVB);
+
+ q6v5_wcss_qcs404_reset(wcss);
+
+ ret = qcom_q6v5_wait_for_start(&wcss->q6v5, 5 * HZ);
+ if (ret == -ETIMEDOUT) {
+ dev_err(wcss->dev, "start timed out\n");
+ goto disable_cx_supply;
+ }
+
+ return 0;
+
+disable_cx_supply:
+ regulator_disable(wcss->cx_supply);
+disable_xo_clk:
+ clk_disable_unprepare(wcss->xo);
+
+ return ret;
+}
+
static void q6v5_wcss_halt_axi_port(struct q6v5_wcss *wcss,
struct regmap *halt_map,
u32 offset)
@@ -267,6 +526,70 @@ static void q6v5_wcss_halt_axi_port(struct q6v5_wcss *wcss,
regmap_write(halt_map, offset + AXI_HALTREQ_REG, 0);
}
+static int q6v5_qcs404_wcss_shutdown(struct q6v5_wcss *wcss)
+{
+ unsigned long val;
+ int ret;
+
+ q6v5_wcss_halt_axi_port(wcss, wcss->halt_map, wcss->halt_wcss);
+
+ /* assert clamps to avoid MX current inrush */
+ val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
+ val |= (Q6SS_CLAMP_IO | Q6SS_CLAMP_WL | Q6SS_CLAMP_QMC_MEM);
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ /* Disable memories by turning off memory foot/headswitch */
+ writel((readl(wcss->reg_base + Q6SS_MEM_PWR_CTL) &
+ ~QDSS_Q6_MEMORIES),
+ wcss->reg_base + Q6SS_MEM_PWR_CTL);
+
+ /* Clear the BHS_ON bit */
+ val = readl(wcss->reg_base + Q6SS_PWR_CTL_REG);
+ val &= ~Q6SS_BHS_ON;
+ writel(val, wcss->reg_base + Q6SS_PWR_CTL_REG);
+
+ clk_disable_unprepare(wcss->ahbfabric_cbcr_clk);
+ clk_disable_unprepare(wcss->lcc_csr_cbcr);
+ clk_disable_unprepare(wcss->tcm_slave_cbcr);
+ clk_disable_unprepare(wcss->qdsp6ss_abhm_cbcr);
+ clk_disable_unprepare(wcss->qdsp6ss_axim_cbcr);
+
+ val = readl(wcss->reg_base + Q6SS_SLEEP_CBCR);
+ val &= ~BIT(0);
+ writel(val, wcss->reg_base + Q6SS_SLEEP_CBCR);
+
+ val = readl(wcss->reg_base + Q6SS_XO_CBCR);
+ val &= ~BIT(0);
+ writel(val, wcss->reg_base + Q6SS_XO_CBCR);
+
+ clk_disable_unprepare(wcss->ahbs_cbcr);
+ clk_disable_unprepare(wcss->lcc_bcr_sleep);
+
+ val = readl(wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+ val &= ~(Q6SS_CLK_ENABLE | Q6SS_SWITCH_CLK_SRC);
+ writel(val, wcss->reg_base + Q6SS_GFMUX_CTL_REG);
+
+ clk_disable_unprepare(wcss->gcc_abhs_cbcr);
+
+ ret = reset_control_assert(wcss->wcss_reset);
+ if (ret) {
+		dev_err(wcss->dev, "wcss_reset assert failed\n");
+ return ret;
+ }
+ usleep_range(200, 300);
+
+ ret = reset_control_deassert(wcss->wcss_reset);
+ if (ret) {
+		dev_err(wcss->dev, "wcss_reset deassert failed\n");
+ return ret;
+ }
+ usleep_range(200, 300);
+
+ clk_disable_unprepare(wcss->gcc_axim_cbcr);
+
+ return 0;
+}
+
static int q6v5_wcss_powerdown(struct q6v5_wcss *wcss)
{
int ret;
@@ -386,27 +709,35 @@ static int q6v5_wcss_stop(struct rproc *rproc)
int ret;
/* WCSS powerdown */
- ret = qcom_q6v5_request_stop(&wcss->q6v5);
- if (ret == -ETIMEDOUT) {
- dev_err(wcss->dev, "timed out on wait\n");
- return ret;
+ if (wcss->requires_force_stop) {
+ ret = qcom_q6v5_request_stop(&wcss->q6v5, NULL);
+ if (ret == -ETIMEDOUT) {
+ dev_err(wcss->dev, "timed out on wait\n");
+ return ret;
+ }
}
- ret = q6v5_wcss_powerdown(wcss);
- if (ret)
- return ret;
-
- /* Q6 Power down */
- ret = q6v5_q6_powerdown(wcss);
- if (ret)
- return ret;
+ if (wcss->version == WCSS_QCS404) {
+ ret = q6v5_qcs404_wcss_shutdown(wcss);
+ if (ret)
+ return ret;
+ } else {
+ ret = q6v5_wcss_powerdown(wcss);
+ if (ret)
+ return ret;
+
+ /* Q6 Power down */
+ ret = q6v5_q6_powerdown(wcss);
+ if (ret)
+ return ret;
+ }
qcom_q6v5_unprepare(&wcss->q6v5);
return 0;
}
-static void *q6v5_wcss_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *q6v5_wcss_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct q6v5_wcss *wcss = rproc->priv;
int offset;
@@ -421,13 +752,20 @@ static void *q6v5_wcss_da_to_va(struct rproc *rproc, u64 da, int len)
static int q6v5_wcss_load(struct rproc *rproc, const struct firmware *fw)
{
struct q6v5_wcss *wcss = rproc->priv;
+ int ret;
+
+ ret = qcom_mdt_load_no_init(wcss->dev, fw, rproc->firmware,
+ 0, wcss->mem_region, wcss->mem_phys,
+ wcss->mem_size, &wcss->mem_reloc);
+ if (ret)
+ return ret;
+
+ qcom_pil_info_store("wcnss", wcss->mem_phys, wcss->mem_size);
- return qcom_mdt_load_no_init(wcss->dev, fw, rproc->firmware,
- 0, wcss->mem_region, wcss->mem_phys,
- wcss->mem_size, &wcss->mem_reloc);
+ return ret;
}
-static const struct rproc_ops q6v5_wcss_ops = {
+static const struct rproc_ops q6v5_wcss_ipq8074_ops = {
.start = q6v5_wcss_start,
.stop = q6v5_wcss_stop,
.da_to_va = q6v5_wcss_da_to_va,
@@ -435,26 +773,46 @@ static const struct rproc_ops q6v5_wcss_ops = {
.get_boot_addr = rproc_elf_get_boot_addr,
};
-static int q6v5_wcss_init_reset(struct q6v5_wcss *wcss)
+static const struct rproc_ops q6v5_wcss_qcs404_ops = {
+ .start = q6v5_qcs404_wcss_start,
+ .stop = q6v5_wcss_stop,
+ .da_to_va = q6v5_wcss_da_to_va,
+ .load = q6v5_wcss_load,
+ .get_boot_addr = rproc_elf_get_boot_addr,
+ .parse_fw = qcom_register_dump_segments,
+};
+
+static int q6v5_wcss_init_reset(struct q6v5_wcss *wcss,
+ const struct wcss_data *desc)
{
struct device *dev = wcss->dev;
- wcss->wcss_aon_reset = devm_reset_control_get(dev, "wcss_aon_reset");
- if (IS_ERR(wcss->wcss_aon_reset)) {
- dev_err(wcss->dev, "unable to acquire wcss_aon_reset\n");
- return PTR_ERR(wcss->wcss_aon_reset);
+ if (desc->aon_reset_required) {
+ wcss->wcss_aon_reset = devm_reset_control_get_exclusive(dev, "wcss_aon_reset");
+ if (IS_ERR(wcss->wcss_aon_reset)) {
+ dev_err(wcss->dev, "fail to acquire wcss_aon_reset\n");
+ return PTR_ERR(wcss->wcss_aon_reset);
+ }
}
- wcss->wcss_reset = devm_reset_control_get(dev, "wcss_reset");
+ wcss->wcss_reset = devm_reset_control_get_exclusive(dev, "wcss_reset");
if (IS_ERR(wcss->wcss_reset)) {
dev_err(wcss->dev, "unable to acquire wcss_reset\n");
return PTR_ERR(wcss->wcss_reset);
}
- wcss->wcss_q6_reset = devm_reset_control_get(dev, "wcss_q6_reset");
- if (IS_ERR(wcss->wcss_q6_reset)) {
- dev_err(wcss->dev, "unable to acquire wcss_q6_reset\n");
- return PTR_ERR(wcss->wcss_q6_reset);
+ if (desc->wcss_q6_reset_required) {
+ wcss->wcss_q6_reset = devm_reset_control_get_exclusive(dev, "wcss_q6_reset");
+ if (IS_ERR(wcss->wcss_q6_reset)) {
+ dev_err(wcss->dev, "unable to acquire wcss_q6_reset\n");
+ return PTR_ERR(wcss->wcss_q6_reset);
+ }
+ }
+
+ wcss->wcss_q6_bcr_reset = devm_reset_control_get_exclusive(dev, "wcss_q6_bcr_reset");
+ if (IS_ERR(wcss->wcss_q6_bcr_reset)) {
+ dev_err(wcss->dev, "unable to acquire wcss_q6_bcr_reset\n");
+ return PTR_ERR(wcss->wcss_q6_bcr_reset);
}
return 0;
@@ -463,35 +821,48 @@ static int q6v5_wcss_init_reset(struct q6v5_wcss *wcss)
static int q6v5_wcss_init_mmio(struct q6v5_wcss *wcss,
struct platform_device *pdev)
{
- struct of_phandle_args args;
+ unsigned int halt_reg[MAX_HALT_REG] = {0};
+ struct device_node *syscon;
struct resource *res;
int ret;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6");
- wcss->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(wcss->reg_base))
- return PTR_ERR(wcss->reg_base);
+ wcss->reg_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!wcss->reg_base)
+ return -ENOMEM;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
- wcss->rmb_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(wcss->rmb_base))
- return PTR_ERR(wcss->rmb_base);
+ if (wcss->version == WCSS_IPQ8074) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rmb");
+ wcss->rmb_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(wcss->rmb_base))
+ return PTR_ERR(wcss->rmb_base);
+ }
- ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
- "qcom,halt-regs", 3, 0, &args);
- if (ret < 0) {
+ syscon = of_parse_phandle(pdev->dev.of_node,
+ "qcom,halt-regs", 0);
+ if (!syscon) {
dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
return -EINVAL;
}
- wcss->halt_map = syscon_node_to_regmap(args.np);
- of_node_put(args.np);
+ wcss->halt_map = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
if (IS_ERR(wcss->halt_map))
return PTR_ERR(wcss->halt_map);
- wcss->halt_q6 = args.args[0];
- wcss->halt_wcss = args.args[1];
- wcss->halt_nc = args.args[2];
+ ret = of_property_read_variable_u32_array(pdev->dev.of_node,
+ "qcom,halt-regs",
+ halt_reg, 0,
+ MAX_HALT_REG);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to parse qcom,halt-regs\n");
+ return -EINVAL;
+ }
+
+ wcss->halt_q6 = halt_reg[0];
+ wcss->halt_wcss = halt_reg[1];
+ wcss->halt_nc = halt_reg[2];
return 0;
}
@@ -525,14 +896,120 @@ static int q6v5_alloc_memory_region(struct q6v5_wcss *wcss)
return 0;
}
+static int q6v5_wcss_init_clock(struct q6v5_wcss *wcss)
+{
+ int ret;
+
+ wcss->xo = devm_clk_get(wcss->dev, "xo");
+ if (IS_ERR(wcss->xo)) {
+ ret = PTR_ERR(wcss->xo);
+ if (ret != -EPROBE_DEFER)
+			dev_err(wcss->dev, "failed to get xo clock\n");
+ return ret;
+ }
+
+ wcss->gcc_abhs_cbcr = devm_clk_get(wcss->dev, "gcc_abhs_cbcr");
+ if (IS_ERR(wcss->gcc_abhs_cbcr)) {
+ ret = PTR_ERR(wcss->gcc_abhs_cbcr);
+ if (ret != -EPROBE_DEFER)
+			dev_err(wcss->dev, "failed to get gcc abhs clock\n");
+ return ret;
+ }
+
+ wcss->gcc_axim_cbcr = devm_clk_get(wcss->dev, "gcc_axim_cbcr");
+ if (IS_ERR(wcss->gcc_axim_cbcr)) {
+ ret = PTR_ERR(wcss->gcc_axim_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get gcc axim clock\n");
+ return ret;
+ }
+
+ wcss->ahbfabric_cbcr_clk = devm_clk_get(wcss->dev,
+ "lcc_ahbfabric_cbc");
+ if (IS_ERR(wcss->ahbfabric_cbcr_clk)) {
+ ret = PTR_ERR(wcss->ahbfabric_cbcr_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get ahbfabric clock\n");
+ return ret;
+ }
+
+ wcss->lcc_csr_cbcr = devm_clk_get(wcss->dev, "tcsr_lcc_cbc");
+ if (IS_ERR(wcss->lcc_csr_cbcr)) {
+ ret = PTR_ERR(wcss->lcc_csr_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get csr cbcr clk\n");
+ return ret;
+ }
+
+ wcss->ahbs_cbcr = devm_clk_get(wcss->dev,
+ "lcc_abhs_cbc");
+ if (IS_ERR(wcss->ahbs_cbcr)) {
+ ret = PTR_ERR(wcss->ahbs_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get ahbs_cbcr clk\n");
+ return ret;
+ }
+
+ wcss->tcm_slave_cbcr = devm_clk_get(wcss->dev,
+ "lcc_tcm_slave_cbc");
+ if (IS_ERR(wcss->tcm_slave_cbcr)) {
+ ret = PTR_ERR(wcss->tcm_slave_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get tcm cbcr clk\n");
+ return ret;
+ }
+
+ wcss->qdsp6ss_abhm_cbcr = devm_clk_get(wcss->dev, "lcc_abhm_cbc");
+ if (IS_ERR(wcss->qdsp6ss_abhm_cbcr)) {
+ ret = PTR_ERR(wcss->qdsp6ss_abhm_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get abhm cbcr clk\n");
+ return ret;
+ }
+
+ wcss->qdsp6ss_axim_cbcr = devm_clk_get(wcss->dev, "lcc_axim_cbc");
+ if (IS_ERR(wcss->qdsp6ss_axim_cbcr)) {
+ ret = PTR_ERR(wcss->qdsp6ss_axim_cbcr);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get axim cbcr clk\n");
+ return ret;
+ }
+
+ wcss->lcc_bcr_sleep = devm_clk_get(wcss->dev, "lcc_bcr_sleep");
+ if (IS_ERR(wcss->lcc_bcr_sleep)) {
+ ret = PTR_ERR(wcss->lcc_bcr_sleep);
+ if (ret != -EPROBE_DEFER)
+ dev_err(wcss->dev, "failed to get bcr cbcr clk\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int q6v5_wcss_init_regulator(struct q6v5_wcss *wcss)
+{
+ wcss->cx_supply = devm_regulator_get(wcss->dev, "cx");
+ if (IS_ERR(wcss->cx_supply))
+ return PTR_ERR(wcss->cx_supply);
+
+ regulator_set_load(wcss->cx_supply, 100000);
+
+ return 0;
+}
+
static int q6v5_wcss_probe(struct platform_device *pdev)
{
+ const struct wcss_data *desc;
struct q6v5_wcss *wcss;
struct rproc *rproc;
int ret;
- rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_wcss_ops,
- "IPQ8074/q6_fw.mdt", sizeof(*wcss));
+ desc = device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ rproc = rproc_alloc(&pdev->dev, pdev->name, desc->ops,
+ desc->firmware_name, sizeof(*wcss));
if (!rproc) {
dev_err(&pdev->dev, "failed to allocate rproc\n");
return -ENOMEM;
@@ -540,6 +1017,10 @@ static int q6v5_wcss_probe(struct platform_device *pdev)
wcss = rproc->priv;
wcss->dev = &pdev->dev;
+ wcss->version = desc->version;
+ wcss->requires_force_stop = desc->requires_force_stop;
ret = q6v5_wcss_init_mmio(wcss, pdev);
if (ret)
@@ -549,14 +1030,32 @@ static int q6v5_wcss_probe(struct platform_device *pdev)
if (ret)
goto free_rproc;
- ret = q6v5_wcss_init_reset(wcss);
+ if (wcss->version == WCSS_QCS404) {
+ ret = q6v5_wcss_init_clock(wcss);
+ if (ret)
+ goto free_rproc;
+
+ ret = q6v5_wcss_init_regulator(wcss);
+ if (ret)
+ goto free_rproc;
+ }
+
+ ret = q6v5_wcss_init_reset(wcss, desc);
if (ret)
goto free_rproc;
- ret = qcom_q6v5_init(&wcss->q6v5, pdev, rproc, WCSS_CRASH_REASON, NULL);
+ ret = qcom_q6v5_init(&wcss->q6v5, pdev, rproc, desc->crash_reason_smem, NULL, NULL);
if (ret)
goto free_rproc;
+ qcom_add_glink_subdev(rproc, &wcss->glink_subdev, "q6wcss");
+ qcom_add_ssr_subdev(rproc, &wcss->ssr_subdev, "q6wcss");
+
+ if (desc->ssctl_id)
+ wcss->sysmon = qcom_add_sysmon_subdev(rproc,
+ desc->sysmon_name,
+ desc->ssctl_id);
+
ret = rproc_add(rproc);
if (ret)
goto free_rproc;
@@ -574,15 +1073,40 @@ free_rproc:
static int q6v5_wcss_remove(struct platform_device *pdev)
{
struct rproc *rproc = platform_get_drvdata(pdev);
+ struct q6v5_wcss *wcss = rproc->priv;
+ qcom_q6v5_deinit(&wcss->q6v5);
rproc_del(rproc);
rproc_free(rproc);
return 0;
}
+static const struct wcss_data wcss_ipq8074_res_init = {
+ .firmware_name = "IPQ8074/q6_fw.mdt",
+ .crash_reason_smem = WCSS_CRASH_REASON,
+ .aon_reset_required = true,
+ .wcss_q6_reset_required = true,
+ .ops = &q6v5_wcss_ipq8074_ops,
+ .requires_force_stop = true,
+};
+
+static const struct wcss_data wcss_qcs404_res_init = {
+ .crash_reason_smem = WCSS_CRASH_REASON,
+ .firmware_name = "wcnss.mdt",
+ .version = WCSS_QCS404,
+ .aon_reset_required = false,
+ .wcss_q6_reset_required = false,
+ .ssr_name = "mpss",
+ .sysmon_name = "wcnss",
+ .ssctl_id = 0x12,
+ .ops = &q6v5_wcss_qcs404_ops,
+ .requires_force_stop = false,
+};
+
static const struct of_device_id q6v5_wcss_of_match[] = {
- { .compatible = "qcom,ipq8074-wcss-pil" },
+ { .compatible = "qcom,ipq8074-wcss-pil", .data = &wcss_ipq8074_res_init },
+ { .compatible = "qcom,qcs404-wcss-pil", .data = &wcss_qcs404_res_init },
{ },
};
MODULE_DEVICE_TABLE(of, q6v5_wcss_of_match);
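The QCS404 power-on sequence above applies the same clock-branch idiom by hand in several places: set the enable bit in a CBCR register, then poll its CLKOFF status bit (bit 31) until the hardware reports the branch running. A condensed sketch of that idiom, reusing HALT_CHECK_MAX_LOOPS from this file (the helper name is hypothetical):

/* Sketch: enable a clock branch and wait for its CLKOFF bit to clear. */
static int q6v5_wcss_cbc_enable(struct q6v5_wcss *wcss, unsigned int offset)
{
	u32 val;

	val = readl(wcss->reg_base + offset);
	writel(val | BIT(0), wcss->reg_base + offset);

	/* CLKOFF (bit 31) reads back low once the branch clock is on */
	return readl_poll_timeout(wcss->reg_base + offset, val,
				  !(val & BIT(31)), 1,
				  HALT_CHECK_MAX_LOOPS);
}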
diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c
index c231314eab66..9fca81492863 100644
--- a/drivers/remoteproc/qcom_sysmon.c
+++ b/drivers/remoteproc/qcom_sysmon.c
@@ -22,6 +22,9 @@ struct qcom_sysmon {
struct rproc_subdev subdev;
struct rproc *rproc;
+ int state;
+ struct mutex state_lock;
+
struct list_head node;
const char *name;
@@ -41,26 +44,48 @@ struct qcom_sysmon {
struct mutex lock;
bool ssr_ack;
+ bool shutdown_acked;
struct qmi_handle qmi;
struct sockaddr_qrtr ssctl;
};
+enum {
+ SSCTL_SSR_EVENT_BEFORE_POWERUP,
+ SSCTL_SSR_EVENT_AFTER_POWERUP,
+ SSCTL_SSR_EVENT_BEFORE_SHUTDOWN,
+ SSCTL_SSR_EVENT_AFTER_SHUTDOWN,
+};
+
+static const char * const sysmon_state_string[] = {
+ [SSCTL_SSR_EVENT_BEFORE_POWERUP] = "before_powerup",
+ [SSCTL_SSR_EVENT_AFTER_POWERUP] = "after_powerup",
+ [SSCTL_SSR_EVENT_BEFORE_SHUTDOWN] = "before_shutdown",
+ [SSCTL_SSR_EVENT_AFTER_SHUTDOWN] = "after_shutdown",
+};
+
+struct sysmon_event {
+ const char *subsys_name;
+ u32 ssr_event;
+};
+
static DEFINE_MUTEX(sysmon_lock);
static LIST_HEAD(sysmon_list);
/**
* sysmon_send_event() - send notification of other remote's SSR event
* @sysmon: sysmon context
- * @name: other remote's name
+ * @event: sysmon event context
*/
-static void sysmon_send_event(struct qcom_sysmon *sysmon, const char *name)
+static void sysmon_send_event(struct qcom_sysmon *sysmon,
+ const struct sysmon_event *event)
{
char req[50];
int len;
int ret;
- len = snprintf(req, sizeof(req), "ssr:%s:before_shutdown", name);
+ len = snprintf(req, sizeof(req), "ssr:%s:%s", event->subsys_name,
+ sysmon_state_string[event->ssr_event]);
if (len >= sizeof(req))
return;
@@ -91,10 +116,13 @@ out_unlock:
/**
* sysmon_request_shutdown() - request graceful shutdown of remote
* @sysmon: sysmon context
+ *
+ * Return: boolean indicator of the remote processor acking the request
*/
-static void sysmon_request_shutdown(struct qcom_sysmon *sysmon)
+static bool sysmon_request_shutdown(struct qcom_sysmon *sysmon)
{
char *req = "ssr:shutdown";
+ bool acked = false;
int ret;
mutex_lock(&sysmon->lock);
@@ -117,9 +145,13 @@ static void sysmon_request_shutdown(struct qcom_sysmon *sysmon)
if (!sysmon->ssr_ack)
dev_err(sysmon->dev,
"unexpected response to sysmon shutdown request\n");
+ else
+ acked = true;
out_unlock:
mutex_unlock(&sysmon->lock);
+
+ return acked;
}
static int sysmon_callback(struct rpmsg_device *rpdev, void *data, int count,
@@ -149,13 +181,6 @@ static int sysmon_callback(struct rpmsg_device *rpdev, void *data, int count,
#define SSCTL_SUBSYS_NAME_LENGTH 15
enum {
- SSCTL_SSR_EVENT_BEFORE_POWERUP,
- SSCTL_SSR_EVENT_AFTER_POWERUP,
- SSCTL_SSR_EVENT_BEFORE_SHUTDOWN,
- SSCTL_SSR_EVENT_AFTER_SHUTDOWN,
-};
-
-enum {
SSCTL_SSR_EVENT_FORCED,
SSCTL_SSR_EVENT_GRACEFUL,
};
@@ -269,7 +294,7 @@ static void sysmon_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
complete(&sysmon->ind_comp);
}
-static struct qmi_msg_handler qmi_indication_handler[] = {
+static const struct qmi_msg_handler qmi_indication_handler[] = {
{
.type = QMI_INDICATION,
.msg_id = SSCTL_SHUTDOWN_READY_IND,
@@ -280,14 +305,33 @@ static struct qmi_msg_handler qmi_indication_handler[] = {
{}
};
+static bool ssctl_request_shutdown_wait(struct qcom_sysmon *sysmon)
+{
+ int ret;
+
+ ret = wait_for_completion_timeout(&sysmon->shutdown_comp, 10 * HZ);
+ if (ret)
+ return true;
+
+ ret = try_wait_for_completion(&sysmon->ind_comp);
+ if (ret)
+ return true;
+
+ dev_err(sysmon->dev, "timeout waiting for shutdown ack\n");
+ return false;
+}
+
/**
* ssctl_request_shutdown() - request shutdown via SSCTL QMI service
* @sysmon: sysmon context
+ *
+ * Return: boolean indicator of the remote processor acking the request
*/
-static void ssctl_request_shutdown(struct qcom_sysmon *sysmon)
+static bool ssctl_request_shutdown(struct qcom_sysmon *sysmon)
{
struct ssctl_shutdown_resp resp;
struct qmi_txn txn;
+ bool acked = false;
int ret;
reinit_completion(&sysmon->ind_comp);
@@ -295,7 +339,7 @@ static void ssctl_request_shutdown(struct qcom_sysmon *sysmon)
ret = qmi_txn_init(&sysmon->qmi, &txn, ssctl_shutdown_resp_ei, &resp);
if (ret < 0) {
dev_err(sysmon->dev, "failed to allocate QMI txn\n");
- return;
+ return false;
}
ret = qmi_send_request(&sysmon->qmi, &sysmon->ssctl, &txn,
@@ -303,35 +347,32 @@ static void ssctl_request_shutdown(struct qcom_sysmon *sysmon)
if (ret < 0) {
dev_err(sysmon->dev, "failed to send shutdown request\n");
qmi_txn_cancel(&txn);
- return;
+ return false;
}
ret = qmi_txn_wait(&txn, 5 * HZ);
- if (ret < 0)
- dev_err(sysmon->dev, "failed receiving QMI response\n");
- else if (resp.resp.result)
- dev_err(sysmon->dev, "shutdown request failed\n");
- else
+ if (ret < 0) {
+ dev_err(sysmon->dev, "timeout waiting for shutdown response\n");
+ } else if (resp.resp.result) {
+ dev_err(sysmon->dev, "shutdown request rejected\n");
+ } else {
dev_dbg(sysmon->dev, "shutdown request completed\n");
-
- if (sysmon->shutdown_irq > 0) {
- ret = wait_for_completion_timeout(&sysmon->shutdown_comp,
- 10 * HZ);
- if (!ret) {
- ret = try_wait_for_completion(&sysmon->ind_comp);
- if (!ret)
- dev_err(sysmon->dev,
- "timeout waiting for shutdown ack\n");
- }
+ acked = true;
}
+
+ if (sysmon->shutdown_irq > 0)
+ return ssctl_request_shutdown_wait(sysmon);
+
+ return acked;
}
/**
* ssctl_send_event() - send notification of other remote's SSR event
* @sysmon: sysmon context
- * @name: other remote's name
+ * @event: sysmon event context
*/
-static void ssctl_send_event(struct qcom_sysmon *sysmon, const char *name)
+static void ssctl_send_event(struct qcom_sysmon *sysmon,
+ const struct sysmon_event *event)
{
struct ssctl_subsys_event_resp resp;
struct ssctl_subsys_event_req req;
@@ -346,9 +387,9 @@ static void ssctl_send_event(struct qcom_sysmon *sysmon, const char *name)
}
memset(&req, 0, sizeof(req));
- strlcpy(req.subsys_name, name, sizeof(req.subsys_name));
+ strlcpy(req.subsys_name, event->subsys_name, sizeof(req.subsys_name));
req.subsys_name_len = strlen(req.subsys_name);
- req.event = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN;
+ req.event = event->ssr_event;
req.evt_driven_valid = true;
req.evt_driven = SSCTL_SSR_EVENT_FORCED;
@@ -356,18 +397,18 @@ static void ssctl_send_event(struct qcom_sysmon *sysmon, const char *name)
SSCTL_SUBSYS_EVENT_REQ, 40,
ssctl_subsys_event_req_ei, &req);
if (ret < 0) {
- dev_err(sysmon->dev, "failed to send shutdown request\n");
+ dev_err(sysmon->dev, "failed to send subsystem event\n");
qmi_txn_cancel(&txn);
return;
}
ret = qmi_txn_wait(&txn, 5 * HZ);
if (ret < 0)
- dev_err(sysmon->dev, "failed receiving QMI response\n");
+ dev_err(sysmon->dev, "timeout waiting for subsystem event response\n");
else if (resp.resp.result)
- dev_err(sysmon->dev, "ssr event send failed\n");
+ dev_err(sysmon->dev, "subsystem event rejected\n");
else
- dev_dbg(sysmon->dev, "ssr event send completed\n");
+ dev_dbg(sysmon->dev, "subsystem event accepted\n");
}
/**
@@ -394,7 +435,7 @@ static int ssctl_new_server(struct qmi_handle *qmi, struct qmi_service *svc)
break;
default:
return -EINVAL;
- };
+ }
sysmon->ssctl_version = svc->version;
@@ -424,25 +465,105 @@ static const struct qmi_ops ssctl_ops = {
.del_server = ssctl_del_server,
};
+static int sysmon_prepare(struct rproc_subdev *subdev)
+{
+ struct qcom_sysmon *sysmon = container_of(subdev, struct qcom_sysmon,
+ subdev);
+ struct sysmon_event event = {
+ .subsys_name = sysmon->name,
+ .ssr_event = SSCTL_SSR_EVENT_BEFORE_POWERUP
+ };
+
+ mutex_lock(&sysmon->state_lock);
+ sysmon->state = SSCTL_SSR_EVENT_BEFORE_POWERUP;
+ blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
+ mutex_unlock(&sysmon->state_lock);
+
+ return 0;
+}
+
+/**
+ * sysmon_start() - start callback for the sysmon remoteproc subdevice
+ * @subdev: instance of the sysmon subdevice
+ *
+ * Inform all the listeners of sysmon notifications that the rproc associated
+ * to @subdev has booted up. The rproc that booted up also needs to know
+ * which rprocs are already up and running, so send start notifications
+ * on behalf of all the online rprocs.
+ */
static int sysmon_start(struct rproc_subdev *subdev)
{
+ struct qcom_sysmon *sysmon = container_of(subdev, struct qcom_sysmon,
+ subdev);
+ struct qcom_sysmon *target;
+ struct sysmon_event event = {
+ .subsys_name = sysmon->name,
+ .ssr_event = SSCTL_SSR_EVENT_AFTER_POWERUP
+ };
+
+ mutex_lock(&sysmon->state_lock);
+ sysmon->state = SSCTL_SSR_EVENT_AFTER_POWERUP;
+ blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
+ mutex_unlock(&sysmon->state_lock);
+
+ mutex_lock(&sysmon_lock);
+ list_for_each_entry(target, &sysmon_list, node) {
+ if (target == sysmon)
+ continue;
+
+ mutex_lock(&target->state_lock);
+ event.subsys_name = target->name;
+ event.ssr_event = target->state;
+
+ if (sysmon->ssctl_version == 2)
+ ssctl_send_event(sysmon, &event);
+ else if (sysmon->ept)
+ sysmon_send_event(sysmon, &event);
+ mutex_unlock(&target->state_lock);
+ }
+ mutex_unlock(&sysmon_lock);
+
return 0;
}
static void sysmon_stop(struct rproc_subdev *subdev, bool crashed)
{
struct qcom_sysmon *sysmon = container_of(subdev, struct qcom_sysmon, subdev);
+ struct sysmon_event event = {
+ .subsys_name = sysmon->name,
+ .ssr_event = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN
+ };
- blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)sysmon->name);
+ sysmon->shutdown_acked = false;
+
+ mutex_lock(&sysmon->state_lock);
+ sysmon->state = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN;
+ blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
+ mutex_unlock(&sysmon->state_lock);
/* Don't request graceful shutdown if we've crashed */
if (crashed)
return;
if (sysmon->ssctl_version)
- ssctl_request_shutdown(sysmon);
+ sysmon->shutdown_acked = ssctl_request_shutdown(sysmon);
else if (sysmon->ept)
- sysmon_request_shutdown(sysmon);
+ sysmon->shutdown_acked = sysmon_request_shutdown(sysmon);
+}
+
+static void sysmon_unprepare(struct rproc_subdev *subdev)
+{
+ struct qcom_sysmon *sysmon = container_of(subdev, struct qcom_sysmon,
+ subdev);
+ struct sysmon_event event = {
+ .subsys_name = sysmon->name,
+ .ssr_event = SSCTL_SSR_EVENT_AFTER_SHUTDOWN
+ };
+
+ mutex_lock(&sysmon->state_lock);
+ sysmon->state = SSCTL_SSR_EVENT_AFTER_SHUTDOWN;
+ blocking_notifier_call_chain(&sysmon_notifiers, 0, (void *)&event);
+ mutex_unlock(&sysmon->state_lock);
}
/**
@@ -455,20 +576,20 @@ static int sysmon_notify(struct notifier_block *nb, unsigned long event,
void *data)
{
struct qcom_sysmon *sysmon = container_of(nb, struct qcom_sysmon, nb);
- struct rproc *rproc = sysmon->rproc;
- const char *ssr_name = data;
+ struct sysmon_event *sysmon_event = data;
/* Skip non-running rprocs and the originating instance */
- if (rproc->state != RPROC_RUNNING || !strcmp(data, sysmon->name)) {
+ if (sysmon->state != SSCTL_SSR_EVENT_AFTER_POWERUP ||
+ !strcmp(sysmon_event->subsys_name, sysmon->name)) {
dev_dbg(sysmon->dev, "not notifying %s\n", sysmon->name);
return NOTIFY_DONE;
}
/* Only SSCTL version 2 supports SSR events */
if (sysmon->ssctl_version == 2)
- ssctl_send_event(sysmon, ssr_name);
+ ssctl_send_event(sysmon, sysmon_event);
else if (sysmon->ept)
- sysmon_send_event(sysmon, ssr_name);
+ sysmon_send_event(sysmon, sysmon_event);
return NOTIFY_DONE;
}
@@ -511,6 +632,7 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
init_completion(&sysmon->ind_comp);
init_completion(&sysmon->shutdown_comp);
mutex_init(&sysmon->lock);
+ mutex_init(&sysmon->state_lock);
sysmon->shutdown_irq = of_irq_get_byname(sysmon->dev->of_node,
"shutdown-ack");
@@ -543,8 +665,10 @@ struct qcom_sysmon *qcom_add_sysmon_subdev(struct rproc *rproc,
qmi_add_lookup(&sysmon->qmi, 43, 0, 0);
+ sysmon->subdev.prepare = sysmon_prepare;
sysmon->subdev.start = sysmon_start;
sysmon->subdev.stop = sysmon_stop;
+ sysmon->subdev.unprepare = sysmon_unprepare;
rproc_add_subdev(rproc, &sysmon->subdev);
@@ -583,6 +707,22 @@ void qcom_remove_sysmon_subdev(struct qcom_sysmon *sysmon)
EXPORT_SYMBOL_GPL(qcom_remove_sysmon_subdev);
/**
+ * qcom_sysmon_shutdown_acked() - query the success of the last shutdown
+ * @sysmon: sysmon context
+ *
+ * When sysmon is used to request a graceful shutdown of the remote processor
+ * this can be used by the remoteproc driver to query the success, in order to
+ * know if it should fall back to other means of requesting a shutdown.
+ *
+ * Return: boolean indicator of the success of the last shutdown request
+ */
+bool qcom_sysmon_shutdown_acked(struct qcom_sysmon *sysmon)
+{
+ return sysmon && sysmon->shutdown_acked;
+}
+EXPORT_SYMBOL_GPL(qcom_sysmon_shutdown_acked);
+
+/**
* sysmon_probe() - probe sys_mon channel
* @rpdev: rpmsg device handle
*
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index dc135754bb9c..9a223d394087 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -17,16 +17,18 @@
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
#include <linux/qcom_scm.h>
#include <linux/regulator/consumer.h>
#include <linux/remoteproc.h>
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
-#include <linux/rpmsg/qcom_smd.h>
#include "qcom_common.h"
#include "remoteproc_internal.h"
+#include "qcom_pil_info.h"
#include "qcom_wcnss.h"
#define WCNSS_CRASH_REASON_SMEM 422
@@ -50,12 +52,15 @@
#define WCNSS_PMU_XO_MODE_19p2 0
#define WCNSS_PMU_XO_MODE_48 3
+#define WCNSS_MAX_PDS 2
+
struct wcnss_data {
size_t pmu_offset;
size_t spare_offset;
+ const char *pd_names[WCNSS_MAX_PDS];
const struct wcnss_vreg_info *vregs;
- size_t num_vregs;
+ size_t num_vregs, num_pd_vregs;
};
struct qcom_wcnss {
@@ -79,6 +84,8 @@ struct qcom_wcnss {
struct mutex iris_lock;
struct qcom_iris *iris;
+ struct device *pds[WCNSS_MAX_PDS];
+ size_t num_pds;
struct regulator_bulk_data *vregs;
size_t num_vregs;
@@ -110,45 +117,44 @@ static const struct wcnss_data pronto_v1_data = {
.pmu_offset = 0x1004,
.spare_offset = 0x1088,
+ .pd_names = { "mx", "cx" },
.vregs = (struct wcnss_vreg_info[]) {
{ "vddmx", 950000, 1150000, 0 },
{ "vddcx", .super_turbo = true},
{ "vddpx", 1800000, 1800000, 0 },
},
- .num_vregs = 3,
+ .num_pd_vregs = 2,
+ .num_vregs = 1,
};
static const struct wcnss_data pronto_v2_data = {
.pmu_offset = 0x1004,
.spare_offset = 0x1088,
+ .pd_names = { "mx", "cx" },
.vregs = (struct wcnss_vreg_info[]) {
{ "vddmx", 1287500, 1287500, 0 },
{ "vddcx", .super_turbo = true },
{ "vddpx", 1800000, 1800000, 0 },
},
- .num_vregs = 3,
+ .num_pd_vregs = 2,
+ .num_vregs = 1,
};
-void qcom_wcnss_assign_iris(struct qcom_wcnss *wcnss,
- struct qcom_iris *iris,
- bool use_48mhz_xo)
-{
- mutex_lock(&wcnss->iris_lock);
-
- wcnss->iris = iris;
- wcnss->use_48mhz_xo = use_48mhz_xo;
-
- mutex_unlock(&wcnss->iris_lock);
-}
-
static int wcnss_load(struct rproc *rproc, const struct firmware *fw)
{
struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
+ int ret;
- return qcom_mdt_load(wcnss->dev, fw, rproc->firmware, WCNSS_PAS_ID,
- wcnss->mem_region, wcnss->mem_phys,
- wcnss->mem_size, &wcnss->mem_reloc);
+ ret = qcom_mdt_load(wcnss->dev, fw, rproc->firmware, WCNSS_PAS_ID,
+ wcnss->mem_region, wcnss->mem_phys,
+ wcnss->mem_size, &wcnss->mem_reloc);
+ if (ret)
+ return ret;
+
+ qcom_pil_info_store("wcnss", wcnss->mem_phys, wcnss->mem_size);
+
+ return 0;
}
static void wcnss_indicate_nv_download(struct qcom_wcnss *wcnss)
@@ -211,7 +217,7 @@ static void wcnss_configure_iris(struct qcom_wcnss *wcnss)
static int wcnss_start(struct rproc *rproc)
{
struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
- int ret;
+ int ret, i;
mutex_lock(&wcnss->iris_lock);
if (!wcnss->iris) {
@@ -220,9 +226,18 @@ static int wcnss_start(struct rproc *rproc)
goto release_iris_lock;
}
+ for (i = 0; i < wcnss->num_pds; i++) {
+ dev_pm_genpd_set_performance_state(wcnss->pds[i], INT_MAX);
+ ret = pm_runtime_get_sync(wcnss->pds[i]);
+ if (ret < 0) {
+ pm_runtime_put_noidle(wcnss->pds[i]);
+ goto disable_pds;
+ }
+ }
+
ret = regulator_bulk_enable(wcnss->num_vregs, wcnss->vregs);
if (ret)
- goto release_iris_lock;
+ goto disable_pds;
ret = qcom_iris_enable(wcnss->iris);
if (ret)
@@ -254,6 +269,11 @@ disable_iris:
qcom_iris_disable(wcnss->iris);
disable_regulators:
regulator_bulk_disable(wcnss->num_vregs, wcnss->vregs);
+disable_pds:
+ for (i--; i >= 0; i--) {
+ pm_runtime_put(wcnss->pds[i]);
+ dev_pm_genpd_set_performance_state(wcnss->pds[i], 0);
+ }
release_iris_lock:
mutex_unlock(&wcnss->iris_lock);
@@ -287,7 +307,7 @@ static int wcnss_stop(struct rproc *rproc)
return ret;
}
-static void *wcnss_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *wcnss_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv;
int offset;
@@ -363,14 +383,54 @@ static irqreturn_t wcnss_stop_ack_interrupt(int irq, void *dev)
return IRQ_HANDLED;
}
+static int wcnss_init_pds(struct qcom_wcnss *wcnss,
+ const char * const pd_names[WCNSS_MAX_PDS])
+{
+ int i, ret;
+
+ for (i = 0; i < WCNSS_MAX_PDS; i++) {
+ if (!pd_names[i])
+ break;
+
+ wcnss->pds[i] = dev_pm_domain_attach_by_name(wcnss->dev, pd_names[i]);
+ if (IS_ERR_OR_NULL(wcnss->pds[i])) {
+ ret = PTR_ERR(wcnss->pds[i]) ? : -ENODATA;
+ for (i--; i >= 0; i--)
+ dev_pm_domain_detach(wcnss->pds[i], false);
+ return ret;
+ }
+ }
+ wcnss->num_pds = i;
+
+ return 0;
+}
+
+static void wcnss_release_pds(struct qcom_wcnss *wcnss)
+{
+ int i;
+
+ for (i = 0; i < wcnss->num_pds; i++)
+ dev_pm_domain_detach(wcnss->pds[i], false);
+}
+
static int wcnss_init_regulators(struct qcom_wcnss *wcnss,
const struct wcnss_vreg_info *info,
- int num_vregs)
+ int num_vregs, int num_pd_vregs)
{
struct regulator_bulk_data *bulk;
int ret;
int i;
+ /*
+	 * If attaching the power domains succeeded we can skip requesting
+ * the regulators for the power domains. For old device trees we need to
+ * reserve extra space to manage them through the regulator interface.
+ */
+ if (wcnss->num_pds)
+ info += num_pd_vregs;
+ else
+ num_vregs += num_pd_vregs;
+
bulk = devm_kcalloc(wcnss->dev,
num_vregs, sizeof(struct regulator_bulk_data),
GFP_KERNEL);
@@ -440,6 +500,7 @@ static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
}
ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
if (ret)
return ret;
@@ -457,6 +518,7 @@ static int wcnss_alloc_memory_region(struct qcom_wcnss *wcnss)
static int wcnss_probe(struct platform_device *pdev)
{
+ const char *fw_name = WCNSS_FIRMWARE_NAME;
const struct wcnss_data *data;
struct qcom_wcnss *wcnss;
struct resource *res;
@@ -474,12 +536,18 @@ static int wcnss_probe(struct platform_device *pdev)
return -ENXIO;
}
+ ret = of_property_read_string(pdev->dev.of_node, "firmware-name",
+ &fw_name);
+ if (ret < 0 && ret != -EINVAL)
+ return ret;
+
rproc = rproc_alloc(&pdev->dev, pdev->name, &wcnss_ops,
- WCNSS_FIRMWARE_NAME, sizeof(*wcnss));
+ fw_name, sizeof(*wcnss));
if (!rproc) {
dev_err(&pdev->dev, "unable to allocate remoteproc\n");
return -ENOMEM;
}
+ rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
wcnss = (struct qcom_wcnss *)rproc->priv;
wcnss->dev = &pdev->dev;
@@ -496,7 +564,7 @@ static int wcnss_probe(struct platform_device *pdev)
if (IS_ERR(mmio)) {
ret = PTR_ERR(mmio);
goto free_rproc;
- };
+ }
ret = wcnss_alloc_memory_region(wcnss);
if (ret)
@@ -505,41 +573,50 @@ static int wcnss_probe(struct platform_device *pdev)
wcnss->pmu_cfg = mmio + data->pmu_offset;
wcnss->spare_out = mmio + data->spare_offset;
- ret = wcnss_init_regulators(wcnss, data->vregs, data->num_vregs);
- if (ret)
+ /*
+ * We might need to fallback to regulators instead of power domains
+ * for old device trees. Don't report an error in that case.
+ */
+ ret = wcnss_init_pds(wcnss, data->pd_names);
+ if (ret && (ret != -ENODATA || !data->num_pd_vregs))
goto free_rproc;
+ ret = wcnss_init_regulators(wcnss, data->vregs, data->num_vregs,
+ data->num_pd_vregs);
+ if (ret)
+ goto detach_pds;
+
ret = wcnss_request_irq(wcnss, pdev, "wdog", false, wcnss_wdog_interrupt);
if (ret < 0)
- goto free_rproc;
+ goto detach_pds;
wcnss->wdog_irq = ret;
ret = wcnss_request_irq(wcnss, pdev, "fatal", false, wcnss_fatal_interrupt);
if (ret < 0)
- goto free_rproc;
+ goto detach_pds;
wcnss->fatal_irq = ret;
ret = wcnss_request_irq(wcnss, pdev, "ready", true, wcnss_ready_interrupt);
if (ret < 0)
- goto free_rproc;
+ goto detach_pds;
wcnss->ready_irq = ret;
ret = wcnss_request_irq(wcnss, pdev, "handover", true, wcnss_handover_interrupt);
if (ret < 0)
- goto free_rproc;
+ goto detach_pds;
wcnss->handover_irq = ret;
ret = wcnss_request_irq(wcnss, pdev, "stop-ack", true, wcnss_stop_ack_interrupt);
if (ret < 0)
- goto free_rproc;
+ goto detach_pds;
wcnss->stop_ack_irq = ret;
if (wcnss->stop_ack_irq) {
- wcnss->state = qcom_smem_state_get(&pdev->dev, "stop",
- &wcnss->stop_bit);
+ wcnss->state = devm_qcom_smem_state_get(&pdev->dev, "stop",
+ &wcnss->stop_bit);
if (IS_ERR(wcnss->state)) {
ret = PTR_ERR(wcnss->state);
- goto free_rproc;
+ goto detach_pds;
}
}
@@ -547,15 +624,25 @@ static int wcnss_probe(struct platform_device *pdev)
wcnss->sysmon = qcom_add_sysmon_subdev(rproc, "wcnss", WCNSS_SSCTL_ID);
if (IS_ERR(wcnss->sysmon)) {
ret = PTR_ERR(wcnss->sysmon);
- goto free_rproc;
+ goto detach_pds;
+ }
+
+ wcnss->iris = qcom_iris_probe(&pdev->dev, &wcnss->use_48mhz_xo);
+ if (IS_ERR(wcnss->iris)) {
+ ret = PTR_ERR(wcnss->iris);
+ goto detach_pds;
}
ret = rproc_add(rproc);
if (ret)
- goto free_rproc;
+ goto remove_iris;
- return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ return 0;
+remove_iris:
+ qcom_iris_remove(wcnss->iris);
+detach_pds:
+ wcnss_release_pds(wcnss);
free_rproc:
rproc_free(rproc);
@@ -566,13 +653,13 @@ static int wcnss_remove(struct platform_device *pdev)
{
struct qcom_wcnss *wcnss = platform_get_drvdata(pdev);
- of_platform_depopulate(&pdev->dev);
+ qcom_iris_remove(wcnss->iris);
- qcom_smem_state_put(wcnss->state);
rproc_del(wcnss->rproc);
qcom_remove_sysmon_subdev(wcnss->sysmon);
qcom_remove_smd_subdev(wcnss->rproc, &wcnss->smd_subdev);
+ wcnss_release_pds(wcnss);
rproc_free(wcnss->rproc);
return 0;
@@ -595,28 +682,7 @@ static struct platform_driver wcnss_driver = {
},
};
-static int __init wcnss_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&wcnss_driver);
- if (ret)
- return ret;
-
- ret = platform_driver_register(&qcom_iris_driver);
- if (ret)
- platform_driver_unregister(&wcnss_driver);
-
- return ret;
-}
-module_init(wcnss_init);
-
-static void __exit wcnss_exit(void)
-{
- platform_driver_unregister(&qcom_iris_driver);
- platform_driver_unregister(&wcnss_driver);
-}
-module_exit(wcnss_exit);
+module_platform_driver(wcnss_driver);
MODULE_DESCRIPTION("Qualcomm Peripheral Image Loader for Wireless Subsystem");
MODULE_LICENSE("GPL v2");
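With the separate iris platform driver gone, the hand-rolled init/exit pair above collapses into module_platform_driver(), which expands to the equivalent of:

static int __init wcnss_driver_init(void)
{
	return platform_driver_register(&wcnss_driver);
}
module_init(wcnss_driver_init);

static void __exit wcnss_driver_exit(void)
{
	platform_driver_unregister(&wcnss_driver);
}
module_exit(wcnss_driver_exit);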
diff --git a/drivers/remoteproc/qcom_wcnss.h b/drivers/remoteproc/qcom_wcnss.h
index 62c8682d0a92..6d01ee6afa7f 100644
--- a/drivers/remoteproc/qcom_wcnss.h
+++ b/drivers/remoteproc/qcom_wcnss.h
@@ -17,9 +17,9 @@ struct wcnss_vreg_info {
bool super_turbo;
};
+struct qcom_iris *qcom_iris_probe(struct device *parent, bool *use_48mhz_xo);
+void qcom_iris_remove(struct qcom_iris *iris);
int qcom_iris_enable(struct qcom_iris *iris);
void qcom_iris_disable(struct qcom_iris *iris);
-void qcom_wcnss_assign_iris(struct qcom_wcnss *wcnss, struct qcom_iris *iris, bool use_48mhz_xo);
-
#endif
diff --git a/drivers/remoteproc/qcom_wcnss_iris.c b/drivers/remoteproc/qcom_wcnss_iris.c
index 0e0ae1e764ea..09720ddddc85 100644
--- a/drivers/remoteproc/qcom_wcnss_iris.c
+++ b/drivers/remoteproc/qcom_wcnss_iris.c
@@ -17,7 +17,7 @@
#include "qcom_wcnss.h"
struct qcom_iris {
- struct device *dev;
+ struct device dev;
struct clk *xo_clk;
@@ -75,7 +75,7 @@ int qcom_iris_enable(struct qcom_iris *iris)
ret = clk_prepare_enable(iris->xo_clk);
if (ret) {
- dev_err(iris->dev, "failed to enable xo clk\n");
+ dev_err(&iris->dev, "failed to enable xo clk\n");
goto disable_regulators;
}
@@ -93,43 +93,90 @@ void qcom_iris_disable(struct qcom_iris *iris)
regulator_bulk_disable(iris->num_vregs, iris->vregs);
}
-static int qcom_iris_probe(struct platform_device *pdev)
+static const struct of_device_id iris_of_match[] = {
+ { .compatible = "qcom,wcn3620", .data = &wcn3620_data },
+ { .compatible = "qcom,wcn3660", .data = &wcn3660_data },
+ { .compatible = "qcom,wcn3660b", .data = &wcn3680_data },
+ { .compatible = "qcom,wcn3680", .data = &wcn3680_data },
+ {}
+};
+
+static void qcom_iris_release(struct device *dev)
{
+ struct qcom_iris *iris = container_of(dev, struct qcom_iris, dev);
+
+ of_node_put(iris->dev.of_node);
+ kfree(iris);
+}
+
+struct qcom_iris *qcom_iris_probe(struct device *parent, bool *use_48mhz_xo)
+{
+ const struct of_device_id *match;
const struct iris_data *data;
- struct qcom_wcnss *wcnss;
+ struct device_node *of_node;
struct qcom_iris *iris;
int ret;
int i;
- iris = devm_kzalloc(&pdev->dev, sizeof(struct qcom_iris), GFP_KERNEL);
- if (!iris)
- return -ENOMEM;
+ of_node = of_get_child_by_name(parent->of_node, "iris");
+ if (!of_node) {
+ dev_err(parent, "No child node \"iris\" found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ iris = kzalloc(sizeof(*iris), GFP_KERNEL);
+ if (!iris) {
+ of_node_put(of_node);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ device_initialize(&iris->dev);
+ iris->dev.parent = parent;
+ iris->dev.release = qcom_iris_release;
+ iris->dev.of_node = of_node;
+
+ dev_set_name(&iris->dev, "%s.iris", dev_name(parent));
+
+ ret = device_add(&iris->dev);
+ if (ret) {
+ put_device(&iris->dev);
+ return ERR_PTR(ret);
+ }
+
+ match = of_match_device(iris_of_match, &iris->dev);
+ if (!match) {
+ dev_err(&iris->dev, "no matching compatible for iris\n");
+ ret = -EINVAL;
+ goto err_device_del;
+ }
- data = of_device_get_match_data(&pdev->dev);
- wcnss = dev_get_drvdata(pdev->dev.parent);
+ data = match->data;
- iris->xo_clk = devm_clk_get(&pdev->dev, "xo");
+ iris->xo_clk = devm_clk_get(&iris->dev, "xo");
if (IS_ERR(iris->xo_clk)) {
- if (PTR_ERR(iris->xo_clk) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to acquire xo clk\n");
- return PTR_ERR(iris->xo_clk);
+ ret = PTR_ERR(iris->xo_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&iris->dev, "failed to acquire xo clk\n");
+ goto err_device_del;
}
iris->num_vregs = data->num_vregs;
- iris->vregs = devm_kcalloc(&pdev->dev,
+ iris->vregs = devm_kcalloc(&iris->dev,
iris->num_vregs,
sizeof(struct regulator_bulk_data),
GFP_KERNEL);
- if (!iris->vregs)
- return -ENOMEM;
+ if (!iris->vregs) {
+ ret = -ENOMEM;
+ goto err_device_del;
+ }
for (i = 0; i < iris->num_vregs; i++)
iris->vregs[i].supply = data->vregs[i].name;
- ret = devm_regulator_bulk_get(&pdev->dev, iris->num_vregs, iris->vregs);
+ ret = devm_regulator_bulk_get(&iris->dev, iris->num_vregs, iris->vregs);
if (ret) {
- dev_err(&pdev->dev, "failed to get regulators\n");
- return ret;
+ dev_err(&iris->dev, "failed to get regulators\n");
+ goto err_device_del;
}
for (i = 0; i < iris->num_vregs; i++) {
@@ -143,33 +190,17 @@ static int qcom_iris_probe(struct platform_device *pdev)
data->vregs[i].load_uA);
}
- qcom_wcnss_assign_iris(wcnss, iris, data->use_48mhz_xo);
-
- return 0;
-}
+ *use_48mhz_xo = data->use_48mhz_xo;
-static int qcom_iris_remove(struct platform_device *pdev)
-{
- struct qcom_wcnss *wcnss = dev_get_drvdata(pdev->dev.parent);
+ return iris;
- qcom_wcnss_assign_iris(wcnss, NULL, false);
+err_device_del:
+ device_del(&iris->dev);
- return 0;
+ return ERR_PTR(ret);
}
-static const struct of_device_id iris_of_match[] = {
- { .compatible = "qcom,wcn3620", .data = &wcn3620_data },
- { .compatible = "qcom,wcn3660", .data = &wcn3660_data },
- { .compatible = "qcom,wcn3680", .data = &wcn3680_data },
- {}
-};
-MODULE_DEVICE_TABLE(of, iris_of_match);
-
-struct platform_driver qcom_iris_driver = {
- .probe = qcom_iris_probe,
- .remove = qcom_iris_remove,
- .driver = {
- .name = "qcom-iris",
- .of_match_table = iris_of_match,
- },
-};
+void qcom_iris_remove(struct qcom_iris *iris)
+{
+ device_del(&iris->dev);
+}
diff --git a/drivers/remoteproc/rcar_rproc.c b/drivers/remoteproc/rcar_rproc.c
new file mode 100644
index 000000000000..aa86154109c7
--- /dev/null
+++ b/drivers/remoteproc/rcar_rproc.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) IoT.bzh 2021
+ */
+
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/pm_runtime.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include "remoteproc_internal.h"
+
+struct rcar_rproc {
+ struct reset_control *rst;
+};
+
+static int rcar_rproc_mem_alloc(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ struct device *dev = &rproc->dev;
+ void *va;
+
+ dev_dbg(dev, "map memory: %pa+%zx\n", &mem->dma, mem->len);
+ va = ioremap_wc(mem->dma, mem->len);
+ if (!va) {
+ dev_err(dev, "Unable to map memory region: %pa+%zx\n",
+ &mem->dma, mem->len);
+ return -ENOMEM;
+ }
+
+ /* Update memory entry va */
+ mem->va = va;
+
+ return 0;
+}
+
+static int rcar_rproc_mem_release(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ dev_dbg(&rproc->dev, "unmap memory: %pa\n", &mem->dma);
+ iounmap(mem->va);
+
+ return 0;
+}
+
+static int rcar_rproc_prepare(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev.parent;
+ struct device_node *np = dev->of_node;
+ struct of_phandle_iterator it;
+ struct rproc_mem_entry *mem;
+ struct reserved_mem *rmem;
+ u32 da;
+
+ /* Register associated reserved memory regions */
+ of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
+ while (of_phandle_iterator_next(&it) == 0) {
+
+ rmem = of_reserved_mem_lookup(it.node);
+ if (!rmem) {
+ dev_err(&rproc->dev,
+ "unable to acquire memory-region\n");
+ return -EINVAL;
+ }
+
+ if (rmem->base > U32_MAX)
+ return -EINVAL;
+
+ /* No need to translate pa to da, R-Car uses the same map */
+ da = rmem->base;
+ mem = rproc_mem_entry_init(dev, NULL,
+ rmem->base,
+ rmem->size, da,
+ rcar_rproc_mem_alloc,
+ rcar_rproc_mem_release,
+ it.node->name);
+
+ if (!mem)
+ return -ENOMEM;
+
+ rproc_add_carveout(rproc, mem);
+ }
+
+ return 0;
+}
+
+static int rcar_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ int ret;
+
+ ret = rproc_elf_load_rsc_table(rproc, fw);
+ if (ret)
+ dev_info(&rproc->dev, "No resource table in elf\n");
+
+ return 0;
+}
+
+static int rcar_rproc_start(struct rproc *rproc)
+{
+ struct rcar_rproc *priv = rproc->priv;
+ int err;
+
+ if (!rproc->bootaddr)
+ return -EINVAL;
+
+ err = rcar_rst_set_rproc_boot_addr(rproc->bootaddr);
+ if (err) {
+ dev_err(&rproc->dev, "failed to set rproc boot addr\n");
+ return err;
+ }
+
+ err = reset_control_deassert(priv->rst);
+ if (err)
+ dev_err(&rproc->dev, "failed to deassert reset\n");
+
+ return err;
+}
+
+static int rcar_rproc_stop(struct rproc *rproc)
+{
+ struct rcar_rproc *priv = rproc->priv;
+ int err;
+
+ err = reset_control_assert(priv->rst);
+ if (err)
+ dev_err(&rproc->dev, "failed to assert reset\n");
+
+ return err;
+}
+
+static struct rproc_ops rcar_rproc_ops = {
+ .prepare = rcar_rproc_prepare,
+ .start = rcar_rproc_start,
+ .stop = rcar_rproc_stop,
+ .load = rproc_elf_load_segments,
+ .parse_fw = rcar_rproc_parse_fw,
+ .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
+ .sanity_check = rproc_elf_sanity_check,
+ .get_boot_addr = rproc_elf_get_boot_addr,
+
+};
+
+static int rcar_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct rcar_rproc *priv;
+ struct rproc *rproc;
+ int ret;
+
+ rproc = devm_rproc_alloc(dev, np->name, &rcar_rproc_ops,
+ NULL, sizeof(*priv));
+ if (!rproc)
+ return -ENOMEM;
+
+ priv = rproc->priv;
+
+ priv->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ ret = PTR_ERR(priv->rst);
+ dev_err_probe(dev, ret, "failed to acquire rproc reset\n");
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ dev_err(dev, "failed to power up\n");
+ goto pm_disable;
+ }
+
+ dev_set_drvdata(dev, rproc);
+
+ /* Manually start the rproc */
+ rproc->auto_boot = false;
+
+ ret = devm_rproc_add(dev, rproc);
+ if (ret) {
+ dev_err(dev, "rproc_add failed\n");
+ goto pm_disable;
+ }
+
+ return 0;
+
+pm_disable:
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+
+static int rcar_rproc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+static const struct of_device_id rcar_rproc_of_match[] = {
+ { .compatible = "renesas,rcar-cr7" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, rcar_rproc_of_match);
+
+static struct platform_driver rcar_rproc_driver = {
+ .probe = rcar_rproc_probe,
+ .remove = rcar_rproc_remove,
+ .driver = {
+ .name = "rcar-rproc",
+ .of_match_table = rcar_rproc_of_match,
+ },
+};
+
+module_platform_driver(rcar_rproc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Renesas R-Car Gen3 remote processor control driver");
+MODULE_AUTHOR("Julien Massot <julien.massot@iot.bzh>");
diff --git a/drivers/remoteproc/remoteproc_cdev.c b/drivers/remoteproc/remoteproc_cdev.c
new file mode 100644
index 000000000000..687f205fd70a
--- /dev/null
+++ b/drivers/remoteproc/remoteproc_cdev.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Character device interface driver for Remoteproc framework.
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/cdev.h>
+#include <linux/compat.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/remoteproc.h>
+#include <linux/uaccess.h>
+#include <uapi/linux/remoteproc_cdev.h>
+
+#include "remoteproc_internal.h"
+
+#define NUM_RPROC_DEVICES 64
+static dev_t rproc_major;
+
+static ssize_t rproc_cdev_write(struct file *filp, const char __user *buf, size_t len, loff_t *pos)
+{
+ struct rproc *rproc = container_of(filp->f_inode->i_cdev, struct rproc, cdev);
+ int ret = 0;
+ char cmd[10];
+
+ if (!len || len > sizeof(cmd))
+ return -EINVAL;
+
+ ret = copy_from_user(cmd, buf, len);
+ if (ret)
+ return -EFAULT;
+
+ if (!strncmp(cmd, "start", len)) {
+ ret = rproc_boot(rproc);
+ } else if (!strncmp(cmd, "stop", len)) {
+ ret = rproc_shutdown(rproc);
+ } else if (!strncmp(cmd, "detach", len)) {
+ ret = rproc_detach(rproc);
+ } else {
+ dev_err(&rproc->dev, "Unrecognized option\n");
+ ret = -EINVAL;
+ }
+
+ return ret ? ret : len;
+}
+
+static long rproc_device_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+ struct rproc *rproc = container_of(filp->f_inode->i_cdev, struct rproc, cdev);
+ void __user *argp = (void __user *)arg;
+ s32 param;
+
+ switch (ioctl) {
+ case RPROC_SET_SHUTDOWN_ON_RELEASE:
+ if (copy_from_user(&param, argp, sizeof(s32)))
+ return -EFAULT;
+
+ rproc->cdev_put_on_release = !!param;
+ break;
+ case RPROC_GET_SHUTDOWN_ON_RELEASE:
+ param = (s32)rproc->cdev_put_on_release;
+ if (copy_to_user(argp, &param, sizeof(s32)))
+ return -EFAULT;
+
+ break;
+ default:
+ dev_err(&rproc->dev, "Unsupported ioctl\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rproc_cdev_release(struct inode *inode, struct file *filp)
+{
+ struct rproc *rproc = container_of(inode->i_cdev, struct rproc, cdev);
+ int ret = 0;
+
+ if (!rproc->cdev_put_on_release)
+ return 0;
+
+ if (rproc->state == RPROC_RUNNING)
+ rproc_shutdown(rproc);
+ else if (rproc->state == RPROC_ATTACHED)
+ ret = rproc_detach(rproc);
+
+ return ret;
+}
+
+static const struct file_operations rproc_fops = {
+ .write = rproc_cdev_write,
+ .unlocked_ioctl = rproc_device_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .release = rproc_cdev_release,
+};
+
+int rproc_char_device_add(struct rproc *rproc)
+{
+ int ret;
+
+ cdev_init(&rproc->cdev, &rproc_fops);
+ rproc->cdev.owner = THIS_MODULE;
+
+ rproc->dev.devt = MKDEV(MAJOR(rproc_major), rproc->index);
+ cdev_set_parent(&rproc->cdev, &rproc->dev.kobj);
+ ret = cdev_add(&rproc->cdev, rproc->dev.devt, 1);
+ if (ret < 0)
+ dev_err(&rproc->dev, "Failed to add char dev for %s\n", rproc->name);
+
+ return ret;
+}
+
+void rproc_char_device_remove(struct rproc *rproc)
+{
+ cdev_del(&rproc->cdev);
+}
+
+void __init rproc_init_cdev(void)
+{
+ int ret;
+
+ ret = alloc_chrdev_region(&rproc_major, 0, NUM_RPROC_DEVICES, "remoteproc");
+ if (ret < 0)
+ pr_err("Failed to alloc rproc_cdev region, err %d\n", ret);
+}
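
As a userspace-side illustration (a sketch, not part of the patch; the node name /dev/remoteproc0 is an assumption that depends on udev policy), the write and ioctl interfaces above could be exercised like this:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/remoteproc_cdev.h>

    int main(void)
    {
            int fd = open("/dev/remoteproc0", O_RDWR); /* node name is an assumption */
            int on = 1;

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* Ask the kernel to shut the rproc down when this fd is released */
            if (ioctl(fd, RPROC_SET_SHUTDOWN_ON_RELEASE, &on) < 0)
                    perror("ioctl");

            /* Boot the remote processor; "stop" and "detach" work the same way */
            if (write(fd, "start", strlen("start")) < 0)
                    perror("write");

            close(fd); /* triggers rproc_cdev_release(), shutting the rproc down */
            return 0;
    }
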
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 307df98347ba..02a04ab34a23 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -16,16 +16,20 @@
#define pr_fmt(fmt) "%s: " fmt, __func__
+#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/panic_notifier.h>
#include <linux/slab.h>
#include <linux/mutex.h>
+#include <linux/dma-map-ops.h>
#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h> /* XXX: pokes into bus_dma_range */
#include <linux/firmware.h>
#include <linux/string.h>
#include <linux/debugfs.h>
-#include <linux/devcoredump.h>
+#include <linux/rculist.h>
#include <linux/remoteproc.h>
#include <linux/iommu.h>
#include <linux/idr.h>
@@ -43,6 +47,7 @@
static DEFINE_MUTEX(rproc_list_mutex);
static LIST_HEAD(rproc_list);
+static struct notifier_block rproc_panic_nb;
typedef int (*rproc_handle_resource_t)(struct rproc *rproc,
void *, int offset, int avail);
@@ -161,6 +166,7 @@ EXPORT_SYMBOL(rproc_va_to_pa);
* @rproc: handle of a remote processor
* @da: remoteproc device address to translate
* @len: length of the memory region @da is pointing to
+ * @is_iomem: optional pointer filled in to indicate if @da is iomapped memory
*
* Some remote processors will ask us to allocate them physically contiguous
* memory regions (which we call "carveouts"), and map them to specific
@@ -178,20 +184,20 @@ EXPORT_SYMBOL(rproc_va_to_pa);
* translations on the internal remoteproc memory regions through a platform
* implementation specific da_to_va ops, if present.
*
- * The function returns a valid kernel address on success or NULL on failure.
- *
* Note: phys_to_virt(iommu_iova_to_phys(rproc->domain, da)) will work too,
* but only on kernel direct mapped RAM memory. Instead, we're just using
* here the output of the DMA API for the carveouts, which should be more
* correct.
+ *
+ * Return: a valid kernel address on success or NULL on failure
*/
-void *rproc_da_to_va(struct rproc *rproc, u64 da, int len)
+void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct rproc_mem_entry *carveout;
void *ptr = NULL;
if (rproc->ops->da_to_va) {
- ptr = rproc->ops->da_to_va(rproc, da, len);
+ ptr = rproc->ops->da_to_va(rproc, da, len, is_iomem);
if (ptr)
goto out;
}
@@ -213,6 +219,9 @@ void *rproc_da_to_va(struct rproc *rproc, u64 da, int len)
ptr = carveout->va + offset;
+ if (is_iomem)
+ *is_iomem = carveout->is_iomem;
+
break;
}
@@ -224,7 +233,8 @@ EXPORT_SYMBOL(rproc_da_to_va);
/**
* rproc_find_carveout_by_name() - lookup the carveout region by a name
* @rproc: handle of a remote processor
- * @name,..: carveout name to find (standard printf format)
+ * @name: carveout name to find (format string)
+ * @...: optional parameters matching @name string
*
* A platform driver can register pre-allocated carveouts
* (physically contiguous memory regions) before rproc firmware loading and
@@ -238,6 +248,7 @@ EXPORT_SYMBOL(rproc_da_to_va);
*
* Return: a valid pointer on carveout entry on success or NULL on failure.
*/
+__printf(2, 3)
struct rproc_mem_entry *
rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...)
{
@@ -318,8 +329,9 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
struct device *dev = &rproc->dev;
struct rproc_vring *rvring = &rvdev->vring[i];
struct fw_rsc_vdev *rsc;
- int ret, size, notifyid;
+ int ret, notifyid;
struct rproc_mem_entry *mem;
+ size_t size;
/* actual size of vring (in bytes) */
size = PAGE_ALIGN(vring_size(rvring->len, rvring->align));
@@ -404,10 +416,22 @@ void rproc_free_vring(struct rproc_vring *rvring)
idr_remove(&rproc->notifyids, rvring->notifyid);
- /* reset resource entry info */
- rsc = (void *)rproc->table_ptr + rvring->rvdev->rsc_offset;
- rsc->vring[idx].da = 0;
- rsc->vring[idx].notifyid = -1;
+ /*
+ * At this point rproc_stop() has been called and the installed resource
+ * table in the remote processor memory may no longer be accessible. As
+ * such and as per rproc_stop(), rproc->table_ptr points to the cached
+ * resource table (rproc->cached_table). The cached resource table is
+ * only available when a remote processor has been booted by the
+ * remoteproc core, otherwise it is NULL.
+ *
+ * Based on the above, reset the virtio device section in the cached
+ * resource table only if there is one to work with.
+ */
+ if (rproc->table_ptr) {
+ rsc = (void *)rproc->table_ptr + rvring->rvdev->rsc_offset;
+ rsc->vring[idx].da = 0;
+ rsc->vring[idx].notifyid = -1;
+ }
}
static int rproc_vdev_do_start(struct rproc_subdev *subdev)
@@ -441,10 +465,30 @@ static void rproc_rvdev_release(struct device *dev)
kfree(rvdev);
}
+static int copy_dma_range_map(struct device *to, struct device *from)
+{
+ const struct bus_dma_region *map = from->dma_range_map, *new_map, *r;
+ int num_ranges = 0;
+
+ if (!map)
+ return 0;
+
+ for (r = map; r->size; r++)
+ num_ranges++;
+
+ new_map = kmemdup(map, array_size(num_ranges + 1, sizeof(*map)),
+ GFP_KERNEL);
+ if (!new_map)
+ return -ENOMEM;
+ to->dma_range_map = new_map;
+ return 0;
+}
+
/**
* rproc_handle_vdev() - handle a vdev fw resource
* @rproc: the remote processor
- * @rsc: the vring resource descriptor
+ * @ptr: the vring resource descriptor
+ * @offset: offset of the resource entry
* @avail: size of available data (for sanity checking the image)
*
* This resource entry requests the host to statically register a virtio
@@ -466,19 +510,20 @@ static void rproc_rvdev_release(struct device *dev)
* use RSC_DEVMEM resource entries to map their required @da to the physical
* address of their base CMA region (ouch, hacky!).
*
- * Returns 0 on success, or an appropriate error code otherwise
+ * Return: 0 on success, or an appropriate error code otherwise
*/
-static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
+static int rproc_handle_vdev(struct rproc *rproc, void *ptr,
int offset, int avail)
{
+ struct fw_rsc_vdev *rsc = ptr;
struct device *dev = &rproc->dev;
struct rproc_vdev *rvdev;
int i, ret;
char name[16];
/* make sure resource isn't truncated */
- if (sizeof(*rsc) + rsc->num_of_vrings * sizeof(struct fw_rsc_vdev_vring)
- + rsc->config_len > avail) {
+ if (struct_size(rsc, vring, rsc->num_of_vrings) + rsc->config_len >
+ avail) {
dev_err(dev, "vdev rsc is truncated\n");
return -EINVAL;
}
@@ -510,8 +555,7 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
/* Initialise vdev subdevice */
snprintf(name, sizeof(name), "vdev%dbuffer", rvdev->index);
- rvdev->dev.parent = rproc->dev.parent;
- rvdev->dev.dma_pfn_offset = rproc->dev.parent->dma_pfn_offset;
+ rvdev->dev.parent = &rproc->dev;
rvdev->dev.release = rproc_rvdev_release;
dev_set_name(&rvdev->dev, "%s#%s", dev_name(rvdev->dev.parent), name);
dev_set_drvdata(&rvdev->dev, rvdev);
@@ -521,6 +565,11 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
put_device(&rvdev->dev);
return ret;
}
+
+ ret = copy_dma_range_map(&rvdev->dev, rproc->dev.parent);
+ if (ret)
+ goto free_rvdev;
+
/* Make device dma capable by inheriting from parent's capabilities */
set_dma_ops(&rvdev->dev, get_dma_ops(rproc->dev.parent));
@@ -528,8 +577,8 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
dma_get_mask(rproc->dev.parent));
if (ret) {
dev_warn(dev,
- "Failed to set DMA mask %llx. Trying to continue... %x\n",
- dma_get_mask(rproc->dev.parent), ret);
+ "Failed to set DMA mask %llx. Trying to continue... (%pe)\n",
+ dma_get_mask(rproc->dev.parent), ERR_PTR(ret));
}
/* parse the vrings */
@@ -586,7 +635,8 @@ void rproc_vdev_release(struct kref *ref)
/**
* rproc_handle_trace() - handle a shared trace buffer resource
* @rproc: the remote processor
- * @rsc: the trace resource descriptor
+ * @ptr: the trace resource descriptor
+ * @offset: offset of the resource entry
* @avail: size of available data (for sanity checking the image)
*
* In case the remote processor dumps trace logs into memory,
@@ -597,11 +647,12 @@ void rproc_vdev_release(struct kref *ref)
* support dynamically allocating this address using the generic
* DMA API (but currently there isn't a use case for that).
*
- * Returns 0 on success, or an appropriate error code otherwise
+ * Return: 0 on success, or an appropriate error code otherwise
*/
-static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
+static int rproc_handle_trace(struct rproc *rproc, void *ptr,
int offset, int avail)
{
+ struct fw_rsc_trace *rsc = ptr;
struct rproc_debug_trace *trace;
struct device *dev = &rproc->dev;
char name[15];
@@ -633,10 +684,6 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
/* create the debugfs entry */
trace->tfile = rproc_create_trace_file(name, rproc, trace);
- if (!trace->tfile) {
- kfree(trace);
- return -EINVAL;
- }
list_add_tail(&trace->node, &rproc->traces);
@@ -651,7 +698,8 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
/**
* rproc_handle_devmem() - handle devmem resource entry
* @rproc: remote processor handle
- * @rsc: the devmem resource entry
+ * @ptr: the devmem resource entry
+ * @offset: offset of the resource entry
* @avail: size of available data (for sanity checking the image)
*
* Remote processors commonly need to access certain on-chip peripherals.
@@ -672,10 +720,13 @@ static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc,
* tell us ranges of physical addresses the firmware is allowed to request,
* and not allow firmwares to request access to physical addresses that
* are outside those ranges.
+ *
+ * Return: 0 on success, or an appropriate error code otherwise
*/
-static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc,
+static int rproc_handle_devmem(struct rproc *rproc, void *ptr,
int offset, int avail)
{
+ struct fw_rsc_devmem *rsc = ptr;
struct rproc_mem_entry *mapping;
struct device *dev = &rproc->dev;
int ret;
@@ -733,6 +784,8 @@ out:
*
* This function allocates the specified memory entry @mem using
* dma_alloc_coherent() as the default allocator
+ *
+ * Return: 0 on success, or an appropriate error code otherwise
*/
static int rproc_alloc_carveout(struct rproc *rproc,
struct rproc_mem_entry *mem)
@@ -746,11 +799,12 @@ static int rproc_alloc_carveout(struct rproc *rproc,
va = dma_alloc_coherent(dev->parent, mem->len, &dma, GFP_KERNEL);
if (!va) {
dev_err(dev->parent,
- "failed to allocate dma memory: len 0x%x\n", mem->len);
+ "failed to allocate dma memory: len 0x%zx\n",
+ mem->len);
return -ENOMEM;
}
- dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%x\n",
+ dev_dbg(dev, "carveout va %pK, dma %pad, len 0x%zx\n",
va, &dma, mem->len);
if (mem->da != FW_RSC_ADDR_ANY && !rproc->domain) {
@@ -838,6 +892,8 @@ dma_free:
*
* This function releases the specified memory entry @mem that was allocated
* via rproc_alloc_carveout() for @rproc.
+ *
+ * Return: 0 on success, or an appropriate error code otherwise
*/
static int rproc_release_carveout(struct rproc *rproc,
struct rproc_mem_entry *mem)
@@ -852,7 +908,8 @@ static int rproc_release_carveout(struct rproc *rproc,
/**
* rproc_handle_carveout() - handle phys contig memory allocation requests
* @rproc: rproc handle
- * @rsc: the resource entry
+ * @ptr: the resource entry
+ * @offset: offset of the resource entry
* @avail: size of available data (for image validation)
*
* This function will handle firmware requests for allocation of physically
@@ -866,11 +923,13 @@ static int rproc_release_carveout(struct rproc *rproc,
* (e.g. CMA) more efficiently, and also minimizes the number of TLB entries
* needed to map it (in case @rproc is using an IOMMU). Reducing the TLB
* pressure is important; it may have a substantial impact on performance.
+ *
+ * Return: 0 on success, or an appropriate error code otherwise
*/
static int rproc_handle_carveout(struct rproc *rproc,
- struct fw_rsc_carveout *rsc,
- int offset, int avail)
+ void *ptr, int offset, int avail)
{
+ struct fw_rsc_carveout *rsc = ptr;
struct rproc_mem_entry *carveout;
struct device *dev = &rproc->dev;
@@ -954,10 +1013,13 @@ EXPORT_SYMBOL(rproc_add_carveout);
*
* This function allocates a rproc_mem_entry struct and fills it with the
* parameters provided by the client.
+ *
+ * Return: a valid pointer on success, or NULL on failure
*/
+__printf(8, 9)
struct rproc_mem_entry *
rproc_mem_entry_init(struct device *dev,
- void *va, dma_addr_t dma, int len, u32 da,
+ void *va, dma_addr_t dma, size_t len, u32 da,
int (*alloc)(struct rproc *, struct rproc_mem_entry *),
int (*release)(struct rproc *, struct rproc_mem_entry *),
const char *name, ...)
@@ -997,9 +1059,12 @@ EXPORT_SYMBOL(rproc_mem_entry_init);
*
* This function allocates a rproc_mem_entry struct and fills it with the
* parameters provided by the client.
+ *
+ * Return: a valid pointer on success, or NULL on failure
*/
+__printf(5, 6)
struct rproc_mem_entry *
-rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, int len,
+rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len,
u32 da, const char *name, ...)
{
struct rproc_mem_entry *mem;
@@ -1023,14 +1088,37 @@ rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, int len,
EXPORT_SYMBOL(rproc_of_resm_mem_entry_init);
/**
+ * rproc_of_parse_firmware() - parse and return the firmware-name
+ * @dev: pointer to the device struct representing a rproc
+ * @index: index to use for the firmware-name retrieval
+ * @fw_name: pointer to a character string, in which the firmware
+ * name is returned on success and left unmodified otherwise.
+ *
+ * This is an OF helper function that parses a device's DT node for
+ * the "firmware-name" property and returns the firmware name pointer
+ * in @fw_name on success.
+ *
+ * Return: 0 on success, or an appropriate error code otherwise.
+ */
+int rproc_of_parse_firmware(struct device *dev, int index, const char **fw_name)
+{
+ int ret;
+
+ ret = of_property_read_string_index(dev->of_node, "firmware-name",
+ index, fw_name);
+ return ret ? ret : 0;
+}
+EXPORT_SYMBOL(rproc_of_parse_firmware);
+
+/*
* A lookup table for resource handlers. The indices are defined in
* enum fw_resource_type.
*/
static rproc_handle_resource_t rproc_loading_handlers[RSC_LAST] = {
- [RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout,
- [RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem,
- [RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace,
- [RSC_VDEV] = (rproc_handle_resource_t)rproc_handle_vdev,
+ [RSC_CARVEOUT] = rproc_handle_carveout,
+ [RSC_DEVMEM] = rproc_handle_devmem,
+ [RSC_TRACE] = rproc_handle_trace,
+ [RSC_VDEV] = rproc_handle_vdev,
};
/* handle firmware resource entries before booting the remote processor */
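
A caller-side sketch for rproc_of_parse_firmware() above, as a platform driver might use it from probe(); the fallback firmware name and ops structure are hypothetical:

    const char *fw_name = NULL;
    struct rproc *rproc;

    /* Prefer a DT-supplied "firmware-name", fall back to a driver default */
    if (rproc_of_parse_firmware(&pdev->dev, 0, &fw_name))
            fw_name = "example-default.elf";        /* hypothetical fallback */

    rproc = rproc_alloc(&pdev->dev, dev_name(&pdev->dev), &example_ops,
                        fw_name, 0);                /* example_ops is hypothetical */
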
@@ -1227,19 +1315,6 @@ static int rproc_alloc_registered_carveouts(struct rproc *rproc)
return 0;
}
-/**
- * rproc_coredump_cleanup() - clean up dump_segments list
- * @rproc: the remote processor handle
- */
-static void rproc_coredump_cleanup(struct rproc *rproc)
-{
- struct rproc_dump_segment *entry, *tmp;
-
- list_for_each_entry_safe(entry, tmp, &rproc->dump_segments, node) {
- list_del(&entry->node);
- kfree(entry);
- }
-}
/**
* rproc_resource_cleanup() - clean up and free all acquired resources
@@ -1248,7 +1323,7 @@ static void rproc_coredump_cleanup(struct rproc *rproc)
* This function will free all resources acquired for @rproc, and it
* is called whenever @rproc either shuts down or fails to boot.
*/
-static void rproc_resource_cleanup(struct rproc *rproc)
+void rproc_resource_cleanup(struct rproc *rproc)
{
struct rproc_mem_entry *entry, *tmp;
struct rproc_debug_trace *trace, *ttmp;
@@ -1270,7 +1345,7 @@ static void rproc_resource_cleanup(struct rproc *rproc)
unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
if (unmapped != entry->len) {
/* nothing much to do besides complaining */
- dev_err(dev, "failed to unmap %u/%zu\n", entry->len,
+ dev_err(dev, "failed to unmap %zx/%zu\n", entry->len,
unmapped);
}
@@ -1292,6 +1367,7 @@ static void rproc_resource_cleanup(struct rproc *rproc)
rproc_coredump_cleanup(rproc);
}
+EXPORT_SYMBOL(rproc_resource_cleanup);
static int rproc_start(struct rproc *rproc, const struct firmware *fw)
{
@@ -1358,6 +1434,48 @@ reset_table_ptr:
return ret;
}
+static int __rproc_attach(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ ret = rproc_prepare_subdevices(rproc);
+ if (ret) {
+ dev_err(dev, "failed to prepare subdevices for %s: %d\n",
+ rproc->name, ret);
+ goto out;
+ }
+
+ /* Attach to the remote processor */
+ ret = rproc_attach_device(rproc);
+ if (ret) {
+ dev_err(dev, "can't attach to rproc %s: %d\n",
+ rproc->name, ret);
+ goto unprepare_subdevices;
+ }
+
+ /* Start any subdevices for the remote processor */
+ ret = rproc_start_subdevices(rproc);
+ if (ret) {
+ dev_err(dev, "failed to probe subdevices for %s: %d\n",
+ rproc->name, ret);
+ goto stop_rproc;
+ }
+
+ rproc->state = RPROC_ATTACHED;
+
+ dev_info(dev, "remote processor %s is now attached\n", rproc->name);
+
+ return 0;
+
+stop_rproc:
+ rproc->ops->stop(rproc);
+unprepare_subdevices:
+ rproc_unprepare_subdevices(rproc);
+out:
+ return ret;
+}
+
/*
* take a firmware and boot a remote processor with it.
*/
@@ -1383,12 +1501,19 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
return ret;
}
+ /* Prepare rproc for firmware loading if needed */
+ ret = rproc_prepare_device(rproc);
+ if (ret) {
+ dev_err(dev, "can't prepare rproc %s: %d\n", rproc->name, ret);
+ goto disable_iommu;
+ }
+
rproc->bootaddr = rproc_get_boot_addr(rproc, fw);
/* Load resource table, core dump segment list etc from the firmware */
ret = rproc_parse_fw(rproc, fw);
if (ret)
- goto disable_iommu;
+ goto unprepare_rproc;
/* reset max_notifyid */
rproc->max_notifyid = -1;
@@ -1422,6 +1547,220 @@ clean_up_resources:
kfree(rproc->cached_table);
rproc->cached_table = NULL;
rproc->table_ptr = NULL;
+unprepare_rproc:
+ /* release HW resources if needed */
+ rproc_unprepare_device(rproc);
+disable_iommu:
+ rproc_disable_iommu(rproc);
+ return ret;
+}
+
+static int rproc_set_rsc_table(struct rproc *rproc)
+{
+ struct resource_table *table_ptr;
+ struct device *dev = &rproc->dev;
+ size_t table_sz;
+ int ret;
+
+ table_ptr = rproc_get_loaded_rsc_table(rproc, &table_sz);
+ if (!table_ptr) {
+ /* Not having a resource table is acceptable */
+ return 0;
+ }
+
+ if (IS_ERR(table_ptr)) {
+ ret = PTR_ERR(table_ptr);
+ dev_err(dev, "can't load resource table: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * If it is possible to detach the remote processor, keep an untouched
+ * copy of the resource table. That way we can start fresh again when
+ * the remote processor is re-attached, that is:
+ *
+ * DETACHED -> ATTACHED -> DETACHED -> ATTACHED
+ *
+ * Freed in rproc_reset_rsc_table_on_detach() and
+ * rproc_reset_rsc_table_on_stop().
+ */
+ if (rproc->ops->detach) {
+ rproc->clean_table = kmemdup(table_ptr, table_sz, GFP_KERNEL);
+ if (!rproc->clean_table)
+ return -ENOMEM;
+ } else {
+ rproc->clean_table = NULL;
+ }
+
+ rproc->cached_table = NULL;
+ rproc->table_ptr = table_ptr;
+ rproc->table_sz = table_sz;
+
+ return 0;
+}
+
+static int rproc_reset_rsc_table_on_detach(struct rproc *rproc)
+{
+ struct resource_table *table_ptr;
+
+ /* A resource table was never retrieved, nothing to do here */
+ if (!rproc->table_ptr)
+ return 0;
+
+ /*
+ * If we made it to this point a clean_table _must_ have been
+ * allocated in rproc_set_rsc_table(). If one isn't present
+ * something went really wrong and we must complain.
+ */
+ if (WARN_ON(!rproc->clean_table))
+ return -EINVAL;
+
+ /* Remember where the external entity installed the resource table */
+ table_ptr = rproc->table_ptr;
+
+ /*
+ * If we made it here the remote processor was started by another
+ * entity and a cache table doesn't exist. As such make a copy of
+ * the resource table currently used by the remote processor and
+ * use that for the rest of the detach process. The memory
+ * allocated here is freed in rproc_detach().
+ */
+ rproc->cached_table = kmemdup(rproc->table_ptr,
+ rproc->table_sz, GFP_KERNEL);
+ if (!rproc->cached_table)
+ return -ENOMEM;
+
+ /*
+ * Use a copy of the resource table for the remainder of the
+ * detach process.
+ */
+ rproc->table_ptr = rproc->cached_table;
+
+ /*
+ * Reset the memory area where the firmware loaded the resource table
+ * to its original value. That way when we re-attach the remote
+ * processor the resource table is clean and ready to be used again.
+ */
+ memcpy(table_ptr, rproc->clean_table, rproc->table_sz);
+
+ /*
+ * The clean resource table is no longer needed. Allocated in
+ * rproc_set_rsc_table().
+ */
+ kfree(rproc->clean_table);
+
+ return 0;
+}
+
+static int rproc_reset_rsc_table_on_stop(struct rproc *rproc)
+{
+ /* A resource table was never retrieved, nothing to do here */
+ if (!rproc->table_ptr)
+ return 0;
+
+ /*
+ * If a cache table exists the remote processor was started by
+ * the remoteproc core. That cache table should be used for
+ * the rest of the shutdown process.
+ */
+ if (rproc->cached_table)
+ goto out;
+
+ /*
+ * If we made it here the remote processor was started by another
+ * entity and a cache table doesn't exist. As such make a copy of
+ * the resource table currently used by the remote processor and
+ * use that for the rest of the shutdown process. The memory
+ * allocated here is freed in rproc_shutdown().
+ */
+ rproc->cached_table = kmemdup(rproc->table_ptr,
+ rproc->table_sz, GFP_KERNEL);
+ if (!rproc->cached_table)
+ return -ENOMEM;
+
+ /*
+ * Since the remote processor is being switched off the clean table
+ * won't be needed. Allocated in rproc_set_rsc_table().
+ */
+ kfree(rproc->clean_table);
+
+out:
+ /*
+ * Use a copy of the resource table for the remainder of the
+ * shutdown process.
+ */
+ rproc->table_ptr = rproc->cached_table;
+ return 0;
+}
+
+/*
+ * Attach to remote processor - similar to rproc_fw_boot() but without
+ * the steps that deal with the firmware image.
+ */
+static int rproc_attach(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ /*
+ * if enabling an IOMMU isn't relevant for this rproc, this is
+ * just a nop
+ */
+ ret = rproc_enable_iommu(rproc);
+ if (ret) {
+ dev_err(dev, "can't enable iommu: %d\n", ret);
+ return ret;
+ }
+
+ /* Do anything that is needed to boot the remote processor */
+ ret = rproc_prepare_device(rproc);
+ if (ret) {
+ dev_err(dev, "can't prepare rproc %s: %d\n", rproc->name, ret);
+ goto disable_iommu;
+ }
+
+ ret = rproc_set_rsc_table(rproc);
+ if (ret) {
+ dev_err(dev, "can't load resource table: %d\n", ret);
+ goto unprepare_device;
+ }
+
+ /* reset max_notifyid */
+ rproc->max_notifyid = -1;
+
+ /* reset handled vdev */
+ rproc->nb_vdev = 0;
+
+ /*
+ * Handle firmware resources required to attach to a remote processor.
+ * Because we are attaching rather than booting the remote processor,
+ * we expect the platform driver to properly set rproc->table_ptr.
+ */
+ ret = rproc_handle_resources(rproc, rproc_loading_handlers);
+ if (ret) {
+ dev_err(dev, "Failed to process resources: %d\n", ret);
+ goto unprepare_device;
+ }
+
+ /* Allocate carveout resources associated to rproc */
+ ret = rproc_alloc_registered_carveouts(rproc);
+ if (ret) {
+ dev_err(dev, "Failed to allocate associated carveouts: %d\n",
+ ret);
+ goto clean_up_resources;
+ }
+
+ ret = __rproc_attach(rproc);
+ if (ret)
+ goto clean_up_resources;
+
+ return 0;
+
+clean_up_resources:
+ rproc_resource_cleanup(rproc);
+unprepare_device:
+ /* release HW resources if needed */
+ rproc_unprepare_device(rproc);
disable_iommu:
rproc_disable_iommu(rproc);
return ret;
@@ -1449,10 +1788,19 @@ static int rproc_trigger_auto_boot(struct rproc *rproc)
int ret;
/*
+ * Since the remote processor is in a detached state, it has already
+ * been booted by another entity. As such there is no point in waiting
+ * for a firmware image to be loaded, we can simply initiate the process
+ * of attaching to it immediately.
+ */
+ if (rproc->state == RPROC_DETACHED)
+ return rproc_boot(rproc);
+
+ /*
* We're initiating an asynchronous firmware loading, so we can
* be built-in kernel code, without hanging the boot process.
*/
- ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
rproc->firmware, &rproc->dev, GFP_KERNEL,
rproc, rproc_auto_boot_callback);
if (ret < 0)
@@ -1466,11 +1814,20 @@ static int rproc_stop(struct rproc *rproc, bool crashed)
struct device *dev = &rproc->dev;
int ret;
+ /* No need to continue if a stop() operation has not been provided */
+ if (!rproc->ops->stop)
+ return -EINVAL;
+
/* Stop any subdevices for the remote processor */
rproc_stop_subdevices(rproc, crashed);
/* the installed resource table is no longer accessible */
- rproc->table_ptr = rproc->cached_table;
+ ret = rproc_reset_rsc_table_on_stop(rproc);
+ if (ret) {
+ dev_err(dev, "can't reset resource table: %d\n", ret);
+ return ret;
+ }
+
/* power off the remote processor */
ret = rproc->ops->stop(rproc);
@@ -1488,153 +1845,42 @@ static int rproc_stop(struct rproc *rproc, bool crashed)
return 0;
}
-/**
- * rproc_coredump_add_segment() - add segment of device memory to coredump
- * @rproc: handle of a remote processor
- * @da: device address
- * @size: size of segment
- *
- * Add device memory to the list of segments to be included in a coredump for
- * the remoteproc.
- *
- * Return: 0 on success, negative errno on error.
- */
-int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size)
-{
- struct rproc_dump_segment *segment;
-
- segment = kzalloc(sizeof(*segment), GFP_KERNEL);
- if (!segment)
- return -ENOMEM;
-
- segment->da = da;
- segment->size = size;
-
- list_add_tail(&segment->node, &rproc->dump_segments);
-
- return 0;
-}
-EXPORT_SYMBOL(rproc_coredump_add_segment);
-
-/**
- * rproc_coredump_add_custom_segment() - add custom coredump segment
- * @rproc: handle of a remote processor
- * @da: device address
- * @size: size of segment
- * @dumpfn: custom dump function called for each segment during coredump
- * @priv: private data
- *
- * Add device memory to the list of segments to be included in the coredump
- * and associate the segment with the given custom dump function and private
- * data.
- *
- * Return: 0 on success, negative errno on error.
+/*
+ * __rproc_detach(): Does the opposite of __rproc_attach()
*/
-int rproc_coredump_add_custom_segment(struct rproc *rproc,
- dma_addr_t da, size_t size,
- void (*dumpfn)(struct rproc *rproc,
- struct rproc_dump_segment *segment,
- void *dest),
- void *priv)
+static int __rproc_detach(struct rproc *rproc)
{
- struct rproc_dump_segment *segment;
-
- segment = kzalloc(sizeof(*segment), GFP_KERNEL);
- if (!segment)
- return -ENOMEM;
-
- segment->da = da;
- segment->size = size;
- segment->priv = priv;
- segment->dump = dumpfn;
-
- list_add_tail(&segment->node, &rproc->dump_segments);
+ struct device *dev = &rproc->dev;
+ int ret;
- return 0;
-}
-EXPORT_SYMBOL(rproc_coredump_add_custom_segment);
+ /* No need to continue if a detach() operation has not been provided */
+ if (!rproc->ops->detach)
+ return -EINVAL;
-/**
- * rproc_coredump() - perform coredump
- * @rproc: rproc handle
- *
- * This function will generate an ELF header for the registered segments
- * and create a devcoredump device associated with rproc.
- */
-static void rproc_coredump(struct rproc *rproc)
-{
- struct rproc_dump_segment *segment;
- struct elf32_phdr *phdr;
- struct elf32_hdr *ehdr;
- size_t data_size;
- size_t offset;
- void *data;
- void *ptr;
- int phnum = 0;
-
- if (list_empty(&rproc->dump_segments))
- return;
+ /* Stop any subdevices for the remote processor */
+ rproc_stop_subdevices(rproc, false);
- data_size = sizeof(*ehdr);
- list_for_each_entry(segment, &rproc->dump_segments, node) {
- data_size += sizeof(*phdr) + segment->size;
+ /* the installed resource table is no longer accessible */
+ ret = rproc_reset_rsc_table_on_detach(rproc);
+ if (ret) {
+ dev_err(dev, "can't reset resource table: %d\n", ret);
+ return ret;
+ }
- phnum++;
+ /* Tell the remote processor the core isn't available anymore */
+ ret = rproc->ops->detach(rproc);
+ if (ret) {
+ dev_err(dev, "can't detach from rproc: %d\n", ret);
+ return ret;
}
- data = vmalloc(data_size);
- if (!data)
- return;
+ rproc_unprepare_subdevices(rproc);
- ehdr = data;
-
- memset(ehdr, 0, sizeof(*ehdr));
- memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
- ehdr->e_ident[EI_CLASS] = ELFCLASS32;
- ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
- ehdr->e_ident[EI_VERSION] = EV_CURRENT;
- ehdr->e_ident[EI_OSABI] = ELFOSABI_NONE;
- ehdr->e_type = ET_CORE;
- ehdr->e_machine = EM_NONE;
- ehdr->e_version = EV_CURRENT;
- ehdr->e_entry = rproc->bootaddr;
- ehdr->e_phoff = sizeof(*ehdr);
- ehdr->e_ehsize = sizeof(*ehdr);
- ehdr->e_phentsize = sizeof(*phdr);
- ehdr->e_phnum = phnum;
-
- phdr = data + ehdr->e_phoff;
- offset = ehdr->e_phoff + sizeof(*phdr) * ehdr->e_phnum;
- list_for_each_entry(segment, &rproc->dump_segments, node) {
- memset(phdr, 0, sizeof(*phdr));
- phdr->p_type = PT_LOAD;
- phdr->p_offset = offset;
- phdr->p_vaddr = segment->da;
- phdr->p_paddr = segment->da;
- phdr->p_filesz = segment->size;
- phdr->p_memsz = segment->size;
- phdr->p_flags = PF_R | PF_W | PF_X;
- phdr->p_align = 0;
-
- if (segment->dump) {
- segment->dump(rproc, segment, data + offset);
- } else {
- ptr = rproc_da_to_va(rproc, segment->da, segment->size);
- if (!ptr) {
- dev_err(&rproc->dev,
- "invalid coredump segment (%pad, %zu)\n",
- &segment->da, segment->size);
- memset(data + offset, 0xff, segment->size);
- } else {
- memcpy(data + offset, ptr, segment->size);
- }
- }
+ rproc->state = RPROC_DETACHED;
- offset += phdr->p_filesz;
- phdr++;
- }
+ dev_info(dev, "detached remote processor %s\n", rproc->name);
- dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL);
+ return 0;
}
/**
@@ -1646,6 +1892,8 @@ static void rproc_coredump(struct rproc *rproc)
* remoteproc functional again.
*
* This function can sleep, so it cannot be called from atomic context.
+ *
+ * Return: 0 on success or a negative value upon failure
*/
int rproc_trigger_recovery(struct rproc *rproc)
{
@@ -1653,18 +1901,22 @@ int rproc_trigger_recovery(struct rproc *rproc)
struct device *dev = &rproc->dev;
int ret;
- dev_err(dev, "recovering %s\n", rproc->name);
-
ret = mutex_lock_interruptible(&rproc->lock);
if (ret)
return ret;
+ /* State could have changed before we got the mutex */
+ if (rproc->state != RPROC_CRASHED)
+ goto unlock_mutex;
+
+ dev_err(dev, "recovering %s\n", rproc->name);
+
ret = rproc_stop(rproc, true);
if (ret)
goto unlock_mutex;
/* generate coredump */
- rproc_coredump(rproc);
+ rproc->ops->coredump(rproc);
/* load firmware */
ret = request_firmware(&firmware_p, rproc->firmware, dev);
@@ -1685,6 +1937,7 @@ unlock_mutex:
/**
* rproc_crash_handler_work() - handle a crash
+ * @work: work struct handling the crash
*
* This function needs to handle everything related to a crash, like cpu
* registers and stack dump, information to help to debug the fatal error, etc.
@@ -1712,6 +1965,8 @@ static void rproc_crash_handler_work(struct work_struct *work)
if (!rproc->recovery_disabled)
rproc_trigger_recovery(rproc);
+
+ pm_relax(rproc->dev.parent);
}
/**
@@ -1723,7 +1978,7 @@ static void rproc_crash_handler_work(struct work_struct *work)
* If the remote processor is already powered on, this function immediately
* returns (successfully).
*
- * Returns 0 on success, and an appropriate error value otherwise.
+ * Return: 0 on success, and an appropriate error value otherwise
*/
int rproc_boot(struct rproc *rproc)
{
@@ -1750,24 +2005,30 @@ int rproc_boot(struct rproc *rproc)
goto unlock_mutex;
}
- /* skip the boot process if rproc is already powered up */
+ /* skip the boot or attach process if rproc is already powered up */
if (atomic_inc_return(&rproc->power) > 1) {
ret = 0;
goto unlock_mutex;
}
- dev_info(dev, "powering up %s\n", rproc->name);
+ if (rproc->state == RPROC_DETACHED) {
+ dev_info(dev, "attaching to %s\n", rproc->name);
- /* load firmware */
- ret = request_firmware(&firmware_p, rproc->firmware, dev);
- if (ret < 0) {
- dev_err(dev, "request_firmware failed: %d\n", ret);
- goto downref_rproc;
- }
+ ret = rproc_attach(rproc);
+ } else {
+ dev_info(dev, "powering up %s\n", rproc->name);
+
+ /* load firmware */
+ ret = request_firmware(&firmware_p, rproc->firmware, dev);
+ if (ret < 0) {
+ dev_err(dev, "request_firmware failed: %d\n", ret);
+ goto downref_rproc;
+ }
- ret = rproc_fw_boot(rproc, firmware_p);
+ ret = rproc_fw_boot(rproc, firmware_p);
- release_firmware(firmware_p);
+ release_firmware(firmware_p);
+ }
downref_rproc:
if (ret)
@@ -1796,16 +2057,24 @@ EXPORT_SYMBOL(rproc_boot);
* which means that the @rproc handle stays valid even after rproc_shutdown()
* returns, and users can still use it with a subsequent rproc_boot(), if
* needed.
+ *
+ * Return: 0 on success, and an appropriate error value otherwise
*/
-void rproc_shutdown(struct rproc *rproc)
+int rproc_shutdown(struct rproc *rproc)
{
struct device *dev = &rproc->dev;
- int ret;
+ int ret = 0;
ret = mutex_lock_interruptible(&rproc->lock);
if (ret) {
dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
- return;
+ return ret;
+ }
+
+ if (rproc->state != RPROC_RUNNING &&
+ rproc->state != RPROC_ATTACHED) {
+ ret = -EINVAL;
+ goto out;
}
/* if the remote proc is still needed, bail out */
@@ -1821,6 +2090,9 @@ void rproc_shutdown(struct rproc *rproc)
/* clean up all acquired resources */
rproc_resource_cleanup(rproc);
+ /* release HW resources if needed */
+ rproc_unprepare_device(rproc);
+
rproc_disable_iommu(rproc);
/* Free the copy of the resource table */
@@ -1829,10 +2101,77 @@ void rproc_shutdown(struct rproc *rproc)
rproc->table_ptr = NULL;
out:
mutex_unlock(&rproc->lock);
+ return ret;
}
EXPORT_SYMBOL(rproc_shutdown);
/**
+ * rproc_detach() - Detach the remote processor from the
+ * remoteproc core
+ *
+ * @rproc: the remote processor
+ *
+ * Detach a remote processor (previously attached to with rproc_attach()).
+ *
+ * In case @rproc is still being used by an additional user(s), then
+ * this function will just decrement the power refcount and exit,
+ * without disconnecting the device.
+ *
+ * Function rproc_detach() calls __rproc_detach() in order to let a remote
+ * processor know that services provided by the application processor are
+ * no longer available. From there it should be possible to remove the
+ * platform driver and even power cycle the application processor (if the HW
+ * supports it) without needing to switch off the remote processor.
+ *
+ * Return: 0 on success, and an appropriate error value otherwise
+ */
+int rproc_detach(struct rproc *rproc)
+{
+ struct device *dev = &rproc->dev;
+ int ret;
+
+ ret = mutex_lock_interruptible(&rproc->lock);
+ if (ret) {
+ dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
+ return ret;
+ }
+
+ if (rproc->state != RPROC_ATTACHED) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* if the remote proc is still needed, bail out */
+ if (!atomic_dec_and_test(&rproc->power)) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = __rproc_detach(rproc);
+ if (ret) {
+ atomic_inc(&rproc->power);
+ goto out;
+ }
+
+ /* clean up all acquired resources */
+ rproc_resource_cleanup(rproc);
+
+ /* release HW resources if needed */
+ rproc_unprepare_device(rproc);
+
+ rproc_disable_iommu(rproc);
+
+ /* Free the copy of the resource table */
+ kfree(rproc->cached_table);
+ rproc->cached_table = NULL;
+ rproc->table_ptr = NULL;
+out:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+EXPORT_SYMBOL(rproc_detach);
+
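
Combined with the DETACHED handling in rproc_boot(), this yields the following client-side flow (a sketch; error handling abbreviated):

    ret = rproc_boot(rproc);    /* attaches when state is RPROC_DETACHED */
    if (ret)
            return ret;

    /* ... use the services exposed by the remote processor ... */

    ret = rproc_detach(rproc);  /* the core lets go; the processor keeps running */
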
+/**
* rproc_get_by_phandle() - find a remote processor by phandle
* @phandle: phandle to the rproc
*
@@ -1842,7 +2181,7 @@ EXPORT_SYMBOL(rproc_shutdown);
* This function increments the remote processor's refcount, so always
* use rproc_put() to decrement it back once rproc isn't needed anymore.
*
- * Returns the rproc handle on success, and NULL on failure.
+ * Return: rproc handle on success, and NULL on failure
*/
#ifdef CONFIG_OF
struct rproc *rproc_get_by_phandle(phandle phandle)
@@ -1854,8 +2193,8 @@ struct rproc *rproc_get_by_phandle(phandle phandle)
if (!np)
return NULL;
- mutex_lock(&rproc_list_mutex);
- list_for_each_entry(r, &rproc_list, node) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(r, &rproc_list, node) {
if (r->dev.parent && r->dev.parent->of_node == np) {
/* prevent underlying implementation from being removed */
if (!try_module_get(r->dev.parent->driver->owner)) {
@@ -1868,7 +2207,7 @@ struct rproc *rproc_get_by_phandle(phandle phandle)
break;
}
}
- mutex_unlock(&rproc_list_mutex);
+ rcu_read_unlock();
of_node_put(np);
@@ -1883,6 +2222,106 @@ struct rproc *rproc_get_by_phandle(phandle phandle)
EXPORT_SYMBOL(rproc_get_by_phandle);
/**
+ * rproc_set_firmware() - assign a new firmware
+ * @rproc: rproc handle to which the new firmware is being assigned
+ * @fw_name: new firmware name to be assigned
+ *
+ * This function allows remoteproc drivers or clients to configure a custom
+ * firmware name that is different from the default name used during remoteproc
+ * registration. The function does not trigger a remote processor boot,
+ * only sets the firmware name used for a subsequent boot. This function
+ * should also be called only when the remote processor is offline.
+ *
+ * This allows either the userspace to configure a different name through
+ * sysfs or a kernel-level remoteproc or a remoteproc client driver to set
+ * a specific firmware when it is controlling the boot and shutdown of the
+ * remote processor.
+ *
+ * Return: 0 on success or a negative value upon failure
+ */
+int rproc_set_firmware(struct rproc *rproc, const char *fw_name)
+{
+ struct device *dev;
+ int ret, len;
+ char *p;
+
+ if (!rproc || !fw_name)
+ return -EINVAL;
+
+ dev = rproc->dev.parent;
+
+ ret = mutex_lock_interruptible(&rproc->lock);
+ if (ret) {
+ dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
+ return -EINVAL;
+ }
+
+ if (rproc->state != RPROC_OFFLINE) {
+ dev_err(dev, "can't change firmware while running\n");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ len = strcspn(fw_name, "\n");
+ if (!len) {
+ dev_err(dev, "can't provide empty string for firmware name\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ p = kstrndup(fw_name, len, GFP_KERNEL);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ kfree_const(rproc->firmware);
+ rproc->firmware = p;
+
+out:
+ mutex_unlock(&rproc->lock);
+ return ret;
+}
+EXPORT_SYMBOL(rproc_set_firmware);
+
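
A short usage sketch for the new API (the firmware name is a placeholder); the call has to be made while the remote processor is offline:

    ret = rproc_set_firmware(rproc, "example-app.elf"); /* hypothetical name */
    if (ret)
            return ret;

    ret = rproc_boot(rproc);    /* boots with the newly assigned firmware */
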
+static int rproc_validate(struct rproc *rproc)
+{
+ switch (rproc->state) {
+ case RPROC_OFFLINE:
+ /*
+ * An offline processor without a start()
+ * function makes no sense.
+ */
+ if (!rproc->ops->start)
+ return -EINVAL;
+ break;
+ case RPROC_DETACHED:
+ /*
+ * A remote processor in a detached state without an
+ * attach() function makes no sense.
+ */
+ if (!rproc->ops->attach)
+ return -EINVAL;
+ /*
+ * When attaching to a remote processor the device memory
+ * is already available and as such there is no need to have a
+ * cached table.
+ */
+ if (rproc->cached_table)
+ return -EINVAL;
+ break;
+ default:
+ /*
+ * When adding a remote processor, the state of the device
+ * can be offline or detached, nothing else.
+ */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
* rproc_add() - register a remote processor
* @rproc: the remote processor handle to register
*
@@ -1892,8 +2331,6 @@ EXPORT_SYMBOL(rproc_get_by_phandle);
* This is called by the platform-specific rproc implementation, whenever
* a new remote processor device is probed.
*
- * Returns 0 on success and an appropriate error code otherwise.
- *
* Note: this function initiates an asynchronous firmware loading
* context, which will look for virtio devices supported by the rproc's
* firmware.
@@ -1901,16 +2338,29 @@ EXPORT_SYMBOL(rproc_get_by_phandle);
* If found, those virtio devices will be created and added, so as a result
* of registering this remote processor, additional virtio drivers might be
* probed.
+ *
+ * Return: 0 on success and an appropriate error code otherwise
*/
int rproc_add(struct rproc *rproc)
{
struct device *dev = &rproc->dev;
int ret;
- ret = device_add(dev);
+ ret = rproc_validate(rproc);
if (ret < 0)
return ret;
+ /* add char device for this remoteproc */
+ ret = rproc_char_device_add(rproc);
+ if (ret < 0)
+ return ret;
+
+ ret = device_add(dev);
+ if (ret < 0) {
+ put_device(dev);
+ goto rproc_remove_cdev;
+ }
+
dev_info(dev, "%s is available\n", rproc->name);
/* create debugfs entries */
@@ -1920,18 +2370,52 @@ int rproc_add(struct rproc *rproc)
if (rproc->auto_boot) {
ret = rproc_trigger_auto_boot(rproc);
if (ret < 0)
- return ret;
+ goto rproc_remove_dev;
}
/* expose to rproc_get_by_phandle users */
mutex_lock(&rproc_list_mutex);
- list_add(&rproc->node, &rproc_list);
+ list_add_rcu(&rproc->node, &rproc_list);
mutex_unlock(&rproc_list_mutex);
return 0;
+
+rproc_remove_dev:
+ rproc_delete_debug_dir(rproc);
+ device_del(dev);
+rproc_remove_cdev:
+ rproc_char_device_remove(rproc);
+ return ret;
}
EXPORT_SYMBOL(rproc_add);
+static void devm_rproc_remove(void *rproc)
+{
+ rproc_del(rproc);
+}
+
+/**
+ * devm_rproc_add() - resource managed rproc_add()
+ * @dev: the underlying device
+ * @rproc: the remote processor handle to register
+ *
+ * This function performs like rproc_add() but the registered rproc device will
+ * automatically be removed on driver detach.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+int devm_rproc_add(struct device *dev, struct rproc *rproc)
+{
+ int err;
+
+ err = rproc_add(rproc);
+ if (err)
+ return err;
+
+ return devm_add_action_or_reset(dev, devm_rproc_remove, rproc);
+}
+EXPORT_SYMBOL(devm_rproc_add);
+
/**
* rproc_type_release() - release a remote processor instance
* @dev: the rproc's device
@@ -1952,7 +2436,8 @@ static void rproc_type_release(struct device *dev)
if (rproc->index >= 0)
ida_simple_remove(&rproc_dev_index, rproc->index);
- kfree(rproc->firmware);
+ kfree_const(rproc->firmware);
+ kfree_const(rproc->name);
kfree(rproc->ops);
kfree(rproc);
}
@@ -1962,6 +2447,51 @@ static const struct device_type rproc_type = {
.release = rproc_type_release,
};
+static int rproc_alloc_firmware(struct rproc *rproc,
+ const char *name, const char *firmware)
+{
+ const char *p;
+
+ /*
+ * Allocate a firmware name if the caller gave us one to work
+ * with. Otherwise construct a new one using a default pattern.
+ */
+ if (firmware)
+ p = kstrdup_const(firmware, GFP_KERNEL);
+ else
+ p = kasprintf(GFP_KERNEL, "rproc-%s-fw", name);
+
+ if (!p)
+ return -ENOMEM;
+
+ rproc->firmware = p;
+
+ return 0;
+}
+
+static int rproc_alloc_ops(struct rproc *rproc, const struct rproc_ops *ops)
+{
+ rproc->ops = kmemdup(ops, sizeof(*ops), GFP_KERNEL);
+ if (!rproc->ops)
+ return -ENOMEM;
+
+ /* Default to rproc_coredump if no coredump function is specified */
+ if (!rproc->ops->coredump)
+ rproc->ops->coredump = rproc_coredump;
+
+ if (rproc->ops->load)
+ return 0;
+
+ /* Default to ELF loader if no load function is specified */
+ rproc->ops->load = rproc_elf_load_segments;
+ rproc->ops->parse_fw = rproc_elf_load_rsc_table;
+ rproc->ops->find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table;
+ rproc->ops->sanity_check = rproc_elf_sanity_check;
+ rproc->ops->get_boot_addr = rproc_elf_get_boot_addr;
+
+ return 0;
+}
+
/**
* rproc_alloc() - allocate a remote processor handle
* @dev: the underlying device
@@ -1980,87 +2510,59 @@ static const struct device_type rproc_type = {
* implementations should then call rproc_add() to complete
* the registration of the remote processor.
*
- * On success the new rproc is returned, and on failure, NULL.
- *
* Note: _never_ directly deallocate @rproc, even if it was not registered
* yet. Instead, when you need to unroll rproc_alloc(), use rproc_free().
+ *
+ * Return: new rproc pointer on success, and NULL on failure
*/
struct rproc *rproc_alloc(struct device *dev, const char *name,
const struct rproc_ops *ops,
const char *firmware, int len)
{
struct rproc *rproc;
- char *p, *template = "rproc-%s-fw";
- int name_len;
if (!dev || !name || !ops)
return NULL;
- if (!firmware) {
- /*
- * If the caller didn't pass in a firmware name then
- * construct a default name.
- */
- name_len = strlen(name) + strlen(template) - 2 + 1;
- p = kmalloc(name_len, GFP_KERNEL);
- if (!p)
- return NULL;
- snprintf(p, name_len, template, name);
- } else {
- p = kstrdup(firmware, GFP_KERNEL);
- if (!p)
- return NULL;
- }
-
rproc = kzalloc(sizeof(struct rproc) + len, GFP_KERNEL);
- if (!rproc) {
- kfree(p);
- return NULL;
- }
-
- rproc->ops = kmemdup(ops, sizeof(*ops), GFP_KERNEL);
- if (!rproc->ops) {
- kfree(p);
- kfree(rproc);
+ if (!rproc)
return NULL;
- }
- rproc->firmware = p;
- rproc->name = name;
rproc->priv = &rproc[1];
rproc->auto_boot = true;
+ rproc->elf_class = ELFCLASSNONE;
+ rproc->elf_machine = EM_NONE;
device_initialize(&rproc->dev);
rproc->dev.parent = dev;
rproc->dev.type = &rproc_type;
rproc->dev.class = &rproc_class;
rproc->dev.driver_data = rproc;
+ idr_init(&rproc->notifyids);
+
+ rproc->name = kstrdup_const(name, GFP_KERNEL);
+ if (!rproc->name)
+ goto put_device;
+
+ if (rproc_alloc_firmware(rproc, name, firmware))
+ goto put_device;
+
+ if (rproc_alloc_ops(rproc, ops))
+ goto put_device;
/* Assign a unique device index and name */
rproc->index = ida_simple_get(&rproc_dev_index, 0, 0, GFP_KERNEL);
if (rproc->index < 0) {
dev_err(dev, "ida_simple_get failed: %d\n", rproc->index);
- put_device(&rproc->dev);
- return NULL;
+ goto put_device;
}
dev_set_name(&rproc->dev, "remoteproc%d", rproc->index);
atomic_set(&rproc->power, 0);
- /* Default to ELF loader if no load function is specified */
- if (!rproc->ops->load) {
- rproc->ops->load = rproc_elf_load_segments;
- rproc->ops->parse_fw = rproc_elf_load_rsc_table;
- rproc->ops->find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table;
- rproc->ops->sanity_check = rproc_elf_sanity_check;
- rproc->ops->get_boot_addr = rproc_elf_get_boot_addr;
- }
-
mutex_init(&rproc->lock);
- idr_init(&rproc->notifyids);
-
INIT_LIST_HEAD(&rproc->carveouts);
INIT_LIST_HEAD(&rproc->mappings);
INIT_LIST_HEAD(&rproc->traces);
@@ -2073,6 +2575,10 @@ struct rproc *rproc_alloc(struct device *dev, const char *name,
rproc->state = RPROC_OFFLINE;
return rproc;
+
+put_device:
+ put_device(&rproc->dev);
+ return NULL;
}
EXPORT_SYMBOL(rproc_alloc);
@@ -2120,17 +2626,15 @@ EXPORT_SYMBOL(rproc_put);
* of the outstanding reference created by rproc_alloc. To decrement that
* one last refcount, one still needs to call rproc_free().
*
- * Returns 0 on success and -EINVAL if @rproc isn't valid.
+ * Return: 0 on success and -EINVAL if @rproc isn't valid
*/
int rproc_del(struct rproc *rproc)
{
if (!rproc)
return -EINVAL;
- /* if rproc is marked always-on, rproc_add() booted it */
/* TODO: make sure this works with rproc->power > 1 */
- if (rproc->auto_boot)
- rproc_shutdown(rproc);
+ rproc_shutdown(rproc);
mutex_lock(&rproc->lock);
rproc->state = RPROC_DELETED;
@@ -2140,15 +2644,59 @@ int rproc_del(struct rproc *rproc)
/* the rproc is downref'ed as soon as it's removed from the klist */
mutex_lock(&rproc_list_mutex);
- list_del(&rproc->node);
+ list_del_rcu(&rproc->node);
mutex_unlock(&rproc_list_mutex);
+ /* Ensure that no readers of rproc_list are still active */
+ synchronize_rcu();
+
device_del(&rproc->dev);
+ rproc_char_device_remove(rproc);
return 0;
}
EXPORT_SYMBOL(rproc_del);
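
The kerneldoc above stresses that rproc_del() does not drop the reference
taken by rproc_alloc(), so the usual remove path pairs the two calls. A
hedged sketch for a hypothetical platform driver:

    static int my_rproc_remove(struct platform_device *pdev)
    {
            struct rproc *rproc = platform_get_drvdata(pdev);

            rproc_del(rproc);       /* unregister; shutdown happens here */
            rproc_free(rproc);      /* drop the rproc_alloc() reference */

            return 0;
    }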
+static void devm_rproc_free(struct device *dev, void *res)
+{
+ rproc_free(*(struct rproc **)res);
+}
+
+/**
+ * devm_rproc_alloc() - resource managed rproc_alloc()
+ * @dev: the underlying device
+ * @name: name of this remote processor
+ * @ops: platform-specific handlers (mainly start/stop)
+ * @firmware: name of firmware file to load, can be NULL
+ * @len: length of private data needed by the rproc driver (in bytes)
+ *
+ * This function performs like rproc_alloc() but the acquired rproc device will
+ * automatically be released on driver detach.
+ *
+ * Return: new rproc instance, or NULL on failure
+ */
+struct rproc *devm_rproc_alloc(struct device *dev, const char *name,
+ const struct rproc_ops *ops,
+ const char *firmware, int len)
+{
+ struct rproc **ptr, *rproc;
+
+ ptr = devres_alloc(devm_rproc_free, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ rproc = rproc_alloc(dev, name, ops, firmware, len);
+ if (rproc) {
+ *ptr = rproc;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return rproc;
+}
+EXPORT_SYMBOL(devm_rproc_alloc);
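
A sketch of the managed variant in a probe path; my_rproc_ops,
struct my_rproc_priv and the firmware name are illustrative, not part of
this patch:

    static int my_rproc_probe(struct platform_device *pdev)
    {
            struct rproc *rproc;

            rproc = devm_rproc_alloc(&pdev->dev, dev_name(&pdev->dev),
                                     &my_rproc_ops, "my-fw.elf",
                                     sizeof(struct my_rproc_priv));
            if (!rproc)
                    return -ENOMEM;

            platform_set_drvdata(pdev, rproc);

            /* no rproc_free() on error or detach paths: devres handles it */
            return rproc_add(rproc);
    }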
+
/**
* rproc_add_subdev() - add a subdevice to a remoteproc
* @rproc: rproc handle to add the subdevice to
@@ -2177,7 +2725,7 @@ EXPORT_SYMBOL(rproc_remove_subdev);
* rproc_get_by_child() - acquire rproc handle of @dev's ancestor
* @dev: child device to find ancestor of
*
- * Returns the ancestor rproc instance, or NULL if not found.
+ * Return: the ancestor rproc instance, or NULL if not found
*/
struct rproc *rproc_get_by_child(struct device *dev)
{
@@ -2208,27 +2756,76 @@ void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type)
return;
}
+ /* Prevent suspend while the remoteproc is being recovered */
+ pm_stay_awake(rproc->dev.parent);
+
dev_err(&rproc->dev, "crash detected in %s: type %s\n",
rproc->name, rproc_crash_to_string(type));
- /* create a new task to handle the error */
- schedule_work(&rproc->crash_handler);
+ /* Have a worker handle the error; ensure system is not suspended */
+ queue_work(system_freezable_wq, &rproc->crash_handler);
}
EXPORT_SYMBOL(rproc_report_crash);
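
Crash reporting is meant to be callable from interrupt context, as the
stm32 watchdog handler later in this diff does; a generic sketch with a
hypothetical IRQ handler:

    static irqreturn_t my_wdg_irq(int irq, void *data)
    {
            struct rproc *rproc = data;

            /*
             * Safe in hard IRQ context: the actual recovery is deferred
             * to the freezable workqueue by rproc_report_crash().
             */
            rproc_report_crash(rproc, RPROC_WATCHDOG);

            return IRQ_HANDLED;
    }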
+static int rproc_panic_handler(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ unsigned int longest = 0;
+ struct rproc *rproc;
+ unsigned int d;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(rproc, &rproc_list, node) {
+ if (!rproc->ops->panic)
+ continue;
+
+ if (rproc->state != RPROC_RUNNING &&
+ rproc->state != RPROC_ATTACHED)
+ continue;
+
+ d = rproc->ops->panic(rproc);
+ longest = max(longest, d);
+ }
+ rcu_read_unlock();
+
+ /*
+ * Delay for the longest requested duration before returning. This can
+ * be used by the remoteproc drivers to give the remote processor time
+ * to perform any requested operations (such as flush caches), when
+ * it's not possible to signal the Linux side due to the panic.
+ */
+ mdelay(longest);
+
+ return NOTIFY_DONE;
+}
+
+static void __init rproc_init_panic(void)
+{
+ rproc_panic_nb.notifier_call = rproc_panic_handler;
+ atomic_notifier_chain_register(&panic_notifier_list, &rproc_panic_nb);
+}
+
+static void __exit rproc_exit_panic(void)
+{
+ atomic_notifier_chain_unregister(&panic_notifier_list, &rproc_panic_nb);
+}
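
A driver opts into this panic path by providing a .panic handler; the
sketch below assumes the handler's return value is the delay, in
milliseconds, that rproc_panic_handler() should mdelay() on its behalf:

    static unsigned long my_rproc_panic(struct rproc *rproc)
    {
            /*
             * Hypothetical: ring a doorbell register so the remote core
             * flushes its caches; no sleeping or locking is allowed here.
             */
            return 200;     /* ask rproc_panic_handler() to wait 200 ms */
    }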
+
static int __init remoteproc_init(void)
{
rproc_init_sysfs();
rproc_init_debugfs();
+ rproc_init_cdev();
+ rproc_init_panic();
return 0;
}
-module_init(remoteproc_init);
+subsys_initcall(remoteproc_init);
static void __exit remoteproc_exit(void)
{
ida_destroy(&rproc_dev_index);
+ rproc_exit_panic();
rproc_exit_debugfs();
rproc_exit_sysfs();
}
diff --git a/drivers/remoteproc/remoteproc_coredump.c b/drivers/remoteproc/remoteproc_coredump.c
new file mode 100644
index 000000000000..4b093420d98a
--- /dev/null
+++ b/drivers/remoteproc/remoteproc_coredump.c
@@ -0,0 +1,469 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Coredump functionality for Remoteproc framework.
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/completion.h>
+#include <linux/devcoredump.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/remoteproc.h>
+#include "remoteproc_internal.h"
+#include "remoteproc_elf_helpers.h"
+
+struct rproc_coredump_state {
+ struct rproc *rproc;
+ void *header;
+ struct completion dump_done;
+};
+
+/**
+ * rproc_coredump_cleanup() - clean up dump_segments list
+ * @rproc: the remote processor handle
+ */
+void rproc_coredump_cleanup(struct rproc *rproc)
+{
+ struct rproc_dump_segment *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &rproc->dump_segments, node) {
+ list_del(&entry->node);
+ kfree(entry);
+ }
+}
+
+/**
+ * rproc_coredump_add_segment() - add segment of device memory to coredump
+ * @rproc: handle of a remote processor
+ * @da: device address
+ * @size: size of segment
+ *
+ * Add device memory to the list of segments to be included in a coredump for
+ * the remoteproc.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size)
+{
+ struct rproc_dump_segment *segment;
+
+ segment = kzalloc(sizeof(*segment), GFP_KERNEL);
+ if (!segment)
+ return -ENOMEM;
+
+ segment->da = da;
+ segment->size = size;
+
+ list_add_tail(&segment->node, &rproc->dump_segments);
+
+ return 0;
+}
+EXPORT_SYMBOL(rproc_coredump_add_segment);
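
Together with rproc_coredump_set_elf_info() below, a driver would
typically describe its dump layout once, e.g. at probe or parse_fw time;
the device addresses, sizes and EM_ARM machine type here are purely
illustrative:

    static int my_rproc_register_dump(struct rproc *rproc)
    {
            int ret;

            /* class/machine must be set before a dump can be generated */
            ret = rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_ARM);
            if (ret)
                    return ret;

            /* illustrative device addresses and sizes */
            ret = rproc_coredump_add_segment(rproc, 0x10000000, SZ_1M);
            if (ret)
                    return ret;

            return rproc_coredump_add_segment(rproc, 0x20000000, SZ_256K);
    }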
+
+/**
+ * rproc_coredump_add_custom_segment() - add custom coredump segment
+ * @rproc: handle of a remote processor
+ * @da: device address
+ * @size: size of segment
+ * @dumpfn: custom dump function called for each segment during coredump
+ * @priv: private data
+ *
+ * Add device memory to the list of segments to be included in the coredump
+ * and associate the segment with the given custom dump function and private
+ * data.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int rproc_coredump_add_custom_segment(struct rproc *rproc,
+ dma_addr_t da, size_t size,
+ void (*dumpfn)(struct rproc *rproc,
+ struct rproc_dump_segment *segment,
+ void *dest, size_t offset,
+ size_t size),
+ void *priv)
+{
+ struct rproc_dump_segment *segment;
+
+ segment = kzalloc(sizeof(*segment), GFP_KERNEL);
+ if (!segment)
+ return -ENOMEM;
+
+ segment->da = da;
+ segment->size = size;
+ segment->priv = priv;
+ segment->dump = dumpfn;
+
+ list_add_tail(&segment->node, &rproc->dump_segments);
+
+ return 0;
+}
+EXPORT_SYMBOL(rproc_coredump_add_custom_segment);
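
The custom variant lets a driver intercept each copy, for instance when a
region is only reachable through an ioremap'ed window; a minimal sketch
where treating segment->priv as an __iomem cookie is an assumption:

    static void my_segment_dump(struct rproc *rproc,
                                struct rproc_dump_segment *segment,
                                void *dest, size_t offset, size_t size)
    {
            /* the cookie registered below, assumed to be an ioremap'ed base */
            void __iomem *base = (void __iomem *)segment->priv;

            memcpy_fromio(dest, base + offset, size);
    }

    /* registered with:
     *   rproc_coredump_add_custom_segment(rproc, da, size,
     *                                     my_segment_dump, base);
     */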
+
+/**
+ * rproc_coredump_set_elf_info() - set coredump elf information
+ * @rproc: handle of a remote processor
+ * @class: elf class for coredump elf file
+ * @machine: elf machine for coredump elf file
+ *
+ * Set elf information which will be used for coredump elf file.
+ *
+ * Return: 0 on success, negative errno on error.
+ */
+int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine)
+{
+ if (class != ELFCLASS64 && class != ELFCLASS32)
+ return -EINVAL;
+
+ rproc->elf_class = class;
+ rproc->elf_machine = machine;
+
+ return 0;
+}
+EXPORT_SYMBOL(rproc_coredump_set_elf_info);
+
+static void rproc_coredump_free(void *data)
+{
+ struct rproc_coredump_state *dump_state = data;
+
+ vfree(dump_state->header);
+ complete(&dump_state->dump_done);
+}
+
+static void *rproc_coredump_find_segment(loff_t user_offset,
+ struct list_head *segments,
+ size_t *data_left)
+{
+ struct rproc_dump_segment *segment;
+
+ list_for_each_entry(segment, segments, node) {
+ if (user_offset < segment->size) {
+ *data_left = segment->size - user_offset;
+ return segment;
+ }
+ user_offset -= segment->size;
+ }
+
+ *data_left = 0;
+ return NULL;
+}
+
+static void rproc_copy_segment(struct rproc *rproc, void *dest,
+ struct rproc_dump_segment *segment,
+ size_t offset, size_t size)
+{
+ bool is_iomem = false;
+ void *ptr;
+
+ if (segment->dump) {
+ segment->dump(rproc, segment, dest, offset, size);
+ } else {
+ ptr = rproc_da_to_va(rproc, segment->da + offset, size, &is_iomem);
+ if (!ptr) {
+ dev_err(&rproc->dev,
+				"invalid copy request for segment %pad with offset %zu and size %zu\n",
+ &segment->da, offset, size);
+ memset(dest, 0xff, size);
+ } else {
+ if (is_iomem)
+ memcpy_fromio(dest, (void const __iomem *)ptr, size);
+ else
+ memcpy(dest, ptr, size);
+ }
+ }
+}
+
+static ssize_t rproc_coredump_read(char *buffer, loff_t offset, size_t count,
+ void *data, size_t header_sz)
+{
+ size_t seg_data, bytes_left = count;
+ ssize_t copy_sz;
+ struct rproc_dump_segment *seg;
+ struct rproc_coredump_state *dump_state = data;
+ struct rproc *rproc = dump_state->rproc;
+ void *elfcore = dump_state->header;
+
+ /* Copy the vmalloc'ed header first. */
+ if (offset < header_sz) {
+ copy_sz = memory_read_from_buffer(buffer, count, &offset,
+ elfcore, header_sz);
+
+ return copy_sz;
+ }
+
+ /*
+ * Find out the segment memory chunk to be copied based on offset.
+ * Keep copying data until count bytes are read.
+ */
+ while (bytes_left) {
+ seg = rproc_coredump_find_segment(offset - header_sz,
+ &rproc->dump_segments,
+ &seg_data);
+ /* EOF check */
+ if (!seg) {
+			dev_info(&rproc->dev, "Ramdump done, %lld bytes read\n",
+ offset);
+ break;
+ }
+
+ copy_sz = min_t(size_t, bytes_left, seg_data);
+
+ rproc_copy_segment(rproc, buffer, seg, seg->size - seg_data,
+ copy_sz);
+
+ offset += copy_sz;
+ buffer += copy_sz;
+ bytes_left -= copy_sz;
+ }
+
+ return count - bytes_left;
+}
+
+/**
+ * rproc_coredump() - perform coredump
+ * @rproc: rproc handle
+ *
+ * This function will generate an ELF header for the registered segments
+ * and create a devcoredump device associated with rproc. Based on the
+ * coredump configuration this function will directly copy the segments
+ * from device memory to userspace or copy segments from device memory to
+ * a separate buffer, which can then be read by userspace.
+ * The first approach avoids using extra vmalloc memory, but it will stall
+ * the recovery flow until the dump is read by userspace.
+ */
+void rproc_coredump(struct rproc *rproc)
+{
+ struct rproc_dump_segment *segment;
+ void *phdr;
+ void *ehdr;
+ size_t data_size;
+ size_t offset;
+ void *data;
+ u8 class = rproc->elf_class;
+ int phnum = 0;
+ struct rproc_coredump_state dump_state;
+ enum rproc_dump_mechanism dump_conf = rproc->dump_conf;
+
+ if (list_empty(&rproc->dump_segments) ||
+ dump_conf == RPROC_COREDUMP_DISABLED)
+ return;
+
+ if (class == ELFCLASSNONE) {
+ dev_err(&rproc->dev, "Elf class is not set\n");
+ return;
+ }
+
+ data_size = elf_size_of_hdr(class);
+ list_for_each_entry(segment, &rproc->dump_segments, node) {
+ /*
+		 * For the default configuration the buffer includes the
+		 * headers and the segments. For inline dumps the buffer
+		 * holds only the headers, since segments are read directly
+		 * from device memory.
+ */
+ data_size += elf_size_of_phdr(class);
+ if (dump_conf == RPROC_COREDUMP_ENABLED)
+ data_size += segment->size;
+
+ phnum++;
+ }
+
+ data = vmalloc(data_size);
+ if (!data)
+ return;
+
+ ehdr = data;
+
+ memset(ehdr, 0, elf_size_of_hdr(class));
+ /* e_ident field is common for both elf32 and elf64 */
+ elf_hdr_init_ident(ehdr, class);
+
+ elf_hdr_set_e_type(class, ehdr, ET_CORE);
+ elf_hdr_set_e_machine(class, ehdr, rproc->elf_machine);
+ elf_hdr_set_e_version(class, ehdr, EV_CURRENT);
+ elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr);
+ elf_hdr_set_e_phoff(class, ehdr, elf_size_of_hdr(class));
+ elf_hdr_set_e_ehsize(class, ehdr, elf_size_of_hdr(class));
+ elf_hdr_set_e_phentsize(class, ehdr, elf_size_of_phdr(class));
+ elf_hdr_set_e_phnum(class, ehdr, phnum);
+
+ phdr = data + elf_hdr_get_e_phoff(class, ehdr);
+ offset = elf_hdr_get_e_phoff(class, ehdr);
+ offset += elf_size_of_phdr(class) * elf_hdr_get_e_phnum(class, ehdr);
+
+ list_for_each_entry(segment, &rproc->dump_segments, node) {
+ memset(phdr, 0, elf_size_of_phdr(class));
+ elf_phdr_set_p_type(class, phdr, PT_LOAD);
+ elf_phdr_set_p_offset(class, phdr, offset);
+ elf_phdr_set_p_vaddr(class, phdr, segment->da);
+ elf_phdr_set_p_paddr(class, phdr, segment->da);
+ elf_phdr_set_p_filesz(class, phdr, segment->size);
+ elf_phdr_set_p_memsz(class, phdr, segment->size);
+ elf_phdr_set_p_flags(class, phdr, PF_R | PF_W | PF_X);
+ elf_phdr_set_p_align(class, phdr, 0);
+
+ if (dump_conf == RPROC_COREDUMP_ENABLED)
+ rproc_copy_segment(rproc, data + offset, segment, 0,
+ segment->size);
+
+ offset += elf_phdr_get_p_filesz(class, phdr);
+ phdr += elf_size_of_phdr(class);
+ }
+ if (dump_conf == RPROC_COREDUMP_ENABLED) {
+ dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL);
+ return;
+ }
+
+ /* Initialize the dump state struct to be used by rproc_coredump_read */
+ dump_state.rproc = rproc;
+ dump_state.header = data;
+ init_completion(&dump_state.dump_done);
+
+ dev_coredumpm(&rproc->dev, NULL, &dump_state, data_size, GFP_KERNEL,
+ rproc_coredump_read, rproc_coredump_free);
+
+ /*
+ * Wait until the dump is read and free is called. Data is freed
+ * by devcoredump framework automatically after 5 minutes.
+ */
+ wait_for_completion(&dump_state.dump_done);
+}
+
+/**
+ * rproc_coredump_using_sections() - perform coredump using section headers
+ * @rproc: rproc handle
+ *
+ * This function will generate an ELF header for the registered sections of
+ * segments and create a devcoredump device associated with rproc. Based on
+ * the coredump configuration this function will directly copy the segments
+ * from device memory to userspace or copy segments from device memory to
+ * a separate buffer, which can then be read by userspace.
+ * The first approach avoids using extra vmalloc memory, but it will stall
+ * the recovery flow until the dump is read by userspace.
+ */
+void rproc_coredump_using_sections(struct rproc *rproc)
+{
+ struct rproc_dump_segment *segment;
+ void *shdr;
+ void *ehdr;
+ size_t data_size;
+ size_t strtbl_size = 0;
+ size_t strtbl_index = 1;
+ size_t offset;
+ void *data;
+ u8 class = rproc->elf_class;
+ int shnum;
+ struct rproc_coredump_state dump_state;
+ unsigned int dump_conf = rproc->dump_conf;
+ char *str_tbl = "STR_TBL";
+
+ if (list_empty(&rproc->dump_segments) ||
+ dump_conf == RPROC_COREDUMP_DISABLED)
+ return;
+
+ if (class == ELFCLASSNONE) {
+ dev_err(&rproc->dev, "Elf class is not set\n");
+ return;
+ }
+
+ /*
+	 * We allocate two extra section headers. The first one is the null
+	 * section header and the second is for the string table; space is
+	 * also allocated for the string table itself.
+ */
+ data_size = elf_size_of_hdr(class) + 2 * elf_size_of_shdr(class);
+ shnum = 2;
+
+	/* +1 for the name's terminator, +1 for the null character at index 0 */
+ strtbl_size += strlen(str_tbl) + 2;
+
+ list_for_each_entry(segment, &rproc->dump_segments, node) {
+ data_size += elf_size_of_shdr(class);
+ strtbl_size += strlen(segment->priv) + 1;
+ if (dump_conf == RPROC_COREDUMP_ENABLED)
+ data_size += segment->size;
+ shnum++;
+ }
+
+ data_size += strtbl_size;
+
+ data = vmalloc(data_size);
+ if (!data)
+ return;
+
+ ehdr = data;
+ memset(ehdr, 0, elf_size_of_hdr(class));
+ /* e_ident field is common for both elf32 and elf64 */
+ elf_hdr_init_ident(ehdr, class);
+
+ elf_hdr_set_e_type(class, ehdr, ET_CORE);
+ elf_hdr_set_e_machine(class, ehdr, rproc->elf_machine);
+ elf_hdr_set_e_version(class, ehdr, EV_CURRENT);
+ elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr);
+ elf_hdr_set_e_shoff(class, ehdr, elf_size_of_hdr(class));
+ elf_hdr_set_e_ehsize(class, ehdr, elf_size_of_hdr(class));
+ elf_hdr_set_e_shentsize(class, ehdr, elf_size_of_shdr(class));
+ elf_hdr_set_e_shnum(class, ehdr, shnum);
+ elf_hdr_set_e_shstrndx(class, ehdr, 1);
+
+ /*
+	 * The zeroth entry of the section header table is reserved and rarely used.
+ * Set the section header as null (SHN_UNDEF) and move to the next one.
+ */
+ shdr = data + elf_hdr_get_e_shoff(class, ehdr);
+ memset(shdr, 0, elf_size_of_shdr(class));
+ shdr += elf_size_of_shdr(class);
+
+ /* Initialize the string table. */
+ offset = elf_hdr_get_e_shoff(class, ehdr) +
+ elf_size_of_shdr(class) * elf_hdr_get_e_shnum(class, ehdr);
+ memset(data + offset, 0, strtbl_size);
+
+ /* Fill in the string table section header. */
+ memset(shdr, 0, elf_size_of_shdr(class));
+ elf_shdr_set_sh_type(class, shdr, SHT_STRTAB);
+ elf_shdr_set_sh_offset(class, shdr, offset);
+ elf_shdr_set_sh_size(class, shdr, strtbl_size);
+ elf_shdr_set_sh_entsize(class, shdr, 0);
+ elf_shdr_set_sh_flags(class, shdr, 0);
+ elf_shdr_set_sh_name(class, shdr, elf_strtbl_add(str_tbl, ehdr, class, &strtbl_index));
+ offset += elf_shdr_get_sh_size(class, shdr);
+ shdr += elf_size_of_shdr(class);
+
+ list_for_each_entry(segment, &rproc->dump_segments, node) {
+ memset(shdr, 0, elf_size_of_shdr(class));
+ elf_shdr_set_sh_type(class, shdr, SHT_PROGBITS);
+ elf_shdr_set_sh_offset(class, shdr, offset);
+ elf_shdr_set_sh_addr(class, shdr, segment->da);
+ elf_shdr_set_sh_size(class, shdr, segment->size);
+ elf_shdr_set_sh_entsize(class, shdr, 0);
+ elf_shdr_set_sh_flags(class, shdr, SHF_WRITE);
+ elf_shdr_set_sh_name(class, shdr,
+ elf_strtbl_add(segment->priv, ehdr, class, &strtbl_index));
+
+ /* No need to copy segments for inline dumps */
+ if (dump_conf == RPROC_COREDUMP_ENABLED)
+ rproc_copy_segment(rproc, data + offset, segment, 0,
+ segment->size);
+ offset += elf_shdr_get_sh_size(class, shdr);
+ shdr += elf_size_of_shdr(class);
+ }
+
+ if (dump_conf == RPROC_COREDUMP_ENABLED) {
+ dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL);
+ return;
+ }
+
+ /* Initialize the dump state struct to be used by rproc_coredump_read */
+ dump_state.rproc = rproc;
+ dump_state.header = data;
+ init_completion(&dump_state.dump_done);
+
+ dev_coredumpm(&rproc->dev, NULL, &dump_state, data_size, GFP_KERNEL,
+ rproc_coredump_read, rproc_coredump_free);
+
+	/*
+	 * Wait until the dump is read and free is called. Data is freed
+	 * by devcoredump framework automatically after 5 minutes.
+	 */
+ wait_for_completion(&dump_state.dump_done);
+}
+EXPORT_SYMBOL(rproc_coredump_using_sections);
diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c
index dd93cf04e17f..b86c1d09c70c 100644
--- a/drivers/remoteproc/remoteproc_debugfs.c
+++ b/drivers/remoteproc/remoteproc_debugfs.c
@@ -28,6 +28,93 @@
static struct dentry *rproc_dbg;
/*
+ * A coredump-configuration-to-string lookup table, for exposing a
+ * human readable configuration via debugfs. Always keep in sync with
+ * enum rproc_dump_mechanism
+ */
+static const char * const rproc_coredump_str[] = {
+ [RPROC_COREDUMP_DISABLED] = "disabled",
+ [RPROC_COREDUMP_ENABLED] = "enabled",
+ [RPROC_COREDUMP_INLINE] = "inline",
+};
+
+/* Expose the current coredump configuration via debugfs */
+static ssize_t rproc_coredump_read(struct file *filp, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct rproc *rproc = filp->private_data;
+ char buf[20];
+ int len;
+
+ len = scnprintf(buf, sizeof(buf), "%s\n",
+ rproc_coredump_str[rproc->dump_conf]);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+/*
+ * By writing to the 'coredump' debugfs entry, we control the behavior of the
+ * coredump mechanism dynamically. The default value of this entry is "disabled".
+ *
+ * The 'coredump' debugfs entry supports these commands:
+ *
+ * disabled: By default coredump collection is disabled. Recovery will
+ * proceed without collecting any dump.
+ *
+ * enabled: When the remoteproc crashes the entire coredump will be copied
+ * to a separate buffer and exposed to userspace.
+ *
+ * inline: The coredump will not be copied to a separate buffer and the
+ * recovery process will have to wait until data is read by
+ *		userspace. But this avoids using extra memory.
+ */
+static ssize_t rproc_coredump_write(struct file *filp,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct rproc *rproc = filp->private_data;
+ int ret, err = 0;
+ char buf[20];
+
+ if (count < 1 || count > sizeof(buf))
+ return -EINVAL;
+
+ ret = copy_from_user(buf, user_buf, count);
+ if (ret)
+ return -EFAULT;
+
+ /* remove end of line */
+ if (buf[count - 1] == '\n')
+ buf[count - 1] = '\0';
+
+ if (rproc->state == RPROC_CRASHED) {
+ dev_err(&rproc->dev, "can't change coredump configuration\n");
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (!strncmp(buf, "disabled", count)) {
+ rproc->dump_conf = RPROC_COREDUMP_DISABLED;
+ } else if (!strncmp(buf, "enabled", count)) {
+ rproc->dump_conf = RPROC_COREDUMP_ENABLED;
+ } else if (!strncmp(buf, "inline", count)) {
+ rproc->dump_conf = RPROC_COREDUMP_INLINE;
+ } else {
+ dev_err(&rproc->dev, "Invalid coredump configuration\n");
+ err = -EINVAL;
+ }
+out:
+ return err ? err : count;
+}
+
+static const struct file_operations rproc_coredump_fops = {
+ .read = rproc_coredump_read,
+ .write = rproc_coredump_write,
+ .open = simple_open,
+ .llseek = generic_file_llseek,
+};
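
From userspace, and assuming the default debugfs mount point and instance
0, the mode can then be switched with, e.g.,
"echo inline > /sys/kernel/debug/remoteproc/remoteproc0/coredump";
reading the same file back returns the active configuration.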
+
+/*
* Some remote processors may support dumping trace logs into a shared
* memory buffer. We expose this trace buffer using debugfs, so users
* can easily tell what's going on remotely.
@@ -45,7 +132,7 @@ static ssize_t rproc_trace_read(struct file *filp, char __user *userbuf,
char buf[100];
int len;
- va = rproc_da_to_va(data->rproc, trace->da, trace->len);
+ va = rproc_da_to_va(data->rproc, trace->da, trace->len, NULL);
if (!va) {
len = scnprintf(buf, sizeof(buf), "Trace %s not available\n",
@@ -138,16 +225,16 @@ rproc_recovery_write(struct file *filp, const char __user *user_buf,
buf[count - 1] = '\0';
if (!strncmp(buf, "enabled", count)) {
+ /* change the flag and begin the recovery process if needed */
rproc->recovery_disabled = false;
- /* if rproc has crashed, trigger recovery */
- if (rproc->state == RPROC_CRASHED)
- rproc_trigger_recovery(rproc);
+ rproc_trigger_recovery(rproc);
} else if (!strncmp(buf, "disabled", count)) {
rproc->recovery_disabled = true;
} else if (!strncmp(buf, "recover", count)) {
- /* if rproc has crashed, trigger recovery */
- if (rproc->state == RPROC_CRASHED)
- rproc_trigger_recovery(rproc);
+ /* begin the recovery process without changing the flag */
+ rproc_trigger_recovery(rproc);
+ } else {
+ return -EINVAL;
}
return count;
@@ -269,17 +356,7 @@ static int rproc_rsc_table_show(struct seq_file *seq, void *p)
return 0;
}
-static int rproc_rsc_table_open(struct inode *inode, struct file *file)
-{
- return single_open(file, rproc_rsc_table_show, inode->i_private);
-}
-
-static const struct file_operations rproc_rsc_table_ops = {
- .open = rproc_rsc_table_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(rproc_rsc_table);
/* Expose carveout content via debugfs */
static int rproc_carveouts_show(struct seq_file *seq, void *p)
@@ -293,23 +370,13 @@ static int rproc_carveouts_show(struct seq_file *seq, void *p)
seq_printf(seq, "\tVirtual address: %pK\n", carveout->va);
seq_printf(seq, "\tDMA address: %pad\n", &carveout->dma);
seq_printf(seq, "\tDevice address: 0x%x\n", carveout->da);
- seq_printf(seq, "\tLength: 0x%x Bytes\n\n", carveout->len);
+ seq_printf(seq, "\tLength: 0x%zx Bytes\n\n", carveout->len);
}
return 0;
}
-static int rproc_carveouts_open(struct inode *inode, struct file *file)
-{
- return single_open(file, rproc_carveouts_show, inode->i_private);
-}
-
-static const struct file_operations rproc_carveouts_ops = {
- .open = rproc_carveouts_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(rproc_carveouts);
void rproc_remove_trace_file(struct dentry *tfile)
{
@@ -319,16 +386,8 @@ void rproc_remove_trace_file(struct dentry *tfile)
struct dentry *rproc_create_trace_file(const char *name, struct rproc *rproc,
struct rproc_debug_trace *trace)
{
- struct dentry *tfile;
-
- tfile = debugfs_create_file(name, 0400, rproc->dbg_dir, trace,
+ return debugfs_create_file(name, 0400, rproc->dbg_dir, trace,
&trace_rproc_ops);
- if (!tfile) {
- dev_err(&rproc->dev, "failed to create debugfs trace entry\n");
- return NULL;
- }
-
- return tfile;
}
void rproc_delete_debug_dir(struct rproc *rproc)
@@ -344,28 +403,25 @@ void rproc_create_debug_dir(struct rproc *rproc)
return;
rproc->dbg_dir = debugfs_create_dir(dev_name(dev), rproc_dbg);
- if (!rproc->dbg_dir)
- return;
debugfs_create_file("name", 0400, rproc->dbg_dir,
rproc, &rproc_name_ops);
- debugfs_create_file("recovery", 0400, rproc->dbg_dir,
+ debugfs_create_file("recovery", 0600, rproc->dbg_dir,
rproc, &rproc_recovery_ops);
debugfs_create_file("crash", 0200, rproc->dbg_dir,
rproc, &rproc_crash_ops);
debugfs_create_file("resource_table", 0400, rproc->dbg_dir,
- rproc, &rproc_rsc_table_ops);
+ rproc, &rproc_rsc_table_fops);
debugfs_create_file("carveout_memories", 0400, rproc->dbg_dir,
- rproc, &rproc_carveouts_ops);
+ rproc, &rproc_carveouts_fops);
+ debugfs_create_file("coredump", 0600, rproc->dbg_dir,
+ rproc, &rproc_coredump_fops);
}
void __init rproc_init_debugfs(void)
{
- if (debugfs_initialized()) {
+ if (debugfs_initialized())
rproc_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (!rproc_dbg)
- pr_err("can't create debugfs dir\n");
- }
}
void __exit rproc_exit_debugfs(void)
diff --git a/drivers/remoteproc/remoteproc_elf_helpers.h b/drivers/remoteproc/remoteproc_elf_helpers.h
new file mode 100644
index 000000000000..e6de53a5000c
--- /dev/null
+++ b/drivers/remoteproc/remoteproc_elf_helpers.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Remote processor ELF helper definitions
+ *
+ * Copyright (C) 2020 Kalray, Inc.
+ */
+
+#ifndef REMOTEPROC_ELF_LOADER_H
+#define REMOTEPROC_ELF_LOADER_H
+
+#include <linux/elf.h>
+#include <linux/types.h>
+
+/**
+ * fw_elf_get_class - Get elf class
+ * @fw: the ELF firmware image
+ *
+ * Note that we use elf32_hdr to access the class since the start of the
+ * struct is the same for both ELF classes.
+ *
+ * Return: elf class of the firmware
+ */
+static inline u8 fw_elf_get_class(const struct firmware *fw)
+{
+ struct elf32_hdr *ehdr = (struct elf32_hdr *)fw->data;
+
+ return ehdr->e_ident[EI_CLASS];
+}
+
+static inline void elf_hdr_init_ident(struct elf32_hdr *hdr, u8 class)
+{
+ memcpy(hdr->e_ident, ELFMAG, SELFMAG);
+ hdr->e_ident[EI_CLASS] = class;
+ hdr->e_ident[EI_DATA] = ELFDATA2LSB;
+ hdr->e_ident[EI_VERSION] = EV_CURRENT;
+ hdr->e_ident[EI_OSABI] = ELFOSABI_NONE;
+}
+
+/* Generate getter and setter for a specific elf struct/field */
+#define ELF_GEN_FIELD_GET_SET(__s, __field, __type) \
+static inline __type elf_##__s##_get_##__field(u8 class, const void *arg) \
+{ \
+ if (class == ELFCLASS32) \
+ return (__type) ((const struct elf32_##__s *) arg)->__field; \
+ else \
+ return (__type) ((const struct elf64_##__s *) arg)->__field; \
+} \
+static inline void elf_##__s##_set_##__field(u8 class, void *arg, \
+ __type value) \
+{ \
+ if (class == ELFCLASS32) \
+ ((struct elf32_##__s *) arg)->__field = (__type) value; \
+ else \
+ ((struct elf64_##__s *) arg)->__field = (__type) value; \
+}
+
+ELF_GEN_FIELD_GET_SET(hdr, e_entry, u64)
+ELF_GEN_FIELD_GET_SET(hdr, e_phnum, u16)
+ELF_GEN_FIELD_GET_SET(hdr, e_shnum, u16)
+ELF_GEN_FIELD_GET_SET(hdr, e_phoff, u64)
+ELF_GEN_FIELD_GET_SET(hdr, e_shoff, u64)
+ELF_GEN_FIELD_GET_SET(hdr, e_shstrndx, u16)
+ELF_GEN_FIELD_GET_SET(hdr, e_machine, u16)
+ELF_GEN_FIELD_GET_SET(hdr, e_type, u16)
+ELF_GEN_FIELD_GET_SET(hdr, e_version, u32)
+ELF_GEN_FIELD_GET_SET(hdr, e_ehsize, u32)
+ELF_GEN_FIELD_GET_SET(hdr, e_phentsize, u16)
+ELF_GEN_FIELD_GET_SET(hdr, e_shentsize, u16)
+
+ELF_GEN_FIELD_GET_SET(phdr, p_paddr, u64)
+ELF_GEN_FIELD_GET_SET(phdr, p_vaddr, u64)
+ELF_GEN_FIELD_GET_SET(phdr, p_filesz, u64)
+ELF_GEN_FIELD_GET_SET(phdr, p_memsz, u64)
+ELF_GEN_FIELD_GET_SET(phdr, p_type, u32)
+ELF_GEN_FIELD_GET_SET(phdr, p_offset, u64)
+ELF_GEN_FIELD_GET_SET(phdr, p_flags, u32)
+ELF_GEN_FIELD_GET_SET(phdr, p_align, u64)
+
+ELF_GEN_FIELD_GET_SET(shdr, sh_type, u32)
+ELF_GEN_FIELD_GET_SET(shdr, sh_flags, u32)
+ELF_GEN_FIELD_GET_SET(shdr, sh_entsize, u16)
+ELF_GEN_FIELD_GET_SET(shdr, sh_size, u64)
+ELF_GEN_FIELD_GET_SET(shdr, sh_offset, u64)
+ELF_GEN_FIELD_GET_SET(shdr, sh_name, u32)
+ELF_GEN_FIELD_GET_SET(shdr, sh_addr, u64)
+
+#define ELF_STRUCT_SIZE(__s) \
+static inline unsigned long elf_size_of_##__s(u8 class) \
+{ \
+ if (class == ELFCLASS32)\
+ return sizeof(struct elf32_##__s); \
+ else \
+ return sizeof(struct elf64_##__s); \
+}
+
+ELF_STRUCT_SIZE(shdr)
+ELF_STRUCT_SIZE(phdr)
+ELF_STRUCT_SIZE(hdr)
+
+static inline unsigned int elf_strtbl_add(const char *name, void *ehdr, u8 class, size_t *index)
+{
+ u16 shstrndx = elf_hdr_get_e_shstrndx(class, ehdr);
+ void *shdr;
+ char *strtab;
+ size_t idx, ret;
+
+ shdr = ehdr + elf_size_of_hdr(class) + shstrndx * elf_size_of_shdr(class);
+ strtab = ehdr + elf_shdr_get_sh_offset(class, shdr);
+ idx = index ? *index : 0;
+ if (!strtab || !name)
+ return 0;
+
+ ret = idx;
+ strcpy((strtab + idx), name);
+ idx += strlen(name) + 1;
+ if (index)
+ *index = idx;
+
+ return ret;
+}
+
+#endif /* REMOTEPROC_ELF_LOADER_H */
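
A quick sketch of what these generated accessors buy: a single code path
can populate either header flavour, with the ELF class decided at run
time (fill_core_ehdr is a hypothetical helper):

    static void fill_core_ehdr(void *ehdr, u8 class, u16 machine)
    {
            /* identical call sites for elf32_hdr and elf64_hdr layouts */
            elf_hdr_init_ident(ehdr, class);
            elf_hdr_set_e_type(class, ehdr, ET_CORE);
            elf_hdr_set_e_machine(class, ehdr, machine);
            elf_hdr_set_e_phoff(class, ehdr, elf_size_of_hdr(class));
    }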
diff --git a/drivers/remoteproc/remoteproc_elf_loader.c b/drivers/remoteproc/remoteproc_elf_loader.c
index 606aae166eba..5a412d7b6e0b 100644
--- a/drivers/remoteproc/remoteproc_elf_loader.c
+++ b/drivers/remoteproc/remoteproc_elf_loader.c
@@ -23,20 +23,31 @@
#include <linux/elf.h>
#include "remoteproc_internal.h"
+#include "remoteproc_elf_helpers.h"
/**
- * rproc_elf_sanity_check() - Sanity Check ELF firmware image
+ * rproc_elf_sanity_check() - Sanity Check for ELF32/ELF64 firmware image
* @rproc: the remote processor handle
* @fw: the ELF firmware image
*
- * Make sure this fw image is sane.
+ * Make sure this fw image is sane (i.e. a correct ELF32/ELF64 file).
+ *
+ * Return: 0 on success and -EINVAL upon any failure
*/
int rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw)
{
const char *name = rproc->firmware;
struct device *dev = &rproc->dev;
+ /*
+	 * ELF files all begin with the same basic structure, so to simplify
+	 * header parsing we can use the elf32_hdr struct for both elf64 and
+	 * elf32 images.
+ */
struct elf32_hdr *ehdr;
+ u32 elf_shdr_get_size;
+ u64 phoff, shoff;
char class;
+ u16 phnum;
if (!fw) {
dev_err(dev, "failed to load %s\n", name);
@@ -50,13 +61,22 @@ int rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw)
ehdr = (struct elf32_hdr *)fw->data;
- /* We only support ELF32 at this point */
+ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
+ dev_err(dev, "Image is corrupted (bad magic)\n");
+ return -EINVAL;
+ }
+
class = ehdr->e_ident[EI_CLASS];
- if (class != ELFCLASS32) {
+ if (class != ELFCLASS32 && class != ELFCLASS64) {
dev_err(dev, "Unsupported class: %d\n", class);
return -EINVAL;
}
+ if (class == ELFCLASS64 && fw->size < sizeof(struct elf64_hdr)) {
+ dev_err(dev, "elf64 header is too small\n");
+ return -EINVAL;
+ }
+
/* We assume the firmware has the same endianness as the host */
# ifdef __LITTLE_ENDIAN
if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
@@ -67,26 +87,29 @@ int rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw)
return -EINVAL;
}
- if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) {
- dev_err(dev, "Image is too small\n");
- return -EINVAL;
- }
+ phoff = elf_hdr_get_e_phoff(class, fw->data);
+ shoff = elf_hdr_get_e_shoff(class, fw->data);
+ phnum = elf_hdr_get_e_phnum(class, fw->data);
+ elf_shdr_get_size = elf_size_of_shdr(class);
- if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
- dev_err(dev, "Image is corrupted (bad magic)\n");
+ if (fw->size < shoff + elf_shdr_get_size) {
+ dev_err(dev, "Image is too small\n");
return -EINVAL;
}
- if (ehdr->e_phnum == 0) {
+ if (phnum == 0) {
dev_err(dev, "No loadable segments\n");
return -EINVAL;
}
- if (ehdr->e_phoff > fw->size) {
+ if (phoff > fw->size) {
dev_err(dev, "Firmware size is too small\n");
return -EINVAL;
}
+ dev_dbg(dev, "Firmware is an elf%d file\n",
+ class == ELFCLASS32 ? 32 : 64);
+
return 0;
}
EXPORT_SYMBOL(rproc_elf_sanity_check);
@@ -96,17 +119,15 @@ EXPORT_SYMBOL(rproc_elf_sanity_check);
* @rproc: the remote processor handle
* @fw: the ELF firmware image
*
- * This function returns the entry point address of the ELF
- * image.
- *
* Note that the boot address is not a configurable property of all remote
* processors. Some will always boot at a specific hard-coded address.
+ *
+ * Return: entry point address of the ELF image
*/
-u32 rproc_elf_get_boot_addr(struct rproc *rproc, const struct firmware *fw)
+u64 rproc_elf_get_boot_addr(struct rproc *rproc, const struct firmware *fw)
{
- struct elf32_hdr *ehdr = (struct elf32_hdr *)fw->data;
-
- return ehdr->e_entry;
+ return elf_hdr_get_e_entry(fw_elf_get_class(fw), fw->data);
}
EXPORT_SYMBOL(rproc_elf_get_boot_addr);
@@ -133,57 +154,76 @@ EXPORT_SYMBOL(rproc_elf_get_boot_addr);
* might be different: they might not have iommus, and would prefer to
* directly allocate memory for every segment/resource. This is not yet
* supported, though.
+ *
+ * Return: 0 on success and an appropriate error code otherwise
*/
int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
{
struct device *dev = &rproc->dev;
- struct elf32_hdr *ehdr;
- struct elf32_phdr *phdr;
+ const void *ehdr, *phdr;
int i, ret = 0;
+ u16 phnum;
const u8 *elf_data = fw->data;
+ u8 class = fw_elf_get_class(fw);
+ u32 elf_phdr_get_size = elf_size_of_phdr(class);
- ehdr = (struct elf32_hdr *)elf_data;
- phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff);
+ ehdr = elf_data;
+ phnum = elf_hdr_get_e_phnum(class, ehdr);
+ phdr = elf_data + elf_hdr_get_e_phoff(class, ehdr);
/* go through the available ELF segments */
- for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
- u32 da = phdr->p_paddr;
- u32 memsz = phdr->p_memsz;
- u32 filesz = phdr->p_filesz;
- u32 offset = phdr->p_offset;
+ for (i = 0; i < phnum; i++, phdr += elf_phdr_get_size) {
+ u64 da = elf_phdr_get_p_paddr(class, phdr);
+ u64 memsz = elf_phdr_get_p_memsz(class, phdr);
+ u64 filesz = elf_phdr_get_p_filesz(class, phdr);
+ u64 offset = elf_phdr_get_p_offset(class, phdr);
+ u32 type = elf_phdr_get_p_type(class, phdr);
+ bool is_iomem = false;
void *ptr;
- if (phdr->p_type != PT_LOAD)
+ if (type != PT_LOAD || !memsz)
continue;
- dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n",
- phdr->p_type, da, memsz, filesz);
+ dev_dbg(dev, "phdr: type %d da 0x%llx memsz 0x%llx filesz 0x%llx\n",
+ type, da, memsz, filesz);
if (filesz > memsz) {
- dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n",
+ dev_err(dev, "bad phdr filesz 0x%llx memsz 0x%llx\n",
filesz, memsz);
ret = -EINVAL;
break;
}
if (offset + filesz > fw->size) {
- dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
+ dev_err(dev, "truncated fw: need 0x%llx avail 0x%zx\n",
offset + filesz, fw->size);
ret = -EINVAL;
break;
}
+ if (!rproc_u64_fit_in_size_t(memsz)) {
+ dev_err(dev, "size (%llx) does not fit in size_t type\n",
+ memsz);
+ ret = -EOVERFLOW;
+ break;
+ }
+
/* grab the kernel address for this device address */
- ptr = rproc_da_to_va(rproc, da, memsz);
+ ptr = rproc_da_to_va(rproc, da, memsz, &is_iomem);
if (!ptr) {
- dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz);
+ dev_err(dev, "bad phdr da 0x%llx mem 0x%llx\n", da,
+ memsz);
ret = -EINVAL;
break;
}
/* put the segment where the remote processor expects it */
- if (phdr->p_filesz)
- memcpy(ptr, elf_data + phdr->p_offset, filesz);
+ if (filesz) {
+ if (is_iomem)
+ memcpy_toio((void __iomem *)ptr, elf_data + offset, filesz);
+ else
+ memcpy(ptr, elf_data + offset, filesz);
+ }
/*
* Zero out remaining memory for this segment.
@@ -192,32 +232,47 @@ int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw)
* did this for us. Albeit harmless, we may consider removing
* this.
*/
- if (memsz > filesz)
- memset(ptr + filesz, 0, memsz - filesz);
+ if (memsz > filesz) {
+ if (is_iomem)
+ memset_io((void __iomem *)(ptr + filesz), 0, memsz - filesz);
+ else
+ memset(ptr + filesz, 0, memsz - filesz);
+ }
}
return ret;
}
EXPORT_SYMBOL(rproc_elf_load_segments);
-static struct elf32_shdr *
-find_table(struct device *dev, struct elf32_hdr *ehdr, size_t fw_size)
+static const void *
+find_table(struct device *dev, const struct firmware *fw)
{
- struct elf32_shdr *shdr;
+ const void *shdr, *name_table_shdr;
int i;
const char *name_table;
struct resource_table *table = NULL;
- const u8 *elf_data = (void *)ehdr;
+ const u8 *elf_data = (void *)fw->data;
+ u8 class = fw_elf_get_class(fw);
+ size_t fw_size = fw->size;
+ const void *ehdr = elf_data;
+ u16 shnum = elf_hdr_get_e_shnum(class, ehdr);
+ u32 elf_shdr_get_size = elf_size_of_shdr(class);
+ u16 shstrndx = elf_hdr_get_e_shstrndx(class, ehdr);
/* look for the resource table and handle it */
- shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff);
- name_table = elf_data + shdr[ehdr->e_shstrndx].sh_offset;
-
- for (i = 0; i < ehdr->e_shnum; i++, shdr++) {
- u32 size = shdr->sh_size;
- u32 offset = shdr->sh_offset;
-
- if (strcmp(name_table + shdr->sh_name, ".resource_table"))
+ /* First, get the section header according to the elf class */
+ shdr = elf_data + elf_hdr_get_e_shoff(class, ehdr);
+ /* Compute name table section header entry in shdr array */
+ name_table_shdr = shdr + (shstrndx * elf_shdr_get_size);
+ /* Finally, compute the name table section address in elf */
+ name_table = elf_data + elf_shdr_get_sh_offset(class, name_table_shdr);
+
+ for (i = 0; i < shnum; i++, shdr += elf_shdr_get_size) {
+ u64 size = elf_shdr_get_sh_size(class, shdr);
+ u64 offset = elf_shdr_get_sh_offset(class, shdr);
+ u32 name = elf_shdr_get_sh_name(class, shdr);
+
+ if (strcmp(name_table + name, ".resource_table"))
continue;
table = (struct resource_table *)(elf_data + offset);
@@ -270,21 +325,21 @@ find_table(struct device *dev, struct elf32_hdr *ehdr, size_t fw_size)
*/
int rproc_elf_load_rsc_table(struct rproc *rproc, const struct firmware *fw)
{
- struct elf32_hdr *ehdr;
- struct elf32_shdr *shdr;
+ const void *shdr;
struct device *dev = &rproc->dev;
struct resource_table *table = NULL;
const u8 *elf_data = fw->data;
size_t tablesz;
+ u8 class = fw_elf_get_class(fw);
+ u64 sh_offset;
- ehdr = (struct elf32_hdr *)elf_data;
-
- shdr = find_table(dev, ehdr, fw->size);
+ shdr = find_table(dev, fw);
if (!shdr)
return -EINVAL;
- table = (struct resource_table *)(elf_data + shdr->sh_offset);
- tablesz = shdr->sh_size;
+ sh_offset = elf_shdr_get_sh_offset(class, shdr);
+ table = (struct resource_table *)(elf_data + sh_offset);
+ tablesz = elf_shdr_get_sh_size(class, shdr);
/*
* Create a copy of the resource table. When a virtio device starts
@@ -311,19 +366,30 @@ EXPORT_SYMBOL(rproc_elf_load_rsc_table);
* This function finds the location of the loaded resource table. Don't
* call this function if the table wasn't loaded yet - it's a bug if you do.
*
- * Returns the pointer to the resource table if it is found or NULL otherwise.
+ * Return: pointer to the resource table if it is found or NULL otherwise.
* If the table wasn't loaded yet the result is unspecified.
*/
struct resource_table *rproc_elf_find_loaded_rsc_table(struct rproc *rproc,
const struct firmware *fw)
{
- struct elf32_hdr *ehdr = (struct elf32_hdr *)fw->data;
- struct elf32_shdr *shdr;
+ const void *shdr;
+ u64 sh_addr, sh_size;
+ u8 class = fw_elf_get_class(fw);
+ struct device *dev = &rproc->dev;
- shdr = find_table(&rproc->dev, ehdr, fw->size);
+ shdr = find_table(&rproc->dev, fw);
if (!shdr)
return NULL;
- return rproc_da_to_va(rproc, shdr->sh_addr, shdr->sh_size);
+ sh_addr = elf_shdr_get_sh_addr(class, shdr);
+ sh_size = elf_shdr_get_sh_size(class, shdr);
+
+ if (!rproc_u64_fit_in_size_t(sh_size)) {
+ dev_err(dev, "size (%llx) does not fit in size_t type\n",
+ sh_size);
+ return NULL;
+ }
+
+ return rproc_da_to_va(rproc, sh_addr, sh_size, NULL);
}
EXPORT_SYMBOL(rproc_elf_find_loaded_rsc_table);
diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h
index 493ef9262411..72d4d3d7d94d 100644
--- a/drivers/remoteproc/remoteproc_internal.h
+++ b/drivers/remoteproc/remoteproc_internal.h
@@ -28,6 +28,8 @@ struct rproc_debug_trace {
void rproc_release(struct kref *kref);
irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int vq_id);
void rproc_vdev_release(struct kref *ref);
+int rproc_of_parse_firmware(struct device *dev, int index,
+ const char **fw_name);
/* from remoteproc_virtio.c */
int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id);
@@ -47,15 +49,46 @@ extern struct class rproc_class;
int rproc_init_sysfs(void);
void rproc_exit_sysfs(void);
+/* from remoteproc_coredump.c */
+void rproc_coredump_cleanup(struct rproc *rproc);
+void rproc_coredump(struct rproc *rproc);
+
+#ifdef CONFIG_REMOTEPROC_CDEV
+void rproc_init_cdev(void);
+void rproc_exit_cdev(void);
+int rproc_char_device_add(struct rproc *rproc);
+void rproc_char_device_remove(struct rproc *rproc);
+#else
+static inline void rproc_init_cdev(void)
+{
+}
+
+static inline void rproc_exit_cdev(void)
+{
+}
+
+/*
+ * The character device interface is an optional feature; if it is not
+ * enabled, this stub should not return an error.
+ */
+static inline int rproc_char_device_add(struct rproc *rproc)
+{
+ return 0;
+}
+
+static inline void rproc_char_device_remove(struct rproc *rproc)
+{
+}
+#endif
+
void rproc_free_vring(struct rproc_vring *rvring);
int rproc_alloc_vring(struct rproc_vdev *rvdev, int i);
-void *rproc_da_to_va(struct rproc *rproc, u64 da, int len);
phys_addr_t rproc_va_to_pa(void *cpu_addr);
int rproc_trigger_recovery(struct rproc *rproc);
int rproc_elf_sanity_check(struct rproc *rproc, const struct firmware *fw);
-u32 rproc_elf_get_boot_addr(struct rproc *rproc, const struct firmware *fw);
+u64 rproc_elf_get_boot_addr(struct rproc *rproc, const struct firmware *fw);
int rproc_elf_load_segments(struct rproc *rproc, const struct firmware *fw);
int rproc_elf_load_rsc_table(struct rproc *rproc, const struct firmware *fw);
struct resource_table *rproc_elf_find_loaded_rsc_table(struct rproc *rproc,
@@ -63,6 +96,30 @@ struct resource_table *rproc_elf_find_loaded_rsc_table(struct rproc *rproc,
struct rproc_mem_entry *
rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...);
+static inline int rproc_prepare_device(struct rproc *rproc)
+{
+ if (rproc->ops->prepare)
+ return rproc->ops->prepare(rproc);
+
+ return 0;
+}
+
+static inline int rproc_unprepare_device(struct rproc *rproc)
+{
+ if (rproc->ops->unprepare)
+ return rproc->ops->unprepare(rproc);
+
+ return 0;
+}
+
+static inline int rproc_attach_device(struct rproc *rproc)
+{
+ if (rproc->ops->attach)
+ return rproc->ops->attach(rproc);
+
+ return 0;
+}
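
These wrappers are what make the new hooks optional; a driver that only
needs to attach to an already-booted core can provide just .attach,
sketched here with a hypothetical callback:

    static int my_rproc_attach(struct rproc *rproc)
    {
            /* hypothetical: sync state with the already-running core */
            return 0;
    }

    static const struct rproc_ops my_attach_ops = {
            .attach = my_rproc_attach,
            /* .prepare/.unprepare omitted: the wrappers make them optional */
    };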
+
static inline
int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw)
{
@@ -73,7 +130,7 @@ int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw)
}
static inline
-u32 rproc_get_boot_addr(struct rproc *rproc, const struct firmware *fw)
+u64 rproc_get_boot_addr(struct rproc *rproc, const struct firmware *fw)
{
if (rproc->ops->get_boot_addr)
return rproc->ops->get_boot_addr(rproc, fw);
@@ -119,4 +176,23 @@ struct resource_table *rproc_find_loaded_rsc_table(struct rproc *rproc,
return NULL;
}
+static inline
+struct resource_table *rproc_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *size)
+{
+ if (rproc->ops->get_loaded_rsc_table)
+ return rproc->ops->get_loaded_rsc_table(rproc, size);
+
+ return NULL;
+}
+
+static inline
+bool rproc_u64_fit_in_size_t(u64 val)
+{
+ if (sizeof(size_t) == sizeof(u64))
+ return true;
+
+ return (val <= (size_t) -1);
+}
+
#endif /* REMOTEPROC_INTERNAL_H */
diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c
index 7f8536b73295..8c7ea8922638 100644
--- a/drivers/remoteproc/remoteproc_sysfs.c
+++ b/drivers/remoteproc/remoteproc_sysfs.c
@@ -4,58 +4,156 @@
*/
#include <linux/remoteproc.h>
+#include <linux/slab.h>
#include "remoteproc_internal.h"
#define to_rproc(d) container_of(d, struct rproc, dev)
-/* Expose the loaded / running firmware name via sysfs */
-static ssize_t firmware_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t recovery_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct rproc *rproc = to_rproc(dev);
- return sprintf(buf, "%s\n", rproc->firmware);
+ return sysfs_emit(buf, "%s", rproc->recovery_disabled ? "disabled\n" : "enabled\n");
}
-/* Change firmware name via sysfs */
-static ssize_t firmware_store(struct device *dev,
+/*
+ * By writing to the 'recovery' sysfs entry, we control the behavior of the
+ * recovery mechanism dynamically. The default value of this entry is "enabled".
+ *
+ * The 'recovery' sysfs entry supports these commands:
+ *
+ * enabled: When enabled, the remote processor will be automatically
+ * recovered whenever it crashes. Moreover, if the remote
+ * processor crashes while recovery is disabled, it will
+ * be automatically recovered too as soon as recovery is enabled.
+ *
+ * disabled: When disabled, a remote processor will remain in a crashed
+ * state if it crashes. This is useful for debugging purposes;
+ * without it, debugging a crash is substantially harder.
+ *
+ * recover:	This command will trigger an immediate recovery if the
+ * remote processor is in a crashed state, without changing
+ * or checking the recovery state (enabled/disabled).
+ * This is useful during debugging sessions, when one expects
+ * additional crashes to happen after enabling recovery. In this
+ * case, enabling recovery will make it hard to debug subsequent
+ * crashes, so it's recommended to keep recovery disabled, and
+ * instead use the "recover" command as needed.
+ */
+static ssize_t recovery_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct rproc *rproc = to_rproc(dev);
- char *p;
- int err, len = count;
- err = mutex_lock_interruptible(&rproc->lock);
- if (err) {
- dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, err);
+ if (sysfs_streq(buf, "enabled")) {
+ /* change the flag and begin the recovery process if needed */
+ rproc->recovery_disabled = false;
+ rproc_trigger_recovery(rproc);
+ } else if (sysfs_streq(buf, "disabled")) {
+ rproc->recovery_disabled = true;
+ } else if (sysfs_streq(buf, "recover")) {
+ /* begin the recovery process without changing the flag */
+ rproc_trigger_recovery(rproc);
+ } else {
return -EINVAL;
}
- if (rproc->state != RPROC_OFFLINE) {
- dev_err(dev, "can't change firmware while running\n");
- err = -EBUSY;
- goto out;
- }
+ return count;
+}
+static DEVICE_ATTR_RW(recovery);
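
The same three commands are accepted from userspace; assuming the
standard class device naming,
"echo recover > /sys/class/remoteproc/remoteproc0/recovery" triggers a
one-shot recovery without touching the enabled/disabled flag.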
+
+/*
+ * A coredump-configuration-to-string lookup table, for exposing a
+ * human readable configuration via sysfs. Always keep in sync with
+ * enum rproc_dump_mechanism
+ */
+static const char * const rproc_coredump_str[] = {
+ [RPROC_COREDUMP_DISABLED] = "disabled",
+ [RPROC_COREDUMP_ENABLED] = "enabled",
+ [RPROC_COREDUMP_INLINE] = "inline",
+};
- len = strcspn(buf, "\n");
- if (!len) {
- dev_err(dev, "can't provide a NULL firmware\n");
- err = -EINVAL;
- goto out;
+/* Expose the current coredump configuration via sysfs */
+static ssize_t coredump_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct rproc *rproc = to_rproc(dev);
+
+ return sysfs_emit(buf, "%s\n", rproc_coredump_str[rproc->dump_conf]);
+}
+
+/*
+ * By writing to the 'coredump' sysfs entry, we control the behavior of the
+ * coredump mechanism dynamically. The default value of this entry is
+ * "disabled".
+ *
+ * The 'coredump' sysfs entry supports these commands:
+ *
+ * disabled:	This is the default coredump mechanism. Recovery will proceed
+ *		without collecting any dump.
+ *
+ * enabled:	When the remoteproc crashes the entire coredump will be
+ *		copied to a separate buffer and exposed to userspace.
+ *
+ * inline:	The coredump will not be copied to a separate buffer and the
+ *		recovery process will have to wait until data is read by
+ *		userspace. But this avoids using extra memory.
+ */
+static ssize_t coredump_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rproc *rproc = to_rproc(dev);
+
+ if (rproc->state == RPROC_CRASHED) {
+ dev_err(&rproc->dev, "can't change coredump configuration\n");
+ return -EBUSY;
}
- p = kstrndup(buf, len, GFP_KERNEL);
- if (!p) {
- err = -ENOMEM;
- goto out;
+ if (sysfs_streq(buf, "disabled")) {
+ rproc->dump_conf = RPROC_COREDUMP_DISABLED;
+ } else if (sysfs_streq(buf, "enabled")) {
+ rproc->dump_conf = RPROC_COREDUMP_ENABLED;
+ } else if (sysfs_streq(buf, "inline")) {
+ rproc->dump_conf = RPROC_COREDUMP_INLINE;
+ } else {
+ dev_err(&rproc->dev, "Invalid coredump configuration\n");
+ return -EINVAL;
}
- kfree(rproc->firmware);
- rproc->firmware = p;
-out:
- mutex_unlock(&rproc->lock);
+ return count;
+}
+static DEVICE_ATTR_RW(coredump);
+
+/* Expose the loaded / running firmware name via sysfs */
+static ssize_t firmware_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct rproc *rproc = to_rproc(dev);
+ const char *firmware = rproc->firmware;
+
+ /*
+ * If the remote processor has been started by an external
+	 * entity, we have no idea what image it is running. As such,
+	 * simply display a generic string rather than rproc->firmware.
+ */
+ if (rproc->state == RPROC_ATTACHED)
+ firmware = "unknown";
+
+ return sprintf(buf, "%s\n", firmware);
+}
+
+/* Change firmware name via sysfs */
+static ssize_t firmware_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct rproc *rproc = to_rproc(dev);
+ int err;
+
+ err = rproc_set_firmware(rproc, buf);
return err ? err : count;
}
@@ -71,6 +169,8 @@ static const char * const rproc_state_string[] = {
[RPROC_RUNNING] = "running",
[RPROC_CRASHED] = "crashed",
[RPROC_DELETED] = "deleted",
+ [RPROC_ATTACHED] = "attached",
+ [RPROC_DETACHED] = "detached",
[RPROC_LAST] = "invalid",
};
@@ -94,17 +194,13 @@ static ssize_t state_store(struct device *dev,
int ret = 0;
if (sysfs_streq(buf, "start")) {
- if (rproc->state == RPROC_RUNNING)
- return -EBUSY;
-
ret = rproc_boot(rproc);
if (ret)
dev_err(&rproc->dev, "Boot failed: %d\n", ret);
} else if (sysfs_streq(buf, "stop")) {
- if (rproc->state != RPROC_RUNNING)
- return -EINVAL;
-
- rproc_shutdown(rproc);
+ ret = rproc_shutdown(rproc);
+ } else if (sysfs_streq(buf, "detach")) {
+ ret = rproc_detach(rproc);
} else {
dev_err(&rproc->dev, "Unrecognised option: %s\n", buf);
ret = -EINVAL;
@@ -123,7 +219,25 @@ static ssize_t name_show(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_RO(name);
+static umode_t rproc_is_visible(struct kobject *kobj, struct attribute *attr,
+ int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct rproc *rproc = to_rproc(dev);
+ umode_t mode = attr->mode;
+
+ if (rproc->sysfs_read_only && (attr == &dev_attr_recovery.attr ||
+ attr == &dev_attr_firmware.attr ||
+ attr == &dev_attr_state.attr ||
+ attr == &dev_attr_coredump.attr))
+ mode = 0444;
+
+ return mode;
+}
+
static struct attribute *rproc_attrs[] = {
+ &dev_attr_coredump.attr,
+ &dev_attr_recovery.attr,
&dev_attr_firmware.attr,
&dev_attr_state.attr,
&dev_attr_name.attr,
@@ -131,7 +245,8 @@ static struct attribute *rproc_attrs[] = {
};
static const struct attribute_group rproc_devgroup = {
- .attrs = rproc_attrs
+ .attrs = rproc_attrs,
+ .is_visible = rproc_is_visible,
};
static const struct attribute_group *rproc_devgroups[] = {
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 8c07cb2ca8ba..70ab496d0431 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -9,7 +9,7 @@
* Brian Swetland <swetland@google.com>
*/
-#include <linux/dma-mapping.h>
+#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/of_reserved_mem.h>
#include <linux/remoteproc.h>
@@ -23,6 +23,18 @@
#include "remoteproc_internal.h"
+static struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev)
+{
+ return container_of(vdev->dev.parent, struct rproc_vdev, dev);
+}
+
+static struct rproc *vdev_to_rproc(struct virtio_device *vdev)
+{
+ struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
+
+ return rvdev->rproc;
+}
+
/* kick the remote processor, and let it know which virtqueue to poke at */
static bool rproc_virtio_notify(struct virtqueue *vq)
{
@@ -45,7 +57,7 @@ static bool rproc_virtio_notify(struct virtqueue *vq)
* when the remote processor signals that a specific virtqueue has pending
* messages available.
*
- * Returns IRQ_NONE if no message was found in the @notifyid virtqueue,
+ * Return: IRQ_NONE if no message was found in the @notifyid virtqueue,
* and IRQ_HANDLED otherwise.
*/
irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int notifyid)
@@ -320,11 +332,12 @@ static void rproc_virtio_dev_release(struct device *dev)
/**
* rproc_add_virtio_dev() - register an rproc-induced virtio device
* @rvdev: the remote vdev
+ * @id: the device type identification (used to match it with a driver).
*
* This function registers a virtio device. This vdev's parent is
* the rproc device.
*
- * Returns 0 on success or an appropriate error value otherwise.
+ * Return: 0 on success or an appropriate error value otherwise
*/
int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
{
@@ -334,6 +347,12 @@ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
struct rproc_mem_entry *mem;
int ret;
+ if (rproc->ops->kick == NULL) {
+ ret = -EINVAL;
+ dev_err(dev, ".kick method not defined for %s\n", rproc->name);
+ goto out;
+ }
+
/* Try to find dedicated vdev buffer carveout */
mem = rproc_find_carveout_by_name(rproc, "vdev%dbuffer", rvdev->index);
if (mem) {
@@ -368,6 +387,18 @@ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
goto out;
}
}
+ } else {
+ struct device_node *np = rproc->dev.parent->of_node;
+
+ /*
+	 * If we don't have a dedicated buffer, just attempt to re-assign
+ * the reserved memory from our parent. A default memory-region
+ * at index 0 from the parent's memory-regions is assigned for
+ * the rvdev dev to allocate from. Failure is non-critical and
+ * the allocations will fall back to global pools, so don't
+ * check return value either.
+ */
+ of_reserved_mem_device_init_by_idx(dev, np, 0);
}
/* Allocate virtio device */
@@ -413,6 +444,8 @@ out:
* @data: must be null
*
* This function unregisters an existing virtio device.
+ *
+ * Return: 0
*/
int rproc_remove_virtio_dev(struct device *dev, void *data)
{
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
index ee13d23b43a9..a3268d95a50e 100644
--- a/drivers/remoteproc/st_remoteproc.c
+++ b/drivers/remoteproc/st_remoteproc.c
@@ -190,7 +190,7 @@ static int st_rproc_start(struct rproc *rproc)
}
}
- dev_info(&rproc->dev, "Started from 0x%x\n", rproc->bootaddr);
+ dev_info(&rproc->dev, "Started from 0x%llx\n", rproc->bootaddr);
return 0;
diff --git a/drivers/remoteproc/st_slim_rproc.c b/drivers/remoteproc/st_slim_rproc.c
index 04492fead3c8..4ed9467897e5 100644
--- a/drivers/remoteproc/st_slim_rproc.c
+++ b/drivers/remoteproc/st_slim_rproc.c
@@ -174,7 +174,7 @@ static int slim_rproc_stop(struct rproc *rproc)
return 0;
}
-static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct st_slim_rproc *slim_rproc = rproc->priv;
void *va = NULL;
@@ -191,7 +191,7 @@ static void *slim_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
}
}
- dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%x va = 0x%pK\n",
+ dev_dbg(&rproc->dev, "da = 0x%llx len = 0x%zx va = 0x%pK\n",
da, len, va);
return va;
@@ -216,7 +216,7 @@ static const struct rproc_ops slim_rproc_ops = {
* obtains and enables any clocks required by the SLIM core and also
* ioremaps the various IO.
*
- * Returns st_slim_rproc pointer or PTR_ERR() on error.
+ * Return: st_slim_rproc pointer or PTR_ERR() on error.
*/
struct st_slim_rproc *st_slim_rproc_alloc(struct platform_device *pdev,
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index a18f88044111..7d782ed9e589 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -19,6 +19,7 @@
#include <linux/regmap.h>
#include <linux/remoteproc.h>
#include <linux/reset.h>
+#include <linux/slab.h>
#include <linux/workqueue.h>
#include "remoteproc_internal.h"
@@ -27,7 +28,7 @@
#define RELEASE_BOOT 1
#define MBOX_NB_VQ 2
-#define MBOX_NB_MBX 3
+#define MBOX_NB_MBX 4
#define STM32_SMC_RCC 0x82001000
#define STM32_SMC_REG_WRITE 0x1
@@ -37,6 +38,16 @@
#define STM32_MBX_VQ1 "vq1"
#define STM32_MBX_VQ1_ID 1
#define STM32_MBX_SHUTDOWN "shutdown"
+#define STM32_MBX_DETACH "detach"
+
+#define RSC_TBL_SIZE 1024
+
+#define M4_STATE_OFF 0
+#define M4_STATE_INI 1
+#define M4_STATE_CRUN 2
+#define M4_STATE_CSTOP 3
+#define M4_STATE_STANDBY 4
+#define M4_STATE_CRASH 5
struct stm32_syscon {
struct regmap *map;
@@ -70,12 +81,15 @@ struct stm32_rproc {
struct reset_control *rst;
struct stm32_syscon hold_boot;
struct stm32_syscon pdds;
+ struct stm32_syscon m4_state;
+ struct stm32_syscon rsctbl;
int wdg_irq;
u32 nb_rmems;
struct stm32_rproc_mem *rmems;
struct stm32_mbox mb[MBOX_NB_MBX];
struct workqueue_struct *workqueue;
bool secured_soc;
+ void __iomem *rsc_va;
};
static int stm32_rproc_pa_to_da(struct rproc *rproc, phys_addr_t pa, u64 *da)
@@ -127,10 +141,10 @@ static int stm32_rproc_mem_release(struct rproc *rproc,
return 0;
}
-static int stm32_rproc_of_memory_translations(struct rproc *rproc)
+static int stm32_rproc_of_memory_translations(struct platform_device *pdev,
+ struct stm32_rproc *ddata)
{
- struct device *parent, *dev = rproc->dev.parent;
- struct stm32_rproc *ddata = rproc->priv;
+ struct device *parent, *dev = &pdev->dev;
struct device_node *np;
struct stm32_rproc_mem *p_mems;
struct stm32_rproc_mem_ranges *mem_range;
@@ -194,16 +208,7 @@ static int stm32_rproc_mbox_idx(struct rproc *rproc, const unsigned char *name)
return -EINVAL;
}
-static int stm32_rproc_elf_load_rsc_table(struct rproc *rproc,
- const struct firmware *fw)
-{
- if (rproc_elf_load_rsc_table(rproc, fw))
- dev_warn(&rproc->dev, "no resource table found for this firmware\n");
-
- return 0;
-}
-
-static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+static int stm32_rproc_prepare(struct rproc *rproc)
{
struct device *dev = rproc->dev.parent;
struct device_node *np = dev->of_node;
@@ -256,12 +261,21 @@ static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
index++;
}
- return stm32_rproc_elf_load_rsc_table(rproc, fw);
+ return 0;
+}
+
+static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ if (rproc_elf_load_rsc_table(rproc, fw))
+ dev_warn(&rproc->dev, "no resource table found for this firmware\n");
+
+ return 0;
}
static irqreturn_t stm32_rproc_wdg(int irq, void *data)
{
- struct rproc *rproc = data;
+ struct platform_device *pdev = data;
+ struct rproc *rproc = platform_get_drvdata(pdev);
rproc_report_crash(rproc, RPROC_WATCHDOG);
@@ -323,6 +337,15 @@ static const struct stm32_mbox stm32_rproc_mbox[MBOX_NB_MBX] = {
.tx_done = NULL,
.tx_tout = 500, /* 500 ms timeout */
},
+ },
+ {
+ .name = STM32_MBX_DETACH,
+ .vq_id = -1,
+ .client = {
+ .tx_block = true,
+ .tx_done = NULL,
+ .tx_tout = 200, /* 200 ms timeout should be fair enough for the detach */
+ },
}
};
@@ -346,8 +369,13 @@ static int stm32_rproc_request_mbox(struct rproc *rproc)
ddata->mb[i].chan = mbox_request_channel_byname(cl, name);
if (IS_ERR(ddata->mb[i].chan)) {
- if (PTR_ERR(ddata->mb[i].chan) == -EPROBE_DEFER)
+ if (PTR_ERR(ddata->mb[i].chan) == -EPROBE_DEFER) {
+ dev_err_probe(dev->parent,
+ PTR_ERR(ddata->mb[i].chan),
+ "failed to request mailbox %s\n",
+ name);
goto err_probe;
+ }
dev_warn(dev, "cannot get %s mbox\n", name);
ddata->mb[i].chan = NULL;
}
@@ -436,18 +464,40 @@ static int stm32_rproc_start(struct rproc *rproc)
return stm32_rproc_set_hold_boot(rproc, true);
}
+static int stm32_rproc_attach(struct rproc *rproc)
+{
+ stm32_rproc_add_coredump_trace(rproc);
+
+ return stm32_rproc_set_hold_boot(rproc, true);
+}
+
+static int stm32_rproc_detach(struct rproc *rproc)
+{
+ struct stm32_rproc *ddata = rproc->priv;
+ int err, idx;
+
+ /* Inform the remote processor of the detach */
+ idx = stm32_rproc_mbox_idx(rproc, STM32_MBX_DETACH);
+ if (idx >= 0 && ddata->mb[idx].chan) {
+ err = mbox_send_message(ddata->mb[idx].chan, "stop");
+ if (err < 0)
+ dev_warn(&rproc->dev, "warning: remote FW detach without ack\n");
+ }
+
+ /* Allow remote processor to auto-reboot */
+ return stm32_rproc_set_hold_boot(rproc, false);
+}
+
static int stm32_rproc_stop(struct rproc *rproc)
{
struct stm32_rproc *ddata = rproc->priv;
- int err, dummy_data, idx;
+ int err, idx;
/* request shutdown of the remote processor */
- if (rproc->state != RPROC_OFFLINE) {
+ if (rproc->state != RPROC_OFFLINE && rproc->state != RPROC_CRASHED) {
idx = stm32_rproc_mbox_idx(rproc, STM32_MBX_SHUTDOWN);
if (idx >= 0 && ddata->mb[idx].chan) {
- /* a dummy data is sent to allow to block on transmit */
- err = mbox_send_message(ddata->mb[idx].chan,
- &dummy_data);
+ err = mbox_send_message(ddata->mb[idx].chan, "detach");
if (err < 0)
dev_warn(&rproc->dev, "warning: remote FW shutdown without ack\n");
}
@@ -473,6 +523,18 @@ static int stm32_rproc_stop(struct rproc *rproc)
}
}
+ /* update coprocessor state to OFF if available */
+ if (ddata->m4_state.map) {
+ err = regmap_update_bits(ddata->m4_state.map,
+ ddata->m4_state.reg,
+ ddata->m4_state.mask,
+ M4_STATE_OFF);
+ if (err) {
+ dev_err(&rproc->dev, "failed to set copro state\n");
+ return err;
+ }
+ }
+
return 0;
}
@@ -490,7 +552,7 @@ static void stm32_rproc_kick(struct rproc *rproc, int vqid)
continue;
if (!ddata->mb[i].chan)
return;
- err = mbox_send_message(ddata->mb[i].chan, (void *)(long)vqid);
+ err = mbox_send_message(ddata->mb[i].chan, "kick");
if (err < 0)
dev_err(&rproc->dev, "%s: failed (%s, err:%d)\n",
__func__, ddata->mb[i].name, err);
@@ -498,13 +560,89 @@ static void stm32_rproc_kick(struct rproc *rproc, int vqid)
}
}
-static struct rproc_ops st_rproc_ops = {
+static int stm32_rproc_da_to_pa(struct rproc *rproc,
+ u64 da, phys_addr_t *pa)
+{
+ struct stm32_rproc *ddata = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ struct stm32_rproc_mem *p_mem;
+ unsigned int i;
+
+ for (i = 0; i < ddata->nb_rmems; i++) {
+ p_mem = &ddata->rmems[i];
+
+ if (da < p_mem->dev_addr ||
+ da >= p_mem->dev_addr + p_mem->size)
+ continue;
+
+ *pa = da - p_mem->dev_addr + p_mem->bus_addr;
+ dev_dbg(dev, "da %llx to pa %pap\n", da, pa);
+
+ return 0;
+ }
+
+ dev_err(dev, "can't translate da %llx\n", da);
+
+ return -EINVAL;
+}
+
+static struct resource_table *
+stm32_rproc_get_loaded_rsc_table(struct rproc *rproc, size_t *table_sz)
+{
+ struct stm32_rproc *ddata = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ phys_addr_t rsc_pa;
+ u32 rsc_da;
+ int err;
+
+ /* The resource table has already been mapped, nothing to do */
+ if (ddata->rsc_va)
+ goto done;
+
+ err = regmap_read(ddata->rsctbl.map, ddata->rsctbl.reg, &rsc_da);
+ if (err) {
+ dev_err(dev, "failed to read rsc tbl addr\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!rsc_da)
+ /* no rsc table */
+ return ERR_PTR(-ENOENT);
+
+ err = stm32_rproc_da_to_pa(rproc, rsc_da, &rsc_pa);
+ if (err)
+ return ERR_PTR(err);
+
+ ddata->rsc_va = devm_ioremap_wc(dev, rsc_pa, RSC_TBL_SIZE);
+ if (IS_ERR_OR_NULL(ddata->rsc_va)) {
+ dev_err(dev, "Unable to map memory region: %pa+%zx\n",
+ &rsc_pa, RSC_TBL_SIZE);
+ ddata->rsc_va = NULL;
+ return ERR_PTR(-ENOMEM);
+ }
+
+done:
+ /*
+ * Assuming the resource table fits in 1 kB is fair.
+ * Note that for detach to work, this 1 kB memory area has to be reserved in the
+ * coprocessor firmware for the resource table. On detach, the remoteproc core
+ * re-initializes this entire area by overwriting it with the initial values
+ * stored in rproc->clean_table.
+ */
+ *table_sz = RSC_TBL_SIZE;
+ return (struct resource_table *)ddata->rsc_va;
+}
+
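+/*
+ * Illustrative worked example, not part of the driver, using hypothetical
+ * numbers: if one rmems[] range maps dev_addr 0x10000000 to bus_addr
+ * 0x30000000 with size 0x40000, and the backup register publishes
+ * rsc_da = 0x1003fc00, then stm32_rproc_da_to_pa() computes
+ *
+ * rsc_pa = rsc_da - dev_addr + bus_addr = 0x3003fc00
+ *
+ * and the table is mapped with devm_ioremap_wc(dev, rsc_pa, RSC_TBL_SIZE).
+ */
+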
+static const struct rproc_ops st_rproc_ops = {
+ .prepare = stm32_rproc_prepare,
.start = stm32_rproc_start,
.stop = stm32_rproc_stop,
+ .attach = stm32_rproc_attach,
+ .detach = stm32_rproc_detach,
.kick = stm32_rproc_kick,
.load = rproc_elf_load_segments,
.parse_fw = stm32_rproc_parse_fw,
.find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
+ .get_loaded_rsc_table = stm32_rproc_get_loaded_rsc_table,
.sanity_check = rproc_elf_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
};
@@ -537,27 +675,25 @@ out:
return err;
}
-static int stm32_rproc_parse_dt(struct platform_device *pdev)
+static int stm32_rproc_parse_dt(struct platform_device *pdev,
+ struct stm32_rproc *ddata, bool *auto_boot)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct rproc *rproc = platform_get_drvdata(pdev);
- struct stm32_rproc *ddata = rproc->priv;
struct stm32_syscon tz;
unsigned int tzen;
int err, irq;
irq = platform_get_irq(pdev, 0);
if (irq == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ return dev_err_probe(dev, irq, "failed to get interrupt\n");
if (irq > 0) {
err = devm_request_irq(dev, irq, stm32_rproc_wdg, 0,
- dev_name(dev), rproc);
- if (err) {
- dev_err(dev, "failed to request wdg irq\n");
- return err;
- }
+ dev_name(dev), pdev);
+ if (err)
+ return dev_err_probe(dev, err,
+ "failed to request wdg irq\n");
ddata->wdg_irq = irq;
@@ -570,10 +706,9 @@ static int stm32_rproc_parse_dt(struct platform_device *pdev)
}
ddata->rst = devm_reset_control_get_by_index(dev, 0);
- if (IS_ERR(ddata->rst)) {
- dev_err(dev, "failed to get mcu reset\n");
- return PTR_ERR(ddata->rst);
- }
+ if (IS_ERR(ddata->rst))
+ return dev_err_probe(dev, PTR_ERR(ddata->rst),
+ "failed to get mcu_reset\n");
/*
* if platform is secured the hold boot bit must be written by
@@ -588,7 +723,7 @@ static int stm32_rproc_parse_dt(struct platform_device *pdev)
err = regmap_read(tz.map, tz.reg, &tzen);
if (err) {
- dev_err(&rproc->dev, "failed to read tzen\n");
+ dev_err(dev, "failed to read tzen\n");
return err;
}
ddata->secured_soc = tzen & tz.mask;
@@ -602,11 +737,51 @@ static int stm32_rproc_parse_dt(struct platform_device *pdev)
err = stm32_rproc_get_syscon(np, "st,syscfg-pdds", &ddata->pdds);
if (err)
- dev_warn(dev, "failed to get pdds\n");
+ dev_info(dev, "failed to get pdds\n");
- rproc->auto_boot = of_property_read_bool(np, "st,auto-boot");
+ *auto_boot = of_property_read_bool(np, "st,auto-boot");
+
+ /*
+ * See if we can check the M4 status, i.e if it was started
+ * from the boot loader or not.
+ */
+ err = stm32_rproc_get_syscon(np, "st,syscfg-m4-state",
+ &ddata->m4_state);
+ if (err) {
+ /* remember this */
+ ddata->m4_state.map = NULL;
+ /* no coprocessor state syscon (optional) */
+ dev_warn(dev, "m4 state not supported\n");
- return stm32_rproc_of_memory_translations(rproc);
+ /* no need to go further */
+ return 0;
+ }
+
+ /* See if we can get the resource table */
+ err = stm32_rproc_get_syscon(np, "st,syscfg-rsc-tbl",
+ &ddata->rsctbl);
+ if (err) {
+ /* no rsc table syscon (optional) */
+ dev_warn(dev, "rsc tbl syscon not supported\n");
+ }
+
+ return 0;
+}
+
+static int stm32_rproc_get_m4_status(struct stm32_rproc *ddata,
+ unsigned int *state)
+{
+ /* See stm32_rproc_parse_dt() */
+ if (!ddata->m4_state.map) {
+ /*
+ * We couldn't get the coprocessor's state, assume
+ * it is not running.
+ */
+ *state = M4_STATE_OFF;
+ return 0;
+ }
+
+ return regmap_read(ddata->m4_state.map, ddata->m4_state.reg, state);
}
static int stm32_rproc_probe(struct platform_device *pdev)
@@ -615,6 +790,7 @@ static int stm32_rproc_probe(struct platform_device *pdev)
struct stm32_rproc *ddata;
struct device_node *np = dev->of_node;
struct rproc *rproc;
+ unsigned int state;
int ret;
ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
@@ -625,24 +801,38 @@ static int stm32_rproc_probe(struct platform_device *pdev)
if (!rproc)
return -ENOMEM;
- rproc->has_iommu = false;
ddata = rproc->priv;
+
+ rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE);
+
+ ret = stm32_rproc_parse_dt(pdev, ddata, &rproc->auto_boot);
+ if (ret)
+ goto free_rproc;
+
+ ret = stm32_rproc_of_memory_translations(pdev, ddata);
+ if (ret)
+ goto free_rproc;
+
+ ret = stm32_rproc_get_m4_status(ddata, &state);
+ if (ret)
+ goto free_rproc;
+
+ if (state == M4_STATE_CRUN)
+ rproc->state = RPROC_DETACHED;
+
+ rproc->has_iommu = false;
ddata->workqueue = create_workqueue(dev_name(dev));
if (!ddata->workqueue) {
dev_err(dev, "cannot create workqueue\n");
ret = -ENOMEM;
- goto free_rproc;
+ goto free_resources;
}
platform_set_drvdata(pdev, rproc);
- ret = stm32_rproc_parse_dt(pdev);
- if (ret)
- goto free_wkq;
-
ret = stm32_rproc_request_mbox(rproc);
if (ret)
- goto free_rproc;
+ goto free_wkq;
ret = rproc_add(rproc);
if (ret)
@@ -654,6 +844,8 @@ free_mb:
stm32_rproc_free_mbox(rproc);
free_wkq:
destroy_workqueue(ddata->workqueue);
+free_resources:
+ rproc_resource_cleanup(rproc);
free_rproc:
if (device_may_wakeup(dev)) {
dev_pm_clear_wake_irq(dev);
diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
new file mode 100644
index 000000000000..eb9c64f7b9b4
--- /dev/null
+++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
@@ -0,0 +1,908 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI K3 DSP Remote Processor(s) driver
+ *
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#include <linux/io.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/omap-mailbox.h>
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "omap_remoteproc.h"
+#include "remoteproc_internal.h"
+#include "ti_sci_proc.h"
+
+#define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1)
+
+/**
+ * struct k3_dsp_mem - internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: Bus address used to access the memory region
+ * @dev_addr: Device address of the memory region from DSP view
+ * @size: Size of the memory region
+ */
+struct k3_dsp_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ u32 dev_addr;
+ size_t size;
+};
+
+/**
+ * struct k3_dsp_mem_data - memory definitions for a DSP
+ * @name: name for this memory entry
+ * @dev_addr: device address for the memory entry
+ */
+struct k3_dsp_mem_data {
+ const char *name;
+ const u32 dev_addr;
+};
+
+/**
+ * struct k3_dsp_dev_data - device data structure for a DSP
+ * @mems: pointer to memory definitions for a DSP
+ * @num_mems: number of memory regions in @mems
+ * @boot_align_addr: boot vector address alignment granularity
+ * @uses_lreset: flag to denote the need for local reset management
+ */
+struct k3_dsp_dev_data {
+ const struct k3_dsp_mem_data *mems;
+ u32 num_mems;
+ u32 boot_align_addr;
+ bool uses_lreset;
+};
+
+/**
+ * struct k3_dsp_rproc - k3 DSP remote processor driver structure
+ * @dev: cached device pointer
+ * @rproc: remoteproc device handle
+ * @mem: internal memory regions data
+ * @num_mems: number of internal memory regions
+ * @rmem: reserved memory regions data
+ * @num_rmems: number of reserved memory regions
+ * @reset: reset control handle
+ * @data: pointer to DSP-specific device data
+ * @tsp: TI-SCI processor control handle
+ * @ti_sci: TI-SCI handle
+ * @ti_sci_id: TI-SCI device identifier
+ * @mbox: mailbox channel handle
+ * @client: mailbox client to request the mailbox channel
+ */
+struct k3_dsp_rproc {
+ struct device *dev;
+ struct rproc *rproc;
+ struct k3_dsp_mem *mem;
+ int num_mems;
+ struct k3_dsp_mem *rmem;
+ int num_rmems;
+ struct reset_control *reset;
+ const struct k3_dsp_dev_data *data;
+ struct ti_sci_proc *tsp;
+ const struct ti_sci_handle *ti_sci;
+ u32 ti_sci_id;
+ struct mbox_chan *mbox;
+ struct mbox_client client;
+};
+
+/**
+ * k3_dsp_rproc_mbox_callback() - inbound mailbox message handler
+ * @client: mailbox client pointer used for requesting the mailbox channel
+ * @data: mailbox payload
+ *
+ * This handler is invoked by the OMAP mailbox driver whenever a mailbox
+ * message is received. Usually, the mailbox payload simply contains
+ * the index of the virtqueue that is kicked by the remote processor,
+ * and we let remoteproc core handle it.
+ *
+ * In addition to virtqueue indices, we also have some out-of-band values
+ * that indicate different events. Those values are deliberately very
+ * large so they don't coincide with virtqueue indices.
+ */
+static void k3_dsp_rproc_mbox_callback(struct mbox_client *client, void *data)
+{
+ struct k3_dsp_rproc *kproc = container_of(client, struct k3_dsp_rproc,
+ client);
+ struct device *dev = kproc->rproc->dev.parent;
+ const char *name = kproc->rproc->name;
+ u32 msg = omap_mbox_message(data);
+
+ dev_dbg(dev, "mbox msg: 0x%x\n", msg);
+
+ switch (msg) {
+ case RP_MBOX_CRASH:
+ /*
+ * remoteproc detected an exception, but error recovery is not
+ * supported. So, just log this for now
+ */
+ dev_err(dev, "K3 DSP rproc %s crashed\n", name);
+ break;
+ case RP_MBOX_ECHO_REPLY:
+ dev_info(dev, "received echo reply from %s\n", name);
+ break;
+ default:
+ /* silently handle all other valid messages */
+ if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
+ return;
+ if (msg > kproc->rproc->max_notifyid) {
+ dev_dbg(dev, "dropping unknown message 0x%x", msg);
+ return;
+ }
+ /* msg contains the index of the triggered vring */
+ if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
+ dev_dbg(dev, "no message was found in vqid %d\n", msg);
+ }
+}
+
+/*
+ * Kick the remote processor to notify about pending unprocessed messages.
+ * The vqid is not used and is inconsequential, as the kick is performed
+ * through a simulated GPIO (a bit in an IPC interrupt-triggering register);
+ * the remote processor is expected to process both its Tx and Rx virtqueues.
+ */
+static void k3_dsp_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ mbox_msg_t msg = (mbox_msg_t)vqid;
+ int ret;
+
+ /* send the index of the triggered virtqueue in the mailbox payload */
+ ret = mbox_send_message(kproc->mbox, (void *)msg);
+ if (ret < 0)
+ dev_err(dev, "failed to send mailbox message, status = %d\n",
+ ret);
+}
+
+/* Put the DSP processor into reset */
+static int k3_dsp_rproc_reset(struct k3_dsp_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = reset_control_assert(kproc->reset);
+ if (ret) {
+ dev_err(dev, "local-reset assert failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ if (kproc->data->uses_lreset)
+ return ret;
+
+ ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "module-reset assert failed, ret = %d\n", ret);
+ if (reset_control_deassert(kproc->reset))
+ dev_warn(dev, "local-reset deassert back failed\n");
+ }
+
+ return ret;
+}
+
+/* Release the DSP processor from reset */
+static int k3_dsp_rproc_release(struct k3_dsp_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ int ret;
+
+ if (kproc->data->uses_lreset)
+ goto lreset;
+
+ ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "module-reset deassert failed, ret = %d\n", ret);
+ return ret;
+ }
+
+lreset:
+ ret = reset_control_deassert(kproc->reset);
+ if (ret) {
+ dev_err(dev, "local-reset deassert failed, ret = %d\n", ret);
+ if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id))
+ dev_warn(dev, "module-reset assert back failed\n");
+ }
+
+ return ret;
+}
+
+static int k3_dsp_rproc_request_mbox(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct mbox_client *client = &kproc->client;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ client->dev = dev;
+ client->tx_done = NULL;
+ client->rx_callback = k3_dsp_rproc_mbox_callback;
+ client->tx_block = false;
+ client->knows_txdone = false;
+
+ kproc->mbox = mbox_request_channel(client, 0);
+ if (IS_ERR(kproc->mbox)) {
+ ret = -EBUSY;
+ dev_err(dev, "mbox_request_channel failed: %ld\n",
+ PTR_ERR(kproc->mbox));
+ return ret;
+ }
+
+ /*
+ * Ping the remote processor, this is only for sanity's sake for now;
+ * there is no functional effect whatsoever.
+ *
+ * Note that the reply will _not_ arrive immediately: this message
+ * will wait in the mailbox fifo until the remote processor is booted.
+ */
+ ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
+ if (ret < 0) {
+ dev_err(dev, "mbox_send_message failed: %d\n", ret);
+ mbox_free_channel(kproc->mbox);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * The C66x DSP cores have a local reset that affects only the CPU, and a
+ * generic module reset that powers on the device and allows the DSP internal
+ * memories to be accessed while the local reset is asserted. This function is
+ * used to release the global reset on C66x DSPs to allow loading into the DSP
+ * internal RAMs. The .prepare() ops is invoked by remoteproc core before any
+ * firmware loading, and is followed by the .start() ops after loading to
+ * actually let the C66x DSP cores run. This callback is invoked only in
+ * remoteproc mode.
+ */
+static int k3_dsp_rproc_prepare(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret)
+ dev_err(dev, "module-reset deassert failed, cannot enable internal RAM loading, ret = %d\n",
+ ret);
+
+ return ret;
+}
+
+/*
+ * This function implements the .unprepare() ops and performs the complementary
+ * operations to that of the .prepare() ops. The function is used to assert the
+ * global reset on applicable C66x cores. This completes the second portion of
+ * powering down the C66x DSP cores. The cores themselves are only halted in the
+ * .stop() callback through the local reset, and the .unprepare() ops is invoked
+ * by the remoteproc core after the remoteproc is stopped to balance the global
+ * reset. This callback is invoked only in remoteproc mode.
+ */
+static int k3_dsp_rproc_unprepare(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci,
+ kproc->ti_sci_id);
+ if (ret)
+ dev_err(dev, "module-reset assert failed, ret = %d\n", ret);
+
+ return ret;
+}
+
+/*
+ * Power up the DSP remote processor.
+ *
+ * This function will be invoked only after the firmware for this rproc
+ * was loaded, parsed successfully, and all of its resource requirements
+ * were met. This callback is invoked only in remoteproc mode.
+ */
+static int k3_dsp_rproc_start(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ u32 boot_addr;
+ int ret;
+
+ ret = k3_dsp_rproc_request_mbox(rproc);
+ if (ret)
+ return ret;
+
+ boot_addr = rproc->bootaddr;
+ if (boot_addr & (kproc->data->boot_align_addr - 1)) {
+ dev_err(dev, "invalid boot address 0x%x, must be aligned on a 0x%x boundary\n",
+ boot_addr, kproc->data->boot_align_addr);
+ ret = -EINVAL;
+ goto put_mbox;
+ }
+
+ dev_err(dev, "booting DSP core using boot addr = 0x%x\n", boot_addr);
+ ret = ti_sci_proc_set_config(kproc->tsp, boot_addr, 0, 0);
+ if (ret)
+ goto put_mbox;
+
+ ret = k3_dsp_rproc_release(kproc);
+ if (ret)
+ goto put_mbox;
+
+ return 0;
+
+put_mbox:
+ mbox_free_channel(kproc->mbox);
+ return ret;
+}
+
+/*
+ * Stop the DSP remote processor.
+ *
+ * This function puts the DSP processor into reset, and finishes processing
+ * of any pending messages. This callback is invoked only in remoteproc mode.
+ */
+static int k3_dsp_rproc_stop(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+
+ mbox_free_channel(kproc->mbox);
+
+ k3_dsp_rproc_reset(kproc);
+
+ return 0;
+}
+
+/*
+ * Attach to a running DSP remote processor (IPC-only mode)
+ *
+ * This rproc attach callback only needs to request the mailbox, the remote
+ * processor is already booted, so there is no need to issue any TI-SCI
+ * commands to boot the DSP core. This callback is invoked only in IPC-only
+ * mode.
+ */
+static int k3_dsp_rproc_attach(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = k3_dsp_rproc_request_mbox(rproc);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "DSP initialized in IPC-only mode\n");
+ return 0;
+}
+
+/*
+ * Detach from a running DSP remote processor (IPC-only mode)
+ *
+ * This rproc detach callback performs the opposite operation to attach callback
+ * and only needs to release the mailbox, the DSP core is not stopped and will
+ * be left to continue to run its booted firmware. This callback is invoked only
+ * in IPC-only mode.
+ */
+static int k3_dsp_rproc_detach(struct rproc *rproc)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+
+ mbox_free_channel(kproc->mbox);
+ dev_info(dev, "DSP deinitialized in IPC-only mode\n");
+ return 0;
+}
+
+/*
+ * This function implements the .get_loaded_rsc_table() callback and is used
+ * to provide the resource table for a booted DSP in IPC-only mode. The K3 DSP
+ * firmwares follow a design-by-contract approach and are expected to have the
+ * resource table at the base of the DDR region reserved for firmware usage.
+ * This provides flexibility for the remote processor to be booted by different
+ * bootloaders that may or may not have the ability to publish the resource table
+ * address and size through a DT property. This callback is invoked only in
+ * IPC-only mode.
+ */
+static struct resource_table *k3_dsp_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *rsc_table_sz)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+
+ if (!kproc->rmem[0].cpu_addr) {
+ dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * NOTE: The resource table size is currently hard-coded to a maximum
+ * of 256 bytes. The most common resource table usage for K3 firmwares
+ * is to only have the vdev resource entry and an optional trace entry.
+ * The exact size could be computed based on resource table address, but
+ * the hard-coded value suffices to support the IPC-only mode.
+ */
+ *rsc_table_sz = 256;
+ return (struct resource_table *)kproc->rmem[0].cpu_addr;
+}
+
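+/*
+ * Illustrative sketch, not part of the driver: one way the firmware side
+ * could declare the contract described above - a resource table with a
+ * single vdev (rpmsg) entry and two vrings placed at the base of the
+ * firmware's reserved DDR region. All field values here are hypothetical.
+ */
+struct k3_dsp_example_rsc_table {
+ /* struct resource_table header */
+ u32 ver; /* must be 1 */
+ u32 num; /* number of entries, 1 here */
+ u32 reserved[2];
+ u32 offset[1]; /* byte offset of the entry below */
+ /* a single vdev entry, flattened out (fw_rsc_hdr + fw_rsc_vdev) */
+ u32 type; /* RSC_VDEV */
+ u32 id; /* VIRTIO_ID_RPMSG */
+ u32 notifyid;
+ u32 dfeatures;
+ u32 gfeatures;
+ u32 config_len;
+ u8 status;
+ u8 num_of_vrings; /* 2 */
+ u8 rsvd[2];
+ struct fw_rsc_vdev_vring vring[2];
+} __packed;
+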
+/*
+ * Custom function to translate a DSP device address (internal RAMs only) to a
+ * kernel virtual address. The DSPs can access their RAMs at either an internal
+ * address visible only from a DSP, or at the SoC-level bus address. Both these
+ * addresses need to be looked through for translation. The translated addresses
+ * can be used either by the remoteproc core for loading (when using kernel
+ * remoteproc loader), or by any rpmsg bus drivers.
+ */
+static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct k3_dsp_rproc *kproc = rproc->priv;
+ void __iomem *va = NULL;
+ phys_addr_t bus_addr;
+ u32 dev_addr, offset;
+ size_t size;
+ int i;
+
+ if (len == 0)
+ return NULL;
+
+ for (i = 0; i < kproc->num_mems; i++) {
+ bus_addr = kproc->mem[i].bus_addr;
+ dev_addr = kproc->mem[i].dev_addr;
+ size = kproc->mem[i].size;
+
+ if (da < KEYSTONE_RPROC_LOCAL_ADDRESS_MASK) {
+ /* handle DSP-view addresses */
+ if (da >= dev_addr &&
+ ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = kproc->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ } else {
+ /* handle SoC-view addresses */
+ if (da >= bus_addr &&
+ (da + len) <= (bus_addr + size)) {
+ offset = da - bus_addr;
+ va = kproc->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+ }
+
+ /* handle static DDR reserved memory regions */
+ for (i = 0; i < kproc->num_rmems; i++) {
+ dev_addr = kproc->rmem[i].dev_addr;
+ size = kproc->rmem[i].size;
+
+ if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = kproc->rmem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+
+ return NULL;
+}
+
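+/*
+ * Illustrative sketch, not part of the driver: a worked example of the
+ * dual-view lookup above, assuming a hypothetical entry with DSP-view
+ * dev_addr 0x800000, SoC-view bus_addr 0x4d80800000 and size SZ_512K.
+ */
+static __maybe_unused void k3_dsp_da_to_va_example(struct k3_dsp_rproc *kproc)
+{
+ /* DSP-view da below the 16M local-address mask: offset is 0x100 */
+ void *va1 = k3_dsp_rproc_da_to_va(kproc->rproc, 0x800100, 16, NULL);
+ /* SoC-view da addressing the same byte through the bus alias */
+ void *va2 = k3_dsp_rproc_da_to_va(kproc->rproc, 0x4d80800100ULL, 16, NULL);
+
+ /* both views resolve to cpu_addr + 0x100 of the same region */
+ WARN_ON(va1 != va2);
+}
+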
+static const struct rproc_ops k3_dsp_rproc_ops = {
+ .start = k3_dsp_rproc_start,
+ .stop = k3_dsp_rproc_stop,
+ .kick = k3_dsp_rproc_kick,
+ .da_to_va = k3_dsp_rproc_da_to_va,
+};
+
+static int k3_dsp_rproc_of_get_memories(struct platform_device *pdev,
+ struct k3_dsp_rproc *kproc)
+{
+ const struct k3_dsp_dev_data *data = kproc->data;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int num_mems = 0;
+ int i;
+
+ num_mems = kproc->data->num_mems;
+ kproc->mem = devm_kcalloc(kproc->dev, num_mems,
+ sizeof(*kproc->mem), GFP_KERNEL);
+ if (!kproc->mem)
+ return -ENOMEM;
+
+ for (i = 0; i < num_mems; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ data->mems[i].name);
+ if (!res) {
+ dev_err(dev, "found no memory resource for %s\n",
+ data->mems[i].name);
+ return -EINVAL;
+ }
+ if (!devm_request_mem_region(dev, res->start,
+ resource_size(res),
+ dev_name(dev))) {
+ dev_err(dev, "could not request %s region for resource\n",
+ data->mems[i].name);
+ return -EBUSY;
+ }
+
+ kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
+ resource_size(res));
+ if (!kproc->mem[i].cpu_addr) {
+ dev_err(dev, "failed to map %s memory\n",
+ data->mems[i].name);
+ return -ENOMEM;
+ }
+ kproc->mem[i].bus_addr = res->start;
+ kproc->mem[i].dev_addr = data->mems[i].dev_addr;
+ kproc->mem[i].size = resource_size(res);
+
+ dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ data->mems[i].name, &kproc->mem[i].bus_addr,
+ kproc->mem[i].size, kproc->mem[i].cpu_addr,
+ kproc->mem[i].dev_addr);
+ }
+ kproc->num_mems = num_mems;
+
+ return 0;
+}
+
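+/*
+ * Illustrative note, not part of the driver: the named memory resources
+ * looked up above are expected to come from the DSP node's reg/reg-names
+ * entries, e.g. a hypothetical C66x fragment along these lines:
+ *
+ * reg-names = "l2sram", "l1pram", "l1dram";
+ */
+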
+static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *rmem_np;
+ struct reserved_mem *rmem;
+ int num_rmems;
+ int ret, i;
+
+ num_rmems = of_property_count_elems_of_size(np, "memory-region",
+ sizeof(phandle));
+ if (num_rmems <= 0) {
+ dev_err(dev, "device does not reserved memory regions, ret = %d\n",
+ num_rmems);
+ return -EINVAL;
+ }
+ if (num_rmems < 2) {
+ dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
+ num_rmems);
+ return -EINVAL;
+ }
+
+ /* use reserved memory region 0 for vring DMA allocations */
+ ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
+ if (ret) {
+ dev_err(dev, "device cannot initialize DMA pool, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ num_rmems--;
+ kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
+ if (!kproc->rmem) {
+ ret = -ENOMEM;
+ goto release_rmem;
+ }
+
+ /* use remaining reserved memory regions for static carveouts */
+ for (i = 0; i < num_rmems; i++) {
+ rmem_np = of_parse_phandle(np, "memory-region", i + 1);
+ if (!rmem_np) {
+ ret = -EINVAL;
+ goto unmap_rmem;
+ }
+
+ rmem = of_reserved_mem_lookup(rmem_np);
+ if (!rmem) {
+ of_node_put(rmem_np);
+ ret = -EINVAL;
+ goto unmap_rmem;
+ }
+ of_node_put(rmem_np);
+
+ kproc->rmem[i].bus_addr = rmem->base;
+ /* 64-bit address regions currently not supported */
+ kproc->rmem[i].dev_addr = (u32)rmem->base;
+ kproc->rmem[i].size = rmem->size;
+ kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
+ if (!kproc->rmem[i].cpu_addr) {
+ dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
+ i + 1, &rmem->base, &rmem->size);
+ ret = -ENOMEM;
+ goto unmap_rmem;
+ }
+
+ dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ i + 1, &kproc->rmem[i].bus_addr,
+ kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
+ kproc->rmem[i].dev_addr);
+ }
+ kproc->num_rmems = num_rmems;
+
+ return 0;
+
+unmap_rmem:
+ for (i--; i >= 0; i--)
+ iounmap(kproc->rmem[i].cpu_addr);
+ kfree(kproc->rmem);
+release_rmem:
+ of_reserved_mem_device_release(kproc->dev);
+ return ret;
+}
+
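+/*
+ * Illustrative note, not part of the driver: the function above implies a
+ * memory-region layout like this hypothetical fragment, where the first
+ * phandle is the DMA pool and the remaining ones are static carveouts
+ * (the first carveout doubling as the resource-table home in IPC-only
+ * mode):
+ *
+ * memory-region = <&dsp_dma_pool>, <&dsp_ddr_carveout>;
+ */
+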
+static void k3_dsp_reserved_mem_exit(struct k3_dsp_rproc *kproc)
+{
+ int i;
+
+ for (i = 0; i < kproc->num_rmems; i++)
+ iounmap(kproc->rmem[i].cpu_addr);
+ kfree(kproc->rmem);
+
+ of_reserved_mem_device_release(kproc->dev);
+}
+
+static
+struct ti_sci_proc *k3_dsp_rproc_of_get_tsp(struct device *dev,
+ const struct ti_sci_handle *sci)
+{
+ struct ti_sci_proc *tsp;
+ u32 temp[2];
+ int ret;
+
+ ret = of_property_read_u32_array(dev->of_node, "ti,sci-proc-ids",
+ temp, 2);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ tsp = kzalloc(sizeof(*tsp), GFP_KERNEL);
+ if (!tsp)
+ return ERR_PTR(-ENOMEM);
+
+ tsp->dev = dev;
+ tsp->sci = sci;
+ tsp->ops = &sci->ops.proc_ops;
+ tsp->proc_id = temp[0];
+ tsp->host_id = temp[1];
+
+ return tsp;
+}
+
+static int k3_dsp_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct k3_dsp_dev_data *data;
+ struct k3_dsp_rproc *kproc;
+ struct rproc *rproc;
+ const char *fw_name;
+ bool p_state = false;
+ int ret = 0;
+ int ret1;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
+
+ ret = rproc_of_parse_firmware(dev, 0, &fw_name);
+ if (ret) {
+ dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ rproc = rproc_alloc(dev, dev_name(dev), &k3_dsp_rproc_ops, fw_name,
+ sizeof(*kproc));
+ if (!rproc)
+ return -ENOMEM;
+
+ rproc->has_iommu = false;
+ rproc->recovery_disabled = true;
+ if (data->uses_lreset) {
+ rproc->ops->prepare = k3_dsp_rproc_prepare;
+ rproc->ops->unprepare = k3_dsp_rproc_unprepare;
+ }
+ kproc = rproc->priv;
+ kproc->rproc = rproc;
+ kproc->dev = dev;
+ kproc->data = data;
+
+ kproc->ti_sci = ti_sci_get_by_phandle(np, "ti,sci");
+ if (IS_ERR(kproc->ti_sci)) {
+ ret = PTR_ERR(kproc->ti_sci);
+ if (ret != -EPROBE_DEFER) {
+ dev_err(dev, "failed to get ti-sci handle, ret = %d\n",
+ ret);
+ }
+ kproc->ti_sci = NULL;
+ goto free_rproc;
+ }
+
+ ret = of_property_read_u32(np, "ti,sci-dev-id", &kproc->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "missing 'ti,sci-dev-id' property\n");
+ goto put_sci;
+ }
+
+ kproc->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(kproc->reset)) {
+ ret = PTR_ERR(kproc->reset);
+ dev_err(dev, "failed to get reset, status = %d\n", ret);
+ goto put_sci;
+ }
+
+ kproc->tsp = k3_dsp_rproc_of_get_tsp(dev, kproc->ti_sci);
+ if (IS_ERR(kproc->tsp)) {
+ dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
+ ret);
+ ret = PTR_ERR(kproc->tsp);
+ goto put_sci;
+ }
+
+ ret = ti_sci_proc_request(kproc->tsp);
+ if (ret < 0) {
+ dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
+ goto free_tsp;
+ }
+
+ ret = k3_dsp_rproc_of_get_memories(pdev, kproc);
+ if (ret)
+ goto release_tsp;
+
+ ret = k3_dsp_reserved_mem_init(kproc);
+ if (ret) {
+ dev_err(dev, "reserved memory init failed, ret = %d\n", ret);
+ goto release_tsp;
+ }
+
+ ret = kproc->ti_sci->ops.dev_ops.is_on(kproc->ti_sci, kproc->ti_sci_id,
+ NULL, &p_state);
+ if (ret) {
+ dev_err(dev, "failed to get initial state, mode cannot be determined, ret = %d\n",
+ ret);
+ goto release_mem;
+ }
+
+ /* configure J721E devices for either remoteproc or IPC-only mode */
+ if (p_state) {
+ dev_info(dev, "configured DSP for IPC-only mode\n");
+ rproc->state = RPROC_DETACHED;
+ /* override rproc ops with only required IPC-only mode ops */
+ rproc->ops->prepare = NULL;
+ rproc->ops->unprepare = NULL;
+ rproc->ops->start = NULL;
+ rproc->ops->stop = NULL;
+ rproc->ops->attach = k3_dsp_rproc_attach;
+ rproc->ops->detach = k3_dsp_rproc_detach;
+ rproc->ops->get_loaded_rsc_table = k3_dsp_get_loaded_rsc_table;
+ } else {
+ dev_info(dev, "configured DSP for remoteproc mode\n");
+ /*
+ * ensure the DSP local reset is asserted to ensure the DSP
+ * doesn't execute bogus code in .prepare() when the module
+ * reset is released.
+ */
+ if (data->uses_lreset) {
+ ret = reset_control_status(kproc->reset);
+ if (ret < 0) {
+ dev_err(dev, "failed to get reset status, status = %d\n",
+ ret);
+ goto release_mem;
+ } else if (ret == 0) {
+ dev_warn(dev, "local reset is deasserted for device\n");
+ k3_dsp_rproc_reset(kproc);
+ }
+ }
+ }
+
+ ret = rproc_add(rproc);
+ if (ret) {
+ dev_err(dev, "failed to add register device with remoteproc core, status = %d\n",
+ ret);
+ goto release_mem;
+ }
+
+ platform_set_drvdata(pdev, kproc);
+
+ return 0;
+
+release_mem:
+ k3_dsp_reserved_mem_exit(kproc);
+release_tsp:
+ ret1 = ti_sci_proc_release(kproc->tsp);
+ if (ret1)
+ dev_err(dev, "failed to release proc, ret = %d\n", ret1);
+free_tsp:
+ kfree(kproc->tsp);
+put_sci:
+ ret1 = ti_sci_put_handle(kproc->ti_sci);
+ if (ret1)
+ dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret1);
+free_rproc:
+ rproc_free(rproc);
+ return ret;
+}
+
+static int k3_dsp_rproc_remove(struct platform_device *pdev)
+{
+ struct k3_dsp_rproc *kproc = platform_get_drvdata(pdev);
+ struct rproc *rproc = kproc->rproc;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ if (rproc->state == RPROC_ATTACHED) {
+ ret = rproc_detach(rproc);
+ if (ret) {
+ dev_err(dev, "failed to detach proc, ret = %d\n", ret);
+ return ret;
+ }
+ }
+
+ rproc_del(kproc->rproc);
+
+ ret = ti_sci_proc_release(kproc->tsp);
+ if (ret)
+ dev_err(dev, "failed to release proc, ret = %d\n", ret);
+
+ kfree(kproc->tsp);
+
+ ret = ti_sci_put_handle(kproc->ti_sci);
+ if (ret)
+ dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret);
+
+ k3_dsp_reserved_mem_exit(kproc);
+ rproc_free(kproc->rproc);
+
+ return 0;
+}
+
+static const struct k3_dsp_mem_data c66_mems[] = {
+ { .name = "l2sram", .dev_addr = 0x800000 },
+ { .name = "l1pram", .dev_addr = 0xe00000 },
+ { .name = "l1dram", .dev_addr = 0xf00000 },
+};
+
+/* C71x cores have only an L1P cache; there are no L1P SRAMs */
+static const struct k3_dsp_mem_data c71_mems[] = {
+ { .name = "l2sram", .dev_addr = 0x800000 },
+ { .name = "l1dram", .dev_addr = 0xe00000 },
+};
+
+static const struct k3_dsp_dev_data c66_data = {
+ .mems = c66_mems,
+ .num_mems = ARRAY_SIZE(c66_mems),
+ .boot_align_addr = SZ_1K,
+ .uses_lreset = true,
+};
+
+static const struct k3_dsp_dev_data c71_data = {
+ .mems = c71_mems,
+ .num_mems = ARRAY_SIZE(c71_mems),
+ .boot_align_addr = SZ_2M,
+ .uses_lreset = false,
+};
+
+static const struct of_device_id k3_dsp_of_match[] = {
+ { .compatible = "ti,j721e-c66-dsp", .data = &c66_data, },
+ { .compatible = "ti,j721e-c71-dsp", .data = &c71_data, },
+ { .compatible = "ti,j721s2-c71-dsp", .data = &c71_data, },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, k3_dsp_of_match);
+
+static struct platform_driver k3_dsp_rproc_driver = {
+ .probe = k3_dsp_rproc_probe,
+ .remove = k3_dsp_rproc_remove,
+ .driver = {
+ .name = "k3-dsp-rproc",
+ .of_match_table = k3_dsp_of_match,
+ },
+};
+
+module_platform_driver(k3_dsp_rproc_driver);
+
+MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI K3 DSP Remoteproc driver");
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
new file mode 100644
index 000000000000..4840ad906018
--- /dev/null
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -0,0 +1,1797 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI K3 R5F (MCU) Remote Processor driver
+ *
+ * Copyright (C) 2017-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/omap-mailbox.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include "omap_remoteproc.h"
+#include "remoteproc_internal.h"
+#include "ti_sci_proc.h"
+
+/* This address can either be for ATCM or BTCM with the other at address 0x0 */
+#define K3_R5_TCM_DEV_ADDR 0x41010000
+
+/* R5 TI-SCI Processor Configuration Flags */
+#define PROC_BOOT_CFG_FLAG_R5_DBG_EN 0x00000001
+#define PROC_BOOT_CFG_FLAG_R5_DBG_NIDEN 0x00000002
+#define PROC_BOOT_CFG_FLAG_R5_LOCKSTEP 0x00000100
+#define PROC_BOOT_CFG_FLAG_R5_TEINIT 0x00000200
+#define PROC_BOOT_CFG_FLAG_R5_NMFI_EN 0x00000400
+#define PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE 0x00000800
+#define PROC_BOOT_CFG_FLAG_R5_BTCM_EN 0x00001000
+#define PROC_BOOT_CFG_FLAG_R5_ATCM_EN 0x00002000
+/* Available from J7200 SoCs onwards */
+#define PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS 0x00004000
+/* Applicable to only AM64x SoCs */
+#define PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE 0x00008000
+
+/* R5 TI-SCI Processor Control Flags */
+#define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT 0x00000001
+
+/* R5 TI-SCI Processor Status Flags */
+#define PROC_BOOT_STATUS_FLAG_R5_WFE 0x00000001
+#define PROC_BOOT_STATUS_FLAG_R5_WFI 0x00000002
+#define PROC_BOOT_STATUS_FLAG_R5_CLK_GATED 0x00000004
+#define PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED 0x00000100
+/* Applicable to only AM64x SoCs */
+#define PROC_BOOT_STATUS_FLAG_R5_SINGLECORE_ONLY 0x00000200
+
+/**
+ * struct k3_r5_mem - internal memory structure
+ * @cpu_addr: MPU virtual address of the memory region
+ * @bus_addr: Bus address used to access the memory region
+ * @dev_addr: Device address from remoteproc view
+ * @size: Size of the memory region
+ */
+struct k3_r5_mem {
+ void __iomem *cpu_addr;
+ phys_addr_t bus_addr;
+ u32 dev_addr;
+ size_t size;
+};
+
+/*
+ * Not all cluster mode values are applicable on all SoCs. The following
+ * are the modes supported on various SoCs:
+ * Split mode : AM65x, J721E, J7200 and AM64x SoCs
+ * LockStep mode : AM65x, J721E and J7200 SoCs
+ * Single-CPU mode : AM64x SoCs only
+ */
+enum cluster_mode {
+ CLUSTER_MODE_SPLIT = 0,
+ CLUSTER_MODE_LOCKSTEP,
+ CLUSTER_MODE_SINGLECPU,
+};
+
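+/*
+ * Illustrative sketch, not part of the driver: one plausible way a probe
+ * path could pick the cluster mode from DT. The "ti,cluster-mode" property
+ * name follows the R5F bindings, but the fallback policy below is purely
+ * an assumption made for the example.
+ */
+static __maybe_unused enum cluster_mode
+k3_r5_example_mode(struct device_node *np, bool single_cpu_capable)
+{
+ u32 mode;
+
+ if (of_property_read_u32(np, "ti,cluster-mode", &mode))
+ return CLUSTER_MODE_LOCKSTEP; /* assumed default */
+
+ if (mode == CLUSTER_MODE_SINGLECPU && !single_cpu_capable)
+ return CLUSTER_MODE_SPLIT; /* assumed downgrade */
+
+ return (enum cluster_mode)mode;
+}
+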
+/**
+ * struct k3_r5_soc_data - match data to handle SoC variations
+ * @tcm_is_double: flag to denote the larger unified TCMs in certain modes
+ * @tcm_ecc_autoinit: flag to denote the auto-initialization of TCMs for ECC
+ * @single_cpu_mode: flag to denote if SoC/IP supports Single-CPU mode
+ */
+struct k3_r5_soc_data {
+ bool tcm_is_double;
+ bool tcm_ecc_autoinit;
+ bool single_cpu_mode;
+};
+
+/**
+ * struct k3_r5_cluster - K3 R5F Cluster structure
+ * @dev: cached device pointer
+ * @mode: Mode to configure the Cluster - Split or LockStep
+ * @cores: list of R5 cores within the cluster
+ * @soc_data: SoC-specific feature data for a R5FSS
+ */
+struct k3_r5_cluster {
+ struct device *dev;
+ enum cluster_mode mode;
+ struct list_head cores;
+ const struct k3_r5_soc_data *soc_data;
+};
+
+/**
+ * struct k3_r5_core - K3 R5 core structure
+ * @elem: linked list item
+ * @dev: cached device pointer
+ * @rproc: rproc handle representing this core
+ * @mem: internal memory regions data
+ * @sram: on-chip SRAM memory regions data
+ * @num_mems: number of internal memory regions
+ * @num_sram: number of on-chip SRAM memory regions
+ * @reset: reset control handle
+ * @tsp: TI-SCI processor control handle
+ * @ti_sci: TI-SCI handle
+ * @ti_sci_id: TI-SCI device identifier
+ * @atcm_enable: flag to control ATCM enablement
+ * @btcm_enable: flag to control BTCM enablement
+ * @loczrama: flag to dictate which TCM is at device address 0x0
+ */
+struct k3_r5_core {
+ struct list_head elem;
+ struct device *dev;
+ struct rproc *rproc;
+ struct k3_r5_mem *mem;
+ struct k3_r5_mem *sram;
+ int num_mems;
+ int num_sram;
+ struct reset_control *reset;
+ struct ti_sci_proc *tsp;
+ const struct ti_sci_handle *ti_sci;
+ u32 ti_sci_id;
+ u32 atcm_enable;
+ u32 btcm_enable;
+ u32 loczrama;
+};
+
+/**
+ * struct k3_r5_rproc - K3 remote processor state
+ * @dev: cached device pointer
+ * @cluster: cached pointer to parent cluster structure
+ * @mbox: mailbox channel handle
+ * @client: mailbox client to request the mailbox channel
+ * @rproc: rproc handle
+ * @core: cached pointer to r5 core structure being used
+ * @rmem: reserved memory regions data
+ * @num_rmems: number of reserved memory regions
+ */
+struct k3_r5_rproc {
+ struct device *dev;
+ struct k3_r5_cluster *cluster;
+ struct mbox_chan *mbox;
+ struct mbox_client client;
+ struct rproc *rproc;
+ struct k3_r5_core *core;
+ struct k3_r5_mem *rmem;
+ int num_rmems;
+};
+
+/**
+ * k3_r5_rproc_mbox_callback() - inbound mailbox message handler
+ * @client: mailbox client pointer used for requesting the mailbox channel
+ * @data: mailbox payload
+ *
+ * This handler is invoked by the OMAP mailbox driver whenever a mailbox
+ * message is received. Usually, the mailbox payload simply contains
+ * the index of the virtqueue that is kicked by the remote processor,
+ * and we let remoteproc core handle it.
+ *
+ * In addition to virtqueue indices, we also have some out-of-band values
+ * that indicate different events. Those values are deliberately very
+ * large so they don't coincide with virtqueue indices.
+ */
+static void k3_r5_rproc_mbox_callback(struct mbox_client *client, void *data)
+{
+ struct k3_r5_rproc *kproc = container_of(client, struct k3_r5_rproc,
+ client);
+ struct device *dev = kproc->rproc->dev.parent;
+ const char *name = kproc->rproc->name;
+ u32 msg = omap_mbox_message(data);
+
+ dev_dbg(dev, "mbox msg: 0x%x\n", msg);
+
+ switch (msg) {
+ case RP_MBOX_CRASH:
+ /*
+ * remoteproc detected an exception, but error recovery is not
+ * supported. So, just log this for now
+ */
+ dev_err(dev, "K3 R5F rproc %s crashed\n", name);
+ break;
+ case RP_MBOX_ECHO_REPLY:
+ dev_info(dev, "received echo reply from %s\n", name);
+ break;
+ default:
+ /* silently handle all other valid messages */
+ if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
+ return;
+ if (msg > kproc->rproc->max_notifyid) {
+ dev_dbg(dev, "dropping unknown message 0x%x", msg);
+ return;
+ }
+ /* msg contains the index of the triggered vring */
+ if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
+ dev_dbg(dev, "no message was found in vqid %d\n", msg);
+ }
+}
+
+/* kick a virtqueue */
+static void k3_r5_rproc_kick(struct rproc *rproc, int vqid)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct device *dev = rproc->dev.parent;
+ mbox_msg_t msg = (mbox_msg_t)vqid;
+ int ret;
+
+ /* send the index of the triggered virtqueue in the mailbox payload */
+ ret = mbox_send_message(kproc->mbox, (void *)msg);
+ if (ret < 0)
+ dev_err(dev, "failed to send mailbox message, status = %d\n",
+ ret);
+}
+
+static int k3_r5_split_reset(struct k3_r5_core *core)
+{
+ int ret;
+
+ ret = reset_control_assert(core->reset);
+ if (ret) {
+ dev_err(core->dev, "local-reset assert failed, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
+ core->ti_sci_id);
+ if (ret) {
+ dev_err(core->dev, "module-reset assert failed, ret = %d\n",
+ ret);
+ if (reset_control_deassert(core->reset))
+ dev_warn(core->dev, "local-reset deassert back failed\n");
+ }
+
+ return ret;
+}
+
+static int k3_r5_split_release(struct k3_r5_core *core)
+{
+ int ret;
+
+ ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
+ core->ti_sci_id);
+ if (ret) {
+ dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = reset_control_deassert(core->reset);
+ if (ret) {
+ dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
+ ret);
+ if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
+ core->ti_sci_id))
+ dev_warn(core->dev, "module-reset assert back failed\n");
+ }
+
+ return ret;
+}
+
+static int k3_r5_lockstep_reset(struct k3_r5_cluster *cluster)
+{
+ struct k3_r5_core *core;
+ int ret;
+
+ /* assert local reset on all applicable cores */
+ list_for_each_entry(core, &cluster->cores, elem) {
+ ret = reset_control_assert(core->reset);
+ if (ret) {
+ dev_err(core->dev, "local-reset assert failed, ret = %d\n",
+ ret);
+ core = list_prev_entry(core, elem);
+ goto unroll_local_reset;
+ }
+ }
+
+ /* disable PSC modules on all applicable cores */
+ list_for_each_entry(core, &cluster->cores, elem) {
+ ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
+ core->ti_sci_id);
+ if (ret) {
+ dev_err(core->dev, "module-reset assert failed, ret = %d\n",
+ ret);
+ goto unroll_module_reset;
+ }
+ }
+
+ return 0;
+
+unroll_module_reset:
+ list_for_each_entry_continue_reverse(core, &cluster->cores, elem) {
+ if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
+ core->ti_sci_id))
+ dev_warn(core->dev, "module-reset assert back failed\n");
+ }
+ core = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
+unroll_local_reset:
+ list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
+ if (reset_control_deassert(core->reset))
+ dev_warn(core->dev, "local-reset deassert back failed\n");
+ }
+
+ return ret;
+}
+
+static int k3_r5_lockstep_release(struct k3_r5_cluster *cluster)
+{
+ struct k3_r5_core *core;
+ int ret;
+
+ /* enable PSC modules on all applicable cores */
+ list_for_each_entry_reverse(core, &cluster->cores, elem) {
+ ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
+ core->ti_sci_id);
+ if (ret) {
+ dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
+ ret);
+ core = list_next_entry(core, elem);
+ goto unroll_module_reset;
+ }
+ }
+
+ /* deassert local reset on all applicable cores */
+ list_for_each_entry_reverse(core, &cluster->cores, elem) {
+ ret = reset_control_deassert(core->reset);
+ if (ret) {
+ dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
+ ret);
+ goto unroll_local_reset;
+ }
+ }
+
+ return 0;
+
+unroll_local_reset:
+ list_for_each_entry_continue(core, &cluster->cores, elem) {
+ if (reset_control_assert(core->reset))
+ dev_warn(core->dev, "local-reset assert back failed\n");
+ }
+ core = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
+unroll_module_reset:
+ list_for_each_entry_from(core, &cluster->cores, elem) {
+ if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
+ core->ti_sci_id))
+ dev_warn(core->dev, "module-reset assert back failed\n");
+ }
+
+ return ret;
+}
+
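+/*
+ * Illustrative sketch, not part of the driver: the unwind idiom used by
+ * the lockstep helpers above, reduced to its skeleton. On a failure midway
+ * through the list walk, the _continue_reverse iterator revisits exactly
+ * the entries already processed, most recent first, to undo them.
+ */
+static __maybe_unused int k3_r5_example_assert_all(struct k3_r5_cluster *cluster)
+{
+ struct k3_r5_core *core;
+ int ret;
+
+ list_for_each_entry(core, &cluster->cores, elem) {
+ ret = reset_control_assert(core->reset); /* "do" step */
+ if (ret)
+ goto unwind;
+ }
+ return 0;
+
+unwind:
+ list_for_each_entry_continue_reverse(core, &cluster->cores, elem)
+ reset_control_deassert(core->reset); /* "undo" step */
+ return ret;
+}
+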
+static inline int k3_r5_core_halt(struct k3_r5_core *core)
+{
+ return ti_sci_proc_set_control(core->tsp,
+ PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
+}
+
+static inline int k3_r5_core_run(struct k3_r5_core *core)
+{
+ return ti_sci_proc_set_control(core->tsp,
+ 0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
+}
+
+static int k3_r5_rproc_request_mbox(struct rproc *rproc)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct mbox_client *client = &kproc->client;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ client->dev = dev;
+ client->tx_done = NULL;
+ client->rx_callback = k3_r5_rproc_mbox_callback;
+ client->tx_block = false;
+ client->knows_txdone = false;
+
+ kproc->mbox = mbox_request_channel(client, 0);
+ if (IS_ERR(kproc->mbox)) {
+ ret = -EBUSY;
+ dev_err(dev, "mbox_request_channel failed: %ld\n",
+ PTR_ERR(kproc->mbox));
+ return ret;
+ }
+
+ /*
+ * Ping the remote processor, this is only for sanity's sake for now;
+ * there is no functional effect whatsoever.
+ *
+ * Note that the reply will _not_ arrive immediately: this message
+ * will wait in the mailbox fifo until the remote processor is booted.
+ */
+ ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
+ if (ret < 0) {
+ dev_err(dev, "mbox_send_message failed: %d\n", ret);
+ mbox_free_channel(kproc->mbox);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * The R5F cores have controls for both a reset and a halt/run. The code
+ * execution from DDR requires the initial boot-strapping code to be run
+ * from the internal TCMs. This function is used to release the resets on
+ * applicable cores to allow loading into the TCMs. The .prepare() ops is
+ * invoked by remoteproc core before any firmware loading, and is followed
+ * by the .start() ops after loading to actually let the R5 cores run.
+ *
+ * The Single-CPU mode on applicable SoCs (eg: AM64x) only uses Core0 to
+ * execute code, but combines the TCMs from both cores. The resets for both
+ * cores need to be released to make this possible, as the TCMs are in general
+ * private to each core. Only Core0 needs to be unhalted for running the
+ * cluster in this mode. The function uses the same reset logic as LockStep
+ * mode for this (though the behavior is agnostic of the reset release order).
+ * This callback is invoked only in remoteproc mode.
+ */
+static int k3_r5_rproc_prepare(struct rproc *rproc)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct k3_r5_cluster *cluster = kproc->cluster;
+ struct k3_r5_core *core = kproc->core;
+ struct device *dev = kproc->dev;
+ u32 ctrl = 0, cfg = 0, stat = 0;
+ u64 boot_vec = 0;
+ bool mem_init_dis;
+ int ret;
+
+ ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl, &stat);
+ if (ret < 0)
+ return ret;
+ mem_init_dis = !!(cfg & PROC_BOOT_CFG_FLAG_R5_MEM_INIT_DIS);
+
+ /* Re-use LockStep-mode reset logic for Single-CPU mode */
+ ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ cluster->mode == CLUSTER_MODE_SINGLECPU) ?
+ k3_r5_lockstep_release(cluster) : k3_r5_split_release(core);
+ if (ret) {
+ dev_err(dev, "unable to enable cores for TCM loading, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ /*
+ * Newer IP revisions like on J7200 SoCs support h/w auto-initialization
+ * of TCMs, so there is no need to perform the s/w memzero. This bit is
+ * configurable through System Firmware, the default value does perform
+ * auto-init, but account for it in case it is disabled
+ */
+ if (cluster->soc_data->tcm_ecc_autoinit && !mem_init_dis) {
+ dev_dbg(dev, "leveraging h/w init for TCM memories\n");
+ return 0;
+ }
+
+ /*
+ * Zero out both TCMs unconditionally (access from v8 Arm core is not
+ * affected by ATCM & BTCM enable configuration values) so that ECC
+ * can be effective on all TCM addresses.
+ */
+ dev_dbg(dev, "zeroing out ATCM memory\n");
+ memset(core->mem[0].cpu_addr, 0x00, core->mem[0].size);
+
+ dev_dbg(dev, "zeroing out BTCM memory\n");
+ memset(core->mem[1].cpu_addr, 0x00, core->mem[1].size);
+
+ return 0;
+}
+
+/*
+ * This function implements the .unprepare() ops and performs the complementary
+ * operations to that of the .prepare() ops. The function is used to assert the
+ * resets on all applicable cores for the rproc device (depending on LockStep
+ * or Split mode). This completes the second portion of powering down the R5F
+ * cores. The cores themselves are only halted in the .stop() ops, and the
+ * .unprepare() ops is invoked by the remoteproc core after the remoteproc is
+ * stopped.
+ *
+ * The Single-CPU mode on applicable SoCs (eg: AM64x) combines the TCMs from
+ * both cores. The access is made possible only with releasing the resets for
+ * both cores, but with only Core0 unhalted. This function re-uses the same
+ * reset assert logic as LockStep mode for this mode (though the behavior is
+ * agnostic of the reset assert order). This callback is invoked only in
+ * remoteproc mode.
+ */
+static int k3_r5_rproc_unprepare(struct rproc *rproc)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct k3_r5_cluster *cluster = kproc->cluster;
+ struct k3_r5_core *core = kproc->core;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ /* Re-use LockStep-mode reset logic for Single-CPU mode */
+ ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ cluster->mode == CLUSTER_MODE_SINGLECPU) ?
+ k3_r5_lockstep_reset(cluster) : k3_r5_split_reset(core);
+ if (ret)
+ dev_err(dev, "unable to disable cores, ret = %d\n", ret);
+
+ return ret;
+}
+
+/*
+ * The R5F start sequence includes two different operations
+ * 1. Configure the boot vector for R5F core(s)
+ * 2. Unhalt/Run the R5F core(s)
+ *
+ * The sequence is different between LockStep and Split modes. The LockStep
+ * mode requires the boot vector to be configured only for Core0, and then
+ * unhalt both the cores to start the execution - Core1 needs to be unhalted
+ * first followed by Core0. The Split-mode requires that Core0 to be maintained
+ * always in a higher power state that Core1 (implying Core1 needs to be started
+ * always only after Core0 is started).
+ *
+ * The Single-CPU mode on applicable SoCs (eg: AM64x) only uses Core0 to execute
+ * code, so only Core0 needs to be unhalted. The function uses the same logic
+ * flow as Split-mode for this. This callback is invoked only in remoteproc
+ * mode.
+ */
+static int k3_r5_rproc_start(struct rproc *rproc)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct k3_r5_cluster *cluster = kproc->cluster;
+ struct device *dev = kproc->dev;
+ struct k3_r5_core *core;
+ u32 boot_addr;
+ int ret;
+
+ ret = k3_r5_rproc_request_mbox(rproc);
+ if (ret)
+ return ret;
+
+ boot_addr = rproc->bootaddr;
+ /* TODO: add boot_addr sanity checking */
+ dev_dbg(dev, "booting R5F core using boot addr = 0x%x\n", boot_addr);
+
+ /* boot vector need not be programmed for Core1 in LockStep mode */
+ core = kproc->core;
+ ret = ti_sci_proc_set_config(core->tsp, boot_addr, 0, 0);
+ if (ret)
+ goto put_mbox;
+
+ /* unhalt/run all applicable cores */
+ if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
+ list_for_each_entry_reverse(core, &cluster->cores, elem) {
+ ret = k3_r5_core_run(core);
+ if (ret)
+ goto unroll_core_run;
+ }
+ } else {
+ ret = k3_r5_core_run(core);
+ if (ret)
+ goto put_mbox;
+ }
+
+ return 0;
+
+unroll_core_run:
+ list_for_each_entry_continue(core, &cluster->cores, elem) {
+ if (k3_r5_core_halt(core))
+ dev_warn(core->dev, "core halt back failed\n");
+ }
+put_mbox:
+ mbox_free_channel(kproc->mbox);
+ return ret;
+}
+
+/*
+ * The R5F stop function includes the following operations
+ * 1. Halt R5F core(s)
+ *
+ * The sequence is different between LockStep and Split modes, and the order
+ * in which the operations are performed on the cores is also in general the
+ * reverse of that in the start function. The LockStep mode requires each
+ * operation to be performed first on Core0 followed by Core1. The Split-mode
+ * requires that Core0 be maintained always in a higher power state than Core1
+ * (implying Core1 needs to be stopped before Core0).
+ *
+ * The Single-CPU mode on applicable SoCs (eg: AM64x) only uses Core0 to execute
+ * code, so only Core0 needs to be halted. The function uses the same logic
+ * flow as Split-mode for this.
+ *
+ * Note that the R5F halt operation in general is not effective when the R5F
+ * core is running, but is needed to make sure the core won't run after
+ * deasserting the reset the subsequent time. The reset could be asserted
+ * here as well, but it is preferred to do so in the .unprepare() ops - this
+ * maintains the symmetric behavior between the .start(), .stop(), .prepare()
+ * and .unprepare() ops, and also balances them well between sysfs 'state'
+ * flow and device bind/unbind or module removal. This callback is invoked
+ * only in remoteproc mode.
+ */
+static int k3_r5_rproc_stop(struct rproc *rproc)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct k3_r5_cluster *cluster = kproc->cluster;
+ struct k3_r5_core *core = kproc->core;
+ int ret;
+
+ /* halt all applicable cores */
+ if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
+ list_for_each_entry(core, &cluster->cores, elem) {
+ ret = k3_r5_core_halt(core);
+ if (ret) {
+ core = list_prev_entry(core, elem);
+ goto unroll_core_halt;
+ }
+ }
+ } else {
+ ret = k3_r5_core_halt(core);
+ if (ret)
+ goto out;
+ }
+
+ mbox_free_channel(kproc->mbox);
+
+ return 0;
+
+unroll_core_halt:
+ list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
+ if (k3_r5_core_run(core))
+ dev_warn(core->dev, "core run back failed\n");
+ }
+out:
+ return ret;
+}
+
+/*
+ * Attach to a running R5F remote processor (IPC-only mode)
+ *
+ * The R5F attach callback only needs to request the mailbox; the remote
+ * processor is already booted, so there is no need to issue any TI-SCI
+ * commands to boot the R5F cores in IPC-only mode. This callback is invoked
+ * only in IPC-only mode.
+ */
+static int k3_r5_rproc_attach(struct rproc *rproc)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+ int ret;
+
+ ret = k3_r5_rproc_request_mbox(rproc);
+ if (ret)
+ return ret;
+
+ dev_info(dev, "R5F core initialized in IPC-only mode\n");
+ return 0;
+}
+
+/*
+ * Detach from a running R5F remote processor (IPC-only mode)
+ *
+ * The R5F detach callback performs the opposite operation to the attach
+ * callback; it only needs to release the mailbox. The R5F cores are not
+ * stopped and will be left in the booted state in IPC-only mode. This
+ * callback is invoked only in IPC-only mode.
+ */
+static int k3_r5_rproc_detach(struct rproc *rproc)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+
+ mbox_free_channel(kproc->mbox);
+ dev_info(dev, "R5F core deinitialized in IPC-only mode\n");
+ return 0;
+}
+
+/*
+ * This function implements the .get_loaded_rsc_table() callback and is used
+ * to provide the resource table for the booted R5F in IPC-only mode. The K3 R5F
+ * firmwares follow a design-by-contract approach and are expected to have the
+ * resource table at the base of the DDR region reserved for firmware usage.
+ * This provides flexibility for the remote processor to be booted by different
+ * bootloaders that may or may not have the ability to publish the resource table
+ * address and size through a DT property. This callback is invoked only in
+ * IPC-only mode.
+ */
+static struct resource_table *k3_r5_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *rsc_table_sz)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct device *dev = kproc->dev;
+
+ if (!kproc->rmem[0].cpu_addr) {
+ dev_err(dev, "memory-region #1 does not exist, loaded rsc table can't be found");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /*
+ * NOTE: The resource table size is currently hard-coded to a maximum
+ * of 256 bytes. The most common resource table usage for K3 firmwares
+ * is to only have the vdev resource entry and an optional trace entry.
+ * The exact size could be computed by parsing the resource table, but
+ * the hard-coded value suffices to support the IPC-only mode.
+ */
+ *rsc_table_sz = 256;
+ return (struct resource_table *)kproc->rmem[0].cpu_addr;
+}
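+
+/*
+ * For illustration only: a minimal sketch of a resource table layout that a
+ * K3 R5F firmware could place at the base of its reserved DDR region to
+ * satisfy the contract described above. The aggregate structure and the
+ * entry choice (one vdev with two vrings) are assumptions based on the
+ * standard remoteproc resource table types from <linux/remoteproc.h>, not a
+ * layout mandated by this driver:
+ *
+ *	struct k3_r5_fw_rsc_table {
+ *		struct resource_table hdr;		(ver = 1, num = 1)
+ *		u32 offset[1];				(offset to the vdev entry)
+ *		struct fw_rsc_hdr vdev_hdr;		(type = RSC_VDEV)
+ *		struct fw_rsc_vdev vdev;		(e.g. an rpmsg vdev)
+ *		struct fw_rsc_vdev_vring vring0;
+ *		struct fw_rsc_vdev_vring vring1;
+ *	} __packed;
+ */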
+
+/*
+ * Internal Memory translation helper
+ *
+ * Custom function implementing the rproc .da_to_va ops to provide address
+ * translation (device address to kernel virtual address) for the internal
+ * RAMs present in an R5F device. The translated addresses can be used
+ * either by the remoteproc core for loading, or by any rpmsg bus drivers.
+ */
+static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
+{
+ struct k3_r5_rproc *kproc = rproc->priv;
+ struct k3_r5_core *core = kproc->core;
+ void __iomem *va = NULL;
+ phys_addr_t bus_addr;
+ u32 dev_addr, offset;
+ size_t size;
+ int i;
+
+ if (len == 0)
+ return NULL;
+
+ /* handle both R5 and SoC views of ATCM and BTCM */
+ for (i = 0; i < core->num_mems; i++) {
+ bus_addr = core->mem[i].bus_addr;
+ dev_addr = core->mem[i].dev_addr;
+ size = core->mem[i].size;
+
+ /* handle R5-view addresses of TCMs */
+ if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = core->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+
+ /* handle SoC-view addresses of TCMs */
+ if (da >= bus_addr && ((da + len) <= (bus_addr + size))) {
+ offset = da - bus_addr;
+ va = core->mem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+
+ /* handle any SRAM regions using SoC-view addresses */
+ for (i = 0; i < core->num_sram; i++) {
+ dev_addr = core->sram[i].dev_addr;
+ size = core->sram[i].size;
+
+ if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = core->sram[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+
+ /* handle static DDR reserved memory regions */
+ for (i = 0; i < kproc->num_rmems; i++) {
+ dev_addr = kproc->rmem[i].dev_addr;
+ size = kproc->rmem[i].size;
+
+ if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
+ offset = da - dev_addr;
+ va = kproc->rmem[i].cpu_addr + offset;
+ return (__force void *)va;
+ }
+ }
+
+ return NULL;
+}
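+
+/*
+ * Worked example (illustrative, with loczrama = 1): the ATCM then appears at
+ * dev_addr 0x0 and the BTCM at dev_addr K3_R5_TCM_DEV_ADDR (0x41010000,
+ * matching the SoC bus address on AM65x). A da of 0x100 with a valid length
+ * resolves through the R5-view branch to core->mem[0].cpu_addr + 0x100,
+ * while a da of 0x41010100 resolves to core->mem[1].cpu_addr + 0x100.
+ */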
+
+static const struct rproc_ops k3_r5_rproc_ops = {
+ .prepare = k3_r5_rproc_prepare,
+ .unprepare = k3_r5_rproc_unprepare,
+ .start = k3_r5_rproc_start,
+ .stop = k3_r5_rproc_stop,
+ .kick = k3_r5_rproc_kick,
+ .da_to_va = k3_r5_rproc_da_to_va,
+};
+
+/*
+ * Internal R5F Core configuration
+ *
+ * Each R5FSS has a cluster-level setting for configuring the processor
+ * subsystem either in a safety/fault-tolerant LockStep mode or a performance
+ * oriented Split mode on most SoCs. A few SoCs support a non-safety mode,
+ * called Single-CPU mode, as an alternate to LockStep mode that exercises
+ * only a single R5F core. Each R5F core has a number of settings to
+ * enable/disable each of the TCMs and to control which TCM appears at the
+ * R5F core's address 0x0. These settings need to be configured before the resets for the
+ * corresponding core are released. These settings are all protected and managed
+ * by the System Processor.
+ *
+ * This function is used to pre-configure these settings for each R5F core, and
+ * the configuration is all done through various ti_sci_proc functions that
+ * communicate with the System Processor. The function also ensures that both
+ * the cores are halted before the .prepare() step.
+ *
+ * The function is called from k3_r5_cluster_rproc_init() and is invoked either
+ * once (in LockStep or Single-CPU mode) or twice (in Split mode). Support
+ * for LockStep-mode is dictated by an eFUSE register bit, and the config
+ * settings retrieved from DT are adjusted accordingly as per the permitted
+ * cluster mode. Another eFUSE register bit dictates if the R5F cluster only
+ * supports a Single-CPU mode. All cluster level settings like Cluster mode and
+ * TEINIT (exception handling state dictating ARM or Thumb mode) can only be set
+ * and retrieved using Core0.
+ *
+ * The function behavior is different based on the cluster mode. The R5F cores
+ * are configured independently as per their individual settings in Split mode.
+ * They are identically configured in LockStep mode using the primary Core0
+ * settings. However, some individual settings cannot be set in LockStep mode.
+ * This is overcome by switching to Split-mode initially and then programming
+ * both the cores with the same settings, before reconfiguring again for
+ * LockStep mode.
+ */
+static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
+{
+ struct k3_r5_cluster *cluster = kproc->cluster;
+ struct device *dev = kproc->dev;
+ struct k3_r5_core *core0, *core, *temp;
+ u32 ctrl = 0, cfg = 0, stat = 0;
+ u32 set_cfg = 0, clr_cfg = 0;
+ u64 boot_vec = 0;
+ bool lockstep_en;
+ bool single_cpu;
+ int ret;
+
+ core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
+ if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ cluster->mode == CLUSTER_MODE_SINGLECPU) {
+ core = core0;
+ } else {
+ core = kproc->core;
+ }
+
+ ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
+ &stat);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dev, "boot_vector = 0x%llx, cfg = 0x%x ctrl = 0x%x stat = 0x%x\n",
+ boot_vec, cfg, ctrl, stat);
+
+ /* check if only Single-CPU mode is supported on applicable SoCs */
+ if (cluster->soc_data->single_cpu_mode) {
+ single_cpu =
+ !!(stat & PROC_BOOT_STATUS_FLAG_R5_SINGLECORE_ONLY);
+ if (single_cpu && cluster->mode == CLUSTER_MODE_SPLIT) {
+ dev_err(cluster->dev, "split-mode not permitted, force configuring for single-cpu mode\n");
+ cluster->mode = CLUSTER_MODE_SINGLECPU;
+ }
+ goto config;
+ }
+
+ /* check conventional LockStep vs Split mode configuration */
+ lockstep_en = !!(stat & PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED);
+ if (!lockstep_en && cluster->mode == CLUSTER_MODE_LOCKSTEP) {
+ dev_err(cluster->dev, "lockstep mode not permitted, force configuring for split-mode\n");
+ cluster->mode = CLUSTER_MODE_SPLIT;
+ }
+
+config:
+ /* always enable ARM mode and set boot vector to 0 */
+ boot_vec = 0x0;
+ if (core == core0) {
+ clr_cfg = PROC_BOOT_CFG_FLAG_R5_TEINIT;
+ if (cluster->soc_data->single_cpu_mode) {
+ /*
+ * Single-CPU configuration bit can only be configured
+ * on Core0 and system firmware will NACK any requests
+ * with the bit configured, so program it only on
+ * permitted cores
+ */
+ if (cluster->mode == CLUSTER_MODE_SINGLECPU)
+ set_cfg = PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE;
+ } else {
+ /*
+ * LockStep configuration bit is Read-only on Split-mode
+ * _only_ devices and system firmware will NACK any
+ * requests with the bit configured, so program it only
+ * on permitted devices
+ */
+ if (lockstep_en)
+ clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
+ }
+ }
+
+ if (core->atcm_enable)
+ set_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
+ else
+ clr_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
+
+ if (core->btcm_enable)
+ set_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
+ else
+ clr_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
+
+ if (core->loczrama)
+ set_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
+ else
+ clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
+
+ if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
+ /*
+ * work around system firmware limitations to make sure both
+ * cores are programmed symmetrically in LockStep. LockStep
+ * and TEINIT config is only allowed with Core0.
+ */
+ list_for_each_entry(temp, &cluster->cores, elem) {
+ ret = k3_r5_core_halt(temp);
+ if (ret)
+ goto out;
+
+ if (temp != core) {
+ clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
+ clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_TEINIT;
+ }
+ ret = ti_sci_proc_set_config(temp->tsp, boot_vec,
+ set_cfg, clr_cfg);
+ if (ret)
+ goto out;
+ }
+
+ set_cfg = PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
+ clr_cfg = 0;
+ ret = ti_sci_proc_set_config(core->tsp, boot_vec,
+ set_cfg, clr_cfg);
+ } else {
+ ret = k3_r5_core_halt(core);
+ if (ret)
+ goto out;
+
+ ret = ti_sci_proc_set_config(core->tsp, boot_vec,
+ set_cfg, clr_cfg);
+ }
+
+out:
+ return ret;
+}
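+
+/*
+ * Worked example (using the SoC Power-on-Reset defaults parsed in
+ * k3_r5_core_of_init(): atcm_enable = 0, btcm_enable = 1, loczrama = 1):
+ * for Core0 in Split mode on a device where LockStep is permitted, the
+ * final call above is issued with set_cfg = BTCM_EN | TCM_RSTBASE and
+ * clr_cfg = TEINIT | LOCKSTEP | ATCM_EN (flag names abbreviated from
+ * their PROC_BOOT_CFG_FLAG_R5_ prefixed forms).
+ */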
+
+static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
+{
+ struct device *dev = kproc->dev;
+ struct device_node *np = dev_of_node(dev);
+ struct device_node *rmem_np;
+ struct reserved_mem *rmem;
+ int num_rmems;
+ int ret, i;
+
+ num_rmems = of_property_count_elems_of_size(np, "memory-region",
+ sizeof(phandle));
+ if (num_rmems <= 0) {
+ dev_err(dev, "device does not have reserved memory regions, ret = %d\n",
+ num_rmems);
+ return -EINVAL;
+ }
+ if (num_rmems < 2) {
+ dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
+ num_rmems);
+ return -EINVAL;
+ }
+
+ /* use reserved memory region 0 for vring DMA allocations */
+ ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
+ if (ret) {
+ dev_err(dev, "device cannot initialize DMA pool, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ num_rmems--;
+ kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
+ if (!kproc->rmem) {
+ ret = -ENOMEM;
+ goto release_rmem;
+ }
+
+ /* use remaining reserved memory regions for static carveouts */
+ for (i = 0; i < num_rmems; i++) {
+ rmem_np = of_parse_phandle(np, "memory-region", i + 1);
+ if (!rmem_np) {
+ ret = -EINVAL;
+ goto unmap_rmem;
+ }
+
+ rmem = of_reserved_mem_lookup(rmem_np);
+ if (!rmem) {
+ of_node_put(rmem_np);
+ ret = -EINVAL;
+ goto unmap_rmem;
+ }
+ of_node_put(rmem_np);
+
+ kproc->rmem[i].bus_addr = rmem->base;
+ /*
+ * R5Fs do not have an MMU, but have a Region Address Translator
+ * (RAT) module that provides a fixed entry translation from
+ * the 32-bit processor addresses to 64-bit bus addresses. The
+ * RAT is programmable only by the R5F cores. RAT support is
+ * not currently implemented, so 64-bit address regions are not
+ * supported. The absence of MMUs implies that the R5F device
+ * addresses/supported memory regions are restricted to 32-bit
+ * bus addresses, and are identical to the bus addresses
+ */
+ kproc->rmem[i].dev_addr = (u32)rmem->base;
+ kproc->rmem[i].size = rmem->size;
+ kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
+ if (!kproc->rmem[i].cpu_addr) {
+ dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
+ i + 1, &rmem->base, &rmem->size);
+ ret = -ENOMEM;
+ goto unmap_rmem;
+ }
+
+ dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ i + 1, &kproc->rmem[i].bus_addr,
+ kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
+ kproc->rmem[i].dev_addr);
+ }
+ kproc->num_rmems = num_rmems;
+
+ return 0;
+
+unmap_rmem:
+ for (i--; i >= 0; i--)
+ iounmap(kproc->rmem[i].cpu_addr);
+ kfree(kproc->rmem);
+release_rmem:
+ of_reserved_mem_device_release(dev);
+ return ret;
+}
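+
+/*
+ * Illustrative DT usage (hypothetical labels, shown only to clarify the
+ * parsing above): the first memory-region phandle backs the vring DMA pool
+ * and every subsequent phandle is mapped as a static carveout, e.g.:
+ *
+ *	memory-region = <&r5f_dma_memory_region>,
+ *			<&r5f_memory_region>;
+ *
+ * where each phandle references a child node under reserved-memory, the
+ * first one typically a "shared-dma-pool" compatible node.
+ */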
+
+static void k3_r5_reserved_mem_exit(struct k3_r5_rproc *kproc)
+{
+ int i;
+
+ for (i = 0; i < kproc->num_rmems; i++)
+ iounmap(kproc->rmem[i].cpu_addr);
+ kfree(kproc->rmem);
+
+ of_reserved_mem_device_release(kproc->dev);
+}
+
+/*
+ * Each R5F core within a typical R5FSS instance has a total of 64 KB of TCMs,
+ * split equally into two 32 KB banks between ATCM and BTCM. The TCMs from both
+ * cores are usable in Split-mode, but only the Core0 TCMs can be used in
+ * LockStep-mode. The newer revisions of the R5FSS IP maximize these TCMs by
+ * leveraging the Core1 TCMs as well in certain modes where they would have
+ * otherwise been unusable (Eg: LockStep-mode on J7200 SoCs, Single-CPU mode on
+ * AM64x SoCs). This is done by making a Core1 TCM visible immediately after the
+ * corresponding Core0 TCM. The SoC memory map uses the larger 64 KB sizes for
+ * the Core0 TCMs, and the dts representation reflects this increased size on
+ * supported SoCs. The Core0 TCM sizes therefore have to be adjusted to only
+ * half the original size in Split mode.
+ */
+static void k3_r5_adjust_tcm_sizes(struct k3_r5_rproc *kproc)
+{
+ struct k3_r5_cluster *cluster = kproc->cluster;
+ struct k3_r5_core *core = kproc->core;
+ struct device *cdev = core->dev;
+ struct k3_r5_core *core0;
+
+ if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ cluster->mode == CLUSTER_MODE_SINGLECPU ||
+ !cluster->soc_data->tcm_is_double)
+ return;
+
+ core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
+ if (core == core0) {
+ WARN_ON(core->mem[0].size != SZ_64K);
+ WARN_ON(core->mem[1].size != SZ_64K);
+
+ core->mem[0].size /= 2;
+ core->mem[1].size /= 2;
+
+ dev_dbg(cdev, "adjusted TCM sizes, ATCM = 0x%zx BTCM = 0x%zx\n",
+ core->mem[0].size, core->mem[1].size);
+ }
+}
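+
+/*
+ * Worked example (following the description above): on an SoC with
+ * tcm_is_double set, the SoC memory map exposes Core0's ATCM and BTCM as
+ * 64 KB each. In Split mode the upper 32 KB of each bank belongs to Core1,
+ * so the adjustment halves Core0's recorded sizes to 32 KB while Core1's
+ * own TCM entries are left untouched.
+ */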
+
+/*
+ * This function checks and configures a R5F core for IPC-only or remoteproc
+ * mode. The driver is configured to be in IPC-only mode for a R5F core when
+ * the core has been loaded and started by a bootloader. The IPC-only mode is
+ * detected by querying the System Firmware for reset, power on and halt status
+ * and ensuring that the core is running. Any incomplete boot configuration
+ * left behind by the bootloader is detected and treated as an error.
+ *
+ * In IPC-only mode, the driver state flags for ATCM, BTCM and LOCZRAMA settings
+ * and cluster mode parsed originally from kernel DT are updated to reflect the
+ * actual values configured by bootloader. The driver internal device memory
+ * addresses for TCMs are also updated.
+ */
+static int k3_r5_rproc_configure_mode(struct k3_r5_rproc *kproc)
+{
+ struct k3_r5_cluster *cluster = kproc->cluster;
+ struct k3_r5_core *core = kproc->core;
+ struct device *cdev = core->dev;
+ bool r_state = false, c_state = false;
+ u32 ctrl = 0, cfg = 0, stat = 0, halted = 0;
+ u64 boot_vec = 0;
+ u32 atcm_enable, btcm_enable, loczrama;
+ struct k3_r5_core *core0;
+ enum cluster_mode mode;
+ int reset_ctrl_status;
+ int ret;
+
+ core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
+
+ ret = core->ti_sci->ops.dev_ops.is_on(core->ti_sci, core->ti_sci_id,
+ &r_state, &c_state);
+ if (ret) {
+ dev_err(cdev, "failed to get initial state, mode cannot be determined, ret = %d\n",
+ ret);
+ return ret;
+ }
+ if (r_state != c_state) {
+ dev_warn(cdev, "R5F core may have been powered on by a different host, programmed state (%d) != actual state (%d)\n",
+ r_state, c_state);
+ }
+
+ /*
+ * Store the local reset status in a separate variable as 'ret' gets
+ * overwritten by the subsequent ti_sci_proc_get_status() call but the
+ * status is needed later for the IPC-only mode detection
+ */
+ reset_ctrl_status = reset_control_status(core->reset);
+ if (reset_ctrl_status < 0) {
+ dev_err(cdev, "failed to get initial local reset status, ret = %d\n",
+ reset_ctrl_status);
+ return reset_ctrl_status;
+ }
+
+ ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
+ &stat);
+ if (ret < 0) {
+ dev_err(cdev, "failed to get initial processor status, ret = %d\n",
+ ret);
+ return ret;
+ }
+ atcm_enable = cfg & PROC_BOOT_CFG_FLAG_R5_ATCM_EN ? 1 : 0;
+ btcm_enable = cfg & PROC_BOOT_CFG_FLAG_R5_BTCM_EN ? 1 : 0;
+ loczrama = cfg & PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE ? 1 : 0;
+ if (cluster->soc_data->single_cpu_mode) {
+ mode = cfg & PROC_BOOT_CFG_FLAG_R5_SINGLE_CORE ?
+ CLUSTER_MODE_SINGLECPU : CLUSTER_MODE_SPLIT;
+ } else {
+ mode = cfg & PROC_BOOT_CFG_FLAG_R5_LOCKSTEP ?
+ CLUSTER_MODE_LOCKSTEP : CLUSTER_MODE_SPLIT;
+ }
+ halted = ctrl & PROC_BOOT_CTRL_FLAG_R5_CORE_HALT;
+
+ /*
+ * IPC-only mode detection requires both the local and module resets to
+ * be deasserted and the R5F core to be unhalted. The local reset status
+ * is irrelevant if the module reset is asserted (the POR value has the
+ * local reset deasserted); such a state is deemed remoteproc mode
+ */
+ if (c_state && !reset_ctrl_status && !halted) {
+ dev_info(cdev, "configured R5F for IPC-only mode\n");
+ kproc->rproc->state = RPROC_DETACHED;
+ ret = 1;
+ /* override rproc ops with only required IPC-only mode ops */
+ kproc->rproc->ops->prepare = NULL;
+ kproc->rproc->ops->unprepare = NULL;
+ kproc->rproc->ops->start = NULL;
+ kproc->rproc->ops->stop = NULL;
+ kproc->rproc->ops->attach = k3_r5_rproc_attach;
+ kproc->rproc->ops->detach = k3_r5_rproc_detach;
+ kproc->rproc->ops->get_loaded_rsc_table =
+ k3_r5_get_loaded_rsc_table;
+ } else if (!c_state) {
+ dev_info(cdev, "configured R5F for remoteproc mode\n");
+ ret = 0;
+ } else {
+ dev_err(cdev, "mismatched mode: local_reset = %s, module_reset = %s, core_state = %s\n",
+ !ret ? "deasserted" : "asserted",
+ c_state ? "deasserted" : "asserted",
+ halted ? "halted" : "unhalted");
+ ret = -EINVAL;
+ }
+
+ /* fixup TCMs, cluster & core flags to actual values in IPC-only mode */
+ if (ret > 0) {
+ if (core == core0)
+ cluster->mode = mode;
+ core->atcm_enable = atcm_enable;
+ core->btcm_enable = btcm_enable;
+ core->loczrama = loczrama;
+ core->mem[0].dev_addr = loczrama ? 0 : K3_R5_TCM_DEV_ADDR;
+ core->mem[1].dev_addr = loczrama ? K3_R5_TCM_DEV_ADDR : 0;
+ }
+
+ return ret;
+}
+
+static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
+{
+ struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct k3_r5_rproc *kproc;
+ struct k3_r5_core *core, *core1;
+ struct device *cdev;
+ const char *fw_name;
+ struct rproc *rproc;
+ int ret, ret1;
+
+ core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
+ list_for_each_entry(core, &cluster->cores, elem) {
+ cdev = core->dev;
+ ret = rproc_of_parse_firmware(cdev, 0, &fw_name);
+ if (ret) {
+ dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
+ ret);
+ goto out;
+ }
+
+ rproc = rproc_alloc(cdev, dev_name(cdev), &k3_r5_rproc_ops,
+ fw_name, sizeof(*kproc));
+ if (!rproc) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* K3 R5s have a Region Address Translator (RAT) but no MMU */
+ rproc->has_iommu = false;
+ /* error recovery is not supported at present */
+ rproc->recovery_disabled = true;
+
+ kproc = rproc->priv;
+ kproc->cluster = cluster;
+ kproc->core = core;
+ kproc->dev = cdev;
+ kproc->rproc = rproc;
+ core->rproc = rproc;
+
+ ret = k3_r5_rproc_configure_mode(kproc);
+ if (ret < 0)
+ goto err_config;
+ if (ret)
+ goto init_rmem;
+
+ ret = k3_r5_rproc_configure(kproc);
+ if (ret) {
+ dev_err(dev, "initial configure failed, ret = %d\n",
+ ret);
+ goto err_config;
+ }
+
+init_rmem:
+ k3_r5_adjust_tcm_sizes(kproc);
+
+ ret = k3_r5_reserved_mem_init(kproc);
+ if (ret) {
+ dev_err(dev, "reserved memory init failed, ret = %d\n",
+ ret);
+ goto err_config;
+ }
+
+ ret = rproc_add(rproc);
+ if (ret) {
+ dev_err(dev, "rproc_add failed, ret = %d\n", ret);
+ goto err_add;
+ }
+
+ /* create only one rproc in lockstep mode or single-cpu mode */
+ if (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ cluster->mode == CLUSTER_MODE_SINGLECPU)
+ break;
+ }
+
+ return 0;
+
+err_split:
+ if (rproc->state == RPROC_ATTACHED) {
+ ret1 = rproc_detach(rproc);
+ if (ret1) {
+ dev_err(kproc->dev, "failed to detach rproc, ret = %d\n",
+ ret1);
+ return ret1;
+ }
+ }
+
+ rproc_del(rproc);
+err_add:
+ k3_r5_reserved_mem_exit(kproc);
+err_config:
+ rproc_free(rproc);
+ core->rproc = NULL;
+out:
+ /* undo core0 upon any failures on core1 in split-mode */
+ if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1) {
+ core = list_prev_entry(core, elem);
+ rproc = core->rproc;
+ kproc = rproc->priv;
+ goto err_split;
+ }
+ return ret;
+}
+
+static void k3_r5_cluster_rproc_exit(void *data)
+{
+ struct k3_r5_cluster *cluster = platform_get_drvdata(data);
+ struct k3_r5_rproc *kproc;
+ struct k3_r5_core *core;
+ struct rproc *rproc;
+ int ret;
+
+ /*
+ * lockstep and single-cpu modes have only one rproc associated
+ * with the first core, whereas split-mode has two rprocs, one
+ * associated with each core, and requires that core1 be powered
+ * down first
+ */
+ core = (cluster->mode == CLUSTER_MODE_LOCKSTEP ||
+ cluster->mode == CLUSTER_MODE_SINGLECPU) ?
+ list_first_entry(&cluster->cores, struct k3_r5_core, elem) :
+ list_last_entry(&cluster->cores, struct k3_r5_core, elem);
+
+ list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
+ rproc = core->rproc;
+ kproc = rproc->priv;
+
+ if (rproc->state == RPROC_ATTACHED) {
+ ret = rproc_detach(rproc);
+ if (ret) {
+ dev_err(kproc->dev, "failed to detach rproc, ret = %d\n", ret);
+ return;
+ }
+ }
+
+ rproc_del(rproc);
+
+ k3_r5_reserved_mem_exit(kproc);
+
+ rproc_free(rproc);
+ core->rproc = NULL;
+ }
+}
+
+static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev,
+ struct k3_r5_core *core)
+{
+ static const char * const mem_names[] = {"atcm", "btcm"};
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int num_mems;
+ int i;
+
+ num_mems = ARRAY_SIZE(mem_names);
+ core->mem = devm_kcalloc(dev, num_mems, sizeof(*core->mem), GFP_KERNEL);
+ if (!core->mem)
+ return -ENOMEM;
+
+ for (i = 0; i < num_mems; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ mem_names[i]);
+ if (!res) {
+ dev_err(dev, "found no memory resource for %s\n",
+ mem_names[i]);
+ return -EINVAL;
+ }
+ if (!devm_request_mem_region(dev, res->start,
+ resource_size(res),
+ dev_name(dev))) {
+ dev_err(dev, "could not request %s region for resource\n",
+ mem_names[i]);
+ return -EBUSY;
+ }
+
+ /*
+ * TCMs are designed in general to support RAM-like backing
+ * memories. So, map these as Normal Non-Cached memories. This
+ * also avoids/fixes any potential alignment faults due to
+ * unaligned data accesses when using memcpy() or memset()
+ * functions (normally seen with device type memory).
+ */
+ core->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
+ resource_size(res));
+ if (!core->mem[i].cpu_addr) {
+ dev_err(dev, "failed to map %s memory\n", mem_names[i]);
+ return -ENOMEM;
+ }
+ core->mem[i].bus_addr = res->start;
+
+ /*
+ * TODO:
+ * The R5F cores can place ATCM & BTCM anywhere in its address
+ * based on the corresponding Region Registers in the System
+ * Control coprocessor. For now, place ATCM and BTCM at
+ * addresses 0 and 0x41010000 (same as the bus address on AM65x
+ * SoCs) based on loczrama setting
+ */
+ if (!strcmp(mem_names[i], "atcm")) {
+ core->mem[i].dev_addr = core->loczrama ?
+ 0 : K3_R5_TCM_DEV_ADDR;
+ } else {
+ core->mem[i].dev_addr = core->loczrama ?
+ K3_R5_TCM_DEV_ADDR : 0;
+ }
+ core->mem[i].size = resource_size(res);
+
+ dev_dbg(dev, "memory %5s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ mem_names[i], &core->mem[i].bus_addr,
+ core->mem[i].size, core->mem[i].cpu_addr,
+ core->mem[i].dev_addr);
+ }
+ core->num_mems = num_mems;
+
+ return 0;
+}
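+
+/*
+ * Illustrative DT usage (hypothetical unit addresses and sizes): the two
+ * TCM banks are expected as named MMIO resources on each core node, e.g.:
+ *
+ *	reg = <0x41000000 0x8000>,
+ *	      <0x41010000 0x8000>;
+ *	reg-names = "atcm", "btcm";
+ */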
+
+static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
+ struct k3_r5_core *core)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct device_node *sram_np;
+ struct resource res;
+ int num_sram;
+ int i, ret;
+
+ num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle));
+ if (num_sram <= 0) {
+ dev_dbg(dev, "device does not use reserved on-chip memories, num_sram = %d\n",
+ num_sram);
+ return 0;
+ }
+
+ core->sram = devm_kcalloc(dev, num_sram, sizeof(*core->sram), GFP_KERNEL);
+ if (!core->sram)
+ return -ENOMEM;
+
+ for (i = 0; i < num_sram; i++) {
+ sram_np = of_parse_phandle(np, "sram", i);
+ if (!sram_np)
+ return -EINVAL;
+
+ if (!of_device_is_available(sram_np)) {
+ of_node_put(sram_np);
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(sram_np, 0, &res);
+ of_node_put(sram_np);
+ if (ret)
+ return -EINVAL;
+
+ core->sram[i].bus_addr = res.start;
+ core->sram[i].dev_addr = res.start;
+ core->sram[i].size = resource_size(&res);
+ core->sram[i].cpu_addr = devm_ioremap_wc(dev, res.start,
+ resource_size(&res));
+ if (!core->sram[i].cpu_addr) {
+ dev_err(dev, "failed to parse and map sram%d memory at %pad\n",
+ i, &res.start);
+ return -ENOMEM;
+ }
+
+ dev_dbg(dev, "memory sram%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
+ i, &core->sram[i].bus_addr,
+ core->sram[i].size, core->sram[i].cpu_addr,
+ core->sram[i].dev_addr);
+ }
+ core->num_sram = num_sram;
+
+ return 0;
+}
+
+static
+struct ti_sci_proc *k3_r5_core_of_get_tsp(struct device *dev,
+ const struct ti_sci_handle *sci)
+{
+ struct ti_sci_proc *tsp;
+ u32 temp[2];
+ int ret;
+
+ ret = of_property_read_u32_array(dev_of_node(dev), "ti,sci-proc-ids",
+ temp, 2);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ tsp = devm_kzalloc(dev, sizeof(*tsp), GFP_KERNEL);
+ if (!tsp)
+ return ERR_PTR(-ENOMEM);
+
+ tsp->dev = dev;
+ tsp->sci = sci;
+ tsp->ops = &sci->ops.proc_ops;
+ tsp->proc_id = temp[0];
+ tsp->host_id = temp[1];
+
+ return tsp;
+}
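+
+/*
+ * Illustrative DT usage (hypothetical IDs): the two cells of the
+ * "ti,sci-proc-ids" property populate tsp->proc_id and tsp->host_id in
+ * that order, e.g.:
+ *
+ *	ti,sci-proc-ids = <0x01 0xff>;
+ */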
+
+static int k3_r5_core_of_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev_of_node(dev);
+ struct k3_r5_core *core;
+ int ret;
+
+ if (!devres_open_group(dev, k3_r5_core_of_init, GFP_KERNEL))
+ return -ENOMEM;
+
+ core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
+ if (!core) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ core->dev = dev;
+ /*
+ * Use SoC Power-on-Reset values as default if no DT properties are
+ * used to dictate the TCM configurations
+ */
+ core->atcm_enable = 0;
+ core->btcm_enable = 1;
+ core->loczrama = 1;
+
+ ret = of_property_read_u32(np, "ti,atcm-enable", &core->atcm_enable);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(dev, "invalid format for ti,atcm-enable, ret = %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = of_property_read_u32(np, "ti,btcm-enable", &core->btcm_enable);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(dev, "invalid format for ti,btcm-enable, ret = %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = of_property_read_u32(np, "ti,loczrama", &core->loczrama);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(dev, "invalid format for ti,loczrama, ret = %d\n", ret);
+ goto err;
+ }
+
+ core->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
+ if (IS_ERR(core->ti_sci)) {
+ ret = PTR_ERR(core->ti_sci);
+ if (ret != -EPROBE_DEFER) {
+ dev_err(dev, "failed to get ti-sci handle, ret = %d\n",
+ ret);
+ }
+ core->ti_sci = NULL;
+ goto err;
+ }
+
+ ret = of_property_read_u32(np, "ti,sci-dev-id", &core->ti_sci_id);
+ if (ret) {
+ dev_err(dev, "missing 'ti,sci-dev-id' property\n");
+ goto err;
+ }
+
+ core->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR_OR_NULL(core->reset)) {
+ ret = PTR_ERR_OR_ZERO(core->reset);
+ if (!ret)
+ ret = -ENODEV;
+ if (ret != -EPROBE_DEFER) {
+ dev_err(dev, "failed to get reset handle, ret = %d\n",
+ ret);
+ }
+ goto err;
+ }
+
+ core->tsp = k3_r5_core_of_get_tsp(dev, core->ti_sci);
+ if (IS_ERR(core->tsp)) {
+ ret = PTR_ERR(core->tsp);
+ dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = k3_r5_core_of_get_internal_memories(pdev, core);
+ if (ret) {
+ dev_err(dev, "failed to get internal memories, ret = %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = k3_r5_core_of_get_sram_memories(pdev, core);
+ if (ret) {
+ dev_err(dev, "failed to get sram memories, ret = %d\n", ret);
+ goto err;
+ }
+
+ ret = ti_sci_proc_request(core->tsp);
+ if (ret < 0) {
+ dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, core);
+ devres_close_group(dev, k3_r5_core_of_init);
+
+ return 0;
+
+err:
+ devres_release_group(dev, k3_r5_core_of_init);
+ return ret;
+}
+
+/*
+ * free the resources explicitly since the driver model is not being used
+ * for the child R5F devices
+ */
+static void k3_r5_core_of_exit(struct platform_device *pdev)
+{
+ struct k3_r5_core *core = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = ti_sci_proc_release(core->tsp);
+ if (ret)
+ dev_err(dev, "failed to release proc, ret = %d\n", ret);
+
+ platform_set_drvdata(pdev, NULL);
+ devres_release_group(dev, k3_r5_core_of_init);
+}
+
+static void k3_r5_cluster_of_exit(void *data)
+{
+ struct k3_r5_cluster *cluster = platform_get_drvdata(data);
+ struct platform_device *cpdev;
+ struct k3_r5_core *core, *temp;
+
+ list_for_each_entry_safe_reverse(core, temp, &cluster->cores, elem) {
+ list_del(&core->elem);
+ cpdev = to_platform_device(core->dev);
+ k3_r5_core_of_exit(cpdev);
+ }
+}
+
+static int k3_r5_cluster_of_init(struct platform_device *pdev)
+{
+ struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev_of_node(dev);
+ struct platform_device *cpdev;
+ struct device_node *child;
+ struct k3_r5_core *core;
+ int ret;
+
+ for_each_available_child_of_node(np, child) {
+ cpdev = of_find_device_by_node(child);
+ if (!cpdev) {
+ ret = -ENODEV;
+ dev_err(dev, "could not get R5 core platform device\n");
+ goto fail;
+ }
+
+ ret = k3_r5_core_of_init(cpdev);
+ if (ret) {
+ dev_err(dev, "k3_r5_core_of_init failed, ret = %d\n",
+ ret);
+ put_device(&cpdev->dev);
+ goto fail;
+ }
+
+ core = platform_get_drvdata(cpdev);
+ put_device(&cpdev->dev);
+ list_add_tail(&core->elem, &cluster->cores);
+ }
+
+ return 0;
+
+fail:
+ k3_r5_cluster_of_exit(pdev);
+ return ret;
+}
+
+static int k3_r5_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev_of_node(dev);
+ struct k3_r5_cluster *cluster;
+ const struct k3_r5_soc_data *data;
+ int ret;
+ int num_cores;
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ dev_err(dev, "SoC-specific data is not defined\n");
+ return -ENODEV;
+ }
+
+ cluster = devm_kzalloc(dev, sizeof(*cluster), GFP_KERNEL);
+ if (!cluster)
+ return -ENOMEM;
+
+ cluster->dev = dev;
+ /*
+ * default to most common efuse configurations - Split-mode on AM64x
+ * and LockStep-mode on all others
+ */
+ cluster->mode = data->single_cpu_mode ?
+ CLUSTER_MODE_SPLIT : CLUSTER_MODE_LOCKSTEP;
+ cluster->soc_data = data;
+ INIT_LIST_HEAD(&cluster->cores);
+
+ ret = of_property_read_u32(np, "ti,cluster-mode", &cluster->mode);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(dev, "invalid format for ti,cluster-mode, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ num_cores = of_get_available_child_count(np);
+ if (num_cores != 2) {
+ dev_err(dev, "MCU cluster requires both R5F cores to be enabled, num_cores = %d\n",
+ num_cores);
+ return -ENODEV;
+ }
+
+ platform_set_drvdata(pdev, cluster);
+
+ ret = devm_of_platform_populate(dev);
+ if (ret) {
+ dev_err(dev, "devm_of_platform_populate failed, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = k3_r5_cluster_of_init(pdev);
+ if (ret) {
+ dev_err(dev, "k3_r5_cluster_of_init failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, k3_r5_cluster_of_exit, pdev);
+ if (ret)
+ return ret;
+
+ ret = k3_r5_cluster_rproc_init(pdev);
+ if (ret) {
+ dev_err(dev, "k3_r5_cluster_rproc_init failed, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, k3_r5_cluster_rproc_exit, pdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
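+
+/*
+ * Illustrative DT usage (hypothetical value): the optional "ti,cluster-mode"
+ * property parsed above overrides the eFUSE-based default, e.g.:
+ *
+ *	ti,cluster-mode = <1>;
+ *
+ * where the cell value selects an enum cluster_mode entry (assumed here to
+ * request LockStep mode; the exact numbering is defined by the driver's
+ * enum, which is outside this excerpt).
+ */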
+
+static const struct k3_r5_soc_data am65_j721e_soc_data = {
+ .tcm_is_double = false,
+ .tcm_ecc_autoinit = false,
+ .single_cpu_mode = false,
+};
+
+static const struct k3_r5_soc_data j7200_j721s2_soc_data = {
+ .tcm_is_double = true,
+ .tcm_ecc_autoinit = true,
+ .single_cpu_mode = false,
+};
+
+static const struct k3_r5_soc_data am64_soc_data = {
+ .tcm_is_double = true,
+ .tcm_ecc_autoinit = true,
+ .single_cpu_mode = true,
+};
+
+static const struct of_device_id k3_r5_of_match[] = {
+ { .compatible = "ti,am654-r5fss", .data = &am65_j721e_soc_data, },
+ { .compatible = "ti,j721e-r5fss", .data = &am65_j721e_soc_data, },
+ { .compatible = "ti,j7200-r5fss", .data = &j7200_j721s2_soc_data, },
+ { .compatible = "ti,am64-r5fss", .data = &am64_soc_data, },
+ { .compatible = "ti,j721s2-r5fss", .data = &j7200_j721s2_soc_data, },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, k3_r5_of_match);
+
+static struct platform_driver k3_r5_rproc_driver = {
+ .probe = k3_r5_probe,
+ .driver = {
+ .name = "k3_r5_rproc",
+ .of_match_table = k3_r5_of_match,
+ },
+};
+
+module_platform_driver(k3_r5_rproc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI K3 R5F remote processor driver");
+MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");
diff --git a/drivers/remoteproc/ti_sci_proc.h b/drivers/remoteproc/ti_sci_proc.h
new file mode 100644
index 000000000000..778558abcdcc
--- /dev/null
+++ b/drivers/remoteproc/ti_sci_proc.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Texas Instruments TI-SCI Processor Controller Helper Functions
+ *
+ * Copyright (C) 2018-2020 Texas Instruments Incorporated - https://www.ti.com/
+ * Suman Anna <s-anna@ti.com>
+ */
+
+#ifndef REMOTEPROC_TI_SCI_PROC_H
+#define REMOTEPROC_TI_SCI_PROC_H
+
+#include <linux/soc/ti/ti_sci_protocol.h>
+
+/**
+ * struct ti_sci_proc - structure representing a processor control client
+ * @sci: cached TI-SCI protocol handle
+ * @ops: cached TI-SCI proc ops
+ * @dev: cached client device pointer
+ * @proc_id: processor id for the consumer remoteproc device
+ * @host_id: host id to pass the control over for this consumer remoteproc
+ * device
+ */
+struct ti_sci_proc {
+ const struct ti_sci_handle *sci;
+ const struct ti_sci_proc_ops *ops;
+ struct device *dev;
+ u8 proc_id;
+ u8 host_id;
+};
+
+static inline int ti_sci_proc_request(struct ti_sci_proc *tsp)
+{
+ int ret;
+
+ ret = tsp->ops->request(tsp->sci, tsp->proc_id);
+ if (ret)
+ dev_err(tsp->dev, "ti-sci processor request failed: %d\n",
+ ret);
+ return ret;
+}
+
+static inline int ti_sci_proc_release(struct ti_sci_proc *tsp)
+{
+ int ret;
+
+ ret = tsp->ops->release(tsp->sci, tsp->proc_id);
+ if (ret)
+ dev_err(tsp->dev, "ti-sci processor release failed: %d\n",
+ ret);
+ return ret;
+}
+
+static inline int ti_sci_proc_handover(struct ti_sci_proc *tsp)
+{
+ int ret;
+
+ ret = tsp->ops->handover(tsp->sci, tsp->proc_id, tsp->host_id);
+ if (ret)
+ dev_err(tsp->dev, "ti-sci processor handover of %d to %d failed: %d\n",
+ tsp->proc_id, tsp->host_id, ret);
+ return ret;
+}
+
+static inline int ti_sci_proc_set_config(struct ti_sci_proc *tsp,
+ u64 boot_vector,
+ u32 cfg_set, u32 cfg_clr)
+{
+ int ret;
+
+ ret = tsp->ops->set_config(tsp->sci, tsp->proc_id, boot_vector,
+ cfg_set, cfg_clr);
+ if (ret)
+ dev_err(tsp->dev, "ti-sci processor set_config failed: %d\n",
+ ret);
+ return ret;
+}
+
+static inline int ti_sci_proc_set_control(struct ti_sci_proc *tsp,
+ u32 ctrl_set, u32 ctrl_clr)
+{
+ int ret;
+
+ ret = tsp->ops->set_control(tsp->sci, tsp->proc_id, ctrl_set, ctrl_clr);
+ if (ret)
+ dev_err(tsp->dev, "ti-sci processor set_control failed: %d\n",
+ ret);
+ return ret;
+}
+
+static inline int ti_sci_proc_get_status(struct ti_sci_proc *tsp,
+ u64 *boot_vector, u32 *cfg_flags,
+ u32 *ctrl_flags, u32 *status_flags)
+{
+ int ret;
+
+ ret = tsp->ops->get_status(tsp->sci, tsp->proc_id, boot_vector,
+ cfg_flags, ctrl_flags, status_flags);
+ if (ret)
+ dev_err(tsp->dev, "ti-sci processor get_status failed: %d\n",
+ ret);
+ return ret;
+}
+
+#endif /* REMOTEPROC_TI_SCI_PROC_H */
diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c
index 3984e585c847..a0c204cb0979 100644
--- a/drivers/remoteproc/wkup_m3_rproc.c
+++ b/drivers/remoteproc/wkup_m3_rproc.c
@@ -17,6 +17,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/remoteproc.h>
+#include <linux/reset.h>
#include <linux/platform_data/wkup_m3.h>
@@ -43,11 +44,13 @@ struct wkup_m3_mem {
* @rproc: rproc handle
* @pdev: pointer to platform device
* @mem: WkupM3 memory information
+ * @rsts: reset control
*/
struct wkup_m3_rproc {
struct rproc *rproc;
struct platform_device *pdev;
struct wkup_m3_mem mem[WKUPM3_MEM_MAX];
+ struct reset_control *rsts;
};
static int wkup_m3_rproc_start(struct rproc *rproc)
@@ -56,13 +59,16 @@ static int wkup_m3_rproc_start(struct rproc *rproc)
struct platform_device *pdev = wkupm3->pdev;
struct device *dev = &pdev->dev;
struct wkup_m3_platform_data *pdata = dev_get_platdata(dev);
+ int error = 0;
- if (pdata->deassert_reset(pdev, pdata->reset_name)) {
+ error = reset_control_deassert(wkupm3->rsts);
+
+ if (!wkupm3->rsts && pdata->deassert_reset(pdev, pdata->reset_name)) {
dev_err(dev, "Unable to reset wkup_m3!\n");
- return -ENODEV;
+ error = -ENODEV;
}
- return 0;
+ return error;
}
static int wkup_m3_rproc_stop(struct rproc *rproc)
@@ -71,23 +77,26 @@ static int wkup_m3_rproc_stop(struct rproc *rproc)
struct platform_device *pdev = wkupm3->pdev;
struct device *dev = &pdev->dev;
struct wkup_m3_platform_data *pdata = dev_get_platdata(dev);
+ int error = 0;
- if (pdata->assert_reset(pdev, pdata->reset_name)) {
+ error = reset_control_assert(wkupm3->rsts);
+
+ if (!wkupm3->rsts && pdata->assert_reset(pdev, pdata->reset_name)) {
dev_err(dev, "Unable to assert reset of wkup_m3!\n");
- return -ENODEV;
+ error = -ENODEV;
}
- return 0;
+ return error;
}
-static void *wkup_m3_rproc_da_to_va(struct rproc *rproc, u64 da, int len)
+static void *wkup_m3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem)
{
struct wkup_m3_rproc *wkupm3 = rproc->priv;
void *va = NULL;
int i;
u32 offset;
- if (len <= 0)
+ if (len == 0)
return NULL;
for (i = 0; i < WKUPM3_MEM_MAX; i++) {
@@ -132,12 +141,6 @@ static int wkup_m3_rproc_probe(struct platform_device *pdev)
int ret;
int i;
- if (!(pdata && pdata->deassert_reset && pdata->assert_reset &&
- pdata->reset_name)) {
- dev_err(dev, "Platform data missing!\n");
- return -ENODEV;
- }
-
ret = of_property_read_string(dev->of_node, "ti,pm-firmware",
&fw_name);
if (ret) {
@@ -160,11 +163,24 @@ static int wkup_m3_rproc_probe(struct platform_device *pdev)
}
rproc->auto_boot = false;
+ rproc->sysfs_read_only = true;
wkupm3 = rproc->priv;
wkupm3->rproc = rproc;
wkupm3->pdev = pdev;
+ wkupm3->rsts = devm_reset_control_get_optional_shared(dev, "rstctrl");
+ if (IS_ERR(wkupm3->rsts))
+ return PTR_ERR(wkupm3->rsts);
+ if (!wkupm3->rsts) {
+ if (!(pdata && pdata->deassert_reset && pdata->assert_reset &&
+ pdata->reset_name)) {
+ dev_err(dev, "Platform data missing!\n");
+ ret = -ENODEV;
+ goto err_put_rproc;
+ }
+ }
+
for (i = 0; i < ARRAY_SIZE(mem_names); i++) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
mem_names[i]);
@@ -173,7 +189,7 @@ static int wkup_m3_rproc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "devm_ioremap_resource failed for resource %d\n",
i);
ret = PTR_ERR(wkupm3->mem[i].cpu_addr);
- goto err;
+ goto err_put_rproc;
}
wkupm3->mem[i].bus_addr = res->start;
wkupm3->mem[i].size = resource_size(res);