author    Linaro CI <ci_notify@linaro.org>    2022-01-09 23:19:30 +0000
committer Linaro CI <ci_notify@linaro.org>    2022-01-09 23:19:30 +0000
commit    9ac36c4170163d56e661193f658cf7eeaf8be46c (patch)
tree      1dd1c5f480f7f02851224b09884e564e947714e6
parent    0d6f5af9ed6a7f3a434d6c9600ae84c854f711aa (diff)
parent    f8c67da59fbeaba64d7b66bb705ac7cc5aa584b7 (diff)
Merge remote-tracking branch 'sdx55-drivers/tracking-qcomlt-sdx55-drivers' into integration-linux-qcomlt
# Conflicts:
#	scripts/mod/file2alias.c
-rw-r--r--  MAINTAINERS  1
-rw-r--r--  drivers/bus/Makefile  2
-rw-r--r--  drivers/bus/mhi/Kconfig  28
-rw-r--r--  drivers/bus/mhi/Makefile  9
-rw-r--r--  drivers/bus/mhi/common.h  284
-rw-r--r--  drivers/bus/mhi/ep/Kconfig  10
-rw-r--r--  drivers/bus/mhi/ep/Makefile  2
-rw-r--r--  drivers/bus/mhi/ep/internal.h  237
-rw-r--r--  drivers/bus/mhi/ep/main.c  1674
-rw-r--r--  drivers/bus/mhi/ep/mmio.c  303
-rw-r--r--  drivers/bus/mhi/ep/ring.c  316
-rw-r--r--  drivers/bus/mhi/ep/sm.c  181
-rw-r--r--  drivers/bus/mhi/host/Kconfig  31
-rw-r--r--  drivers/bus/mhi/host/Makefile (renamed from drivers/bus/mhi/core/Makefile)  4
-rw-r--r--  drivers/bus/mhi/host/boot.c (renamed from drivers/bus/mhi/core/boot.c)  0
-rw-r--r--  drivers/bus/mhi/host/debugfs.c (renamed from drivers/bus/mhi/core/debugfs.c)  0
-rw-r--r--  drivers/bus/mhi/host/init.c (renamed from drivers/bus/mhi/core/init.c)  12
-rw-r--r--  drivers/bus/mhi/host/internal.h (renamed from drivers/bus/mhi/core/internal.h)  436
-rw-r--r--  drivers/bus/mhi/host/main.c (renamed from drivers/bus/mhi/core/main.c)  0
-rw-r--r--  drivers/bus/mhi/host/pci_generic.c (renamed from drivers/bus/mhi/pci_generic.c)  0
-rw-r--r--  drivers/bus/mhi/host/pm.c (renamed from drivers/bus/mhi/core/pm.c)  0
-rw-r--r--  drivers/net/Kconfig  9
-rw-r--r--  drivers/net/Makefile  1
-rw-r--r--  drivers/net/mhi_ep_net.c  375
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom-ep.c  88
-rw-r--r--  drivers/pci/endpoint/functions/Kconfig  10
-rw-r--r--  drivers/pci/endpoint/functions/Makefile  1
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-mhi.c  395
-rw-r--r--  drivers/pci/endpoint/pci-epc-core.c  34
-rw-r--r--  include/linux/mhi_ep.h  289
-rw-r--r--  include/linux/mod_devicetable.h  2
-rw-r--r--  include/linux/pci-epc.h  8
-rw-r--r--  include/linux/pci-epf.h  2
-rw-r--r--  scripts/mod/file2alias.c  10
34 files changed, 4331 insertions, 423 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index dd36acc87ce6..9f2e71e59ae0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -12419,6 +12419,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mani/mhi.git
F: Documentation/ABI/stable/sysfs-bus-mhi
F: Documentation/mhi/
F: drivers/bus/mhi/
+F: drivers/net/mhi_*
F: include/linux/mhi.h
MICROBLAZE ARCHITECTURE
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 52c2f35a26a9..16da51130d1a 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -39,4 +39,4 @@ obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o
obj-$(CONFIG_DA8XX_MSTPRI) += da8xx-mstpri.o
# MHI
-obj-$(CONFIG_MHI_BUS) += mhi/
+obj-y += mhi/
diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig
index da5cd0c9fc62..b39a11e6c624 100644
--- a/drivers/bus/mhi/Kconfig
+++ b/drivers/bus/mhi/Kconfig
@@ -2,30 +2,8 @@
#
# MHI bus
#
-# Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+# Copyright (c) 2021, Linaro Ltd.
#
-config MHI_BUS
- tristate "Modem Host Interface (MHI) bus"
- help
- Bus driver for MHI protocol. Modem Host Interface (MHI) is a
- communication protocol used by the host processors to control
- and communicate with modem devices over a high speed peripheral
- bus or shared memory.
-
-config MHI_BUS_DEBUG
- bool "Debugfs support for the MHI bus"
- depends on MHI_BUS && DEBUG_FS
- help
- Enable debugfs support for use with the MHI transport. Allows
- reading and/or modifying some values within the MHI controller
- for debug and test purposes.
-
-config MHI_BUS_PCI_GENERIC
- tristate "MHI PCI controller driver"
- depends on MHI_BUS
- depends on PCI
- help
- This driver provides MHI PCI controller driver for devices such as
- Qualcomm SDX55 based PCIe modems.
-
+source "drivers/bus/mhi/host/Kconfig"
+source "drivers/bus/mhi/ep/Kconfig"
diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile
index 0a2d778d6fb4..46981331b38f 100644
--- a/drivers/bus/mhi/Makefile
+++ b/drivers/bus/mhi/Makefile
@@ -1,6 +1,5 @@
-# core layer
-obj-y += core/
-
-obj-$(CONFIG_MHI_BUS_PCI_GENERIC) += mhi_pci_generic.o
-mhi_pci_generic-y += pci_generic.o
+# Host MHI stack
+obj-y += host/
+# Endpoint MHI stack
+obj-y += ep/
diff --git a/drivers/bus/mhi/common.h b/drivers/bus/mhi/common.h
new file mode 100644
index 000000000000..c1272d61e54e
--- /dev/null
+++ b/drivers/bus/mhi/common.h
@@ -0,0 +1,284 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021, Linaro Ltd.
+ *
+ */
+
+#ifndef _MHI_COMMON_H
+#define _MHI_COMMON_H
+
+#include <linux/mhi.h>
+
+/* MHI register bits */
+#define MHIREGLEN_MHIREGLEN_MASK GENMASK(31, 0)
+#define MHIREGLEN_MHIREGLEN_SHIFT 0
+
+#define MHIVER_MHIVER_MASK GENMASK(31, 0)
+#define MHIVER_MHIVER_SHIFT 0
+
+#define MHICFG_NHWER_MASK GENMASK(31, 24)
+#define MHICFG_NHWER_SHIFT 24
+#define MHICFG_NER_MASK GENMASK(23, 16)
+#define MHICFG_NER_SHIFT 16
+#define MHICFG_NHWCH_MASK GENMASK(15, 8)
+#define MHICFG_NHWCH_SHIFT 8
+#define MHICFG_NCH_MASK GENMASK(7, 0)
+#define MHICFG_NCH_SHIFT 0
+
+#define CHDBOFF_CHDBOFF_MASK GENMASK(31, 0)
+#define CHDBOFF_CHDBOFF_SHIFT 0
+
+#define ERDBOFF_ERDBOFF_MASK GENMASK(31, 0)
+#define ERDBOFF_ERDBOFF_SHIFT 0
+
+#define BHIOFF_BHIOFF_MASK GENMASK(31, 0)
+#define BHIOFF_BHIOFF_SHIFT 0
+
+#define BHIEOFF_BHIEOFF_MASK GENMASK(31, 0)
+#define BHIEOFF_BHIEOFF_SHIFT 0
+
+#define DEBUGOFF_DEBUGOFF_MASK GENMASK(31, 0)
+#define DEBUGOFF_DEBUGOFF_SHIFT 0
+
+#define MHICTRL_MHISTATE_MASK GENMASK(15, 8)
+#define MHICTRL_MHISTATE_SHIFT 8
+#define MHICTRL_RESET_MASK BIT(1)
+#define MHICTRL_RESET_SHIFT 1
+
+#define MHISTATUS_MHISTATE_MASK GENMASK(15, 8)
+#define MHISTATUS_MHISTATE_SHIFT 8
+#define MHISTATUS_SYSERR_MASK BIT(2)
+#define MHISTATUS_SYSERR_SHIFT 2
+#define MHISTATUS_READY_MASK BIT(0)
+#define MHISTATUS_READY_SHIFT 0
+
+#define CCABAP_LOWER_CCABAP_LOWER_MASK GENMASK(31, 0)
+#define CCABAP_LOWER_CCABAP_LOWER_SHIFT 0
+
+#define CCABAP_HIGHER_CCABAP_HIGHER_MASK GENMASK(31, 0)
+#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT 0
+
+#define ECABAP_LOWER_ECABAP_LOWER_MASK GENMASK(31, 0)
+#define ECABAP_LOWER_ECABAP_LOWER_SHIFT 0
+
+#define ECABAP_HIGHER_ECABAP_HIGHER_MASK GENMASK(31, 0)
+#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT 0
+
+#define CRCBAP_LOWER_CRCBAP_LOWER_MASK GENMASK(31, 0)
+#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT 0
+
+#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK GENMASK(31, 0)
+#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT 0
+
+#define CRDB_LOWER_CRDB_LOWER_MASK GENMASK(31, 0)
+#define CRDB_LOWER_CRDB_LOWER_SHIFT 0
+
+#define CRDB_HIGHER_CRDB_HIGHER_MASK GENMASK(31, 0)
+#define CRDB_HIGHER_CRDB_HIGHER_SHIFT 0
+
+#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK GENMASK(31, 0)
+#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT 0
+
+#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK GENMASK(31, 0)
+#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT 0
+
+#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK GENMASK(31, 0)
+#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT 0
+
+#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK GENMASK(31, 0)
+#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT 0
+
+#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK GENMASK(31, 0)
+#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT 0
+
+#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK GENMASK(31, 0)
+#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT 0
+
+#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK GENMASK(31, 0)
+#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT 0
+
+#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK GENMASK(31, 0)
+#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT 0
+
+/* Command Ring Element macros */
+/* No operation command */
+#define MHI_TRE_CMD_NOOP_PTR 0
+#define MHI_TRE_CMD_NOOP_DWORD0 0
+#define MHI_TRE_CMD_NOOP_DWORD1 (MHI_CMD_NOP << 16)
+
+/* Channel reset command */
+#define MHI_TRE_CMD_RESET_PTR 0
+#define MHI_TRE_CMD_RESET_DWORD0 0
+#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | \
+ (MHI_CMD_RESET_CHAN << 16))
+
+/* Channel stop command */
+#define MHI_TRE_CMD_STOP_PTR 0
+#define MHI_TRE_CMD_STOP_DWORD0 0
+#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | \
+ (MHI_CMD_STOP_CHAN << 16))
+
+/* Channel start command */
+#define MHI_TRE_CMD_START_PTR 0
+#define MHI_TRE_CMD_START_DWORD0 0
+#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | \
+ (MHI_CMD_START_CHAN << 16))
+
+#define MHI_TRE_GET_CMD_CHID(tre) (((tre)->dword[1] >> 24) & 0xff)
+#define MHI_TRE_GET_CMD_TYPE(tre) (((tre)->dword[1] >> 16) & 0xff)
+
+/* Event descriptor macros */
+/* Transfer completion event */
+#define MHI_TRE_EV_PTR(ptr) (ptr)
+#define MHI_TRE_EV_DWORD0(code, len) ((code << 24) | len)
+#define MHI_TRE_EV_DWORD1(chid, type) ((chid << 24) | (type << 16))
+#define MHI_TRE_GET_EV_PTR(tre) ((tre)->ptr)
+#define MHI_TRE_GET_EV_CODE(tre) (((tre)->dword[0] >> 24) & 0xff)
+#define MHI_TRE_GET_EV_LEN(tre) ((tre)->dword[0] & 0xffff)
+#define MHI_TRE_GET_EV_CHID(tre) (((tre)->dword[1] >> 24) & 0xff)
+#define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xff)
+#define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xff)
+#define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xff)
+#define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0])
+#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr)
+#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits((tre)->ptr)
+#define MHI_TRE_GET_EV_VEID(tre) (((tre)->dword[0] >> 16) & 0xff)
+#define MHI_TRE_GET_EV_LINKSPEED(tre) (((tre)->dword[1] >> 24) & 0xff)
+#define MHI_TRE_GET_EV_LINKWIDTH(tre) ((tre)->dword[0] & 0xff)
+
+/* State change event */
+#define MHI_SC_EV_PTR 0
+#define MHI_SC_EV_DWORD0(state) (state << 24)
+#define MHI_SC_EV_DWORD1(type) (type << 16)
+
+/* EE event */
+#define MHI_EE_EV_PTR 0
+#define MHI_EE_EV_DWORD0(ee) (ee << 24)
+#define MHI_EE_EV_DWORD1(type) (type << 16)
+
+/* Command Completion event */
+#define MHI_CC_EV_PTR(ptr) (ptr)
+#define MHI_CC_EV_DWORD0(code) (code << 24)
+#define MHI_CC_EV_DWORD1(type) (type << 16)
+
+/* Transfer descriptor macros */
+#define MHI_TRE_DATA_PTR(ptr) (ptr)
+#define MHI_TRE_DATA_DWORD0(len) (len & MHI_MAX_MTU)
+#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) ((2 << 16) | (bei << 10) \
+ | (ieot << 9) | (ieob << 8) | chain)
+
+/* RSC transfer descriptor macros */
+#define MHI_RSCTRE_DATA_PTR(ptr, len) (((u64)len << 48) | ptr)
+#define MHI_RSCTRE_DATA_DWORD0(cookie) (cookie)
+#define MHI_RSCTRE_DATA_DWORD1 (MHI_PKT_TYPE_COALESCING << 16)
+
+enum mhi_pkt_type {
+ MHI_PKT_TYPE_INVALID = 0x0,
+ MHI_PKT_TYPE_NOOP_CMD = 0x1,
+ MHI_PKT_TYPE_TRANSFER = 0x2,
+ MHI_PKT_TYPE_COALESCING = 0x8,
+ MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10,
+ MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11,
+ MHI_PKT_TYPE_START_CHAN_CMD = 0x12,
+ MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20,
+ MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21,
+ MHI_PKT_TYPE_TX_EVENT = 0x22,
+ MHI_PKT_TYPE_RSC_TX_EVENT = 0x28,
+ MHI_PKT_TYPE_EE_EVENT = 0x40,
+ MHI_PKT_TYPE_TSYNC_EVENT = 0x48,
+ MHI_PKT_TYPE_BW_REQ_EVENT = 0x50,
+ MHI_PKT_TYPE_STALE_EVENT, /* internal event */
+};
+
+/* MHI transfer completion events */
+enum mhi_ev_ccs {
+ MHI_EV_CC_INVALID = 0x0,
+ MHI_EV_CC_SUCCESS = 0x1,
+ MHI_EV_CC_EOT = 0x2, /* End of transfer event */
+ MHI_EV_CC_OVERFLOW = 0x3,
+ MHI_EV_CC_EOB = 0x4, /* End of block event */
+ MHI_EV_CC_OOB = 0x5, /* Out of block event */
+ MHI_EV_CC_DB_MODE = 0x6,
+ MHI_EV_CC_UNDEFINED_ERR = 0x10,
+ MHI_EV_CC_BAD_TRE = 0x11,
+};
+
+/* Channel state */
+enum mhi_ch_state {
+ MHI_CH_STATE_DISABLED,
+ MHI_CH_STATE_ENABLED,
+ MHI_CH_STATE_RUNNING,
+ MHI_CH_STATE_SUSPENDED,
+ MHI_CH_STATE_STOP,
+ MHI_CH_STATE_ERROR,
+};
+
+enum mhi_cmd_type {
+ MHI_CMD_NOP = 1,
+ MHI_CMD_RESET_CHAN = 16,
+ MHI_CMD_STOP_CHAN = 17,
+ MHI_CMD_START_CHAN = 18,
+};
+
+#define EV_CTX_RESERVED_MASK GENMASK(7, 0)
+#define EV_CTX_INTMODC_MASK GENMASK(15, 8)
+#define EV_CTX_INTMODC_SHIFT 8
+#define EV_CTX_INTMODT_MASK GENMASK(31, 16)
+#define EV_CTX_INTMODT_SHIFT 16
+struct mhi_event_ctxt {
+ __u32 intmod;
+ __u32 ertype;
+ __u32 msivec;
+
+ __u64 rbase __packed __aligned(4);
+ __u64 rlen __packed __aligned(4);
+ __u64 rp __packed __aligned(4);
+ __u64 wp __packed __aligned(4);
+};
+
+#define CHAN_CTX_CHSTATE_MASK GENMASK(7, 0)
+#define CHAN_CTX_CHSTATE_SHIFT 0
+#define CHAN_CTX_BRSTMODE_MASK GENMASK(9, 8)
+#define CHAN_CTX_BRSTMODE_SHIFT 8
+#define CHAN_CTX_POLLCFG_MASK GENMASK(15, 10)
+#define CHAN_CTX_POLLCFG_SHIFT 10
+#define CHAN_CTX_RESERVED_MASK GENMASK(31, 16)
+struct mhi_chan_ctxt {
+ __u32 chcfg;
+ __u32 chtype;
+ __u32 erindex;
+
+ __u64 rbase __packed __aligned(4);
+ __u64 rlen __packed __aligned(4);
+ __u64 rp __packed __aligned(4);
+ __u64 wp __packed __aligned(4);
+};
+
+struct mhi_cmd_ctxt {
+ __u32 reserved0;
+ __u32 reserved1;
+ __u32 reserved2;
+
+ __u64 rbase __packed __aligned(4);
+ __u64 rlen __packed __aligned(4);
+ __u64 rp __packed __aligned(4);
+ __u64 wp __packed __aligned(4);
+};
+
+static const char * const mhi_state_str[MHI_STATE_MAX] = {
+ [MHI_STATE_RESET] = "RESET",
+ [MHI_STATE_READY] = "READY",
+ [MHI_STATE_M0] = "M0",
+ [MHI_STATE_M1] = "M1",
+ [MHI_STATE_M2] = "M2",
+ [MHI_STATE_M3] = "M3",
+ [MHI_STATE_M3_FAST] = "M3 FAST",
+ [MHI_STATE_BHI] = "BHI",
+ [MHI_STATE_SYS_ERR] = "SYS ERROR",
+};
+
+#define TO_MHI_STATE_STR(state) (((state) >= MHI_STATE_MAX || \
+				 !mhi_state_str[(state)]) ? \
+				 "INVALID_STATE" : mhi_state_str[(state)])
+
+#endif /* _MHI_COMMON_H */
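As a quick orientation aid, here is a minimal sketch (not part of the patch) of how the event macros above compose and decode a transfer completion event; the example element layout mirrors the ring element used by the endpoint stack:

/* Illustrative sketch only; assumes common.h is included. */
struct example_tre {
	u64 ptr;
	u32 dword[2];
};

static void example_build_tx_event(struct example_tre *ev, u64 tre_addr,
				   u32 chid, u32 len)
{
	/* Point the event at the TRE it completes, then pack code/len/chid */
	ev->ptr = MHI_TRE_EV_PTR(tre_addr);
	ev->dword[0] = MHI_TRE_EV_DWORD0(MHI_EV_CC_EOT, len);
	ev->dword[1] = MHI_TRE_EV_DWORD1(chid, MHI_PKT_TYPE_TX_EVENT);

	/*
	 * Decoding recovers the fields: MHI_TRE_GET_EV_CODE(ev) yields
	 * MHI_EV_CC_EOT and MHI_TRE_GET_EV_CHID(ev) yields chid.
	 */
}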
diff --git a/drivers/bus/mhi/ep/Kconfig b/drivers/bus/mhi/ep/Kconfig
new file mode 100644
index 000000000000..229c71397b30
--- /dev/null
+++ b/drivers/bus/mhi/ep/Kconfig
@@ -0,0 +1,10 @@
+config MHI_BUS_EP
+ tristate "Modem Host Interface (MHI) bus Endpoint implementation"
+ help
+ Bus driver for MHI protocol. Modem Host Interface (MHI) is a
+ communication protocol used by the host processors to control
+ and communicate with modem devices over a high speed peripheral
+ bus or shared memory.
+
+	  MHI_BUS_EP implements the MHI protocol for endpoint devices such
+	  as an SDX55 modem connected to the host machine over PCIe.
diff --git a/drivers/bus/mhi/ep/Makefile b/drivers/bus/mhi/ep/Makefile
new file mode 100644
index 000000000000..aad85f180b70
--- /dev/null
+++ b/drivers/bus/mhi/ep/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_MHI_BUS_EP) += mhi_ep.o
+mhi_ep-y := main.o mmio.o ring.o sm.o
diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h
new file mode 100644
index 000000000000..e0c36346c5b8
--- /dev/null
+++ b/drivers/bus/mhi/ep/internal.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021, Linaro Ltd.
+ *
+ */
+
+#ifndef _MHI_EP_INTERNAL_
+#define _MHI_EP_INTERNAL_
+
+#include <linux/bitfield.h>
+
+#include "../common.h"
+
+extern struct bus_type mhi_ep_bus_type;
+
+/* MHI register definitions */
+#define MHIREGLEN 0x100
+#define MHIVER 0x108
+#define MHICFG 0x110
+#define CHDBOFF 0x118
+#define ERDBOFF 0x120
+#define BHIOFF 0x128
+#define DEBUGOFF 0x130
+#define MHICTRL 0x138
+#define MHISTATUS 0x148
+#define CCABAP_LOWER 0x158
+#define CCABAP_HIGHER 0x15c
+#define ECABAP_LOWER 0x160
+#define ECABAP_HIGHER 0x164
+#define CRCBAP_LOWER 0x168
+#define CRCBAP_HIGHER 0x16c
+#define CRDB_LOWER 0x170
+#define CRDB_HIGHER 0x174
+#define MHICTRLBASE_LOWER 0x180
+#define MHICTRLBASE_HIGHER 0x184
+#define MHICTRLLIMIT_LOWER 0x188
+#define MHICTRLLIMIT_HIGHER 0x18c
+#define MHIDATABASE_LOWER 0x198
+#define MHIDATABASE_HIGHER 0x19c
+#define MHIDATALIMIT_LOWER 0x1a0
+#define MHIDATALIMIT_HIGHER 0x1a4
+#define CHDB_LOWER_n(n) (0x400 + 0x8 * (n))
+#define CHDB_HIGHER_n(n) (0x404 + 0x8 * (n))
+#define ERDB_LOWER_n(n) (0x800 + 0x8 * (n))
+#define ERDB_HIGHER_n(n) (0x804 + 0x8 * (n))
+#define BHI_INTVEC 0x220
+#define BHI_EXECENV 0x228
+#define BHI_IMGTXDB 0x218
+
+#define MHI_CTRL_INT_STATUS_A7 0x4
+#define MHI_CTRL_INT_STATUS_A7_MSK BIT(0)
+#define MHI_CTRL_INT_STATUS_CRDB_MSK BIT(1)
+#define MHI_CHDB_INT_STATUS_A7_n(n) (0x28 + 0x4 * (n))
+#define MHI_ERDB_INT_STATUS_A7_n(n) (0x38 + 0x4 * (n))
+
+#define MHI_CTRL_INT_CLEAR_A7 0x4c
+#define MHI_CTRL_INT_MMIO_WR_CLEAR BIT(2)
+#define MHI_CTRL_INT_CRDB_CLEAR BIT(1)
+#define MHI_CTRL_INT_CRDB_MHICTRL_CLEAR BIT(0)
+
+#define MHI_CHDB_INT_CLEAR_A7_n(n) (0x70 + 0x4 * (n))
+#define MHI_CHDB_INT_CLEAR_A7_n_CLEAR_ALL GENMASK(31, 0)
+#define MHI_ERDB_INT_CLEAR_A7_n(n) (0x80 + 0x4 * (n))
+#define MHI_ERDB_INT_CLEAR_A7_n_CLEAR_ALL GENMASK(31, 0)
+
+#define MHI_CTRL_INT_MASK_A7 0x94
+#define MHI_CTRL_INT_MASK_A7_MASK_MASK GENMASK(1, 0)
+#define MHI_CTRL_MHICTRL_MASK BIT(0)
+#define MHI_CTRL_MHICTRL_SHFT 0
+#define MHI_CTRL_CRDB_MASK BIT(1)
+#define MHI_CTRL_CRDB_SHFT 1
+
+#define MHI_CHDB_INT_MASK_A7_n(n) (0xb8 + 0x4 * (n))
+#define MHI_CHDB_INT_MASK_A7_n_EN_ALL GENMASK(31, 0)
+#define MHI_ERDB_INT_MASK_A7_n(n) (0xc8 + 0x4 * (n))
+#define MHI_ERDB_INT_MASK_A7_n_EN_ALL GENMASK(31, 0)
+
+#define NR_OF_CMD_RINGS 1
+#define MHI_MASK_ROWS_CH_EV_DB 4
+#define MHI_MASK_CH_EV_LEN 32
+
+/* Generic context */
+struct mhi_generic_ctx {
+ __u32 reserved0;
+ __u32 reserved1;
+ __u32 reserved2;
+
+ __u64 rbase __packed __aligned(4);
+ __u64 rlen __packed __aligned(4);
+ __u64 rp __packed __aligned(4);
+ __u64 wp __packed __aligned(4);
+};
+
+enum mhi_ep_execenv {
+ MHI_EP_SBL_EE = 1,
+ MHI_EP_AMSS_EE = 2,
+ MHI_EP_UNRESERVED
+};
+
+/* Transfer Ring Element macros */
+#define MHI_EP_TRE_PTR(ptr) (ptr)
+#define MHI_EP_TRE_DWORD0(len) (len & MHI_MAX_MTU)
+#define MHI_EP_TRE_DWORD1(bei, ieot, ieob, chain) ((2 << 16) | (bei << 10) \
+ | (ieot << 9) | (ieob << 8) | chain)
+#define MHI_EP_TRE_GET_PTR(tre) ((tre)->ptr)
+#define MHI_EP_TRE_GET_LEN(tre) ((tre)->dword[0] & 0xffff)
+#define MHI_EP_TRE_GET_CHAIN(tre) FIELD_GET(BIT(0), (tre)->dword[1])
+#define MHI_EP_TRE_GET_IEOB(tre) FIELD_GET(BIT(8), (tre)->dword[1])
+#define MHI_EP_TRE_GET_IEOT(tre) FIELD_GET(BIT(9), (tre)->dword[1])
+#define MHI_EP_TRE_GET_BEI(tre) FIELD_GET(BIT(10), (tre)->dword[1])
+
+enum mhi_ep_ring_state {
+	RING_STATE_UINT = 0,	/* Uninitialized */
+	RING_STATE_IDLE,
+};
+
+enum mhi_ep_ring_type {
+ RING_TYPE_CMD = 0,
+ RING_TYPE_ER,
+ RING_TYPE_CH,
+ RING_TYPE_INVALID,
+};
+
+struct mhi_ep_ring_element {
+ u64 ptr;
+ u32 dword[2];
+};
+
+/* Transfer ring element type */
+union mhi_ep_ring_ctx {
+ struct mhi_cmd_ctxt cmd;
+ struct mhi_event_ctxt ev;
+ struct mhi_chan_ctxt ch;
+ struct mhi_generic_ctx generic;
+};
+
+struct mhi_ep_ring {
+ struct list_head list;
+ struct mhi_ep_cntrl *mhi_cntrl;
+ int (*ring_cb)(struct mhi_ep_ring *ring, struct mhi_ep_ring_element *el);
+ union mhi_ep_ring_ctx *ring_ctx;
+ struct mhi_ep_ring_element *ring_cache;
+ enum mhi_ep_ring_type type;
+ enum mhi_ep_ring_state state;
+ size_t rd_offset;
+ size_t wr_offset;
+ size_t ring_size;
+ u32 db_offset_h;
+ u32 db_offset_l;
+ u32 ch_id;
+};
+
+struct mhi_ep_cmd {
+ struct mhi_ep_ring ring;
+};
+
+struct mhi_ep_event {
+ struct mhi_ep_ring ring;
+};
+
+struct mhi_ep_state_transition {
+ struct list_head node;
+ enum mhi_state state;
+};
+
+struct mhi_ep_chan {
+ char *name;
+ struct mhi_ep_device *mhi_dev;
+ struct mhi_ep_ring ring;
+ struct mutex lock;
+ void (*xfer_cb)(struct mhi_ep_device *mhi_dev, struct mhi_result *result);
+ enum mhi_ch_state state;
+ enum dma_data_direction dir;
+ u64 tre_loc;
+ u32 tre_size;
+ u32 tre_bytes_left;
+ u32 chan;
+ bool skip_td;
+};
+
+/* MHI Ring related functions */
+void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id);
+void mhi_ep_ring_stop(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring);
+size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr);
+int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
+ union mhi_ep_ring_ctx *ctx);
+int mhi_ep_process_ring(struct mhi_ep_ring *ring);
+int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ep_ring_element *element,
+ int evt_offset);
+void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring);
+int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ep_ring_element *el);
+int mhi_ep_process_tre_ring(struct mhi_ep_ring *ring, struct mhi_ep_ring_element *el);
+
+/* MMIO related functions */
+void mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 *regval);
+void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val);
+void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset,
+ u32 mask, u32 shift, u32 val);
+int mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset,
+ u32 mask, u32 shift, u32 *regval);
+void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_enable_chdb_a7(struct mhi_ep_cntrl *mhi_cntrl, u32 chdb_id);
+void mhi_ep_mmio_disable_chdb_a7(struct mhi_ep_cntrl *mhi_cntrl, u32 chdb_id);
+void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_get_ch_db(struct mhi_ep_ring *ring, u64 *wr_offset);
+void mhi_ep_mmio_get_er_db(struct mhi_ep_ring *ring, u64 *wr_offset);
+void mhi_ep_mmio_get_cmd_db(struct mhi_ep_ring *ring, u64 *wr_offset);
+void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value);
+void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state,
+ bool *mhi_reset);
+void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl);
+
+/* MHI EP core functions */
+int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state);
+int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ep_execenv exec_env);
+bool mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state cur_mhi_state,
+ enum mhi_state mhi_state);
+int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state);
+int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl);
+int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl);
+int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl);
+void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl);
+
+#endif
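For reference, a small sketch (not part of the patch) of how the doorbell and interrupt-status offsets above are indexed; channel doorbells are 8 bytes apart and one 32-bit A7 status register covers MHI_MASK_CH_EV_LEN (32) channels:

/* Sketch only; assumes internal.h is included. */
static inline u32 example_chdb_reg(u32 ch_id)
{
	return CHDB_LOWER_n(ch_id);	/* 0x400 + 8 * ch_id */
}

static inline u32 example_chdb_status_reg(u32 ch_id)
{
	return MHI_CHDB_INT_STATUS_A7_n(ch_id / MHI_MASK_CH_EV_LEN);
}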
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
new file mode 100644
index 000000000000..42470d2a82b8
--- /dev/null
+++ b/drivers/bus/mhi/ep/main.c
@@ -0,0 +1,1674 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MHI Bus Endpoint stack
+ *
+ * Copyright (C) 2021 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/dma-direction.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mhi_ep.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include "internal.h"
+
+#define MHI_SUSPEND_MIN 100
+#define MHI_SUSPEND_TIMEOUT 600
+
+static DEFINE_IDA(mhi_ep_cntrl_ida);
+
+static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
+static int mhi_ep_destroy_device(struct device *dev, void *data);
+
+static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 event_ring,
+ struct mhi_ep_ring_element *el)
+{
+ struct mhi_ep_ring *ring = &mhi_cntrl->mhi_event[event_ring].ring;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ union mhi_ep_ring_ctx *ctx;
+ int ret;
+
+ mutex_lock(&mhi_cntrl->event_lock);
+ ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[event_ring];
+ if (ring->state == RING_STATE_UINT) {
+ ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx);
+ if (ret) {
+ dev_err(dev, "Error starting event ring (%d)\n", event_ring);
+ goto err_unlock;
+ }
+ }
+
+	/* Add the event element to the selected event ring */
+ ret = mhi_ep_ring_add_element(ring, el, 0);
+ if (ret) {
+ dev_err(dev, "Error adding element to event ring (%d)\n", event_ring);
+ goto err_unlock;
+ }
+
+ /* Ensure that the ring pointer gets updated in host memory before triggering IRQ */
+ wmb();
+
+ mutex_unlock(&mhi_cntrl->event_lock);
+
+ /*
+ * Raise IRQ to host only if the BEI flag is not set in TRE. Host might
+ * set this flag for interrupt moderation as per MHI protocol.
+ */
+ if (!MHI_EP_TRE_GET_BEI(el))
+ mhi_cntrl->raise_irq(mhi_cntrl);
+
+ return 0;
+
+err_unlock:
+ mutex_unlock(&mhi_cntrl->event_lock);
+
+ return ret;
+}
+
+static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_ring *ring, u32 len,
+ enum mhi_ev_ccs code)
+{
+ struct mhi_ep_ring_element event = {};
+ u32 er_index, tmp;
+
+ er_index = mhi_cntrl->ch_ctx_cache[ring->ch_id].erindex;
+ event.ptr = ring->ring_ctx->generic.rbase +
+ ring->rd_offset * sizeof(struct mhi_ep_ring_element);
+
+ tmp = event.dword[0];
+ tmp |= MHI_TRE_EV_DWORD0(code, len);
+ event.dword[0] = tmp;
+
+ tmp = event.dword[1];
+ tmp |= MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT);
+ event.dword[1] = tmp;
+
+ return mhi_ep_send_event(mhi_cntrl, er_index, &event);
+}
+
+int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state)
+{
+ struct mhi_ep_ring_element event = {};
+ u32 tmp;
+
+ tmp = event.dword[0];
+ tmp |= MHI_SC_EV_DWORD0(state);
+ event.dword[0] = tmp;
+
+ tmp = event.dword[1];
+ tmp |= MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT);
+ event.dword[1] = tmp;
+
+ return mhi_ep_send_event(mhi_cntrl, 0, &event);
+}
+
+int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ep_execenv exec_env)
+{
+ struct mhi_ep_ring_element event = {};
+ u32 tmp;
+
+ tmp = event.dword[0];
+ tmp |= MHI_EE_EV_DWORD0(exec_env);
+ event.dword[0] = tmp;
+
+ tmp = event.dword[1];
+ tmp |= MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT);
+ event.dword[1] = tmp;
+
+ return mhi_ep_send_event(mhi_cntrl, 0, &event);
+}
+
+static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ep_ring_element event = {};
+ u32 tmp;
+
+ if (code > MHI_EV_CC_BAD_TRE) {
+ dev_err(dev, "Invalid command completion code: %d\n", code);
+ return -EINVAL;
+ }
+
+ event.ptr = mhi_cntrl->cmd_ctx_cache->rbase
+ + (mhi_cntrl->mhi_cmd->ring.rd_offset *
+ (sizeof(struct mhi_ep_ring_element)));
+
+ tmp = event.dword[0];
+ tmp |= MHI_CC_EV_DWORD0(code);
+ event.dword[0] = tmp;
+
+ tmp = event.dword[1];
+ tmp |= MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT);
+ event.dword[1] = tmp;
+
+ return mhi_ep_send_event(mhi_cntrl, 0, &event);
+}
+
+/*
+ * We don't need to do anything special other than setting the MHI SYS_ERR
+ * state. The host will then reset all contexts and issue MHI RESET so that we
+ * can also recover from the error state.
+ */
+void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ /* If MHI EP is not enabled, nothing to do */
+ if (!mhi_cntrl->is_enabled)
+ return;
+
+ ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR);
+ if (ret)
+ return;
+
+ /* Signal host that the device went to SYS_ERR state */
+ ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR);
+ if (ret)
+ dev_err(dev, "Failed sending SYS_ERR state change event: %d\n", ret);
+}
+
+int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ep_ring_element *el)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ep_ring *ch_ring, *event_ring;
+ union mhi_ep_ring_ctx *event_ctx;
+ struct mhi_result result = {};
+ struct mhi_ep_chan *mhi_chan;
+ u32 event_ring_idx, tmp;
+ u32 ch_id;
+ int ret;
+
+ ch_id = MHI_TRE_GET_CMD_CHID(el);
+ mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
+ ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring;
+
+ switch (MHI_TRE_GET_CMD_TYPE(el)) {
+ case MHI_PKT_TYPE_START_CHAN_CMD:
+ dev_dbg(dev, "Received START command for channel (%d)\n", ch_id);
+
+ mutex_lock(&mhi_chan->lock);
+ /* Initialize and configure the corresponding channel ring */
+ if (ch_ring->state == RING_STATE_UINT) {
+ ret = mhi_ep_ring_start(mhi_cntrl, ch_ring,
+ (union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]);
+ if (ret) {
+ dev_err(dev, "Failed to start ring for channel (%d)\n", ch_id);
+ ret = mhi_ep_send_cmd_comp_event(mhi_cntrl,
+ MHI_EV_CC_UNDEFINED_ERR);
+ if (ret)
+					dev_err(dev, "Error sending completion event: %d\n", ret);
+
+ goto err_unlock;
+ }
+ }
+
+ /* Enable DB for the channel */
+ mhi_ep_mmio_enable_chdb_a7(mhi_cntrl, ch_id);
+
+ mutex_lock(&mhi_cntrl->event_lock);
+ event_ring_idx = mhi_cntrl->ch_ctx_cache[ch_id].erindex;
+ event_ring = &mhi_cntrl->mhi_event[event_ring_idx].ring;
+ event_ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[event_ring_idx];
+ if (event_ring->state == RING_STATE_UINT) {
+ ret = mhi_ep_ring_start(mhi_cntrl, event_ring, event_ctx);
+ if (ret) {
+ dev_err(dev, "Error starting event ring: %d\n",
+ mhi_cntrl->ch_ctx_cache[ch_id].erindex);
+ mutex_unlock(&mhi_cntrl->event_lock);
+ goto err_unlock;
+ }
+ }
+
+ mutex_unlock(&mhi_cntrl->event_lock);
+
+ /* Set channel state to RUNNING */
+ mhi_chan->state = MHI_CH_STATE_RUNNING;
+ tmp = mhi_cntrl->ch_ctx_cache[ch_id].chcfg;
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= (MHI_CH_STATE_RUNNING << CHAN_CTX_CHSTATE_SHIFT);
+ mhi_cntrl->ch_ctx_cache[ch_id].chcfg = tmp;
+
+ ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
+ if (ret) {
+			dev_err(dev, "Error sending command completion event: %d\n", ret);
+ goto err_unlock;
+ }
+
+ mutex_unlock(&mhi_chan->lock);
+
+ /*
+ * Create MHI device only during UL channel start. Since the MHI
+ * channels operate in a pair, we'll associate both UL and DL
+ * channels to the same device.
+ *
+		 * We also need to check for mhi_dev != NULL because the host
+ * will issue START_CHAN command during resume and we don't
+ * destroy the device during suspend.
+ */
+ if (!(ch_id % 2) && !mhi_chan->mhi_dev) {
+ ret = mhi_ep_create_device(mhi_cntrl, ch_id);
+ if (ret) {
+ dev_err(dev, "Error creating device for channel (%d)\n", ch_id);
+ return ret;
+ }
+ }
+
+ break;
+ case MHI_PKT_TYPE_STOP_CHAN_CMD:
+ dev_dbg(dev, "Received STOP command for channel (%d)\n", ch_id);
+ if (ch_ring->state == RING_STATE_UINT) {
+ dev_err(dev, "Channel (%d) not opened\n", ch_id);
+ return -ENODEV;
+ }
+
+ mutex_lock(&mhi_chan->lock);
+ /* Disable DB for the channel */
+ mhi_ep_mmio_disable_chdb_a7(mhi_cntrl, ch_id);
+
+ /* Set the local value of the transfer ring read pointer to the channel context */
+ ch_ring->rd_offset = mhi_ep_ring_addr2offset(ch_ring,
+ ch_ring->ring_ctx->generic.rp);
+
+ /* Send channel disconnect status to client drivers */
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+ /* Set channel state to STOP */
+ mhi_chan->state = MHI_CH_STATE_STOP;
+ tmp = mhi_cntrl->ch_ctx_cache[ch_id].chcfg;
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= (MHI_CH_STATE_STOP << CHAN_CTX_CHSTATE_SHIFT);
+ mhi_cntrl->ch_ctx_cache[ch_id].chcfg = tmp;
+
+ ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
+ if (ret) {
+			dev_err(dev, "Error sending command completion event: %d\n", ret);
+ goto err_unlock;
+ }
+
+ mutex_unlock(&mhi_chan->lock);
+ break;
+ case MHI_PKT_TYPE_RESET_CHAN_CMD:
+		dev_dbg(dev, "Received RESET command for channel (%d)\n", ch_id);
+ if (ch_ring->state == RING_STATE_UINT) {
+ dev_err(dev, "Channel (%d) not opened\n", ch_id);
+ return -ENODEV;
+ }
+
+ mutex_lock(&mhi_chan->lock);
+ /* Stop and reset the transfer ring */
+ mhi_ep_ring_stop(mhi_cntrl, ch_ring);
+
+ /* Send channel disconnect status to client driver */
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+
+ /* Set channel state to DISABLED */
+ mhi_chan->state = MHI_CH_STATE_DISABLED;
+ tmp = mhi_cntrl->ch_ctx_cache[ch_id].chcfg;
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT);
+ mhi_cntrl->ch_ctx_cache[ch_id].chcfg = tmp;
+
+ ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS);
+ if (ret) {
+			dev_err(dev, "Error sending command completion event: %d\n", ret);
+ goto err_unlock;
+ }
+ mutex_unlock(&mhi_chan->lock);
+ break;
+ default:
+ dev_err(dev, "Invalid command received: %d for channel (%d)",
+ MHI_TRE_GET_CMD_TYPE(el), ch_id);
+ return -EINVAL;
+ }
+
+ return 0;
+
+err_unlock:
+ mutex_unlock(&mhi_chan->lock);
+
+ return ret;
+}
+
+static bool mhi_ep_check_tre_bytes_left(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_ring *ring,
+ struct mhi_ep_ring_element *el)
+{
+ struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+	bool td_done = false;
+
+ /* A full TRE worth of data was consumed. Check if we are at a TD boundary */
+ if (mhi_chan->tre_bytes_left == 0) {
+ if (MHI_EP_TRE_GET_CHAIN(el)) {
+ if (MHI_EP_TRE_GET_IEOB(el))
+ mhi_ep_send_completion_event(mhi_cntrl,
+ ring, MHI_EP_TRE_GET_LEN(el), MHI_EV_CC_EOB);
+ } else {
+ if (MHI_EP_TRE_GET_IEOT(el))
+ mhi_ep_send_completion_event(mhi_cntrl,
+ ring, MHI_EP_TRE_GET_LEN(el), MHI_EV_CC_EOT);
+			td_done = true;
+ }
+
+ mhi_ep_ring_inc_index(ring);
+ mhi_chan->tre_bytes_left = 0;
+ mhi_chan->tre_loc = 0;
+ }
+
+ return td_done;
+}
+
+bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir)
+{
+ struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan :
+ mhi_dev->ul_chan;
+ struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+
+	return ring->rd_offset == ring->wr_offset;
+}
+EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty);
+
+static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_ring *ring,
+ struct mhi_result *result,
+ u32 len)
+{
+ struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ size_t bytes_to_read, addr_offset;
+ struct mhi_ep_ring_element *el;
+ ssize_t bytes_read = 0;
+ u32 buf_remaining;
+ void __iomem *tre_buf;
+ phys_addr_t tre_phys;
+ void *write_to_loc;
+ u64 read_from_loc;
+	bool td_done = false;
+ int ret;
+
+ buf_remaining = len;
+
+ do {
+ /* Don't process the transfer ring if the channel is not in RUNNING state */
+ if (mhi_chan->state != MHI_CH_STATE_RUNNING)
+ return -ENODEV;
+
+ el = &ring->ring_cache[ring->rd_offset];
+
+ if (mhi_chan->tre_loc) {
+ bytes_to_read = min(buf_remaining,
+ mhi_chan->tre_bytes_left);
+ dev_dbg(dev, "TRE bytes remaining: %d", mhi_chan->tre_bytes_left);
+ } else {
+ if (mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE))
+ /* Nothing to do */
+ return 0;
+
+ mhi_chan->tre_loc = MHI_EP_TRE_GET_PTR(el);
+ mhi_chan->tre_size = MHI_EP_TRE_GET_LEN(el);
+ mhi_chan->tre_bytes_left = mhi_chan->tre_size;
+
+ bytes_to_read = min(buf_remaining, mhi_chan->tre_size);
+ }
+
+ bytes_read += bytes_to_read;
+ addr_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
+ read_from_loc = mhi_chan->tre_loc + addr_offset;
+ write_to_loc = result->buf_addr + (len - buf_remaining);
+ mhi_chan->tre_bytes_left -= bytes_to_read;
+
+ tre_buf = mhi_cntrl->alloc_addr(mhi_cntrl, &tre_phys, bytes_to_read);
+ if (!tre_buf) {
+ dev_err(dev, "Failed to allocate TRE buffer\n");
+ return -ENOMEM;
+ }
+
+ ret = mhi_cntrl->map_addr(mhi_cntrl, tre_phys, read_from_loc, bytes_to_read);
+ if (ret) {
+ dev_err(dev, "Failed to map TRE buffer\n");
+ goto err_tre_free;
+ }
+
+		dev_dbg(&mhi_chan->mhi_dev->dev, "Reading %zu bytes", bytes_to_read);
+ memcpy_fromio(write_to_loc, tre_buf, bytes_to_read);
+
+ mhi_cntrl->unmap_addr(mhi_cntrl, tre_phys);
+ mhi_cntrl->free_addr(mhi_cntrl, tre_phys, tre_buf, bytes_to_read);
+
+ buf_remaining -= bytes_to_read;
+ td_done = mhi_ep_check_tre_bytes_left(mhi_cntrl, ring, el);
+ } while (buf_remaining && !td_done);
+
+ result->bytes_xferd = bytes_read;
+
+ return bytes_read;
+
+err_tre_free:
+ mhi_cntrl->free_addr(mhi_cntrl, tre_phys, tre_buf, bytes_to_read);
+
+ return ret;
+}
+
+int mhi_ep_process_tre_ring(struct mhi_ep_ring *ring, struct mhi_ep_ring_element *el)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct mhi_result result = {};
+ u32 len = MHI_EP_DEFAULT_MTU;
+ struct mhi_ep_chan *mhi_chan;
+ int ret = 0;
+
+ mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+
+ /*
+ * Bail out if transfer callback is not registered for the channel.
+ * This is most likely due to the client driver not loaded at this point.
+ */
+ if (!mhi_chan->xfer_cb) {
+ dev_err(&mhi_chan->mhi_dev->dev, "Client driver not available\n");
+ return -ENODEV;
+ }
+
+ dev_dbg(&mhi_chan->mhi_dev->dev, "Processing TRE ring\n");
+
+ mutex_lock(&mhi_chan->lock);
+ if (ring->ch_id % 2) {
+ /* DL channel */
+ result.dir = mhi_chan->dir;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ } else {
+ /* UL channel */
+ while (1) {
+ result.buf_addr = kzalloc(len, GFP_KERNEL);
+ if (!result.buf_addr) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
+ if (ret < 0) {
+ dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel");
+ kfree(result.buf_addr);
+ break;
+ } else if (ret == 0) {
+ /* No more data to read */
+ kfree(result.buf_addr);
+ break;
+ }
+
+ result.dir = mhi_chan->dir;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ kfree(result.buf_addr);
+ }
+ }
+
+err_unlock:
+ mutex_unlock(&mhi_chan->lock);
+
+ return ret;
+}
+
+static void skip_to_next_td(struct mhi_ep_chan *mhi_chan, struct mhi_ep_ring *ring)
+{
+ struct mhi_ep_ring_element *el;
+	bool td_boundary_reached = false;
+
+	mhi_chan->skip_td = true;
+	el = &ring->ring_cache[ring->rd_offset];
+	while (ring->rd_offset != ring->wr_offset) {
+		if (td_boundary_reached) {
+			mhi_chan->skip_td = false;
+			break;
+		}
+
+		if (!MHI_EP_TRE_GET_CHAIN(el))
+			td_boundary_reached = true;
+
+ mhi_ep_ring_inc_index(ring);
+ el = &ring->ring_cache[ring->rd_offset];
+ }
+}
+
+int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir,
+ struct sk_buff *skb, size_t len, enum mhi_flags mflags)
+{
+ struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan :
+ mhi_dev->ul_chan;
+ struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+ enum mhi_ev_ccs code = MHI_EV_CC_INVALID;
+ struct mhi_ep_ring_element *el;
+	u64 write_to_loc;
+	bool skip_tre = false;
+ struct mhi_ep_ring *ring;
+ size_t bytes_to_write;
+ void __iomem *tre_buf;
+ phys_addr_t tre_phys;
+ void *read_from_loc;
+ u32 buf_remaining;
+ u32 tre_len;
+ int ret = 0;
+
+ if (dir == DMA_TO_DEVICE)
+ return -EINVAL;
+
+ buf_remaining = len;
+ ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+
+ mutex_lock(&mhi_chan->lock);
+ if (mhi_chan->skip_td)
+ skip_to_next_td(mhi_chan, ring);
+
+ do {
+ /* Don't process the transfer ring if the channel is not in RUNNING state */
+ if (mhi_chan->state != MHI_CH_STATE_RUNNING) {
+ dev_err(&mhi_chan->mhi_dev->dev, "Channel not available");
+ ret = -ENODEV;
+ goto err_exit;
+ }
+
+ if (mhi_ep_queue_is_empty(mhi_dev, dir)) {
+ dev_err(&mhi_chan->mhi_dev->dev, "TRE not available!\n");
+ ret = -EINVAL;
+ goto err_exit;
+ }
+
+ el = &ring->ring_cache[ring->rd_offset];
+ tre_len = MHI_EP_TRE_GET_LEN(el);
+ if (skb->len > tre_len) {
+ dev_err(&mhi_chan->mhi_dev->dev, "Buffer size (%d) is too large!\n",
+ skb->len);
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
+ bytes_to_write = min(buf_remaining, tre_len);
+ read_from_loc = skb->data;
+ write_to_loc = MHI_EP_TRE_GET_PTR(el);
+
+ tre_buf = mhi_cntrl->alloc_addr(mhi_cntrl, &tre_phys, bytes_to_write);
+ if (!tre_buf) {
+ dev_err(&mhi_chan->mhi_dev->dev, "Failed to allocate TRE buffer\n");
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
+ ret = mhi_cntrl->map_addr(mhi_cntrl, tre_phys, write_to_loc, bytes_to_write);
+ if (ret) {
+ dev_err(&mhi_chan->mhi_dev->dev, "Failed to map TRE buffer\n");
+ goto err_tre_free;
+ }
+
+		dev_dbg(&mhi_chan->mhi_dev->dev, "Writing %zu bytes", bytes_to_write);
+ memcpy_toio(tre_buf, read_from_loc, bytes_to_write);
+
+ mhi_cntrl->unmap_addr(mhi_cntrl, tre_phys);
+ mhi_cntrl->free_addr(mhi_cntrl, tre_phys, tre_buf, bytes_to_write);
+
+ buf_remaining -= bytes_to_write;
+ if (buf_remaining) {
+ if (!MHI_EP_TRE_GET_CHAIN(el))
+ code = MHI_EV_CC_OVERFLOW;
+ else if (MHI_EP_TRE_GET_IEOB(el))
+ code = MHI_EV_CC_EOB;
+ } else {
+ if (MHI_EP_TRE_GET_CHAIN(el))
+				skip_tre = true;
+ code = MHI_EV_CC_EOT;
+ }
+
+ ret = mhi_ep_send_completion_event(mhi_cntrl, ring, bytes_to_write, code);
+ if (ret) {
+ dev_err(&mhi_chan->mhi_dev->dev, "Error sending completion event");
+ goto err_exit;
+ }
+
+ mhi_ep_ring_inc_index(ring);
+ } while (!skip_tre && buf_remaining);
+
+ if (skip_tre)
+ skip_to_next_td(mhi_chan, ring);
+
+ mutex_unlock(&mhi_chan->lock);
+
+ return 0;
+
+err_tre_free:
+ mhi_cntrl->free_addr(mhi_cntrl, tre_phys, tre_buf, bytes_to_write);
+err_exit:
+ mutex_unlock(&mhi_chan->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_ep_queue_skb);
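A hedged usage sketch of the API exported above, roughly how a client such as the mhi_ep_net driver added later in this series might call it; the helper name is hypothetical and MHI_EOT comes from <linux/mhi.h>:

/* Sketch only: send one skb to the host on the DL channel. */
static int example_dl_send(struct mhi_ep_device *mhi_dev, struct sk_buff *skb)
{
	/* DL (device-to-host) transfers use DMA_FROM_DEVICE, per the check above */
	return mhi_ep_queue_skb(mhi_dev, DMA_FROM_DEVICE, skb, skb->len, MHI_EOT);
}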
+
+static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ /* Update the number of event rings (NER) programmed by the host */
+ mhi_ep_mmio_update_ner(mhi_cntrl);
+
+ dev_dbg(dev, "Number of Event rings: %d, HW Event rings: %d\n",
+ mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings);
+
+ mhi_cntrl->ch_ctx_host_size = sizeof(struct mhi_chan_ctxt) *
+ mhi_cntrl->max_chan;
+ mhi_cntrl->ev_ctx_host_size = sizeof(struct mhi_event_ctxt) *
+ mhi_cntrl->event_rings;
+ mhi_cntrl->cmd_ctx_host_size = sizeof(struct mhi_cmd_ctxt);
+
+ /* Get the channel context base pointer from host */
+ mhi_ep_mmio_get_chc_base(mhi_cntrl);
+
+ /* Allocate memory for caching host channel context */
+ mhi_cntrl->ch_ctx_cache = mhi_cntrl->alloc_addr(mhi_cntrl, &mhi_cntrl->ch_ctx_cache_phys,
+ mhi_cntrl->ch_ctx_host_size);
+ if (!mhi_cntrl->ch_ctx_cache) {
+ dev_err(dev, "Failed to allocate ch_ctx_cache memory\n");
+ return -ENOMEM;
+ }
+
+ /* Map the host channel context */
+ ret = mhi_cntrl->map_addr(mhi_cntrl, mhi_cntrl->ch_ctx_cache_phys,
+ mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_host_size);
+ if (ret) {
+ dev_err(dev, "Failed to map ch_ctx_cache\n");
+ goto err_ch_ctx;
+ }
+
+ /* Get the event context base pointer from host */
+ mhi_ep_mmio_get_erc_base(mhi_cntrl);
+
+ /* Allocate memory for caching host event context */
+ mhi_cntrl->ev_ctx_cache = mhi_cntrl->alloc_addr(mhi_cntrl, &mhi_cntrl->ev_ctx_cache_phys,
+ mhi_cntrl->ev_ctx_host_size);
+ if (!mhi_cntrl->ev_ctx_cache) {
+ dev_err(dev, "Failed to allocate ev_ctx_cache memory\n");
+ ret = -ENOMEM;
+ goto err_ch_ctx_map;
+ }
+
+ /* Map the host event context */
+ ret = mhi_cntrl->map_addr(mhi_cntrl, mhi_cntrl->ev_ctx_cache_phys,
+ mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_host_size);
+ if (ret) {
+ dev_err(dev, "Failed to map ev_ctx_cache\n");
+ goto err_ev_ctx;
+ }
+
+ /* Get the command context base pointer from host */
+ mhi_ep_mmio_get_crc_base(mhi_cntrl);
+
+ /* Allocate memory for caching host command context */
+ mhi_cntrl->cmd_ctx_cache = mhi_cntrl->alloc_addr(mhi_cntrl, &mhi_cntrl->cmd_ctx_cache_phys,
+ mhi_cntrl->cmd_ctx_host_size);
+ if (!mhi_cntrl->cmd_ctx_cache) {
+ dev_err(dev, "Failed to allocate cmd_ctx_cache memory\n");
+ ret = -ENOMEM;
+ goto err_ev_ctx_map;
+ }
+
+ /* Map the host command context */
+ ret = mhi_cntrl->map_addr(mhi_cntrl, mhi_cntrl->cmd_ctx_cache_phys,
+ mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_host_size);
+ if (ret) {
+ dev_err(dev, "Failed to map cmd_ctx_cache\n");
+ goto err_cmd_ctx;
+ }
+
+ /* Initialize command ring */
+ ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring,
+ (union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache);
+ if (ret) {
+ dev_err(dev, "Failed to start the command ring\n");
+ goto err_cmd_ctx_map;
+ }
+
+ return ret;
+
+err_cmd_ctx_map:
+ mhi_cntrl->unmap_addr(mhi_cntrl, mhi_cntrl->cmd_ctx_cache_phys);
+
+err_cmd_ctx:
+ mhi_cntrl->free_addr(mhi_cntrl, mhi_cntrl->cmd_ctx_cache_phys,
+ mhi_cntrl->cmd_ctx_cache, mhi_cntrl->cmd_ctx_host_size);
+
+err_ev_ctx_map:
+ mhi_cntrl->unmap_addr(mhi_cntrl, mhi_cntrl->ev_ctx_cache_phys);
+
+err_ev_ctx:
+ mhi_cntrl->free_addr(mhi_cntrl, mhi_cntrl->ev_ctx_cache_phys,
+ mhi_cntrl->ev_ctx_cache, mhi_cntrl->ev_ctx_host_size);
+
+err_ch_ctx_map:
+ mhi_cntrl->unmap_addr(mhi_cntrl, mhi_cntrl->ch_ctx_cache_phys);
+
+err_ch_ctx:
+ mhi_cntrl->free_addr(mhi_cntrl, mhi_cntrl->ch_ctx_cache_phys,
+ mhi_cntrl->ch_ctx_cache, mhi_cntrl->ch_ctx_host_size);
+
+ return ret;
+}
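The alloc_addr()/map_addr() callbacks used throughout this function are supplied by the controller driver. A hedged sketch of how the pci-epf-mhi function driver added later in this series might back them with the PCI endpoint framework; the wrapper struct and helper names here are hypothetical, and the pci_epc_map_addr() signature varies slightly across kernel versions:

/* Sketch only; assumes <linux/pci-epc.h>, <linux/pci-epf.h>, <linux/mhi_ep.h>. */
struct pci_epf_mhi {			/* hypothetical wrapper */
	struct mhi_ep_cntrl mhi_cntrl;
	struct pci_epf *epf;
};

static void __iomem *example_alloc_addr(struct mhi_ep_cntrl *mhi_cntrl,
					phys_addr_t *phys, size_t size)
{
	struct pci_epf_mhi *epf_mhi =
		container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl);

	/* Carve a window out of the endpoint's outbound address space */
	return pci_epc_mem_alloc_addr(epf_mhi->epf->epc, phys, size);
}

static int example_map_addr(struct mhi_ep_cntrl *mhi_cntrl, phys_addr_t phys,
			    u64 pci_addr, size_t size)
{
	struct pci_epf_mhi *epf_mhi =
		container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl);
	struct pci_epf *epf = epf_mhi->epf;

	/* Map the window so accesses target host memory at pci_addr */
	return pci_epc_map_addr(epf->epc, epf->func_no, epf->vfunc_no,
				phys, pci_addr, size);
}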
+
+static void mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_cntrl->unmap_addr(mhi_cntrl, mhi_cntrl->cmd_ctx_cache_phys);
+ mhi_cntrl->free_addr(mhi_cntrl, mhi_cntrl->cmd_ctx_cache_phys,
+ mhi_cntrl->cmd_ctx_cache, mhi_cntrl->cmd_ctx_host_size);
+ mhi_cntrl->unmap_addr(mhi_cntrl, mhi_cntrl->ev_ctx_cache_phys);
+ mhi_cntrl->free_addr(mhi_cntrl, mhi_cntrl->ev_ctx_cache_phys,
+ mhi_cntrl->ev_ctx_cache, mhi_cntrl->ev_ctx_host_size);
+ mhi_cntrl->unmap_addr(mhi_cntrl, mhi_cntrl->ch_ctx_cache_phys);
+ mhi_cntrl->free_addr(mhi_cntrl, mhi_cntrl->ch_ctx_cache_phys,
+ mhi_cntrl->ch_ctx_cache, mhi_cntrl->ch_ctx_host_size);
+}
+
+static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_enable_chdb_interrupts(mhi_cntrl);
+ mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl);
+ mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl);
+}
+
+static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state state;
+ u32 max_cnt = 0;
+ bool mhi_reset;
+ int ret;
+
+ /* Wait for Host to set the M0 state */
+ do {
+ msleep(MHI_SUSPEND_MIN);
+ mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
+ if (mhi_reset) {
+ /* Clear the MHI reset if host is in reset state */
+ mhi_ep_mmio_clear_reset(mhi_cntrl);
+ dev_dbg(dev, "Host initiated reset while waiting for M0\n");
+ }
+ max_cnt++;
+ } while (state != MHI_STATE_M0 && max_cnt < MHI_SUSPEND_TIMEOUT);
+
+ if (state == MHI_STATE_M0) {
+ ret = mhi_ep_cache_host_cfg(mhi_cntrl);
+ if (ret) {
+ dev_err(dev, "Failed to cache host config\n");
+ return ret;
+ }
+
+ mhi_ep_mmio_set_env(mhi_cntrl, MHI_EP_AMSS_EE);
+ } else {
+ dev_err(dev, "Host failed to enter M0\n");
+ return -ETIMEDOUT;
+ }
+
+ /* Enable all interrupts now */
+ mhi_ep_enable_int(mhi_cntrl);
+
+ return 0;
+}
+
+static void mhi_ep_ring_worker(struct work_struct *work)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = container_of(work,
+ struct mhi_ep_cntrl, ring_work);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ep_ring *ring;
+ struct list_head *cp, *q;
+ unsigned long flags;
+ int ret = 0;
+
+ /* Process the command ring first */
+ ret = mhi_ep_process_ring(&mhi_cntrl->mhi_cmd->ring);
+	if (ret) {
+		dev_err(dev, "Error processing command ring\n");
+		return;
+	}
+
+ spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
+ /* Process the channel rings now */
+ list_for_each_safe(cp, q, &mhi_cntrl->ch_db_list) {
+ ring = list_entry(cp, struct mhi_ep_ring, list);
+ list_del(cp);
+ ret = mhi_ep_process_ring(ring);
+ if (ret) {
+ dev_err(dev, "Error processing channel ring: %d\n", ring->ch_id);
+ goto err_unlock;
+ }
+
+ /* Re-enable channel interrupt */
+ mhi_ep_mmio_enable_chdb_a7(mhi_cntrl, ring->ch_id);
+ }
+
+err_unlock:
+ spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);
+}
+
+static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl,
+ unsigned long ch_int, u32 ch_idx)
+{
+ struct mhi_ep_ring *ring;
+ unsigned int i;
+
+	for_each_set_bit(i, &ch_int, 32) {
+		/* Channel index varies for each register: 0, 32, 64, 96 */
+		u32 ch = ch_idx + i;	/* don't modify the for_each_set_bit() iterator */
+
+		ring = &mhi_cntrl->mhi_chan[ch].ring;
+
+		spin_lock(&mhi_cntrl->list_lock);
+		list_add(&ring->list, &mhi_cntrl->ch_db_list);
+		spin_unlock(&mhi_cntrl->list_lock);
+		/*
+		 * Disable the channel interrupt here and enable it once
+		 * the current interrupt has been serviced
+		 */
+		mhi_ep_mmio_disable_chdb_a7(mhi_cntrl, ch);
+		queue_work(mhi_cntrl->ring_wq, &mhi_cntrl->ring_work);
+	}
+}
+
+/*
+ * Channel interrupt statuses are contained in 4 registers each of 32bit length.
+ * For checking all interrupts, we need to loop through each registers and then
+ * check for bits set.
+ */
+static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 ch_int, ch_idx;
+ int i;
+
+ mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl);
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
+ ch_idx = i * MHI_MASK_CH_EV_LEN;
+
+ /* Only process channel interrupt if the mask is enabled */
+ ch_int = (mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask);
+ if (ch_int) {
+ dev_dbg(dev, "Processing channel doorbell interrupt\n");
+ mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx);
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_A7_n(i),
+ mhi_cntrl->chdb[i].status);
+ }
+ }
+}
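A small sketch (not part of the patch) of the channel-to-register mapping described in the comment above mhi_ep_check_channel_interrupt(): channel ch lands in status row ch / 32, bit ch % 32, assuming the chdb[] bookkeeping kept in struct mhi_ep_cntrl:

static bool example_chdb_pending(struct mhi_ep_cntrl *mhi_cntrl, u32 ch)
{
	u32 row = ch / MHI_MASK_CH_EV_LEN;
	u32 bit = ch % MHI_MASK_CH_EV_LEN;

	return !!(mhi_cntrl->chdb[row].status & mhi_cntrl->chdb[row].mask & BIT(bit));
}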
+
+static void mhi_ep_state_worker(struct work_struct *work)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ep_state_transition *itr, *tmp;
+ unsigned long flags;
+ LIST_HEAD(head);
+ int ret;
+
+ spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
+ list_splice_tail_init(&mhi_cntrl->st_transition_list, &head);
+ spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);
+
+ list_for_each_entry_safe(itr, tmp, &head, node) {
+ list_del(&itr->node);
+ dev_dbg(dev, "Handling MHI state transition to %s\n",
+ TO_MHI_STATE_STR(itr->state));
+
+ switch (itr->state) {
+ case MHI_STATE_M0:
+ ret = mhi_ep_set_m0_state(mhi_cntrl);
+ if (ret)
+ dev_err(dev, "Failed to transition to M0 state\n");
+ break;
+ case MHI_STATE_M3:
+ ret = mhi_ep_set_m3_state(mhi_cntrl);
+ if (ret)
+ dev_err(dev, "Failed to transition to M3 state\n");
+ break;
+ default:
+ dev_err(dev, "Invalid MHI state transition: %d", itr->state);
+ break;
+ }
+ }
+}
+
+static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl,
+ enum mhi_state state)
+{
+	struct mhi_ep_state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC);
+
+	if (!item)
+		return;
+
+	item->state = state;
+ spin_lock(&mhi_cntrl->list_lock);
+ list_add_tail(&item->node, &mhi_cntrl->st_transition_list);
+ spin_unlock(&mhi_cntrl->list_lock);
+
+ queue_work(mhi_cntrl->state_wq, &mhi_cntrl->state_work);
+}
+
+/*
+ * Interrupt handler that services interrupts raised by the host writing to
+ * MHICTRL and Command ring doorbell (CRDB) registers for state change and
+ * channel interrupts.
+ */
+static irqreturn_t mhi_ep_irq(int irq, void *data)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = data;
+
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state state;
+ u32 int_value = 0;
+ bool mhi_reset;
+
+ /* Acknowledge the interrupts */
+ mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS_A7, &int_value);
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR_A7, int_value);
+
+ /* Check for ctrl interrupt */
+ if (FIELD_GET(MHI_CTRL_INT_STATUS_A7_MSK, int_value)) {
+ dev_dbg(dev, "Processing ctrl interrupt\n");
+ mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
+ if (mhi_reset) {
+ dev_info(dev, "Host triggered MHI reset!\n");
+ disable_irq_nosync(mhi_cntrl->irq);
+ schedule_work(&mhi_cntrl->reset_work);
+ return IRQ_HANDLED;
+ }
+
+ mhi_ep_process_ctrl_interrupt(mhi_cntrl, state);
+ }
+
+ /* Check for command doorbell interrupt */
+ if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) {
+ dev_dbg(dev, "Processing command doorbell interrupt\n");
+ queue_work(mhi_cntrl->ring_wq, &mhi_cntrl->ring_work);
+ }
+
+ /* Check for channel interrupts */
+ mhi_ep_check_channel_interrupt(mhi_cntrl);
+
+ return IRQ_HANDLED;
+}
+
+static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct mhi_ep_ring *ch_ring, *ev_ring;
+ struct mhi_result result = {};
+ struct mhi_ep_chan *mhi_chan;
+ int i;
+
+ /* Stop all the channels */
+ for (i = 0; i < mhi_cntrl->max_chan; i++) {
+ ch_ring = &mhi_cntrl->mhi_chan[i].ring;
+ if (ch_ring->state == RING_STATE_UINT)
+ continue;
+
+ mhi_chan = &mhi_cntrl->mhi_chan[i];
+ mutex_lock(&mhi_chan->lock);
+ /* Send channel disconnect status to client drivers */
+ if (mhi_chan->xfer_cb) {
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ }
+
+ /* Set channel state to DISABLED */
+ mhi_chan->state = MHI_CH_STATE_DISABLED;
+ mutex_unlock(&mhi_chan->lock);
+ }
+
+ flush_workqueue(mhi_cntrl->ring_wq);
+ flush_workqueue(mhi_cntrl->state_wq);
+
+ /* Destroy devices associated with all channels */
+ device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device);
+
+ /* Stop and reset the transfer rings */
+ for (i = 0; i < mhi_cntrl->max_chan; i++) {
+ ch_ring = &mhi_cntrl->mhi_chan[i].ring;
+ if (ch_ring->state == RING_STATE_UINT)
+ continue;
+
+ mhi_chan = &mhi_cntrl->mhi_chan[i];
+ mutex_lock(&mhi_chan->lock);
+ mhi_ep_ring_stop(mhi_cntrl, ch_ring);
+ mutex_unlock(&mhi_chan->lock);
+ }
+
+ /* Stop and reset the event rings */
+ for (i = 0; i < mhi_cntrl->event_rings; i++) {
+ ev_ring = &mhi_cntrl->mhi_event[i].ring;
+ if (ev_ring->state == RING_STATE_UINT)
+ continue;
+
+ mutex_lock(&mhi_cntrl->event_lock);
+ mhi_ep_ring_stop(mhi_cntrl, ev_ring);
+ mutex_unlock(&mhi_cntrl->event_lock);
+ }
+
+ /* Stop and reset the command ring */
+ mhi_ep_ring_stop(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring);
+
+ mhi_ep_free_host_cfg(mhi_cntrl);
+ mhi_ep_mmio_mask_interrupts(mhi_cntrl);
+
+ mhi_cntrl->is_enabled = false;
+}
+
+static void mhi_ep_reset_worker(struct work_struct *work)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state cur_state;
+ int ret;
+
+ mhi_ep_abort_transfer(mhi_cntrl);
+
+ spin_lock_bh(&mhi_cntrl->state_lock);
+ /* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */
+ mhi_ep_mmio_reset(mhi_cntrl);
+ cur_state = mhi_cntrl->mhi_state;
+ spin_unlock_bh(&mhi_cntrl->state_lock);
+
+ /*
+ * Only proceed further if the reset is due to SYS_ERR. The host will
+ * issue reset during shutdown also and we don't need to do re-init in
+ * that case.
+ */
+ if (cur_state == MHI_STATE_SYS_ERR) {
+ mhi_ep_mmio_init(mhi_cntrl);
+
+ /* Set AMSS EE before signaling ready state */
+ mhi_ep_mmio_set_env(mhi_cntrl, MHI_EP_AMSS_EE);
+
+ /* All set, notify the host that we are ready */
+ ret = mhi_ep_set_ready_state(mhi_cntrl);
+ if (ret)
+ return;
+
+ dev_dbg(dev, "READY state notification sent to the host\n");
+
+ ret = mhi_ep_enable(mhi_cntrl);
+ if (ret) {
+ dev_err(dev, "Failed to enable MHI endpoint: %d\n", ret);
+ return;
+ }
+
+ enable_irq(mhi_cntrl->irq);
+ }
+}
+
+int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret, i;
+
+ /*
+ * Mask all interrupts until the state machine is ready. Interrupts will
+ * be enabled later with mhi_ep_enable().
+ */
+ mhi_ep_mmio_mask_interrupts(mhi_cntrl);
+ mhi_ep_mmio_init(mhi_cntrl);
+
+	mhi_cntrl->mhi_event = kcalloc(mhi_cntrl->event_rings, sizeof(*mhi_cntrl->mhi_event),
+				       GFP_KERNEL);
+ if (!mhi_cntrl->mhi_event)
+ return -ENOMEM;
+
+ /* Initialize command, channel and event rings */
+ mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0);
+ for (i = 0; i < mhi_cntrl->max_chan; i++)
+ mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i);
+ for (i = 0; i < mhi_cntrl->event_rings; i++)
+ mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i);
+
+ spin_lock_bh(&mhi_cntrl->state_lock);
+ mhi_cntrl->mhi_state = MHI_STATE_RESET;
+ spin_unlock_bh(&mhi_cntrl->state_lock);
+
+ /* Set AMSS EE before signaling ready state */
+ mhi_ep_mmio_set_env(mhi_cntrl, MHI_EP_AMSS_EE);
+
+ /* All set, notify the host that we are ready */
+ ret = mhi_ep_set_ready_state(mhi_cntrl);
+ if (ret)
+ goto err_free_event;
+
+ dev_dbg(dev, "READY state notification sent to the host\n");
+
+ ret = mhi_ep_enable(mhi_cntrl);
+ if (ret) {
+ dev_err(dev, "Failed to enable MHI endpoint: %d\n", ret);
+ goto err_free_event;
+ }
+
+ enable_irq(mhi_cntrl->irq);
+ mhi_cntrl->is_enabled = true;
+
+ return 0;
+
+err_free_event:
+ kfree(mhi_cntrl->mhi_event);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_ep_power_up);
+
+void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
+{
+	disable_irq(mhi_cntrl->irq);
+
+	if (mhi_cntrl->is_enabled)
+		mhi_ep_abort_transfer(mhi_cntrl);
+
+	kfree(mhi_cntrl->mhi_event);
+}
+EXPORT_SYMBOL_GPL(mhi_ep_power_down);
+
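+/*
+ * Mark all RUNNING channels as SUSPENDED in the channel context cache. Used
+ * while entering the M3 state, see mhi_ep_set_m3_state().
+ */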
+void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct mhi_ep_chan *mhi_chan;
+ u32 tmp;
+ int i;
+
+ for (i = 0; i < mhi_cntrl->max_chan; i++) {
+ mhi_chan = &mhi_cntrl->mhi_chan[i];
+
+ if (!mhi_chan->mhi_dev)
+ continue;
+
+ mutex_lock(&mhi_chan->lock);
+ /* Skip if the channel is not currently running */
+ tmp = mhi_cntrl->ch_ctx_cache[i].chcfg;
+ if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) {
+ mutex_unlock(&mhi_chan->lock);
+ continue;
+ }
+
+ dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n");
+ /* Set channel state to SUSPENDED */
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= (MHI_CH_STATE_SUSPENDED << CHAN_CTX_CHSTATE_SHIFT);
+ mhi_cntrl->ch_ctx_cache[i].chcfg = tmp;
+ mutex_unlock(&mhi_chan->lock);
+ }
+}
+
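+/* Counterpart of mhi_ep_suspend_channels(), used while moving back to M0 */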
+void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct mhi_ep_chan *mhi_chan;
+ u32 tmp;
+ int i;
+
+ for (i = 0; i < mhi_cntrl->max_chan; i++) {
+ mhi_chan = &mhi_cntrl->mhi_chan[i];
+
+ if (!mhi_chan->mhi_dev)
+ continue;
+
+ mutex_lock(&mhi_chan->lock);
+ /* Skip if the channel is not currently suspended */
+ tmp = mhi_cntrl->ch_ctx_cache[i].chcfg;
+ if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) {
+ mutex_unlock(&mhi_chan->lock);
+ continue;
+ }
+
+ dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n");
+ /* Set channel state to RUNNING */
+ tmp &= ~CHAN_CTX_CHSTATE_MASK;
+ tmp |= (MHI_CH_STATE_RUNNING << CHAN_CTX_CHSTATE_SHIFT);
+ mhi_cntrl->ch_ctx_cache[i].chcfg = tmp;
+ mutex_unlock(&mhi_chan->lock);
+ }
+}
+
+static void mhi_ep_release_device(struct device *dev)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+
+	/*
+	 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
+	 * devices for the channels will only get created if the mhi_dev
+	 * associated with them is NULL.
+	 */
+ if (mhi_dev->ul_chan)
+ mhi_dev->ul_chan->mhi_dev = NULL;
+
+ if (mhi_dev->dl_chan)
+ mhi_dev->dl_chan->mhi_dev = NULL;
+
+ kfree(mhi_dev);
+}
+
+static struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct mhi_ep_device *mhi_dev;
+ struct device *dev;
+
+ mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
+ if (!mhi_dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev = &mhi_dev->dev;
+ device_initialize(dev);
+ dev->bus = &mhi_ep_bus_type;
+ dev->release = mhi_ep_release_device;
+
+ if (mhi_cntrl->mhi_dev) {
+ /* for MHI client devices, parent is the MHI controller device */
+ dev->parent = &mhi_cntrl->mhi_dev->dev;
+ } else {
+ /* for MHI controller device, parent is the bus device (e.g. PCI EPF) */
+ dev->parent = mhi_cntrl->cntrl_dev;
+ }
+
+ mhi_dev->mhi_cntrl = mhi_cntrl;
+
+ return mhi_dev;
+}
+
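+/*
+ * Create the MHI device for a channel pair. Channels are expected to be
+ * defined in adjacent UL/DL pairs, so both "ch_id" and "ch_id + 1" get
+ * bound to the same device here.
+ */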
+static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
+{
+ struct mhi_ep_device *mhi_dev;
+ struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
+ int ret;
+
+ mhi_dev = mhi_ep_alloc_device(mhi_cntrl);
+ if (IS_ERR(mhi_dev))
+ return PTR_ERR(mhi_dev);
+
+ mhi_dev->dev_type = MHI_DEVICE_XFER;
+
+ /* Configure primary channel */
+ if (mhi_chan->dir == DMA_TO_DEVICE) {
+ mhi_dev->ul_chan = mhi_chan;
+ mhi_dev->ul_chan_id = mhi_chan->chan;
+ } else {
+ mhi_dev->dl_chan = mhi_chan;
+ mhi_dev->dl_chan_id = mhi_chan->chan;
+ }
+
+ get_device(&mhi_dev->dev);
+ mhi_chan->mhi_dev = mhi_dev;
+
+ /* Configure secondary channel as well */
+ mhi_chan++;
+ if (mhi_chan->dir == DMA_TO_DEVICE) {
+ mhi_dev->ul_chan = mhi_chan;
+ mhi_dev->ul_chan_id = mhi_chan->chan;
+ } else {
+ mhi_dev->dl_chan = mhi_chan;
+ mhi_dev->dl_chan_id = mhi_chan->chan;
+ }
+
+ get_device(&mhi_dev->dev);
+ mhi_chan->mhi_dev = mhi_dev;
+
+ /* Channel name is same for both UL and DL */
+ mhi_dev->name = mhi_chan->name;
+ dev_set_name(&mhi_dev->dev, "%s_%s",
+ dev_name(&mhi_cntrl->mhi_dev->dev),
+ mhi_dev->name);
+
+ ret = device_add(&mhi_dev->dev);
+ if (ret)
+ put_device(&mhi_dev->dev);
+
+ return ret;
+}
+
+static int mhi_ep_destroy_device(struct device *dev, void *data)
+{
+ struct mhi_ep_device *mhi_dev;
+ struct mhi_ep_cntrl *mhi_cntrl;
+ struct mhi_ep_chan *ul_chan, *dl_chan;
+
+ if (dev->bus != &mhi_ep_bus_type)
+ return 0;
+
+ mhi_dev = to_mhi_ep_device(dev);
+ mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ /* Only destroy devices created for channels */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ ul_chan = mhi_dev->ul_chan;
+ dl_chan = mhi_dev->dl_chan;
+
+ if (ul_chan)
+ put_device(&ul_chan->mhi_dev->dev);
+
+ if (dl_chan)
+ put_device(&dl_chan->mhi_dev->dev);
+
+ dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n",
+ mhi_dev->name);
+
+ /* Notify the client and remove the device from MHI bus */
+ device_del(dev);
+ put_device(dev);
+
+ return 0;
+}
+
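+/* Populate the channel array from the controller supplied configuration */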
+static int parse_ch_cfg(struct mhi_ep_cntrl *mhi_cntrl,
+ const struct mhi_ep_cntrl_config *config)
+{
+ const struct mhi_ep_channel_config *ch_cfg;
+ struct device *dev = mhi_cntrl->cntrl_dev;
+ u32 chan, i;
+ int ret = -EINVAL;
+
+ mhi_cntrl->max_chan = config->max_channels;
+
+ /*
+ * Allocate max_channels supported by the MHI endpoint and populate
+ * only the defined channels
+ */
+ mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan),
+ GFP_KERNEL);
+ if (!mhi_cntrl->mhi_chan)
+ return -ENOMEM;
+
+ for (i = 0; i < config->num_channels; i++) {
+ struct mhi_ep_chan *mhi_chan;
+
+ ch_cfg = &config->ch_cfg[i];
+
+ chan = ch_cfg->num;
+ if (chan >= mhi_cntrl->max_chan) {
+ dev_err(dev, "Channel %d not available\n", chan);
+ goto error_chan_cfg;
+ }
+
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ mhi_chan->name = ch_cfg->name;
+ mhi_chan->chan = chan;
+ mhi_chan->dir = ch_cfg->dir;
+ mutex_init(&mhi_chan->lock);
+
+		/* Bi-directional and directionless channels are not supported */
+ if (mhi_chan->dir == DMA_BIDIRECTIONAL || mhi_chan->dir == DMA_NONE) {
+ dev_err(dev, "Invalid channel configuration\n");
+ goto error_chan_cfg;
+ }
+ }
+
+ return 0;
+
+error_chan_cfg:
+ kfree(mhi_cntrl->mhi_chan);
+
+ return ret;
+}
+
+/*
+ * Allocate channel and command rings here. Event rings will be allocated
+ * in mhi_ep_power_up() as the config comes from the host.
+ */
+int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
+ const struct mhi_ep_cntrl_config *config)
+{
+ struct mhi_ep_device *mhi_dev;
+ int ret;
+
+ if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq)
+ return -EINVAL;
+
+ ret = parse_ch_cfg(mhi_cntrl, config);
+ if (ret)
+ return ret;
+
+ mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
+ if (!mhi_cntrl->mhi_cmd) {
+ ret = -ENOMEM;
+ goto err_free_ch;
+ }
+
+ INIT_WORK(&mhi_cntrl->ring_work, mhi_ep_ring_worker);
+ INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker);
+ INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker);
+
+ mhi_cntrl->ring_wq = alloc_ordered_workqueue("mhi_ep_ring_wq", WQ_HIGHPRI);
+ if (!mhi_cntrl->ring_wq) {
+ ret = -ENOMEM;
+ goto err_free_cmd;
+ }
+
+ mhi_cntrl->state_wq = alloc_ordered_workqueue("mhi_ep_state_wq", WQ_HIGHPRI);
+ if (!mhi_cntrl->state_wq) {
+ ret = -ENOMEM;
+ goto err_destroy_ring_wq;
+ }
+
+ INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
+ INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
+ spin_lock_init(&mhi_cntrl->list_lock);
+ mutex_init(&mhi_cntrl->event_lock);
+
+ /* Set MHI version and AMSS EE before enumeration */
+ mhi_ep_mmio_write(mhi_cntrl, MHIVER, config->mhi_version);
+ mhi_ep_mmio_set_env(mhi_cntrl, MHI_EP_AMSS_EE);
+
+ /* Set controller index */
+ mhi_cntrl->index = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL);
+ if (mhi_cntrl->index < 0) {
+ ret = mhi_cntrl->index;
+ goto err_destroy_state_wq;
+ }
+
+ irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN);
+ ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH,
+ "doorbell_irq", mhi_cntrl);
+ if (ret) {
+ dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ: %d\n", ret);
+ goto err_ida_free;
+ }
+
+ /* Allocate the controller device */
+ mhi_dev = mhi_ep_alloc_device(mhi_cntrl);
+ if (IS_ERR(mhi_dev)) {
+ dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n");
+ ret = PTR_ERR(mhi_dev);
+ goto err_free_irq;
+ }
+
+ mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
+ dev_set_name(&mhi_dev->dev, "mhi_ep%d", mhi_cntrl->index);
+ mhi_dev->name = dev_name(&mhi_dev->dev);
+
+ ret = device_add(&mhi_dev->dev);
+ if (ret)
+ goto err_release_dev;
+
+ mhi_cntrl->mhi_dev = mhi_dev;
+
+ dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n");
+
+ return 0;
+
+err_release_dev:
+ put_device(&mhi_dev->dev);
+err_free_irq:
+ free_irq(mhi_cntrl->irq, mhi_cntrl);
+err_ida_free:
+ ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
+err_destroy_state_wq:
+ destroy_workqueue(mhi_cntrl->state_wq);
+err_destroy_ring_wq:
+ destroy_workqueue(mhi_cntrl->ring_wq);
+err_free_cmd:
+ kfree(mhi_cntrl->mhi_cmd);
+err_free_ch:
+ kfree(mhi_cntrl->mhi_chan);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_ep_register_controller);
+
+/*
+ * It is expected that the controller drivers will power down the MHI EP stack
+ * using "mhi_ep_power_down()" before calling this function to unregister themselves.
+ */
+void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev;
+
+ destroy_workqueue(mhi_cntrl->state_wq);
+ destroy_workqueue(mhi_cntrl->ring_wq);
+
+ free_irq(mhi_cntrl->irq, mhi_cntrl);
+
+ kfree(mhi_cntrl->mhi_cmd);
+ kfree(mhi_cntrl->mhi_chan);
+
+ device_del(&mhi_dev->dev);
+ put_device(&mhi_dev->dev);
+
+ ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
+}
+EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller);
+
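+/* Wire up the channel transfer callbacks before handing over to the client driver */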
+static int mhi_ep_driver_probe(struct device *dev)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+ struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
+ struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan;
+ struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan;
+
+ if (ul_chan)
+ ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
+
+ if (dl_chan)
+ dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
+
+ return mhi_drv->probe(mhi_dev, mhi_dev->id);
+}
+
+static int mhi_ep_driver_remove(struct device *dev)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+ struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver);
+ struct mhi_result result = {};
+ struct mhi_ep_chan *mhi_chan;
+ int dir;
+
+ /* Skip if it is a controller device */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ /* Disconnect the channels associated with the driver */
+ for (dir = 0; dir < 2; dir++) {
+ mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;
+
+ if (!mhi_chan)
+ continue;
+
+ mutex_lock(&mhi_chan->lock);
+ /* Send channel disconnect status to the client driver */
+ if (mhi_chan->xfer_cb) {
+ result.transaction_status = -ENOTCONN;
+ result.bytes_xferd = 0;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ }
+
+ /* Set channel state to DISABLED */
+ mhi_chan->state = MHI_CH_STATE_DISABLED;
+ mhi_chan->xfer_cb = NULL;
+ mutex_unlock(&mhi_chan->lock);
+ }
+
+ /* Remove the client driver now */
+ mhi_drv->remove(mhi_dev);
+
+ return 0;
+}
+
+int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner)
+{
+ struct device_driver *driver = &mhi_drv->driver;
+
+ if (!mhi_drv->probe || !mhi_drv->remove)
+ return -EINVAL;
+
+ driver->bus = &mhi_ep_bus_type;
+ driver->owner = owner;
+ driver->probe = mhi_ep_driver_probe;
+ driver->remove = mhi_ep_driver_remove;
+
+ return driver_register(driver);
+}
+EXPORT_SYMBOL_GPL(__mhi_ep_driver_register);
+
+void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv)
+{
+ driver_unregister(&mhi_drv->driver);
+}
+EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister);
+
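+/* Emit MODALIAS so that udev can autoload the matching client driver */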
+static int mhi_ep_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+
+ return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT,
+ mhi_dev->name);
+}
+
+static int mhi_ep_match(struct device *dev, struct device_driver *drv)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+ struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv);
+ const struct mhi_device_id *id;
+
+ /*
+ * If the device is a controller type then there is no client driver
+ * associated with it
+ */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ for (id = mhi_drv->id_table; id->chan[0]; id++)
+ if (!strcmp(mhi_dev->name, id->chan)) {
+ mhi_dev->id = id;
+ return 1;
+ }
+
+ return 0;
+}
+
+struct bus_type mhi_ep_bus_type = {
+ .name = "mhi_ep",
+ .dev_name = "mhi_ep",
+ .match = mhi_ep_match,
+ .uevent = mhi_ep_uevent,
+};
+
+static int __init mhi_ep_init(void)
+{
+ return bus_register(&mhi_ep_bus_type);
+}
+
+static void __exit mhi_ep_exit(void)
+{
+ bus_unregister(&mhi_ep_bus_type);
+}
+
+postcore_initcall(mhi_ep_init);
+module_exit(mhi_ep_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MHI Bus Endpoint stack");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
diff --git a/drivers/bus/mhi/ep/mmio.c b/drivers/bus/mhi/ep/mmio.c
new file mode 100644
index 000000000000..157ef1240f6f
--- /dev/null
+++ b/drivers/bus/mhi/ep/mmio.c
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/mhi_ep.h>
+
+#include "internal.h"
+
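+/* Raw accessors for the MMIO region shared with the host */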
+void mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 *regval)
+{
+ *regval = readl(mhi_cntrl->mmio + offset);
+}
+
+void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val)
+{
+ writel(val, mhi_cntrl->mmio + offset);
+}
+
+void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 mask,
+ u32 shift, u32 val)
+{
+ u32 regval;
+
+ mhi_ep_mmio_read(mhi_cntrl, offset, &regval);
+ regval &= ~mask;
+ regval |= ((val << shift) & mask);
+ mhi_ep_mmio_write(mhi_cntrl, offset, regval);
+}
+
+int mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset,
+			    u32 mask, u32 shift, u32 *regval)
+{
+	mhi_ep_mmio_read(mhi_cntrl, offset, regval);
+ *regval &= mask;
+ *regval >>= shift;
+
+ return 0;
+}
+
+void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state,
+ bool *mhi_reset)
+{
+ u32 regval;
+
+ mhi_ep_mmio_read(mhi_cntrl, MHICTRL, &regval);
+ *state = FIELD_GET(MHICTRL_MHISTATE_MASK, regval);
+ *mhi_reset = !!FIELD_GET(MHICTRL_RESET_MASK, regval);
+}
+
+static void mhi_ep_mmio_mask_set_chdb_int_a7(struct mhi_ep_cntrl *mhi_cntrl,
+ u32 chdb_id, bool enable)
+{
+ u32 chid_mask, chid_idx, chid_shft, val = 0;
+
+ chid_shft = chdb_id % 32;
+ chid_mask = BIT(chid_shft);
+ chid_idx = chdb_id / 32;
+
+ if (chid_idx >= MHI_MASK_ROWS_CH_EV_DB)
+ return;
+
+ if (enable)
+ val = 1;
+
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CHDB_INT_MASK_A7_n(chid_idx),
+ chid_mask, chid_shft, val);
+ mhi_ep_mmio_read(mhi_cntrl, MHI_CHDB_INT_MASK_A7_n(chid_idx),
+ &mhi_cntrl->chdb[chid_idx].mask);
+}
+
+void mhi_ep_mmio_enable_chdb_a7(struct mhi_ep_cntrl *mhi_cntrl, u32 chdb_id)
+{
+ mhi_ep_mmio_mask_set_chdb_int_a7(mhi_cntrl, chdb_id, true);
+}
+
+void mhi_ep_mmio_disable_chdb_a7(struct mhi_ep_cntrl *mhi_cntrl, u32 chdb_id)
+{
+ mhi_ep_mmio_mask_set_chdb_int_a7(mhi_cntrl, chdb_id, false);
+}
+
+static void mhi_ep_mmio_set_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable)
+{
+ u32 val = 0, i = 0;
+
+ if (enable)
+ val = MHI_CHDB_INT_MASK_A7_n_EN_ALL;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_MASK_A7_n(i), val);
+ mhi_cntrl->chdb[i].mask = val;
+ }
+}
+
+void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, true);
+}
+
+static void mhi_ep_mmio_mask_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, false);
+}
+
+void mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 i;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++)
+ mhi_ep_mmio_read(mhi_cntrl, MHI_CHDB_INT_STATUS_A7_n(i),
+ &mhi_cntrl->chdb[i].status);
+}
+
+static void mhi_ep_mmio_set_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable)
+{
+ u32 val = 0, i;
+
+ if (enable)
+ val = MHI_ERDB_INT_MASK_A7_n_EN_ALL;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++)
+ mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_MASK_A7_n(i), val);
+}
+
+static void mhi_ep_mmio_mask_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_set_erdb_interrupts(mhi_cntrl, false);
+}
+
+void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK_A7,
+ MHI_CTRL_MHICTRL_MASK,
+ MHI_CTRL_MHICTRL_SHFT, 1);
+}
+
+void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK_A7,
+ MHI_CTRL_MHICTRL_MASK,
+ MHI_CTRL_MHICTRL_SHFT, 0);
+}
+
+void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK_A7,
+ MHI_CTRL_CRDB_MASK,
+ MHI_CTRL_CRDB_SHFT, 1);
+}
+
+void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK_A7,
+ MHI_CTRL_CRDB_MASK,
+ MHI_CTRL_CRDB_SHFT, 0);
+}
+
+void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_disable_ctrl_interrupt(mhi_cntrl);
+ mhi_ep_mmio_disable_cmdb_interrupt(mhi_cntrl);
+ mhi_ep_mmio_mask_chdb_interrupts(mhi_cntrl);
+ mhi_ep_mmio_mask_erdb_interrupts(mhi_cntrl);
+}
+
+static void mhi_ep_mmio_clear_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 i = 0;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++)
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_A7_n(i),
+ MHI_CHDB_INT_CLEAR_A7_n_CLEAR_ALL);
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++)
+ mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_CLEAR_A7_n(i),
+ MHI_ERDB_INT_CLEAR_A7_n_CLEAR_ALL);
+
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR_A7,
+ MHI_CTRL_INT_MMIO_WR_CLEAR |
+ MHI_CTRL_INT_CRDB_CLEAR |
+ MHI_CTRL_INT_CRDB_MHICTRL_CLEAR);
+}
+
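+/*
+ * The context base addresses are 64-bit values programmed by the host as two
+ * 32-bit halves, so assemble them from the HIGHER/LOWER register pairs.
+ */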
+void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 ccabap_value = 0;
+
+ mhi_ep_mmio_read(mhi_cntrl, CCABAP_HIGHER, &ccabap_value);
+ mhi_cntrl->ch_ctx_host_pa = ccabap_value;
+ mhi_cntrl->ch_ctx_host_pa <<= 32;
+
+ mhi_ep_mmio_read(mhi_cntrl, CCABAP_LOWER, &ccabap_value);
+ mhi_cntrl->ch_ctx_host_pa |= ccabap_value;
+}
+
+void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 ecabap_value = 0;
+
+ mhi_ep_mmio_read(mhi_cntrl, ECABAP_HIGHER, &ecabap_value);
+ mhi_cntrl->ev_ctx_host_pa = ecabap_value;
+ mhi_cntrl->ev_ctx_host_pa <<= 32;
+
+ mhi_ep_mmio_read(mhi_cntrl, ECABAP_LOWER, &ecabap_value);
+ mhi_cntrl->ev_ctx_host_pa |= ecabap_value;
+}
+
+void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 crcbap_value = 0;
+
+ mhi_ep_mmio_read(mhi_cntrl, CRCBAP_HIGHER, &crcbap_value);
+ mhi_cntrl->cmd_ctx_host_pa = crcbap_value;
+ mhi_cntrl->cmd_ctx_host_pa <<= 32;
+
+ mhi_ep_mmio_read(mhi_cntrl, CRCBAP_LOWER, &crcbap_value);
+ mhi_cntrl->cmd_ctx_host_pa |= crcbap_value;
+}
+
+void mhi_ep_mmio_get_ch_db(struct mhi_ep_ring *ring, u64 *wr_ptr)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ u32 value = 0;
+
+ mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_h, &value);
+ *wr_ptr = value;
+ *wr_ptr <<= 32;
+
+ mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_l, &value);
+
+ *wr_ptr |= value;
+}
+
+void mhi_ep_mmio_get_er_db(struct mhi_ep_ring *ring, u64 *wr_ptr)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ u32 value = 0;
+
+ mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_h, &value);
+ *wr_ptr = value;
+ *wr_ptr <<= 32;
+
+ mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_l, &value);
+
+ *wr_ptr |= value;
+}
+
+void mhi_ep_mmio_get_cmd_db(struct mhi_ep_ring *ring, u64 *wr_ptr)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ u32 value = 0;
+
+ mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_h, &value);
+ *wr_ptr = value;
+ *wr_ptr <<= 32;
+
+ mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_l, &value);
+ *wr_ptr |= value;
+}
+
+void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value)
+{
+ mhi_ep_mmio_write(mhi_cntrl, BHI_EXECENV, value);
+}
+
+void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHICTRL, MHICTRL_RESET_MASK,
+ MHICTRL_RESET_SHIFT, 0);
+}
+
+void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_write(mhi_cntrl, MHICTRL, 0);
+ mhi_ep_mmio_write(mhi_cntrl, MHISTATUS, 0);
+ mhi_ep_mmio_clear_interrupts(mhi_cntrl);
+}
+
+void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl)
+{
+	u32 mhi_cfg;
+
+ mhi_ep_mmio_read(mhi_cntrl, CHDBOFF, &mhi_cntrl->chdb_offset);
+ mhi_ep_mmio_read(mhi_cntrl, ERDBOFF, &mhi_cntrl->erdb_offset);
+
+ mhi_ep_mmio_read(mhi_cntrl, MHICFG, &mhi_cfg);
+ mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, mhi_cfg);
+ mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, mhi_cfg);
+
+ mhi_ep_mmio_reset(mhi_cntrl);
+}
+
+void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl)
+{
+	u32 mhi_cfg;
+
+ mhi_ep_mmio_read(mhi_cntrl, MHICFG, &mhi_cfg);
+ mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, mhi_cfg);
+ mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, mhi_cfg);
+}
diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c
new file mode 100644
index 000000000000..11adfb659f16
--- /dev/null
+++ b/drivers/bus/mhi/ep/ring.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/mhi_ep.h>
+#include "internal.h"
+
+size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr)
+{
+ u64 rbase;
+
+ rbase = ring->ring_ctx->generic.rbase;
+
+ return (ptr - rbase) / sizeof(struct mhi_ep_ring_element);
+}
+
+static u32 mhi_ep_ring_num_elems(struct mhi_ep_ring *ring)
+{
+ return ring->ring_ctx->generic.rlen / sizeof(struct mhi_ep_ring_element);
+}
+
+void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring)
+{
+ ring->rd_offset++;
+ if (ring->rd_offset == ring->ring_size)
+ ring->rd_offset = 0;
+}
+
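+/*
+ * Copy the host ring contents from the cached write offset up to "end" into
+ * the local ring cache. The host memory is reached through the controller
+ * provided alloc_addr()/map_addr() callbacks. Event rings live on the host
+ * side only and are never cached.
+ */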
+static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ size_t start, copy_size;
+ struct mhi_ep_ring_element *ring_shadow;
+ phys_addr_t ring_shadow_phys;
+ size_t size = ring->ring_size * sizeof(struct mhi_ep_ring_element);
+ int ret;
+
+ /* No need to cache event rings */
+ if (ring->type == RING_TYPE_ER)
+ return 0;
+
+ /* No need to cache the ring if write pointer is unmodified */
+ if (ring->wr_offset == end)
+ return 0;
+
+ start = ring->wr_offset;
+
+ /* Allocate memory for host ring */
+ ring_shadow = mhi_cntrl->alloc_addr(mhi_cntrl, &ring_shadow_phys,
+ size);
+ if (!ring_shadow) {
+ dev_err(dev, "Failed to allocate memory for ring_shadow\n");
+ return -ENOMEM;
+ }
+
+ /* Map host ring */
+ ret = mhi_cntrl->map_addr(mhi_cntrl, ring_shadow_phys,
+ ring->ring_ctx->generic.rbase, size);
+ if (ret) {
+		dev_err(dev, "Failed to map ring_shadow\n");
+ goto err_ring_free;
+ }
+
+	dev_dbg(dev, "Caching ring: start %zu end %zu\n", start, end);
+
+ if (start < end) {
+ copy_size = (end - start) * sizeof(struct mhi_ep_ring_element);
+ memcpy_fromio(&ring->ring_cache[start], &ring_shadow[start], copy_size);
+ } else {
+ copy_size = (ring->ring_size - start) * sizeof(struct mhi_ep_ring_element);
+ memcpy_fromio(&ring->ring_cache[start], &ring_shadow[start], copy_size);
+ if (end)
+ memcpy_fromio(&ring->ring_cache[0], &ring_shadow[0],
+ end * sizeof(struct mhi_ep_ring_element));
+ }
+
+ /* Now unmap and free host ring */
+ mhi_cntrl->unmap_addr(mhi_cntrl, ring_shadow_phys);
+ mhi_cntrl->free_addr(mhi_cntrl, ring_shadow_phys, ring_shadow, size);
+
+ return 0;
+
+err_ring_free:
+	mhi_cntrl->free_addr(mhi_cntrl, ring_shadow_phys, ring_shadow, size);
+
+ return ret;
+}
+
+static int mhi_ep_cache_ring(struct mhi_ep_ring *ring, u64 wr_ptr)
+{
+ size_t wr_offset;
+ int ret;
+
+ wr_offset = mhi_ep_ring_addr2offset(ring, wr_ptr);
+
+ /* Cache the host ring till write offset */
+ ret = __mhi_ep_cache_ring(ring, wr_offset);
+ if (ret)
+ return ret;
+
+ ring->wr_offset = wr_offset;
+
+ return 0;
+}
+
+static int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring)
+{
+ u64 wr_ptr;
+
+ switch (ring->type) {
+ case RING_TYPE_CMD:
+ mhi_ep_mmio_get_cmd_db(ring, &wr_ptr);
+ break;
+ case RING_TYPE_ER:
+ mhi_ep_mmio_get_er_db(ring, &wr_ptr);
+ break;
+ case RING_TYPE_CH:
+ mhi_ep_mmio_get_ch_db(ring, &wr_ptr);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return mhi_ep_cache_ring(ring, wr_ptr);
+}
+
+static int mhi_ep_process_ring_element(struct mhi_ep_ring *ring, size_t offset)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ep_ring_element *el;
+ int ret = -ENODEV;
+
+ /* Get the element and invoke the respective callback */
+ el = &ring->ring_cache[offset];
+
+ if (ring->ring_cb)
+ ret = ring->ring_cb(ring, el);
+ else
+ dev_err(dev, "No callback registered for ring\n");
+
+ return ret;
+}
+
+int mhi_ep_process_ring(struct mhi_ep_ring *ring)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret = 0;
+
+ /* Event rings should not be processed */
+ if (ring->type == RING_TYPE_ER)
+ return -EINVAL;
+
+ dev_dbg(dev, "Processing ring of type: %d\n", ring->type);
+
+ /* Update the write offset for the ring */
+ ret = mhi_ep_update_wr_offset(ring);
+ if (ret) {
+ dev_err(dev, "Error updating write offset for ring\n");
+ return ret;
+ }
+
+ /* Sanity check to make sure there are elements in the ring */
+ if (ring->rd_offset == ring->wr_offset)
+ return 0;
+
+ /* Process channel ring first */
+ if (ring->type == RING_TYPE_CH) {
+ ret = mhi_ep_process_ring_element(ring, ring->rd_offset);
+ if (ret)
+			dev_err(dev, "Error processing ch ring element: %zu\n", ring->rd_offset);
+
+ return ret;
+ }
+
+ /* Process command ring now */
+ while (ring->rd_offset != ring->wr_offset) {
+ ret = mhi_ep_process_ring_element(ring, ring->rd_offset);
+ if (ret) {
+			dev_err(dev, "Error processing cmd ring element: %zu\n", ring->rd_offset);
+ return ret;
+ }
+
+ mhi_ep_ring_inc_index(ring);
+ }
+
+ return 0;
+}
+
+/* TODO: Support for adding multiple ring elements to the ring */
+int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ep_ring_element *el, int size)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ep_ring_element *ring_shadow;
+ size_t ring_size = ring->ring_size * sizeof(struct mhi_ep_ring_element);
+ phys_addr_t ring_shadow_phys;
+ size_t old_offset = 0;
+ u32 num_free_elem;
+ int ret;
+
+ ret = mhi_ep_update_wr_offset(ring);
+ if (ret) {
+ dev_err(dev, "Error updating write pointer\n");
+ return ret;
+ }
+
+ if (ring->rd_offset < ring->wr_offset)
+ num_free_elem = (ring->wr_offset - ring->rd_offset) - 1;
+ else
+ num_free_elem = ((ring->ring_size - ring->rd_offset) + ring->wr_offset) - 1;
+
+ /* Check if there is space in ring for adding at least an element */
+ if (num_free_elem < 1) {
+ dev_err(dev, "No space left in the ring\n");
+ return -ENOSPC;
+ }
+
+ old_offset = ring->rd_offset;
+ mhi_ep_ring_inc_index(ring);
+
+	dev_dbg(dev, "Adding an element to ring at offset (%zu)\n", ring->rd_offset);
+
+ /* Update rp in ring context */
+ ring->ring_ctx->generic.rp = (ring->rd_offset * sizeof(struct mhi_ep_ring_element)) +
+ ring->ring_ctx->generic.rbase;
+
+ /* Allocate memory for host ring */
+ ring_shadow = mhi_cntrl->alloc_addr(mhi_cntrl, &ring_shadow_phys, ring_size);
+ if (!ring_shadow) {
+		dev_err(dev, "Failed to allocate ring_shadow\n");
+ return -ENOMEM;
+ }
+
+ /* Map host ring */
+ ret = mhi_cntrl->map_addr(mhi_cntrl, ring_shadow_phys,
+ ring->ring_ctx->generic.rbase, ring_size);
+ if (ret) {
+		dev_err(dev, "Failed to map ring_shadow\n");
+ goto err_ring_free;
+ }
+
+ /* Copy the element to ring */
+ memcpy_toio(&ring_shadow[old_offset], el, sizeof(struct mhi_ep_ring_element));
+
+ /* Now unmap and free host ring */
+ mhi_cntrl->unmap_addr(mhi_cntrl, ring_shadow_phys);
+ mhi_cntrl->free_addr(mhi_cntrl, ring_shadow_phys, ring_shadow, ring_size);
+
+ return 0;
+
+err_ring_free:
+ mhi_cntrl->free_addr(mhi_cntrl, ring_shadow_phys, ring_shadow, ring_size);
+
+ return ret;
+}
+
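+/* Set up ring type, callback and doorbell register offsets; no memory is allocated yet */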
+void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id)
+{
+ ring->state = RING_STATE_UINT;
+ ring->type = type;
+ if (ring->type == RING_TYPE_CMD) {
+ ring->ring_cb = mhi_ep_process_cmd_ring;
+ ring->db_offset_h = CRDB_HIGHER;
+ ring->db_offset_l = CRDB_LOWER;
+ } else if (ring->type == RING_TYPE_CH) {
+ ring->ring_cb = mhi_ep_process_tre_ring;
+ ring->db_offset_h = CHDB_HIGHER_n(id);
+ ring->db_offset_l = CHDB_LOWER_n(id);
+ ring->ch_id = id;
+ } else if (ring->type == RING_TYPE_ER) {
+ ring->db_offset_h = ERDB_HIGHER_n(id);
+ ring->db_offset_l = ERDB_LOWER_n(id);
+ }
+}
+
+int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
+ union mhi_ep_ring_ctx *ctx)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ ring->mhi_cntrl = mhi_cntrl;
+ ring->ring_ctx = ctx;
+ ring->ring_size = mhi_ep_ring_num_elems(ring);
+
+ /* During ring init, both rp and wp are equal */
+ ring->rd_offset = mhi_ep_ring_addr2offset(ring, ring->ring_ctx->generic.rp);
+ ring->wr_offset = mhi_ep_ring_addr2offset(ring, ring->ring_ctx->generic.rp);
+ ring->state = RING_STATE_IDLE;
+
+ /* Allocate ring cache memory for holding the copy of host ring */
+ ring->ring_cache = kcalloc(ring->ring_size, sizeof(struct mhi_ep_ring_element),
+ GFP_KERNEL);
+ if (!ring->ring_cache)
+ return -ENOMEM;
+
+ ret = mhi_ep_cache_ring(ring, ring->ring_ctx->generic.wp);
+ if (ret) {
+ dev_err(dev, "Failed to cache ring\n");
+ kfree(ring->ring_cache);
+ return ret;
+ }
+
+ return 0;
+}
+
+void mhi_ep_ring_stop(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring)
+{
+ ring->state = RING_STATE_UINT;
+ kfree(ring->ring_cache);
+}
diff --git a/drivers/bus/mhi/ep/sm.c b/drivers/bus/mhi/ep/sm.c
new file mode 100644
index 000000000000..0bef5d808195
--- /dev/null
+++ b/drivers/bus/mhi/ep/sm.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/mhi_ep.h>
+#include "internal.h"
+
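+/* Validate the requested MHI state transition against the current state */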
+bool __must_check mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl,
+ enum mhi_state cur_mhi_state,
+ enum mhi_state mhi_state)
+{
+ bool valid = false;
+
+ switch (mhi_state) {
+ case MHI_STATE_READY:
+ valid = (cur_mhi_state == MHI_STATE_RESET);
+ break;
+ case MHI_STATE_M0:
+ valid = (cur_mhi_state == MHI_STATE_READY ||
+ cur_mhi_state == MHI_STATE_M3);
+ break;
+ case MHI_STATE_M3:
+ valid = (cur_mhi_state == MHI_STATE_M0);
+ break;
+ case MHI_STATE_SYS_ERR:
+ /* Transition to SYS_ERR state is allowed all the time */
+ valid = true;
+ break;
+ default:
+ break;
+ }
+
+ return valid;
+}
+
+int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+
+ if (!mhi_ep_check_mhi_state(mhi_cntrl, mhi_cntrl->mhi_state, mhi_state)) {
+ dev_err(dev, "MHI state change to %s from %s is not allowed!\n",
+ TO_MHI_STATE_STR(mhi_state),
+ TO_MHI_STATE_STR(mhi_cntrl->mhi_state));
+ return -EACCES;
+ }
+
+ switch (mhi_state) {
+ case MHI_STATE_READY:
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHISTATUS,
+ MHISTATUS_READY_MASK,
+ MHISTATUS_READY_SHIFT, 1);
+
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHISTATUS,
+ MHISTATUS_MHISTATE_MASK,
+ MHISTATUS_MHISTATE_SHIFT, mhi_state);
+ break;
+ case MHI_STATE_SYS_ERR:
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHISTATUS,
+ MHISTATUS_SYSERR_MASK,
+ MHISTATUS_SYSERR_SHIFT, 1);
+
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHISTATUS,
+ MHISTATUS_MHISTATE_MASK,
+ MHISTATUS_MHISTATE_SHIFT, mhi_state);
+ break;
+ case MHI_STATE_M1:
+ case MHI_STATE_M2:
+ dev_err(dev, "MHI state (%s) not supported\n", TO_MHI_STATE_STR(mhi_state));
+ return -EOPNOTSUPP;
+ case MHI_STATE_M0:
+ case MHI_STATE_M3:
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHISTATUS,
+ MHISTATUS_MHISTATE_MASK,
+ MHISTATUS_MHISTATE_SHIFT, mhi_state);
+ break;
+ default:
+ dev_err(dev, "Invalid MHI state (%d)", mhi_state);
+ return -EINVAL;
+ }
+
+ mhi_cntrl->mhi_state = mhi_state;
+
+ return 0;
+}
+
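+/*
+ * Move the endpoint to M0. Coming out of M3, the suspended channels are
+ * resumed first; coming out of READY, the AMSS execution environment event
+ * is additionally sent to the host.
+ */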
+int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state old_state;
+ int ret;
+
+ /* If MHI is in M3, resume suspended channels */
+ spin_lock_bh(&mhi_cntrl->state_lock);
+ old_state = mhi_cntrl->mhi_state;
+ if (old_state == MHI_STATE_M3)
+ mhi_ep_resume_channels(mhi_cntrl);
+
+ ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0);
+ if (ret) {
+ mhi_ep_handle_syserr(mhi_cntrl);
+ spin_unlock_bh(&mhi_cntrl->state_lock);
+ return ret;
+ }
+
+ spin_unlock_bh(&mhi_cntrl->state_lock);
+ /* Signal host that the device moved to M0 */
+ ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0);
+ if (ret) {
+ dev_err(dev, "Failed sending M0 state change event: %d\n", ret);
+ return ret;
+ }
+
+ if (old_state == MHI_STATE_READY) {
+ /* Allow the host to process state change event */
+ mdelay(1);
+
+ /* Send AMSS EE event to host */
+ ret = mhi_ep_send_ee_event(mhi_cntrl, MHI_EP_AMSS_EE);
+ if (ret) {
+ dev_err(dev, "Failed sending AMSS EE event: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ spin_lock_bh(&mhi_cntrl->state_lock);
+ ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3);
+ if (ret) {
+ mhi_ep_handle_syserr(mhi_cntrl);
+ spin_unlock_bh(&mhi_cntrl->state_lock);
+ return ret;
+ }
+
+ spin_unlock_bh(&mhi_cntrl->state_lock);
+ mhi_ep_suspend_channels(mhi_cntrl);
+
+ /* Signal host that the device moved to M3 */
+ ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3);
+ if (ret) {
+ dev_err(dev, "Failed sending M3 state change event: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+	u32 mhi_state, is_ready;
+	int ret;
+
+ spin_lock_bh(&mhi_cntrl->state_lock);
+ /* Ensure that the MHISTATUS is set to RESET by host */
+ mhi_ep_mmio_masked_read(mhi_cntrl, MHISTATUS, MHISTATUS_MHISTATE_MASK,
+ MHISTATUS_MHISTATE_SHIFT, &mhi_state);
+ mhi_ep_mmio_masked_read(mhi_cntrl, MHISTATUS, MHISTATUS_READY_MASK,
+ MHISTATUS_READY_SHIFT, &is_ready);
+
+ if (mhi_state != MHI_STATE_RESET || is_ready) {
+ dev_err(dev, "READY state transition failed. MHI host not in RESET state\n");
+ spin_unlock_bh(&mhi_cntrl->state_lock);
+ return -EFAULT;
+ }
+
+ ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY);
+ spin_unlock_bh(&mhi_cntrl->state_lock);
+
+ return ret;
+}
diff --git a/drivers/bus/mhi/host/Kconfig b/drivers/bus/mhi/host/Kconfig
new file mode 100644
index 000000000000..da5cd0c9fc62
--- /dev/null
+++ b/drivers/bus/mhi/host/Kconfig
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# MHI bus
+#
+# Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+#
+
+config MHI_BUS
+ tristate "Modem Host Interface (MHI) bus"
+ help
+ Bus driver for MHI protocol. Modem Host Interface (MHI) is a
+ communication protocol used by the host processors to control
+ and communicate with modem devices over a high speed peripheral
+ bus or shared memory.
+
+config MHI_BUS_DEBUG
+ bool "Debugfs support for the MHI bus"
+ depends on MHI_BUS && DEBUG_FS
+ help
+ Enable debugfs support for use with the MHI transport. Allows
+ reading and/or modifying some values within the MHI controller
+ for debug and test purposes.
+
+config MHI_BUS_PCI_GENERIC
+ tristate "MHI PCI controller driver"
+ depends on MHI_BUS
+ depends on PCI
+ help
+ This driver provides MHI PCI controller driver for devices such as
+ Qualcomm SDX55 based PCIe modems.
+
diff --git a/drivers/bus/mhi/core/Makefile b/drivers/bus/mhi/host/Makefile
index c3feb4130aa3..859c2f38451c 100644
--- a/drivers/bus/mhi/core/Makefile
+++ b/drivers/bus/mhi/host/Makefile
@@ -1,4 +1,6 @@
obj-$(CONFIG_MHI_BUS) += mhi.o
-
mhi-y := init.o main.o pm.o boot.o
mhi-$(CONFIG_MHI_BUS_DEBUG) += debugfs.o
+
+obj-$(CONFIG_MHI_BUS_PCI_GENERIC) += mhi_pci_generic.o
+mhi_pci_generic-y += pci_generic.o
diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/host/boot.c
index 0a972620a403..0a972620a403 100644
--- a/drivers/bus/mhi/core/boot.c
+++ b/drivers/bus/mhi/host/boot.c
diff --git a/drivers/bus/mhi/core/debugfs.c b/drivers/bus/mhi/host/debugfs.c
index 858d7516410b..858d7516410b 100644
--- a/drivers/bus/mhi/core/debugfs.c
+++ b/drivers/bus/mhi/host/debugfs.c
diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/host/init.c
index 5aaca6d0f52b..fa904e7468d8 100644
--- a/drivers/bus/mhi/core/init.c
+++ b/drivers/bus/mhi/host/init.c
@@ -44,18 +44,6 @@ const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
[DEV_ST_TRANSITION_DISABLE] = "DISABLE",
};
-const char * const mhi_state_str[MHI_STATE_MAX] = {
- [MHI_STATE_RESET] = "RESET",
- [MHI_STATE_READY] = "READY",
- [MHI_STATE_M0] = "M0",
- [MHI_STATE_M1] = "M1",
- [MHI_STATE_M2] = "M2",
- [MHI_STATE_M3] = "M3",
- [MHI_STATE_M3_FAST] = "M3 FAST",
- [MHI_STATE_BHI] = "BHI",
- [MHI_STATE_SYS_ERR] = "SYS ERROR",
-};
-
const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = {
[MHI_CH_STATE_TYPE_RESET] = "RESET",
[MHI_CH_STATE_TYPE_STOP] = "STOP",
diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/host/internal.h
index 3a732afaf73e..c882245b9133 100644
--- a/drivers/bus/mhi/core/internal.h
+++ b/drivers/bus/mhi/host/internal.h
@@ -7,246 +7,113 @@
#ifndef _MHI_INT_H
#define _MHI_INT_H
-#include <linux/mhi.h>
+#include "../common.h"
extern struct bus_type mhi_bus_type;
-#define MHIREGLEN (0x0)
-#define MHIREGLEN_MHIREGLEN_MASK (0xFFFFFFFF)
-#define MHIREGLEN_MHIREGLEN_SHIFT (0)
-
-#define MHIVER (0x8)
-#define MHIVER_MHIVER_MASK (0xFFFFFFFF)
-#define MHIVER_MHIVER_SHIFT (0)
-
-#define MHICFG (0x10)
-#define MHICFG_NHWER_MASK (0xFF000000)
-#define MHICFG_NHWER_SHIFT (24)
-#define MHICFG_NER_MASK (0xFF0000)
-#define MHICFG_NER_SHIFT (16)
-#define MHICFG_NHWCH_MASK (0xFF00)
-#define MHICFG_NHWCH_SHIFT (8)
-#define MHICFG_NCH_MASK (0xFF)
-#define MHICFG_NCH_SHIFT (0)
-
-#define CHDBOFF (0x18)
-#define CHDBOFF_CHDBOFF_MASK (0xFFFFFFFF)
-#define CHDBOFF_CHDBOFF_SHIFT (0)
-
-#define ERDBOFF (0x20)
-#define ERDBOFF_ERDBOFF_MASK (0xFFFFFFFF)
-#define ERDBOFF_ERDBOFF_SHIFT (0)
-
-#define BHIOFF (0x28)
-#define BHIOFF_BHIOFF_MASK (0xFFFFFFFF)
-#define BHIOFF_BHIOFF_SHIFT (0)
-
-#define BHIEOFF (0x2C)
-#define BHIEOFF_BHIEOFF_MASK (0xFFFFFFFF)
-#define BHIEOFF_BHIEOFF_SHIFT (0)
-
-#define DEBUGOFF (0x30)
-#define DEBUGOFF_DEBUGOFF_MASK (0xFFFFFFFF)
-#define DEBUGOFF_DEBUGOFF_SHIFT (0)
-
-#define MHICTRL (0x38)
-#define MHICTRL_MHISTATE_MASK (0x0000FF00)
-#define MHICTRL_MHISTATE_SHIFT (8)
-#define MHICTRL_RESET_MASK (0x2)
-#define MHICTRL_RESET_SHIFT (1)
-
-#define MHISTATUS (0x48)
-#define MHISTATUS_MHISTATE_MASK (0x0000FF00)
-#define MHISTATUS_MHISTATE_SHIFT (8)
-#define MHISTATUS_SYSERR_MASK (0x4)
-#define MHISTATUS_SYSERR_SHIFT (2)
-#define MHISTATUS_READY_MASK (0x1)
-#define MHISTATUS_READY_SHIFT (0)
-
-#define CCABAP_LOWER (0x58)
-#define CCABAP_LOWER_CCABAP_LOWER_MASK (0xFFFFFFFF)
-#define CCABAP_LOWER_CCABAP_LOWER_SHIFT (0)
-
-#define CCABAP_HIGHER (0x5C)
-#define CCABAP_HIGHER_CCABAP_HIGHER_MASK (0xFFFFFFFF)
-#define CCABAP_HIGHER_CCABAP_HIGHER_SHIFT (0)
-
-#define ECABAP_LOWER (0x60)
-#define ECABAP_LOWER_ECABAP_LOWER_MASK (0xFFFFFFFF)
-#define ECABAP_LOWER_ECABAP_LOWER_SHIFT (0)
-
-#define ECABAP_HIGHER (0x64)
-#define ECABAP_HIGHER_ECABAP_HIGHER_MASK (0xFFFFFFFF)
-#define ECABAP_HIGHER_ECABAP_HIGHER_SHIFT (0)
-
-#define CRCBAP_LOWER (0x68)
-#define CRCBAP_LOWER_CRCBAP_LOWER_MASK (0xFFFFFFFF)
-#define CRCBAP_LOWER_CRCBAP_LOWER_SHIFT (0)
-
-#define CRCBAP_HIGHER (0x6C)
-#define CRCBAP_HIGHER_CRCBAP_HIGHER_MASK (0xFFFFFFFF)
-#define CRCBAP_HIGHER_CRCBAP_HIGHER_SHIFT (0)
-
-#define CRDB_LOWER (0x70)
-#define CRDB_LOWER_CRDB_LOWER_MASK (0xFFFFFFFF)
-#define CRDB_LOWER_CRDB_LOWER_SHIFT (0)
-
-#define CRDB_HIGHER (0x74)
-#define CRDB_HIGHER_CRDB_HIGHER_MASK (0xFFFFFFFF)
-#define CRDB_HIGHER_CRDB_HIGHER_SHIFT (0)
-
-#define MHICTRLBASE_LOWER (0x80)
-#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_MASK (0xFFFFFFFF)
-#define MHICTRLBASE_LOWER_MHICTRLBASE_LOWER_SHIFT (0)
-
-#define MHICTRLBASE_HIGHER (0x84)
-#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_MASK (0xFFFFFFFF)
-#define MHICTRLBASE_HIGHER_MHICTRLBASE_HIGHER_SHIFT (0)
-
-#define MHICTRLLIMIT_LOWER (0x88)
-#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_MASK (0xFFFFFFFF)
-#define MHICTRLLIMIT_LOWER_MHICTRLLIMIT_LOWER_SHIFT (0)
-
-#define MHICTRLLIMIT_HIGHER (0x8C)
-#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_MASK (0xFFFFFFFF)
-#define MHICTRLLIMIT_HIGHER_MHICTRLLIMIT_HIGHER_SHIFT (0)
-
-#define MHIDATABASE_LOWER (0x98)
-#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_MASK (0xFFFFFFFF)
-#define MHIDATABASE_LOWER_MHIDATABASE_LOWER_SHIFT (0)
-
-#define MHIDATABASE_HIGHER (0x9C)
-#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_MASK (0xFFFFFFFF)
-#define MHIDATABASE_HIGHER_MHIDATABASE_HIGHER_SHIFT (0)
-
-#define MHIDATALIMIT_LOWER (0xA0)
-#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_MASK (0xFFFFFFFF)
-#define MHIDATALIMIT_LOWER_MHIDATALIMIT_LOWER_SHIFT (0)
-
-#define MHIDATALIMIT_HIGHER (0xA4)
-#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_MASK (0xFFFFFFFF)
-#define MHIDATALIMIT_HIGHER_MHIDATALIMIT_HIGHER_SHIFT (0)
+/* MHI registers */
+#define MHIREGLEN 0x0
+#define MHIVER 0x8
+#define MHICFG 0x10
+#define CHDBOFF 0x18
+#define ERDBOFF 0x20
+#define BHIOFF 0x28
+#define BHIEOFF 0x2c
+#define DEBUGOFF 0x30
+#define MHICTRL 0x38
+#define MHISTATUS 0x48
+#define CCABAP_LOWER 0x58
+#define CCABAP_HIGHER 0x5c
+#define ECABAP_LOWER 0x60
+#define ECABAP_HIGHER 0x64
+#define CRCBAP_LOWER 0x68
+#define CRCBAP_HIGHER 0x6c
+#define CRDB_LOWER 0x70
+#define CRDB_HIGHER 0x74
+#define MHICTRLBASE_LOWER 0x80
+#define MHICTRLBASE_HIGHER 0x84
+#define MHICTRLLIMIT_LOWER 0x88
+#define MHICTRLLIMIT_HIGHER 0x8c
+#define MHIDATABASE_LOWER 0x98
+#define MHIDATABASE_HIGHER 0x9c
+#define MHIDATALIMIT_LOWER 0xa0
+#define MHIDATALIMIT_HIGHER 0xa4
/* Host request register */
-#define MHI_SOC_RESET_REQ_OFFSET (0xB0)
-#define MHI_SOC_RESET_REQ BIT(0)
+#define MHI_SOC_RESET_REQ_OFFSET 0xb0
+#define MHI_SOC_RESET_REQ BIT(0)
/* MHI BHI offfsets */
-#define BHI_BHIVERSION_MINOR (0x00)
-#define BHI_BHIVERSION_MAJOR (0x04)
-#define BHI_IMGADDR_LOW (0x08)
-#define BHI_IMGADDR_HIGH (0x0C)
-#define BHI_IMGSIZE (0x10)
-#define BHI_RSVD1 (0x14)
-#define BHI_IMGTXDB (0x18)
-#define BHI_TXDB_SEQNUM_BMSK (0x3FFFFFFF)
-#define BHI_TXDB_SEQNUM_SHFT (0)
-#define BHI_RSVD2 (0x1C)
-#define BHI_INTVEC (0x20)
-#define BHI_RSVD3 (0x24)
-#define BHI_EXECENV (0x28)
-#define BHI_STATUS (0x2C)
-#define BHI_ERRCODE (0x30)
-#define BHI_ERRDBG1 (0x34)
-#define BHI_ERRDBG2 (0x38)
-#define BHI_ERRDBG3 (0x3C)
-#define BHI_SERIALNU (0x40)
-#define BHI_SBLANTIROLLVER (0x44)
-#define BHI_NUMSEG (0x48)
-#define BHI_MSMHWID(n) (0x4C + (0x4 * (n)))
-#define BHI_OEMPKHASH(n) (0x64 + (0x4 * (n)))
-#define BHI_RSVD5 (0xC4)
-#define BHI_STATUS_MASK (0xC0000000)
-#define BHI_STATUS_SHIFT (30)
-#define BHI_STATUS_ERROR (3)
-#define BHI_STATUS_SUCCESS (2)
-#define BHI_STATUS_RESET (0)
+#define BHI_BHIVERSION_MINOR 0x00
+#define BHI_BHIVERSION_MAJOR 0x04
+#define BHI_IMGADDR_LOW 0x08
+#define BHI_IMGADDR_HIGH 0x0c
+#define BHI_IMGSIZE 0x10
+#define BHI_RSVD1 0x14
+#define BHI_IMGTXDB 0x18
+#define BHI_TXDB_SEQNUM_BMSK GENMASK(29, 0)
+#define BHI_TXDB_SEQNUM_SHFT 0
+#define BHI_RSVD2 0x1c
+#define BHI_INTVEC 0x20
+#define BHI_RSVD3 0x24
+#define BHI_EXECENV 0x28
+#define BHI_STATUS 0x2c
+#define BHI_ERRCODE 0x30
+#define BHI_ERRDBG1 0x34
+#define BHI_ERRDBG2 0x38
+#define BHI_ERRDBG3 0x3c
+#define BHI_SERIALNU 0x40
+#define BHI_SBLANTIROLLVER 0x44
+#define BHI_NUMSEG 0x48
+#define BHI_MSMHWID(n) (0x4c + (0x4 * (n)))
+#define BHI_OEMPKHASH(n) (0x64 + (0x4 * (n)))
+#define BHI_RSVD5 0xc4
+#define BHI_STATUS_MASK GENMASK(31, 30)
+#define BHI_STATUS_SHIFT 30
+#define BHI_STATUS_ERROR 3
+#define BHI_STATUS_SUCCESS 2
+#define BHI_STATUS_RESET 0
/* MHI BHIE offsets */
-#define BHIE_MSMSOCID_OFFS (0x0000)
-#define BHIE_TXVECADDR_LOW_OFFS (0x002C)
-#define BHIE_TXVECADDR_HIGH_OFFS (0x0030)
-#define BHIE_TXVECSIZE_OFFS (0x0034)
-#define BHIE_TXVECDB_OFFS (0x003C)
-#define BHIE_TXVECDB_SEQNUM_BMSK (0x3FFFFFFF)
-#define BHIE_TXVECDB_SEQNUM_SHFT (0)
-#define BHIE_TXVECSTATUS_OFFS (0x0044)
-#define BHIE_TXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF)
-#define BHIE_TXVECSTATUS_SEQNUM_SHFT (0)
-#define BHIE_TXVECSTATUS_STATUS_BMSK (0xC0000000)
-#define BHIE_TXVECSTATUS_STATUS_SHFT (30)
-#define BHIE_TXVECSTATUS_STATUS_RESET (0x00)
-#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL (0x02)
-#define BHIE_TXVECSTATUS_STATUS_ERROR (0x03)
-#define BHIE_RXVECADDR_LOW_OFFS (0x0060)
-#define BHIE_RXVECADDR_HIGH_OFFS (0x0064)
-#define BHIE_RXVECSIZE_OFFS (0x0068)
-#define BHIE_RXVECDB_OFFS (0x0070)
-#define BHIE_RXVECDB_SEQNUM_BMSK (0x3FFFFFFF)
-#define BHIE_RXVECDB_SEQNUM_SHFT (0)
-#define BHIE_RXVECSTATUS_OFFS (0x0078)
-#define BHIE_RXVECSTATUS_SEQNUM_BMSK (0x3FFFFFFF)
-#define BHIE_RXVECSTATUS_SEQNUM_SHFT (0)
-#define BHIE_RXVECSTATUS_STATUS_BMSK (0xC0000000)
-#define BHIE_RXVECSTATUS_STATUS_SHFT (30)
-#define BHIE_RXVECSTATUS_STATUS_RESET (0x00)
-#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL (0x02)
-#define BHIE_RXVECSTATUS_STATUS_ERROR (0x03)
-
-#define SOC_HW_VERSION_OFFS (0x224)
-#define SOC_HW_VERSION_FAM_NUM_BMSK (0xF0000000)
-#define SOC_HW_VERSION_FAM_NUM_SHFT (28)
-#define SOC_HW_VERSION_DEV_NUM_BMSK (0x0FFF0000)
-#define SOC_HW_VERSION_DEV_NUM_SHFT (16)
-#define SOC_HW_VERSION_MAJOR_VER_BMSK (0x0000FF00)
-#define SOC_HW_VERSION_MAJOR_VER_SHFT (8)
-#define SOC_HW_VERSION_MINOR_VER_BMSK (0x000000FF)
-#define SOC_HW_VERSION_MINOR_VER_SHFT (0)
-
-#define EV_CTX_RESERVED_MASK GENMASK(7, 0)
-#define EV_CTX_INTMODC_MASK GENMASK(15, 8)
-#define EV_CTX_INTMODC_SHIFT 8
-#define EV_CTX_INTMODT_MASK GENMASK(31, 16)
-#define EV_CTX_INTMODT_SHIFT 16
-struct mhi_event_ctxt {
- __u32 intmod;
- __u32 ertype;
- __u32 msivec;
-
- __u64 rbase __packed __aligned(4);
- __u64 rlen __packed __aligned(4);
- __u64 rp __packed __aligned(4);
- __u64 wp __packed __aligned(4);
-};
-
-#define CHAN_CTX_CHSTATE_MASK GENMASK(7, 0)
-#define CHAN_CTX_CHSTATE_SHIFT 0
-#define CHAN_CTX_BRSTMODE_MASK GENMASK(9, 8)
-#define CHAN_CTX_BRSTMODE_SHIFT 8
-#define CHAN_CTX_POLLCFG_MASK GENMASK(15, 10)
-#define CHAN_CTX_POLLCFG_SHIFT 10
-#define CHAN_CTX_RESERVED_MASK GENMASK(31, 16)
-struct mhi_chan_ctxt {
- __u32 chcfg;
- __u32 chtype;
- __u32 erindex;
-
- __u64 rbase __packed __aligned(4);
- __u64 rlen __packed __aligned(4);
- __u64 rp __packed __aligned(4);
- __u64 wp __packed __aligned(4);
-};
-
-struct mhi_cmd_ctxt {
- __u32 reserved0;
- __u32 reserved1;
- __u32 reserved2;
-
- __u64 rbase __packed __aligned(4);
- __u64 rlen __packed __aligned(4);
- __u64 rp __packed __aligned(4);
- __u64 wp __packed __aligned(4);
-};
+#define BHIE_MSMSOCID_OFFS 0x0000
+#define BHIE_TXVECADDR_LOW_OFFS 0x002c
+#define BHIE_TXVECADDR_HIGH_OFFS 0x0030
+#define BHIE_TXVECSIZE_OFFS 0x0034
+#define BHIE_TXVECDB_OFFS 0x003c
+#define BHIE_TXVECDB_SEQNUM_BMSK GENMASK(29, 0)
+#define BHIE_TXVECDB_SEQNUM_SHFT 0
+#define BHIE_TXVECSTATUS_OFFS 0x0044
+#define BHIE_TXVECSTATUS_SEQNUM_BMSK GENMASK(29, 0)
+#define BHIE_TXVECSTATUS_SEQNUM_SHFT 0
+#define BHIE_TXVECSTATUS_STATUS_BMSK GENMASK(31, 30)
+#define BHIE_TXVECSTATUS_STATUS_SHFT 30
+#define BHIE_TXVECSTATUS_STATUS_RESET 0x00
+#define BHIE_TXVECSTATUS_STATUS_XFER_COMPL 0x02
+#define BHIE_TXVECSTATUS_STATUS_ERROR 0x03
+#define BHIE_RXVECADDR_LOW_OFFS 0x0060
+#define BHIE_RXVECADDR_HIGH_OFFS 0x0064
+#define BHIE_RXVECSIZE_OFFS 0x0068
+#define BHIE_RXVECDB_OFFS 0x0070
+#define BHIE_RXVECDB_SEQNUM_BMSK GENMASK(29, 0)
+#define BHIE_RXVECDB_SEQNUM_SHFT 0
+#define BHIE_RXVECSTATUS_OFFS 0x0078
+#define BHIE_RXVECSTATUS_SEQNUM_BMSK GENMASK(29, 0)
+#define BHIE_RXVECSTATUS_SEQNUM_SHFT 0
+#define BHIE_RXVECSTATUS_STATUS_BMSK GENMASK(31, 30)
+#define BHIE_RXVECSTATUS_STATUS_SHFT 30
+#define BHIE_RXVECSTATUS_STATUS_RESET 0x00
+#define BHIE_RXVECSTATUS_STATUS_XFER_COMPL 0x02
+#define BHIE_RXVECSTATUS_STATUS_ERROR 0x03
+
+#define SOC_HW_VERSION_OFFS 0x224
+#define SOC_HW_VERSION_FAM_NUM_BMSK GENMASK(31, 28)
+#define SOC_HW_VERSION_FAM_NUM_SHFT 28
+#define SOC_HW_VERSION_DEV_NUM_BMSK GENMASK(27, 16)
+#define SOC_HW_VERSION_DEV_NUM_SHFT 16
+#define SOC_HW_VERSION_MAJOR_VER_BMSK GENMASK(15, 8)
+#define SOC_HW_VERSION_MAJOR_VER_SHFT 8
+#define SOC_HW_VERSION_MINOR_VER_BMSK GENMASK(7, 0)
+#define SOC_HW_VERSION_MINOR_VER_SHFT 0
struct mhi_ctxt {
struct mhi_event_ctxt *er_ctxt;
@@ -267,108 +134,6 @@ struct bhi_vec_entry {
u64 size;
};
-enum mhi_cmd_type {
- MHI_CMD_NOP = 1,
- MHI_CMD_RESET_CHAN = 16,
- MHI_CMD_STOP_CHAN = 17,
- MHI_CMD_START_CHAN = 18,
-};
-
-/* No operation command */
-#define MHI_TRE_CMD_NOOP_PTR (0)
-#define MHI_TRE_CMD_NOOP_DWORD0 (0)
-#define MHI_TRE_CMD_NOOP_DWORD1 (MHI_CMD_NOP << 16)
-
-/* Channel reset command */
-#define MHI_TRE_CMD_RESET_PTR (0)
-#define MHI_TRE_CMD_RESET_DWORD0 (0)
-#define MHI_TRE_CMD_RESET_DWORD1(chid) ((chid << 24) | \
- (MHI_CMD_RESET_CHAN << 16))
-
-/* Channel stop command */
-#define MHI_TRE_CMD_STOP_PTR (0)
-#define MHI_TRE_CMD_STOP_DWORD0 (0)
-#define MHI_TRE_CMD_STOP_DWORD1(chid) ((chid << 24) | \
- (MHI_CMD_STOP_CHAN << 16))
-
-/* Channel start command */
-#define MHI_TRE_CMD_START_PTR (0)
-#define MHI_TRE_CMD_START_DWORD0 (0)
-#define MHI_TRE_CMD_START_DWORD1(chid) ((chid << 24) | \
- (MHI_CMD_START_CHAN << 16))
-
-#define MHI_TRE_GET_CMD_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF)
-#define MHI_TRE_GET_CMD_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF)
-
-/* Event descriptor macros */
-#define MHI_TRE_EV_PTR(ptr) (ptr)
-#define MHI_TRE_EV_DWORD0(code, len) ((code << 24) | len)
-#define MHI_TRE_EV_DWORD1(chid, type) ((chid << 24) | (type << 16))
-#define MHI_TRE_GET_EV_PTR(tre) ((tre)->ptr)
-#define MHI_TRE_GET_EV_CODE(tre) (((tre)->dword[0] >> 24) & 0xFF)
-#define MHI_TRE_GET_EV_LEN(tre) ((tre)->dword[0] & 0xFFFF)
-#define MHI_TRE_GET_EV_CHID(tre) (((tre)->dword[1] >> 24) & 0xFF)
-#define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF)
-#define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF)
-#define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF)
-#define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0])
-#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr)
-#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits((tre)->ptr)
-#define MHI_TRE_GET_EV_VEID(tre) (((tre)->dword[0] >> 16) & 0xFF)
-#define MHI_TRE_GET_EV_LINKSPEED(tre) (((tre)->dword[1] >> 24) & 0xFF)
-#define MHI_TRE_GET_EV_LINKWIDTH(tre) ((tre)->dword[0] & 0xFF)
-
-/* Transfer descriptor macros */
-#define MHI_TRE_DATA_PTR(ptr) (ptr)
-#define MHI_TRE_DATA_DWORD0(len) (len & MHI_MAX_MTU)
-#define MHI_TRE_DATA_DWORD1(bei, ieot, ieob, chain) ((2 << 16) | (bei << 10) \
- | (ieot << 9) | (ieob << 8) | chain)
-
-/* RSC transfer descriptor macros */
-#define MHI_RSCTRE_DATA_PTR(ptr, len) (((u64)len << 48) | ptr)
-#define MHI_RSCTRE_DATA_DWORD0(cookie) (cookie)
-#define MHI_RSCTRE_DATA_DWORD1 (MHI_PKT_TYPE_COALESCING << 16)
-
-enum mhi_pkt_type {
- MHI_PKT_TYPE_INVALID = 0x0,
- MHI_PKT_TYPE_NOOP_CMD = 0x1,
- MHI_PKT_TYPE_TRANSFER = 0x2,
- MHI_PKT_TYPE_COALESCING = 0x8,
- MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10,
- MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11,
- MHI_PKT_TYPE_START_CHAN_CMD = 0x12,
- MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20,
- MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21,
- MHI_PKT_TYPE_TX_EVENT = 0x22,
- MHI_PKT_TYPE_RSC_TX_EVENT = 0x28,
- MHI_PKT_TYPE_EE_EVENT = 0x40,
- MHI_PKT_TYPE_TSYNC_EVENT = 0x48,
- MHI_PKT_TYPE_BW_REQ_EVENT = 0x50,
- MHI_PKT_TYPE_STALE_EVENT, /* internal event */
-};
-
-/* MHI transfer completion events */
-enum mhi_ev_ccs {
- MHI_EV_CC_INVALID = 0x0,
- MHI_EV_CC_SUCCESS = 0x1,
- MHI_EV_CC_EOT = 0x2, /* End of transfer event */
- MHI_EV_CC_OVERFLOW = 0x3,
- MHI_EV_CC_EOB = 0x4, /* End of block event */
- MHI_EV_CC_OOB = 0x5, /* Out of block event */
- MHI_EV_CC_DB_MODE = 0x6,
- MHI_EV_CC_UNDEFINED_ERR = 0x10,
- MHI_EV_CC_BAD_TRE = 0x11,
-};
-
-enum mhi_ch_state {
- MHI_CH_STATE_DISABLED = 0x0,
- MHI_CH_STATE_ENABLED = 0x1,
- MHI_CH_STATE_RUNNING = 0x2,
- MHI_CH_STATE_SUSPENDED = 0x3,
- MHI_CH_STATE_STOP = 0x4,
- MHI_CH_STATE_ERROR = 0x5,
-};
-
enum mhi_ch_state_type {
MHI_CH_STATE_TYPE_RESET,
MHI_CH_STATE_TYPE_STOP,
@@ -409,11 +174,6 @@ extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX];
#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \
"INVALID_STATE" : dev_state_tran_str[state])
-extern const char * const mhi_state_str[MHI_STATE_MAX];
-#define TO_MHI_STATE_STR(state) ((state >= MHI_STATE_MAX || \
- !mhi_state_str[state]) ? \
- "INVALID_STATE" : mhi_state_str[state])
-
/* internal power states */
enum mhi_pm_state {
MHI_PM_STATE_DISABLE,
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/host/main.c
index b15c5bc37dd4..b15c5bc37dd4 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/host/main.c
diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 4c577a731709..4c577a731709 100644
--- a/drivers/bus/mhi/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/host/pm.c
index 547e6e769546..547e6e769546 100644
--- a/drivers/bus/mhi/core/pm.c
+++ b/drivers/bus/mhi/host/pm.c
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 6cccc3dc00bc..7552bcc7a746 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -453,6 +453,15 @@ config MHI_NET
QCOM based WWAN modems for IP or QMAP/rmnet protocol (like SDX55).
Say Y or M.
+config MHI_EP_NET
+ tristate "MHI Endpoint network driver"
+ depends on MHI_BUS_EP
+ help
+ This is the network driver for MHI bus implementation in endpoint
+	  devices. It is used to provide the network interface for QCOM modems
+ such as SDX55.
+ Say Y or M.
+
endif # NET_CORE
config SUNGEM_PHY
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 50b23e71065f..ebe54c30651a 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_NLMON) += nlmon.o
obj-$(CONFIG_NET_VRF) += vrf.o
obj-$(CONFIG_VSOCKMON) += vsockmon.o
obj-$(CONFIG_MHI_NET) += mhi_net.o
+obj-$(CONFIG_MHI_EP_NET) += mhi_ep_net.o
#
# Networking Drivers
diff --git a/drivers/net/mhi_ep_net.c b/drivers/net/mhi_ep_net.c
new file mode 100644
index 000000000000..7b3a23852457
--- /dev/null
+++ b/drivers/net/mhi_ep_net.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MHI Endpoint Network driver
+ *
+ * Based on drivers/net/mhi_net.c
+ *
+ * Copyright (c) 2021, Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/if_arp.h>
+#include <linux/mhi_ep.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/u64_stats_sync.h>
+
+#define MHI_NET_MIN_MTU ETH_MIN_MTU
+#define MHI_NET_MAX_MTU 0xffff
+
+struct mhi_ep_net_stats {
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t rx_errors;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+ u64_stats_t tx_errors;
+ u64_stats_t tx_dropped;
+ struct u64_stats_sync tx_syncp;
+ struct u64_stats_sync rx_syncp;
+};
+
+struct mhi_ep_net_dev {
+ struct mhi_ep_device *mdev;
+ struct net_device *ndev;
+ struct sk_buff *skbagg_head;
+ struct sk_buff *skbagg_tail;
+ struct mhi_ep_net_stats stats;
+ struct workqueue_struct *xmit_wq;
+ struct work_struct xmit_work;
+ struct sk_buff_head tx_buffers;
+ spinlock_t tx_lock; /* Lock for protecting tx_buffers */
+};
+
+struct mhi_ep_device_info {
+ const char *netname;
+};
+
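+/*
+ * Drain the TX backlog towards the host. Note that the transmit direction is
+ * DMA_FROM_DEVICE when seen from the endpoint. The netdev queue is stopped
+ * whenever the transfer ring runs out of elements.
+ */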
+static void mhi_ep_net_dev_process_queue_packets(struct work_struct *work)
+{
+ struct mhi_ep_net_dev *client = container_of(work,
+ struct mhi_ep_net_dev, xmit_work);
+ struct mhi_ep_device *mdev = client->mdev;
+ struct sk_buff *skb = NULL;
+ unsigned long flags = 0;
+ int ret;
+
+ if (mhi_ep_queue_is_empty(mdev, DMA_FROM_DEVICE)) {
+ netif_stop_queue(client->ndev);
+ return;
+ }
+
+ while (!skb_queue_empty(&client->tx_buffers)) {
+ spin_lock_irqsave(&client->tx_lock, flags);
+ skb = skb_dequeue(&client->tx_buffers);
+ if (!skb) {
+ spin_unlock_irqrestore(&client->tx_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&client->tx_lock, flags);
+
+ ret = mhi_ep_queue_skb(mdev, DMA_FROM_DEVICE, skb, skb->len, MHI_EOT);
+ if (ret) {
+			kfree_skb(skb);
+ return;
+ }
+
+		u64_stats_update_begin(&client->stats.tx_syncp);
+		u64_stats_inc(&client->stats.tx_packets);
+		u64_stats_add(&client->stats.tx_bytes, skb->len);
+		u64_stats_update_end(&client->stats.tx_syncp);
+
+		consume_skb(skb);
+
+		/* Stop the queue if the host has no more buffers queued */
+		if (mhi_ep_queue_is_empty(mdev, DMA_FROM_DEVICE)) {
+			netif_stop_queue(client->ndev);
+			break;
+		}
+ }
+}
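+
+/*
+ * Note on direction naming: MHI channels are named from the host's point of
+ * view, so on the endpoint side netdev TX maps to the DL direction
+ * (DMA_FROM_DEVICE, device-to-host) used above, while netdev RX arrives via
+ * the UL direction (DMA_TO_DEVICE, host-to-device) in mhi_ep_net_ul_callback().
+ */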
+
+static int mhi_ndo_open(struct net_device *ndev)
+{
+ /* Carrier is established via out-of-band channel (e.g. qmi) */
+ netif_carrier_on(ndev);
+
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+static int mhi_ndo_stop(struct net_device *ndev)
+{
+ netif_stop_queue(ndev);
+ netif_carrier_off(ndev);
+
+ return 0;
+}
+
+static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = netdev_priv(ndev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mhi_ep_netdev->tx_lock, flags);
+ skb_queue_tail(&mhi_ep_netdev->tx_buffers, skb);
+ spin_unlock_irqrestore(&mhi_ep_netdev->tx_lock, flags);
+
+ queue_work(mhi_ep_netdev->xmit_wq, &mhi_ep_netdev->xmit_work);
+
+ return NETDEV_TX_OK;
+}
+
+static void mhi_ndo_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = netdev_priv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&mhi_ep_netdev->stats.rx_syncp);
+ stats->rx_packets = u64_stats_read(&mhi_ep_netdev->stats.rx_packets);
+ stats->rx_bytes = u64_stats_read(&mhi_ep_netdev->stats.rx_bytes);
+ stats->rx_errors = u64_stats_read(&mhi_ep_netdev->stats.rx_errors);
+ } while (u64_stats_fetch_retry_irq(&mhi_ep_netdev->stats.rx_syncp, start));
+
+ do {
+ start = u64_stats_fetch_begin_irq(&mhi_ep_netdev->stats.tx_syncp);
+ stats->tx_packets = u64_stats_read(&mhi_ep_netdev->stats.tx_packets);
+ stats->tx_bytes = u64_stats_read(&mhi_ep_netdev->stats.tx_bytes);
+ stats->tx_errors = u64_stats_read(&mhi_ep_netdev->stats.tx_errors);
+ stats->tx_dropped = u64_stats_read(&mhi_ep_netdev->stats.tx_dropped);
+ } while (u64_stats_fetch_retry_irq(&mhi_ep_netdev->stats.tx_syncp, start));
+}
+
+static const struct net_device_ops mhi_ep_netdev_ops = {
+ .ndo_open = mhi_ndo_open,
+ .ndo_stop = mhi_ndo_stop,
+ .ndo_start_xmit = mhi_ndo_xmit,
+ .ndo_get_stats64 = mhi_ndo_get_stats64,
+};
+
+static void mhi_ep_net_setup(struct net_device *ndev)
+{
+ ndev->header_ops = NULL; /* No header */
+ ndev->type = ARPHRD_RAWIP;
+ ndev->hard_header_len = 0;
+ ndev->addr_len = 0;
+ ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ ndev->netdev_ops = &mhi_ep_netdev_ops;
+ ndev->mtu = MHI_EP_DEFAULT_MTU;
+ ndev->min_mtu = MHI_NET_MIN_MTU;
+ ndev->max_mtu = MHI_NET_MAX_MTU;
+ ndev->tx_queue_len = 1000;
+}
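+
+/*
+ * The interface set up above is a raw-IP point-to-point link: no L2 header,
+ * no ARP, with the MTU defaulting to MHI_EP_DEFAULT_MTU and clamped to the
+ * [MHI_NET_MIN_MTU, MHI_NET_MAX_MTU] range.
+ */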
+
+static struct sk_buff *mhi_ep_net_skb_agg(struct mhi_ep_net_dev *mhi_ep_netdev,
+ struct sk_buff *skb)
+{
+ struct sk_buff *head = mhi_ep_netdev->skbagg_head;
+ struct sk_buff *tail = mhi_ep_netdev->skbagg_tail;
+
+ /* This is non-paged skb chaining using frag_list */
+ if (!head) {
+ mhi_ep_netdev->skbagg_head = skb;
+ return skb;
+ }
+
+ if (!skb_shinfo(head)->frag_list)
+ skb_shinfo(head)->frag_list = skb;
+ else
+ tail->next = skb;
+
+ head->len += skb->len;
+ head->data_len += skb->len;
+ head->truesize += skb->truesize;
+
+ mhi_ep_netdev->skbagg_tail = skb;
+
+ return mhi_ep_netdev->skbagg_head;
+}
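+
+/*
+ * Aggregation sketch: for a packet split across MHI transfers A, B and C
+ * (A and B completing with -EOVERFLOW, C completing cleanly), the chain
+ * built by the helper above is:
+ *
+ *	head = A, skb_shinfo(A)->frag_list = B, B->next = C
+ *
+ * with A's len/data_len/truesize grown by each fragment, so the skb finally
+ * passed to netif_receive_skb() describes the whole packet.
+ */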
+
+static void mhi_ep_net_ul_callback(struct mhi_ep_device *mhi_dev,
+ struct mhi_result *mhi_res)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = dev_get_drvdata(&mhi_dev->dev);
+ struct net_device *ndev = mhi_ep_netdev->ndev;
+ struct sk_buff *skb;
+
+	/* Size the skb to the actual transfer length */
+	skb = netdev_alloc_skb(ndev, mhi_res->bytes_xferd);
+	if (!skb) {
+		u64_stats_update_begin(&mhi_ep_netdev->stats.rx_syncp);
+		u64_stats_inc(&mhi_ep_netdev->stats.rx_errors);
+		u64_stats_update_end(&mhi_ep_netdev->stats.rx_syncp);
+		return;
+	}
+
+	/* skb->len is set via skb_put() below */
+	skb_copy_to_linear_data(skb, mhi_res->buf_addr, mhi_res->bytes_xferd);
+	skb->dev = mhi_ep_netdev->ndev;
+
+ if (unlikely(mhi_res->transaction_status)) {
+ switch (mhi_res->transaction_status) {
+ case -EOVERFLOW:
+ /* Packet can not fit in one MHI buffer and has been
+ * split over multiple MHI transfers, do re-aggregation.
+ * That usually means the device side MTU is larger than
+ * the host side MTU/MRU. Since this is not optimal,
+ * print a warning (once).
+ */
+ netdev_warn_once(mhi_ep_netdev->ndev,
+ "Fragmented packets received, fix MTU?\n");
+ skb_put(skb, mhi_res->bytes_xferd);
+ mhi_ep_net_skb_agg(mhi_ep_netdev, skb);
+ break;
+ case -ENOTCONN:
+ /* MHI layer stopping/resetting the DL channel */
+ dev_kfree_skb_any(skb);
+ return;
+ default:
+ /* Unknown error, simply drop */
+ dev_kfree_skb_any(skb);
+ u64_stats_update_begin(&mhi_ep_netdev->stats.rx_syncp);
+ u64_stats_inc(&mhi_ep_netdev->stats.rx_errors);
+ u64_stats_update_end(&mhi_ep_netdev->stats.rx_syncp);
+ }
+ } else {
+ skb_put(skb, mhi_res->bytes_xferd);
+
+ if (mhi_ep_netdev->skbagg_head) {
+ /* Aggregate the final fragment */
+ skb = mhi_ep_net_skb_agg(mhi_ep_netdev, skb);
+ mhi_ep_netdev->skbagg_head = NULL;
+ }
+
+ switch (skb->data[0] & 0xf0) {
+ case 0x40:
+ skb->protocol = htons(ETH_P_IP);
+ break;
+ case 0x60:
+ skb->protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ skb->protocol = htons(ETH_P_MAP);
+ break;
+ }
+
+ u64_stats_update_begin(&mhi_ep_netdev->stats.rx_syncp);
+ u64_stats_inc(&mhi_ep_netdev->stats.rx_packets);
+ u64_stats_add(&mhi_ep_netdev->stats.rx_bytes, skb->len);
+ u64_stats_update_end(&mhi_ep_netdev->stats.rx_syncp);
+ netif_receive_skb(skb);
+ }
+}
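+
+/*
+ * Example of the protocol dispatch above: a buffer starting with 0x45 (IPv4,
+ * IHL of 5) is tagged ETH_P_IP, one starting with 0x60 is tagged ETH_P_IPV6,
+ * and anything else (e.g. QMAP) falls back to ETH_P_MAP.
+ */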
+
+static void mhi_ep_net_dl_callback(struct mhi_ep_device *mhi_dev,
+ struct mhi_result *mhi_res)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = dev_get_drvdata(&mhi_dev->dev);
+
+ if (unlikely(mhi_res->transaction_status == -ENOTCONN))
+ return;
+
+ /* Since we got enough buffers to queue, wake the queue if stopped */
+ if (netif_queue_stopped(mhi_ep_netdev->ndev)) {
+ netif_wake_queue(mhi_ep_netdev->ndev);
+ queue_work(mhi_ep_netdev->xmit_wq, &mhi_ep_netdev->xmit_work);
+ }
+}
+
+static int mhi_ep_net_newlink(struct mhi_ep_device *mhi_dev, struct net_device *ndev)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev;
+ int err;
+
+ mhi_ep_netdev = netdev_priv(ndev);
+
+ dev_set_drvdata(&mhi_dev->dev, mhi_ep_netdev);
+ mhi_ep_netdev->ndev = ndev;
+ mhi_ep_netdev->mdev = mhi_dev;
+ mhi_ep_netdev->skbagg_head = NULL;
+
+ skb_queue_head_init(&mhi_ep_netdev->tx_buffers);
+ spin_lock_init(&mhi_ep_netdev->tx_lock);
+
+ u64_stats_init(&mhi_ep_netdev->stats.rx_syncp);
+ u64_stats_init(&mhi_ep_netdev->stats.tx_syncp);
+
+	mhi_ep_netdev->xmit_wq = create_singlethread_workqueue("mhi_ep_net_xmit_wq");
+	if (!mhi_ep_netdev->xmit_wq)
+		return -ENOMEM;
+
+	INIT_WORK(&mhi_ep_netdev->xmit_work, mhi_ep_net_dev_process_queue_packets);
+
+	err = register_netdev(ndev);
+	if (err) {
+		destroy_workqueue(mhi_ep_netdev->xmit_wq);
+		return err;
+	}
+
+ return 0;
+}
+
+static void mhi_ep_net_dellink(struct mhi_ep_device *mhi_dev, struct net_device *ndev)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = netdev_priv(ndev);
+
+	destroy_workqueue(mhi_ep_netdev->xmit_wq);
+	unregister_netdev(ndev);
+	kfree_skb(mhi_ep_netdev->skbagg_head);
+	dev_set_drvdata(&mhi_dev->dev, NULL);
+	/* free_netdev() releases the netdev_priv() area, so it must come last */
+	free_netdev(ndev);
+}
+
+static int mhi_ep_net_probe(struct mhi_ep_device *mhi_dev, const struct mhi_device_id *id)
+{
+	const struct mhi_ep_device_info *info = (const struct mhi_ep_device_info *)id->driver_data;
+ struct net_device *ndev;
+ int err;
+
+ ndev = alloc_netdev(sizeof(struct mhi_ep_net_dev), info->netname,
+ NET_NAME_PREDICTABLE, mhi_ep_net_setup);
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, &mhi_dev->dev);
+
+ err = mhi_ep_net_newlink(mhi_dev, ndev);
+ if (err) {
+ free_netdev(ndev);
+ return err;
+ }
+
+ return 0;
+}
+
+static void mhi_ep_net_remove(struct mhi_ep_device *mhi_dev)
+{
+ struct mhi_ep_net_dev *mhi_ep_netdev = dev_get_drvdata(&mhi_dev->dev);
+
+ mhi_ep_net_dellink(mhi_dev, mhi_ep_netdev->ndev);
+}
+
+static const struct mhi_ep_device_info mhi_swip0 = {
+ .netname = "mhi_swip%d",
+};
+
+static const struct mhi_device_id mhi_ep_net_id_table[] = {
+ /* Software data PATH (from modem CPU) */
+ { .chan = "IP_SW0", .driver_data = (kernel_ulong_t)&mhi_swip0 },
+ {}
+};
+MODULE_DEVICE_TABLE(mhi, mhi_ep_net_id_table);
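+
+/*
+ * Together with the do_mhi_ep_entry() change to file2alias.c in this series,
+ * the table above emits the modalias "mhi_ep:IP_SW0", allowing udev to
+ * autoload this module when the IP_SW0 channel device is created.
+ */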
+
+static struct mhi_ep_driver mhi_ep_net_driver = {
+ .probe = mhi_ep_net_probe,
+ .remove = mhi_ep_net_remove,
+ .dl_xfer_cb = mhi_ep_net_dl_callback,
+ .ul_xfer_cb = mhi_ep_net_ul_callback,
+ .id_table = mhi_ep_net_id_table,
+ .driver = {
+ .name = "mhi_ep_net",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_mhi_ep_driver(mhi_ep_net_driver);
+
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("MHI Endpoint Network driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index cfe66bf04c1d..ca423471733a 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -223,11 +223,8 @@ static void qcom_pcie_dw_stop_link(struct dw_pcie *pci)
disable_irq(pcie_ep->perst_irq);
}
-static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
+static int qcom_pcie_enable_resources(struct qcom_pcie_ep *pcie_ep)
{
- struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
- struct device *dev = pci->dev;
- u32 val, offset;
int ret;
ret = clk_bulk_prepare_enable(ARRAY_SIZE(qcom_pcie_ep_clks),
@@ -247,6 +244,38 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
if (ret)
goto err_phy_exit;
+ return 0;
+
+err_phy_exit:
+ phy_exit(pcie_ep->phy);
+err_disable_clk:
+ clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
+ qcom_pcie_ep_clks);
+
+ return ret;
+}
+
+static void qcom_pcie_disable_resources(struct qcom_pcie_ep *pcie_ep)
+{
+ phy_power_off(pcie_ep->phy);
+ phy_exit(pcie_ep->phy);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
+ qcom_pcie_ep_clks);
+}
+
+static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+ struct device *dev = pci->dev;
+ u32 val, offset;
+ int ret;
+
+ ret = qcom_pcie_enable_resources(pcie_ep);
+ if (ret) {
+ dev_err(dev, "Failed to enable resources: %d\n", ret);
+ return ret;
+ }
+
/* Assert WAKE# to RC to indicate device is ready */
gpiod_set_value_cansleep(pcie_ep->wake, 1);
usleep_range(WAKE_DELAY_US, WAKE_DELAY_US + 500);
@@ -335,7 +364,7 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
ret = dw_pcie_ep_init_complete(&pcie_ep->pci.ep);
if (ret) {
dev_err(dev, "Failed to complete initialization: %d\n", ret);
- goto err_phy_power_off;
+ goto err_disable_resources;
}
/*
@@ -355,13 +384,8 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
return 0;
-err_phy_power_off:
- phy_power_off(pcie_ep->phy);
-err_phy_exit:
- phy_exit(pcie_ep->phy);
-err_disable_clk:
- clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
+err_disable_resources:
+ qcom_pcie_disable_resources(pcie_ep);
return ret;
}
@@ -376,10 +400,7 @@ static void qcom_pcie_perst_assert(struct dw_pcie *pci)
return;
}
- phy_power_off(pcie_ep->phy);
- phy_exit(pcie_ep->phy);
- clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
+ qcom_pcie_disable_resources(pcie_ep);
pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED;
}
@@ -497,9 +518,11 @@ static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
if (FIELD_GET(PARF_INT_ALL_LINK_DOWN, status)) {
dev_dbg(dev, "Received Linkdown event\n");
pcie_ep->link_status = QCOM_PCIE_EP_LINK_DOWN;
+ pci_epc_linkdown(pci->ep.epc);
} else if (FIELD_GET(PARF_INT_ALL_BME, status)) {
dev_dbg(dev, "Received BME event. Link is enabled!\n");
pcie_ep->link_status = QCOM_PCIE_EP_LINK_ENABLED;
+ pci_epc_bme_notify(pci->ep.epc);
} else if (FIELD_GET(PARF_INT_ALL_PM_TURNOFF, status)) {
dev_dbg(dev, "Received PM Turn-off event! Entering L23\n");
val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
@@ -645,43 +668,26 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = clk_bulk_prepare_enable(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
- if (ret)
+ ret = qcom_pcie_enable_resources(pcie_ep);
+ if (ret) {
+ dev_err(dev, "Failed to enable resources: %d\n", ret);
return ret;
-
- ret = qcom_pcie_ep_core_reset(pcie_ep);
- if (ret)
- goto err_disable_clk;
-
- ret = phy_init(pcie_ep->phy);
- if (ret)
- goto err_disable_clk;
-
- /* PHY needs to be powered on for dw_pcie_ep_init() */
- ret = phy_power_on(pcie_ep->phy);
- if (ret)
- goto err_phy_exit;
+ }
ret = dw_pcie_ep_init(&pcie_ep->pci.ep);
if (ret) {
dev_err(dev, "Failed to initialize endpoint: %d\n", ret);
- goto err_phy_power_off;
+ goto err_disable_resources;
}
ret = qcom_pcie_ep_enable_irq_resources(pdev, pcie_ep);
if (ret)
- goto err_phy_power_off;
+ goto err_disable_resources;
return 0;
-err_phy_power_off:
- phy_power_off(pcie_ep->phy);
-err_phy_exit:
- phy_exit(pcie_ep->phy);
-err_disable_clk:
- clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks),
- qcom_pcie_ep_clks);
+err_disable_resources:
+ qcom_pcie_disable_resources(pcie_ep);
return ret;
}
diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig
index 5f1242ca2f4e..93497fb70e31 100644
--- a/drivers/pci/endpoint/functions/Kconfig
+++ b/drivers/pci/endpoint/functions/Kconfig
@@ -25,3 +25,13 @@ config PCI_EPF_NTB
device tree.
If in doubt, say "N" to disable Endpoint NTB driver.
+
+config PCI_EPF_MHI
+ tristate "PCI Endpoint driver for MHI bus"
+ depends on PCI_ENDPOINT && MHI_BUS_EP
+ help
+ Enable this configuration option to enable the PCI Endpoint
+ driver for Modem Host Interface (MHI) bus found in Qualcomm
+ modems such as SDX55.
+
+ If in doubt, say "N" to disable Endpoint driver for MHI bus.
diff --git a/drivers/pci/endpoint/functions/Makefile b/drivers/pci/endpoint/functions/Makefile
index 96ab932a537a..eee99b2e9103 100644
--- a/drivers/pci/endpoint/functions/Makefile
+++ b/drivers/pci/endpoint/functions/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_PCI_EPF_TEST) += pci-epf-test.o
obj-$(CONFIG_PCI_EPF_NTB) += pci-epf-ntb.o
+obj-$(CONFIG_PCI_EPF_MHI) += pci-epf-mhi.o
diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
new file mode 100644
index 000000000000..dc81284cfd77
--- /dev/null
+++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI EPF driver for MHI Endpoint devices
+ *
+ * Copyright (C) 2021 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/mhi_ep.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+#define MHI_VERSION_1_0 0x01000000
+
+static struct workqueue_struct *pci_epf_mhi_wq;
+
+struct pci_epf_mhi_ep_info {
+ const struct mhi_ep_cntrl_config *config;
+ struct pci_epf_header *epf_header;
+ enum pci_barno bar_num;
+ u32 epf_flags;
+ u32 msi_count;
+};
+
+#define MHI_EP_CHANNEL_CONFIG_UL(ch_num, ch_name) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .dir = DMA_TO_DEVICE, \
+ }
+
+#define MHI_EP_CHANNEL_CONFIG_DL(ch_num, ch_name) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .dir = DMA_FROM_DEVICE, \
+ }
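+
+/*
+ * For example, MHI_EP_CHANNEL_CONFIG_UL(0, "LOOPBACK") expands to the entry:
+ *
+ *	{ .num = 0, .name = "LOOPBACK", .dir = DMA_TO_DEVICE, }
+ *
+ * i.e. a host-to-device channel in the table below.
+ */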
+
+static const struct mhi_ep_channel_config mhi_v1_channels[] = {
+ MHI_EP_CHANNEL_CONFIG_UL(0, "LOOPBACK"),
+ MHI_EP_CHANNEL_CONFIG_DL(1, "LOOPBACK"),
+ MHI_EP_CHANNEL_CONFIG_UL(2, "SAHARA"),
+ MHI_EP_CHANNEL_CONFIG_DL(3, "SAHARA"),
+ MHI_EP_CHANNEL_CONFIG_UL(4, "DIAG"),
+ MHI_EP_CHANNEL_CONFIG_DL(5, "DIAG"),
+ MHI_EP_CHANNEL_CONFIG_UL(6, "SSR"),
+ MHI_EP_CHANNEL_CONFIG_DL(7, "SSR"),
+ MHI_EP_CHANNEL_CONFIG_UL(8, "QDSS"),
+ MHI_EP_CHANNEL_CONFIG_DL(9, "QDSS"),
+ MHI_EP_CHANNEL_CONFIG_UL(10, "EFS"),
+ MHI_EP_CHANNEL_CONFIG_DL(11, "EFS"),
+ MHI_EP_CHANNEL_CONFIG_UL(12, "MBIM"),
+ MHI_EP_CHANNEL_CONFIG_DL(13, "MBIM"),
+ MHI_EP_CHANNEL_CONFIG_UL(14, "QMI"),
+ MHI_EP_CHANNEL_CONFIG_DL(15, "QMI"),
+ MHI_EP_CHANNEL_CONFIG_UL(16, "QMI"),
+ MHI_EP_CHANNEL_CONFIG_DL(17, "QMI"),
+ MHI_EP_CHANNEL_CONFIG_UL(18, "IP-CTRL-1"),
+ MHI_EP_CHANNEL_CONFIG_DL(19, "IP-CTRL-1"),
+ MHI_EP_CHANNEL_CONFIG_UL(20, "IPCR"),
+ MHI_EP_CHANNEL_CONFIG_DL(21, "IPCR"),
+ MHI_EP_CHANNEL_CONFIG_UL(32, "DUN"),
+ MHI_EP_CHANNEL_CONFIG_DL(33, "DUN"),
+ MHI_EP_CHANNEL_CONFIG_UL(36, "IP_SW0"),
+ MHI_EP_CHANNEL_CONFIG_DL(37, "IP_SW0"),
+};
+
+static const struct mhi_ep_cntrl_config mhi_v1_config = {
+ .max_channels = 128,
+ .num_channels = ARRAY_SIZE(mhi_v1_channels),
+ .ch_cfg = mhi_v1_channels,
+ .mhi_version = MHI_VERSION_1_0,
+};
+
+static struct pci_epf_header sdx55_header = {
+ .vendorid = PCI_VENDOR_ID_QCOM,
+ .deviceid = 0x0306,
+ .baseclass_code = PCI_CLASS_OTHERS,
+ .interrupt_pin = PCI_INTERRUPT_INTA,
+};
+
+static const struct pci_epf_mhi_ep_info sdx55_info = {
+ .config = &mhi_v1_config,
+ .epf_header = &sdx55_header,
+ .bar_num = BAR_0,
+ .epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
+ .msi_count = 4,
+};
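+
+/*
+ * SDX55 thus exposes the MHI MMIO region through a 32-bit BAR0 with up to
+ * four MSI vectors; pci_epf_mhi_worker() programs these values on CORE_INIT.
+ */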
+
+struct pci_epf_mhi {
+ const struct pci_epf_mhi_ep_info *info;
+ struct mhi_ep_cntrl mhi_cntrl;
+ struct work_struct work;
+ struct pci_epf *epf;
+ void __iomem *mmio;
+ resource_size_t mmio_phys;
+ enum pci_notify_event event;
+ u32 mmio_size;
+ int irq;
+ bool mhi_registered;
+ bool link_up;
+};
+
+static void __iomem *pci_epf_mhi_alloc_addr(struct mhi_ep_cntrl *mhi_cntrl,
+					    phys_addr_t *phys_addr, size_t size)
+{
+ struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl);
+ struct pci_epc *epc = epf_mhi->epf->epc;
+
+ return pci_epc_mem_alloc_addr(epc, phys_addr, size);
+}
+
+static void pci_epf_mhi_free_addr(struct mhi_ep_cntrl *mhi_cntrl,
+				  phys_addr_t phys_addr, void __iomem *virt_addr, size_t size)
+{
+ struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl);
+ struct pci_epc *epc = epf_mhi->epf->epc;
+
+ pci_epc_mem_free_addr(epc, phys_addr, virt_addr, size);
+}
+
+static int pci_epf_mhi_map_addr(struct mhi_ep_cntrl *mhi_cntrl,
+				phys_addr_t phys_addr, u64 pci_addr, size_t size)
+{
+ struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl);
+ struct pci_epf *epf = epf_mhi->epf;
+ struct pci_epc *epc = epf->epc;
+
+ return pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr, pci_addr, size);
+}
+
+static void pci_epf_mhi_unmap_addr(struct mhi_ep_cntrl *mhi_cntrl, phys_addr_t phys_addr)
+{
+ struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl);
+ struct pci_epf *epf = epf_mhi->epf;
+ struct pci_epc *epc = epf->epc;
+
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);
+}
+
+static void pci_epf_mhi_raise_irq(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl);
+ struct pci_epf *epf = epf_mhi->epf;
+ struct pci_epc *epc = epf->epc;
+
+ /* Using fixed MSI for now */
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, PCI_EPC_IRQ_MSI, 1);
+}
+
+static int pci_epf_mhi_notifier(struct notifier_block *nb, unsigned long val, void *data)
+{
+ struct pci_epf *epf = container_of(nb, struct pci_epf, nb);
+ struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+ struct device *dev = &epf->dev;
+
+ /*
+ * If the notification is other than CORE_INIT and if MHI EP is not
+ * yet registered, then error out.
+ */
+ if ((val != CORE_INIT) && !epf_mhi->mhi_registered) {
+ dev_err(dev, "MHI EP not yet registered\n");
+ return NOTIFY_BAD;
+ }
+
+ epf_mhi->event = val;
+ queue_work(pci_epf_mhi_wq, &epf_mhi->work);
+
+ return NOTIFY_OK;
+}
+
+static void pci_epf_mhi_worker(struct work_struct *work)
+{
+ struct pci_epf_mhi *epf_mhi = container_of(work, struct pci_epf_mhi, work);
+ const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
+ struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
+ struct pci_epf *epf = epf_mhi->epf;
+ struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
+ struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
+ int ret;
+
+ switch (epf_mhi->event) {
+ case CORE_INIT:
+ epf_bar->phys_addr = epf_mhi->mmio_phys;
+ epf_bar->size = epf_mhi->mmio_size;
+ epf_bar->barno = info->bar_num;
+ epf_bar->flags = info->epf_flags;
+ ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
+ if (ret) {
+ dev_err(dev, "Failed to set BAR: %d\n", ret);
+ return;
+ }
+
+ ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
+ order_base_2(info->msi_count));
+ if (ret) {
+ dev_err(dev, "Failed to set MSI configuration: %d\n", ret);
+ return;
+ }
+
+ ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, epf->header);
+ if (ret) {
+ dev_err(dev, "Failed to set Configuration header: %d\n", ret);
+ return;
+ }
+
+ mhi_cntrl->mmio = epf_mhi->mmio;
+ mhi_cntrl->irq = epf_mhi->irq;
+
+ /* Assign the struct dev of PCI EP as MHI controller device */
+ mhi_cntrl->cntrl_dev = epc->dev.parent;
+ mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq;
+ mhi_cntrl->alloc_addr = pci_epf_mhi_alloc_addr;
+ mhi_cntrl->free_addr = pci_epf_mhi_free_addr;
+ mhi_cntrl->map_addr = pci_epf_mhi_map_addr;
+ mhi_cntrl->unmap_addr = pci_epf_mhi_unmap_addr;
+
+ /* Register the MHI EP controller */
+ ret = mhi_ep_register_controller(mhi_cntrl, info->config);
+ if (ret) {
+ dev_err(dev, "Failed to register MHI EP controller\n");
+ return;
+ }
+
+ epf_mhi->mhi_registered = true;
+ break;
+ case LINK_UP:
+ epf_mhi->link_up = true;
+ break;
+ case LINK_DOWN:
+ /*
+ * Power down the MHI EP stack and unregister the controller
+ * if both link and MHI EP stack were up
+ */
+ if (mhi_cntrl->is_enabled && epf_mhi->link_up) {
+ mhi_ep_power_down(mhi_cntrl);
+ mhi_ep_unregister_controller(mhi_cntrl);
+ epf_mhi->mhi_registered = false;
+ }
+
+ epf_mhi->link_up = false;
+ break;
+ case BME:
+ /* Power up the MHI EP stack if link was up and stack was powered down */
+ if (!mhi_cntrl->is_enabled && epf_mhi->link_up) {
+ ret = mhi_ep_power_up(mhi_cntrl);
+ if (ret) {
+ dev_err(dev, "Failed to power up MHI EP\n");
+ return;
+ }
+ }
+
+ break;
+	default:
+		dev_err(dev, "Invalid MHI EP notifier event: %d\n", epf_mhi->event);
+ }
+}
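+
+/*
+ * Summary of the event flow handled above, as driven by the EPC notifier
+ * chain: CORE_INIT programs the BAR, MSI and configuration header and
+ * registers the MHI EP controller; LINK_UP and LINK_DOWN track the PCIe
+ * link state; BME finally powers up the MHI EP stack once the host has
+ * enabled bus mastering.
+ */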
+
+static int pci_epf_mhi_bind(struct pci_epf *epf)
+{
+	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+	struct pci_epc *epc = epf->epc;
+	struct platform_device *pdev;
+	struct device *dev = &epf->dev;
+	struct resource *res;
+	int ret;
+
+	/* Check for a valid EPC before dereferencing it */
+	if (WARN_ON_ONCE(!epc))
+		return -EINVAL;
+
+	pdev = to_platform_device(epc->dev.parent);
+
+	/* Get MMIO physical and virtual address from controller device */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmio");
+	if (!res) {
+		dev_err(dev, "Failed to get \"mmio\" resource\n");
+		return -ENODEV;
+	}
+
+	epf_mhi->mmio_phys = res->start;
+	epf_mhi->mmio_size = resource_size(res);
+
+	/* ioremap_wc() returns NULL on failure, not an ERR_PTR() */
+	epf_mhi->mmio = ioremap_wc(epf_mhi->mmio_phys, epf_mhi->mmio_size);
+	if (!epf_mhi->mmio)
+		return -ENOMEM;
+
+ ret = platform_get_irq_byname(pdev, "doorbell");
+ if (ret < 0) {
+ dev_err(dev, "Failed to get Doorbell IRQ\n");
+ iounmap(epf_mhi->mmio);
+ return ret;
+ }
+
+ epf_mhi->irq = ret;
+ epf->nb.notifier_call = pci_epf_mhi_notifier;
+ pci_epc_register_notifier(epc, &epf->nb);
+
+ return 0;
+}
+
+static void pci_epf_mhi_unbind(struct pci_epf *epf)
+{
+	struct pci_epc *epc = epf->epc;
+	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+	struct pci_epf_bar *epf_bar = &epf->bar[epf_mhi->info->bar_num];
+	struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
+
+ pci_epc_unregister_notifier(epc, &epf->nb);
+
+	/*
+	 * Forcefully power down the MHI EP stack. The only way to bring the
+	 * MHI EP stack back to a working state after a subsequent bind is a
+	 * fresh BME event from the host.
+	 */
+ if (epf_mhi->mhi_registered) {
+ if (mhi_cntrl->is_enabled && epf_mhi->link_up)
+ mhi_ep_power_down(mhi_cntrl);
+
+ mhi_ep_unregister_controller(mhi_cntrl);
+ epf_mhi->mhi_registered = false;
+ }
+
+ pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
+ iounmap(epf_mhi->mmio);
+}
+
+static int pci_epf_mhi_probe(struct pci_epf *epf)
+{
+	const struct pci_epf_mhi_ep_info *info =
+		(const struct pci_epf_mhi_ep_info *)epf->driver->id_table->driver_data;
+ struct pci_epf_mhi *epf_mhi;
+ struct device *dev = &epf->dev;
+
+ epf_mhi = devm_kzalloc(dev, sizeof(*epf_mhi), GFP_KERNEL);
+ if (!epf_mhi)
+ return -ENOMEM;
+
+ epf->header = info->epf_header;
+ epf_mhi->info = info;
+ epf_mhi->epf = epf;
+
+ INIT_WORK(&epf_mhi->work, pci_epf_mhi_worker);
+
+ epf_set_drvdata(epf, epf_mhi);
+
+ return 0;
+}
+
+static const struct pci_epf_device_id pci_epf_mhi_ids[] = {
+ {
+ .name = "sdx55", .driver_data = (kernel_ulong_t) &sdx55_info,
+ },
+ {},
+};
+
+static struct pci_epf_ops pci_epf_mhi_ops = {
+ .unbind = pci_epf_mhi_unbind,
+ .bind = pci_epf_mhi_bind,
+};
+
+static struct pci_epf_driver pci_epf_mhi_driver = {
+ .driver.name = "pci_epf_mhi",
+ .probe = pci_epf_mhi_probe,
+ .id_table = pci_epf_mhi_ids,
+ .ops = &pci_epf_mhi_ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init pci_epf_mhi_init(void)
+{
+ int ret;
+
+ pci_epf_mhi_wq = alloc_ordered_workqueue("pci_epf_mhi_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI);
+ if (!pci_epf_mhi_wq) {
+		pr_err("Failed to allocate the pci_epf_mhi workqueue\n");
+ return -ENOMEM;
+ }
+
+ ret = pci_epf_register_driver(&pci_epf_mhi_driver);
+ if (ret) {
+ destroy_workqueue(pci_epf_mhi_wq);
+ pr_err("Failed to register PCI EPF MHI driver: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(pci_epf_mhi_init);
+
+static void __exit pci_epf_mhi_exit(void)
+{
+	/* Unregister the driver first so that no new work can be queued */
+	pci_epf_unregister_driver(&pci_epf_mhi_driver);
+	destroy_workqueue(pci_epf_mhi_wq);
+}
+module_exit(pci_epf_mhi_exit);
+
+MODULE_DESCRIPTION("PCI EPF driver for MHI Endpoint devices");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index 38621558d397..19fa5802f5a8 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -698,6 +698,23 @@ void pci_epc_linkup(struct pci_epc *epc)
EXPORT_SYMBOL_GPL(pci_epc_linkup);
/**
+ * pci_epc_linkdown() - Notify the EPF device that the EPC device has dropped
+ * the connection with the Root Complex
+ * @epc: the EPC device which has dropped the link with the host
+ *
+ * Invoke to notify the EPF device that the EPC device has dropped the
+ * connection with the Root Complex.
+ */
+void pci_epc_linkdown(struct pci_epc *epc)
+{
+ if (!epc || IS_ERR(epc))
+ return;
+
+ atomic_notifier_call_chain(&epc->notifier, LINK_DOWN, NULL);
+}
+EXPORT_SYMBOL_GPL(pci_epc_linkdown);
+
+/**
* pci_epc_init_notify() - Notify the EPF device that EPC device's core
* initialization is completed.
* @epc: the EPC device whose core initialization is completed
@@ -715,6 +732,23 @@ void pci_epc_init_notify(struct pci_epc *epc)
EXPORT_SYMBOL_GPL(pci_epc_init_notify);
/**
+ * pci_epc_bme_notify() - Notify the EPF device that the EPC device has received
+ * the BME event from the Root Complex
+ * @epc: the EPC device that received the BME event
+ *
+ * Invoke to notify the EPF device that the EPC device has received the Bus
+ * Master Enable (BME) event from the Root Complex.
+ */
+void pci_epc_bme_notify(struct pci_epc *epc)
+{
+ if (!epc || IS_ERR(epc))
+ return;
+
+ atomic_notifier_call_chain(&epc->notifier, BME, NULL);
+}
+EXPORT_SYMBOL_GPL(pci_epc_bme_notify);
+
+/**
* pci_epc_destroy() - destroy the EPC device
* @epc: the EPC device that has to be destroyed
*
diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
new file mode 100644
index 000000000000..a7715f8066ed
--- /dev/null
+++ b/include/linux/mhi_ep.h
@@ -0,0 +1,289 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021, Linaro Ltd.
+ *
+ */
+#ifndef _MHI_EP_H_
+#define _MHI_EP_H_
+
+#include <linux/dma-direction.h>
+#include <linux/mhi.h>
+
+#define MHI_EP_DEFAULT_MTU 0x4000
+
+/**
+ * struct mhi_ep_channel_config - Channel configuration structure for controller
+ * @name: The name of this channel
+ * @num: The number assigned to this channel
+ * @num_elements: The number of elements that can be queued to this channel
+ * @dir: Direction that data may flow on this channel
+ */
+struct mhi_ep_channel_config {
+ char *name;
+ u32 num;
+ u32 num_elements;
+ enum dma_data_direction dir;
+};
+
+/**
+ * struct mhi_ep_cntrl_config - MHI Endpoint controller configuration
+ * @max_channels: Maximum number of channels supported
+ * @num_channels: Number of channels defined in @ch_cfg
+ * @ch_cfg: Array of defined channels
+ * @mhi_version: MHI spec version supported by the controller
+ */
+struct mhi_ep_cntrl_config {
+ u32 max_channels;
+ u32 num_channels;
+ const struct mhi_ep_channel_config *ch_cfg;
+ u32 mhi_version;
+};
+
+/**
+ * struct mhi_ep_db_info - MHI Endpoint doorbell info
+ * @mask: Mask of the doorbell interrupt
+ * @status: Status of the doorbell interrupt
+ */
+struct mhi_ep_db_info {
+ u32 mask;
+ u32 status;
+};
+
+/**
+ * struct mhi_ep_cntrl - MHI Endpoint controller structure
+ * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI
+ * Endpoint controller
+ * @mhi_dev: MHI Endpoint device instance for the controller
+ * @mmio: MMIO region containing the MHI registers
+ * @mhi_chan: Points to the channel configuration table
+ * @mhi_event: Points to the event ring configurations table
+ * @mhi_cmd: Points to the command ring configurations table
+ * @sm: MHI Endpoint state machine
+ * @ch_ctx_cache: Cache of host channel context data structure
+ * @ev_ctx_cache: Cache of host event context data structure
+ * @cmd_ctx_cache: Cache of host command context data structure
+ * @ch_ctx_host_pa: Physical address of host channel context data structure
+ * @ev_ctx_host_pa: Physical address of host event context data structure
+ * @cmd_ctx_host_pa: Physical address of host command context data structure
+ * @ch_ctx_cache_phys: Physical address of the host channel context cache
+ * @ev_ctx_cache_phys: Physical address of the host event context cache
+ * @cmd_ctx_cache_phys: Physical address of the host command context cache
+ * @ch_ctx_host_size: Size of the host channel context data structure
+ * @ev_ctx_host_size: Size of the host event context data structure
+ * @cmd_ctx_host_size: Size of the host command context data structure
+ * @state_wq: Dedicated workqueue for handling MHI state transitions
+ * @ring_wq: Dedicated workqueue for processing MHI rings
+ * @state_work: State transition worker
+ * @ring_work: Ring worker
+ * @reset_work: Worker for MHI Endpoint reset
+ * @ch_db_list: List of queued channel doorbells
+ * @st_transition_list: List of state transitions
+ * @list_lock: Lock for protecting state transition and channel doorbell lists
+ * @state_lock: Lock for protecting state transitions
+ * @event_lock: Lock for protecting event rings
+ * @chdb: Array of channel doorbell interrupt info
+ * @raise_irq: CB function for raising IRQ to the host
+ * @alloc_addr: CB function for allocating memory in endpoint for storing host context
+ * @map_addr: CB function for mapping host context to endpoint
+ * @free_addr: CB function to free the allocated memory in endpoint for storing host context
+ * @unmap_addr: CB function to unmap the host context in endpoint
+ * @mhi_state: MHI Endpoint state
+ * @max_chan: Maximum channels supported by the endpoint controller
+ * @event_rings: Number of event rings supported by the endpoint controller
+ * @hw_event_rings: Number of hardware event rings supported by the endpoint controller
+ * @chdb_offset: Channel doorbell offset set by the host
+ * @erdb_offset: Event ring doorbell offset set by the host
+ * @index: MHI Endpoint controller index
+ * @irq: IRQ used by the endpoint controller
+ * @is_enabled: Flag to indicate whether the endpoint controller is enabled
+ */
+struct mhi_ep_cntrl {
+ struct device *cntrl_dev;
+ struct mhi_ep_device *mhi_dev;
+ void __iomem *mmio;
+
+ struct mhi_ep_chan *mhi_chan;
+ struct mhi_ep_event *mhi_event;
+ struct mhi_ep_cmd *mhi_cmd;
+ struct mhi_ep_sm *sm;
+
+ struct mhi_chan_ctxt *ch_ctx_cache;
+ struct mhi_event_ctxt *ev_ctx_cache;
+ struct mhi_cmd_ctxt *cmd_ctx_cache;
+ u64 ch_ctx_host_pa;
+ u64 ev_ctx_host_pa;
+ u64 cmd_ctx_host_pa;
+ phys_addr_t ch_ctx_cache_phys;
+ phys_addr_t ev_ctx_cache_phys;
+ phys_addr_t cmd_ctx_cache_phys;
+ size_t ch_ctx_host_size;
+ size_t ev_ctx_host_size;
+ size_t cmd_ctx_host_size;
+
+ struct workqueue_struct *state_wq;
+ struct workqueue_struct *ring_wq;
+ struct work_struct state_work;
+ struct work_struct ring_work;
+ struct work_struct reset_work;
+
+ struct list_head ch_db_list;
+ struct list_head st_transition_list;
+ spinlock_t list_lock;
+ spinlock_t state_lock;
+ struct mutex event_lock;
+ struct mhi_ep_db_info chdb[4];
+
+ void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl);
+ void __iomem *(*alloc_addr)(struct mhi_ep_cntrl *mhi_cntrl,
+ phys_addr_t *phys_addr, size_t size);
+ int (*map_addr)(struct mhi_ep_cntrl *mhi_cntrl,
+ phys_addr_t phys_addr, u64 pci_addr, size_t size);
+ void (*free_addr)(struct mhi_ep_cntrl *mhi_cntrl,
+ phys_addr_t phys_addr, void __iomem *virt_addr, size_t size);
+ void (*unmap_addr)(struct mhi_ep_cntrl *mhi_cntrl,
+ phys_addr_t phys_addr);
+
+ enum mhi_state mhi_state;
+
+ u32 max_chan;
+ u32 event_rings;
+ u32 hw_event_rings;
+ u32 chdb_offset;
+ u32 erdb_offset;
+ int index;
+ int irq;
+ bool is_enabled;
+};
+
+/**
+ * struct mhi_ep_device - Structure representing an MHI Endpoint device that binds
+ * to channels or is associated with controllers
+ * @dev: Driver model device node for the MHI Endpoint device
+ * @mhi_cntrl: Controller the device belongs to
+ * @id: Pointer to MHI Endpoint device ID struct
+ * @name: Name of the associated MHI Endpoint device
+ * @ul_chan: UL channel for the device
+ * @dl_chan: DL channel for the device
+ * @dev_type: MHI device type
+ * @ul_chan_id: Channel id for UL transfer
+ * @dl_chan_id: Channel id for DL transfer
+ */
+struct mhi_ep_device {
+ struct device dev;
+ struct mhi_ep_cntrl *mhi_cntrl;
+ const struct mhi_device_id *id;
+ const char *name;
+ struct mhi_ep_chan *ul_chan;
+ struct mhi_ep_chan *dl_chan;
+ enum mhi_device_type dev_type;
+ int ul_chan_id;
+ int dl_chan_id;
+};
+
+/**
+ * struct mhi_ep_driver - Structure representing an MHI Endpoint client driver
+ * @id_table: Pointer to MHI Endpoint device ID table
+ * @driver: Device driver model driver
+ * @probe: CB function for client driver probe function
+ * @remove: CB function for client driver remove function
+ * @ul_xfer_cb: CB function for UL data transfer
+ * @dl_xfer_cb: CB function for DL data transfer
+ */
+struct mhi_ep_driver {
+ const struct mhi_device_id *id_table;
+ struct device_driver driver;
+ int (*probe)(struct mhi_ep_device *mhi_ep,
+ const struct mhi_device_id *id);
+ void (*remove)(struct mhi_ep_device *mhi_ep);
+ void (*ul_xfer_cb)(struct mhi_ep_device *mhi_dev,
+ struct mhi_result *result);
+ void (*dl_xfer_cb)(struct mhi_ep_device *mhi_dev,
+ struct mhi_result *result);
+};
+
+#define to_mhi_ep_device(dev) container_of(dev, struct mhi_ep_device, dev)
+#define to_mhi_ep_driver(drv) container_of(drv, struct mhi_ep_driver, driver)
+
+/*
+ * module_mhi_ep_driver() - Helper macro for drivers that don't do
+ * anything special other than using default mhi_ep_driver_register() and
+ * mhi_ep_driver_unregister(). This eliminates a lot of boilerplate.
+ * Each module may only use this macro once.
+ */
+#define module_mhi_ep_driver(mhi_drv) \
+ module_driver(mhi_drv, mhi_ep_driver_register, \
+ mhi_ep_driver_unregister)
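+
+/*
+ * A minimal client driver skeleton using this macro (hypothetical "foo"
+ * names, shown only for illustration):
+ *
+ *	static struct mhi_ep_driver foo_driver = {
+ *		.probe = foo_probe,
+ *		.remove = foo_remove,
+ *		.ul_xfer_cb = foo_ul_cb,
+ *		.dl_xfer_cb = foo_dl_cb,
+ *		.id_table = foo_id_table,
+ *		.driver = { .name = "foo" },
+ *	};
+ *	module_mhi_ep_driver(foo_driver);
+ */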
+
+/*
+ * Macro to avoid include chaining to get THIS_MODULE
+ */
+#define mhi_ep_driver_register(mhi_drv) \
+ __mhi_ep_driver_register(mhi_drv, THIS_MODULE)
+
+/**
+ * __mhi_ep_driver_register - Register a driver with MHI Endpoint bus
+ * @mhi_drv: Driver to be associated with the device
+ * @owner: The module owner
+ *
+ * Return: 0 if driver registration succeeds, a negative error code otherwise.
+ */
+int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner);
+
+/**
+ * mhi_ep_driver_unregister - Unregister a driver from MHI Endpoint bus
+ * @mhi_drv: Driver associated with the device
+ */
+void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv);
+
+/**
+ * mhi_ep_register_controller - Register MHI Endpoint controller
+ * @mhi_cntrl: MHI Endpoint controller to register
+ * @config: Configuration to use for the controller
+ *
+ * Return: 0 if controller registration succeeds, a negative error code otherwise.
+ */
+int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
+ const struct mhi_ep_cntrl_config *config);
+
+/**
+ * mhi_ep_unregister_controller - Unregister MHI Endpoint controller
+ * @mhi_cntrl: MHI Endpoint controller to unregister
+ */
+void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_power_up - Power up the MHI endpoint stack
+ * @mhi_cntrl: MHI Endpoint controller
+ *
+ * Return: 0 if power up succeeds, a negative error code otherwise.
+ */
+int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_power_down - Power down the MHI endpoint stack
+ * @mhi_cntrl: MHI Endpoint controller
+ */
+void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_queue_is_empty - Determine whether the transfer queue is empty
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ *
+ * Return: true if the queue is empty, false otherwise.
+ */
+bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir);
+
+/**
+ * mhi_ep_queue_skb - Send an SKB to the host over MHI Endpoint
+ * @mhi_dev: Device associated with the channels
+ * @dir: DMA direction for the channel
+ * @skb: SKB to be sent
+ * @len: Length of the SKB data
+ * @mflags: MHI Endpoint transfer flags used for the transfer
+ *
+ * Return: 0 if the SKB has been sent successfully, a negative error code otherwise.
+ */
+int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir,
+ struct sk_buff *skb, size_t len, enum mhi_flags mflags);
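+
+/*
+ * Typical device-to-host usage from a client driver, with error handling
+ * omitted (a sketch, not a complete example):
+ *
+ *	if (!mhi_ep_queue_is_empty(mhi_dev, DMA_FROM_DEVICE))
+ *		ret = mhi_ep_queue_skb(mhi_dev, DMA_FROM_DEVICE, skb,
+ *				       skb->len, MHI_EOT);
+ */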
+#endif
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 4bb71979a8fd..0cff19bd72bf 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -835,6 +835,8 @@ struct wmi_device_id {
#define MHI_DEVICE_MODALIAS_FMT "mhi:%s"
#define MHI_NAME_SIZE 32
+#define MHI_EP_DEVICE_MODALIAS_FMT "mhi_ep:%s"
+
/**
* struct mhi_device_id - MHI device identification
* @chan: MHI channel name
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index a48778e1a4ee..8454610df4c3 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -198,6 +198,12 @@ pci_epc_register_notifier(struct pci_epc *epc, struct notifier_block *nb)
return atomic_notifier_chain_register(&epc->notifier, nb);
}
+static inline int
+pci_epc_unregister_notifier(struct pci_epc *epc, struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&epc->notifier, nb);
+}
+
struct pci_epc *
__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
struct module *owner);
@@ -209,7 +215,9 @@ void pci_epc_destroy(struct pci_epc *epc);
int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
enum pci_epc_interface_type type);
void pci_epc_linkup(struct pci_epc *epc);
+void pci_epc_linkdown(struct pci_epc *epc);
void pci_epc_init_notify(struct pci_epc *epc);
+void pci_epc_bme_notify(struct pci_epc *epc);
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
enum pci_epc_interface_type type);
int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index 009a07147c61..d83f11d44530 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -20,6 +20,8 @@ enum pci_epc_interface_type;
enum pci_notify_event {
CORE_INIT,
LINK_UP,
+ LINK_DOWN,
+ BME,
};
enum pci_barno {
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 5258247d78ac..d9d6a31446ea 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -1391,6 +1391,15 @@ static int do_mhi_entry(const char *filename, void *symval, char *alias)
return 1;
}
+/* Looks like: mhi_ep:S */
+static int do_mhi_ep_entry(const char *filename, void *symval, char *alias)
+{
+ DEF_FIELD_ADDR(symval, mhi_device_id, chan);
+ sprintf(alias, MHI_EP_DEVICE_MODALIAS_FMT, *chan);
+
+ return 1;
+}
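+
+/*
+ * For example, a driver entry with { .chan = "IP_SW0" } results in the alias
+ * "mhi_ep:IP_SW0" being emitted into the module's metadata.
+ */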
+
/* Looks like: ishtp:{guid} */
static int do_ishtp_entry(const char *filename, void *symval, char *alias)
{
@@ -1519,6 +1528,7 @@ static const struct devtable devtable[] = {
{"tee", SIZE_tee_client_device_id, do_tee_entry},
{"wmi", SIZE_wmi_device_id, do_wmi_entry},
{"mhi", SIZE_mhi_device_id, do_mhi_entry},
+ {"mhi_ep", SIZE_mhi_device_id, do_mhi_ep_entry},
{"auxiliary", SIZE_auxiliary_device_id, do_auxiliary_entry},
{"ssam", SIZE_ssam_device_id, do_ssam_entry},
{"dfl", SIZE_dfl_device_id, do_dfl_entry},