author    Linaro CI <ci_notify@linaro.org>  2021-09-15 04:01:11 +0000
committer Linaro CI <ci_notify@linaro.org>  2021-09-15 04:01:11 +0000
commit    b743cba6a34b6e1c0d40ee52bc865fbdce50f115 (patch)
tree      108ba1a1112d2d3ba0042ca111b58631c32d3050
parent    7989e3ab8770e1c43f15d4567100b665814c4da8 (diff)
parent    66c31e4738eeb7d3f467a7abe8c39bc6db335e7c (diff)
Merge remote-tracking branch 'sdx55-drivers/tracking-qcomlt-sdx55-drivers' into integration-linux-qcomlt
# Conflicts:
#	net/qrtr/qrtr.c
-rw-r--r--  Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml |  158
-rw-r--r--  MAINTAINERS                                             |   10
-rw-r--r--  drivers/bus/mhi/Kconfig                                 |   10
-rw-r--r--  drivers/bus/mhi/Makefile                                |    3
-rw-r--r--  drivers/bus/mhi/ep/Makefile                             |    3
-rw-r--r--  drivers/bus/mhi/ep/internal.h                           |  988
-rw-r--r--  drivers/bus/mhi/ep/main.c                               | 1247
-rw-r--r--  drivers/bus/mhi/ep/mmio.c                               |  386
-rw-r--r--  drivers/bus/mhi/ep/ring.c                               |  366
-rw-r--r--  drivers/bus/mhi/ep/sm.c                                 |  436
-rw-r--r--  drivers/pci/controller/dwc/Kconfig                      |   10
-rw-r--r--  drivers/pci/controller/dwc/Makefile                     |    1
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom-ep.c               |  711
-rw-r--r--  drivers/pci/endpoint/functions/Kconfig                  |    9
-rw-r--r--  drivers/pci/endpoint/functions/Makefile                 |    1
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-mhi.c            |  338
-rw-r--r--  drivers/pci/endpoint/pci-epc-core.c                     |   89
-rw-r--r--  include/linux/mhi_ep.h                                  |  147
-rw-r--r--  include/linux/pci-epc.h                                 |    5
-rw-r--r--  include/linux/pci-epf.h                                 |    5
-rw-r--r--  net/qrtr/Kconfig                                        |    7
-rw-r--r--  net/qrtr/Makefile                                       |    2
-rw-r--r--  net/qrtr/mhi-ep.c                                       |  145
-rw-r--r--  net/qrtr/ns.c                                           |   46
-rw-r--r--  net/qrtr/qrtr.c                                         |  233
25 files changed, 5306 insertions, 50 deletions
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml
new file mode 100644
index 000000000000..9fe6d1cef767
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml
@@ -0,0 +1,158 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/qcom,pcie-ep.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm PCIe Endpoint Controller binding
+
+maintainers:
+ - Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+allOf:
+ - $ref: "pci-ep.yaml#"
+
+properties:
+ compatible:
+ const: qcom,sdx55-pcie-ep
+
+ reg:
+ items:
+ - description: Qualcomm specific PARF configuration registers
+ - description: Designware PCIe registers
+ - description: External local bus interface registers
+ - description: Address Translation Unit (ATU) registers
+ - description: Memory region used to map remote RC address space
+ - description: BAR memory region
+
+ reg-names:
+ items:
+ - const: parf
+ - const: dbi
+ - const: elbi
+ - const: atu
+ - const: addr_space
+ - const: mmio
+
+ clocks:
+ items:
+ - description: PCIe Auxiliary clock
+ - description: PCIe CFG AHB clock
+ - description: PCIe Master AXI clock
+ - description: PCIe Slave AXI clock
+ - description: PCIe Slave Q2A AXI clock
+ - description: PCIe Sleep clock
+ - description: PCIe Reference clock
+
+ clock-names:
+ items:
+ - const: aux
+ - const: cfg
+ - const: bus_master
+ - const: bus_slave
+ - const: slave_q2a
+ - const: sleep
+ - const: ref
+
+ qcom,perst-regs:
+ description: Reference to a syscon representing TCSR followed by the two
+ offsets within syscon for Perst enable and Perst separation
+ enable registers
+ $ref: "/schemas/types.yaml#/definitions/phandle-array"
+ items:
+ minItems: 3
+ maxItems: 3
+
+ interrupts:
+ items:
+ - description: PCIe Global interrupt
+ - description: PCIe Doorbell interrupt
+
+ interrupt-names:
+ items:
+ - const: global
+ - const: doorbell
+
+ reset-gpios:
+ description: GPIO that is being used as PERST# input signal
+ maxItems: 1
+
+ wake-gpios:
+ description: GPIO that is being used as WAKE# output signal
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ const: core
+
+ power-domains:
+ maxItems: 1
+
+ phys:
+ maxItems: 1
+
+ phy-names:
+ const: pciephy
+
+ num-lanes:
+ default: 2
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - clocks
+ - clock-names
+ - qcom,perst-regs
+ - interrupts
+ - interrupt-names
+ - reset-gpios
+ - resets
+ - reset-names
+ - power-domains
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,gcc-sdx55.h>
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ pcie_ep: pcie-ep@40000000 {
+ compatible = "qcom,sdx55-pcie-ep";
+ reg = <0x01c00000 0x3000>,
+ <0x40000000 0xf1d>,
+ <0x40000f20 0xc8>,
+ <0x40001000 0x1000>,
+ <0x40002000 0x1000>,
+ <0x01c03000 0x3000>;
+ reg-names = "parf", "dbi", "elbi", "atu", "addr_space",
+ "mmio";
+
+ clocks = <&gcc GCC_PCIE_AUX_CLK>,
+ <&gcc GCC_PCIE_CFG_AHB_CLK>,
+ <&gcc GCC_PCIE_MSTR_AXI_CLK>,
+ <&gcc GCC_PCIE_SLV_AXI_CLK>,
+ <&gcc GCC_PCIE_SLV_Q2A_AXI_CLK>,
+ <&gcc GCC_PCIE_SLEEP_CLK>,
+ <&gcc GCC_PCIE_0_CLKREF_CLK>;
+ clock-names = "aux", "cfg", "bus_master", "bus_slave",
+ "slave_q2a", "sleep", "ref";
+
+ qcom,perst-regs = <&tcsr 0xb258 0xb270>;
+
+ interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "global", "doorbell";
+ reset-gpios = <&tlmm 57 GPIO_ACTIVE_LOW>;
+ wake-gpios = <&tlmm 53 GPIO_ACTIVE_LOW>;
+ resets = <&gcc GCC_PCIE_BCR>;
+ reset-names = "core";
+ power-domains = <&gcc PCIE_GDSC>;
+ phys = <&pcie0_lane>;
+ phy-names = "pciephy";
+ max-link-speed = <3>;
+ num-lanes = <2>;
+ };
diff --git a/MAINTAINERS b/MAINTAINERS
index eeb4c70b3d5b..2c9165e4e816 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -14595,7 +14595,15 @@ M: Stanimir Varbanov <svarbanov@mm-sol.com>
L: linux-pci@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Maintained
-F: drivers/pci/controller/dwc/*qcom*
+F: drivers/pci/controller/dwc/pcie-qcom.c
+
+PCIE ENDPOINT DRIVER FOR QUALCOMM
+M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+L: linux-pci@vger.kernel.org
+L: linux-arm-msm@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml
+F: drivers/pci/controller/dwc/pcie-qcom-ep.c
PCIE DRIVER FOR ROCKCHIP
M: Shawn Lin <shawn.lin@rock-chips.com>
diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig
index da5cd0c9fc62..2d581124ea38 100644
--- a/drivers/bus/mhi/Kconfig
+++ b/drivers/bus/mhi/Kconfig
@@ -6,7 +6,15 @@
#
config MHI_BUS
- tristate "Modem Host Interface (MHI) bus"
+ tristate "Modem Host Interface (MHI) bus host implementation"
+ help
+ Bus driver for MHI protocol. Modem Host Interface (MHI) is a
+ communication protocol used by the host processors to control
+ and communicate with modem devices over a high speed peripheral
+ bus or shared memory.
+
+config MHI_BUS_EP
+ tristate "Modem Host Interface (MHI) bus Endpoint implementation"
help
Bus driver for MHI protocol. Modem Host Interface (MHI) is a
communication protocol used by the host processors to control
diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile
index 0a2d778d6fb4..dc242a71ac67 100644
--- a/drivers/bus/mhi/Makefile
+++ b/drivers/bus/mhi/Makefile
@@ -1,6 +1,9 @@
# core layer
obj-y += core/
+# endpoint layer
+obj-y += ep/
+
obj-$(CONFIG_MHI_BUS_PCI_GENERIC) += mhi_pci_generic.o
mhi_pci_generic-y += pci_generic.o
diff --git a/drivers/bus/mhi/ep/Makefile b/drivers/bus/mhi/ep/Makefile
new file mode 100644
index 000000000000..41f660b4af7d
--- /dev/null
+++ b/drivers/bus/mhi/ep/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_MHI_BUS_EP) += mhi_ep.o
+
+mhi_ep-y := main.o mmio.o sm.o ring.o
diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h
new file mode 100644
index 000000000000..d8537115935d
--- /dev/null
+++ b/drivers/bus/mhi/ep/internal.h
@@ -0,0 +1,988 @@
+#ifndef _MHI_EP_INTERNAL_
+#define _MHI_EP_INTERNAL_
+
+#include <linux/bitfield.h>
+#include <linux/mhi.h>
+#include <linux/types.h>
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+extern struct bus_type mhi_ep_bus_type;
+
+/* MHI register definition */
+#define MHI_CTRL_INT_STATUS_A7 0x4
+#define MHI_CHDB_INT_STATUS_A7_n(n) (0x28 + 0x4 * (n))
+#define MHI_ERDB_INT_STATUS_A7_n(n) (0x38 + 0x4 * (n))
+
+#define MHI_CTRL_INT_CLEAR_A7 0x4c
+#define MHI_CTRL_INT_MMIO_WR_CLEAR BIT(2)
+#define MHI_CTRL_INT_CRDB_CLEAR BIT(1)
+#define MHI_CTRL_INT_CRDB_MHICTRL_CLEAR BIT(0)
+
+#define MHI_CHDB_INT_CLEAR_A7_n(n) (0x70 + 0x4 * (n))
+#define MHI_CHDB_INT_CLEAR_A7_n_CLEAR_ALL GENMASK(31, 0)
+#define MHI_ERDB_INT_CLEAR_A7_n(n) (0x80 + 0x4 * (n))
+#define MHI_ERDB_INT_CLEAR_A7_n_CLEAR_ALL GENMASK(31, 0)
+
+#define MHI_CTRL_INT_MASK_A7 0x94
+#define MHI_CTRL_INT_MASK_A7_MASK_MASK GENMASK(1, 0)
+#define MHI_CTRL_MHICTRL_MASK BIT(0)
+#define MHI_CTRL_MHICTRL_SHFT 0
+#define MHI_CTRL_CRDB_MASK BIT(1)
+#define MHI_CTRL_CRDB_SHFT 1
+
+#define MHI_CHDB_INT_MASK_A7_n(n) (0xb8 + 0x4 * (n))
+#define MHI_CHDB_INT_MASK_A7_n_EN_ALL GENMASK(31, 0)
+#define MHI_ERDB_INT_MASK_A7_n(n) (0xc8 + 0x4 * (n))
+#define MHI_ERDB_INT_MASK_A7_n_EN_ALL GENMASK(31, 0)
+
+#define MHIREGLEN 0x100
+#define MHIVER 0x108
+
+#define MHICFG 0x110
+#define MHICFG_NHWER_MASK GENMASK(31, 24)
+#define MHICFG_NER_MASK GENMASK(23, 16)
+#define MHICFG_RESERVED_BITS15_8_MASK GENMASK(15, 8)
+#define MHICFG_NCH_MASK GENMASK(7, 0)
+
+#define CHDBOFF 0x118
+#define ERDBOFF 0x120
+#define BHIOFF 0x128
+#define DEBUGOFF 0x130
+
+#define MHICTRL 0x138
+#define MHICTRL_MHISTATE_MASK GENMASK(15, 8)
+#define MHICTRL_RESET_MASK BIT(1)
+#define MHICTRL_RESET_SHIFT 1
+
+#define MHISTATUS 0x148
+#define MHISTATUS_MHISTATE_MASK GENMASK(15, 8)
+#define MHISTATUS_MHISTATE_SHIFT 8
+#define MHISTATUS_SYSERR_MASK BIT(2)
+#define MHISTATUS_SYSERR_SHIFT 2
+#define MHISTATUS_READY_MASK BIT(0)
+#define MHISTATUS_READY_SHIFT 0
+
+#define CCABAP_LOWER 0x158
+#define CCABAP_HIGHER 0x15C
+#define ECABAP_LOWER 0x160
+#define ECABAP_HIGHER 0x164
+#define CRCBAP_LOWER 0x168
+#define CRCBAP_HIGHER 0x16C
+#define CRDB_LOWER 0x170
+#define CRDB_HIGHER 0x174
+#define MHICTRLBASE_LOWER 0x180
+#define MHICTRLBASE_HIGHER 0x184
+#define MHICTRLLIMIT_LOWER 0x188
+#define MHICTRLLIMIT_HIGHER 0x18C
+#define MHIDATABASE_LOWER 0x198
+#define MHIDATABASE_HIGHER 0x19C
+#define MHIDATALIMIT_LOWER 0x1A0
+#define MHIDATALIMIT_HIGHER 0x1A4
+#define CHDB_LOWER_n(n) (0x400 + 0x8 * (n))
+#define CHDB_HIGHER_n(n) (0x404 + 0x8 * (n))
+#define ERDB_LOWER_n(n) (0x800 + 0x8 * (n))
+#define ERDB_HIGHER_n(n) (0x804 + 0x8 * (n))
+#define BHI_INTVEC 0x220
+#define BHI_EXECENV 0x228
+#define BHI_IMGTXDB 0x218
+
+#define NR_OF_CMD_RINGS 1
+#define NUM_EVENT_RINGS 128
+#define NUM_HW_EVENT_RINGS 2
+#define NUM_CHANNELS 128
+#define HW_CHANNEL_BASE 100
+#define NUM_HW_CHANNELS 15
+#define HW_CHANNEL_END 110
+#define MHI_ENV_VALUE 2
+#define MHI_MASK_ROWS_CH_EV_DB 4
+#define TRB_MAX_DATA_SIZE 8192
+#define MHI_CTRL_STATE 100
+
+#define MHI_NET_DEFAULT_MTU 8192
+
+struct mhi_ep_chan;
+
+enum cb_reason {
+ MHI_EP_TRE_AVAILABLE = 0,
+ MHI_EP_CTRL_UPDATE,
+};
+
+enum mhi_ep_ctrl_info {
+ MHI_EP_STATE_CONFIGURED,
+ MHI_EP_STATE_CONNECTED,
+ MHI_EP_STATE_DISCONNECTED,
+ MHI_EP_STATE_INVAL,
+};
+
+#if 0
+/* Channel state */
+enum mhi_ep_ch_state {
+ MHI_EP_CH_STATE_DISABLED,
+ MHI_EP_CH_STATE_ENABLED,
+ MHI_EP_CH_STATE_RUNNING,
+ MHI_EP_CH_STATE_SUSPENDED,
+ MHI_EP_CH_STATE_STOP,
+ MHI_EP_CH_STATE_ERROR,
+};
+
+#define CHAN_CTX_CHSTATE_MASK GENMASK(7, 0)
+#define CHAN_CTX_CHSTATE_SHIFT 0
+#define CHAN_CTX_BRSTMODE_MASK GENMASK(9, 8)
+#define CHAN_CTX_BRSTMODE_SHIFT 8
+#define CHAN_CTX_POLLCFG_MASK GENMASK(15, 10)
+#define CHAN_CTX_POLLCFG_SHIFT 10
+#define CHAN_CTX_RESERVED_MASK GENMASK(31, 16)
+struct mhi_ep_ch_ctx {
+ __u32 chcfg;
+ __u32 chtype;
+ __u32 erindex;
+
+ __u64 rbase __packed __aligned(4);
+ __u64 rlen __packed __aligned(4);
+ __u64 rp __packed __aligned(4);
+ __u64 wp __packed __aligned(4);
+};
+#endif
+/* Channel context state */
+enum mhi_ep_ch_ctx_state {
+ MHI_EP_CH_STATE_DISABLED,
+ MHI_EP_CH_STATE_ENABLED,
+ MHI_EP_CH_STATE_RUNNING,
+ MHI_EP_CH_STATE_SUSPENDED,
+ MHI_EP_CH_STATE_STOP,
+ MHI_EP_CH_STATE_ERROR,
+ MHI_EP_CH_STATE_RESERVED,
+ MHI_EP_CH_STATE_32BIT = 0x7FFFFFFF
+};
+
+/* Channel type */
+enum mhi_ep_ch_ctx_type {
+ MHI_EP_CH_TYPE_NONE,
+ MHI_EP_CH_TYPE_OUTBOUND_CHANNEL,
+ MHI_EP_CH_TYPE_INBOUND_CHANNEL,
+ MHI_EP_CH_RESERVED
+};
+
+struct mhi_ep_ch_ctx {
+ enum mhi_ep_ch_ctx_state ch_state;
+ enum mhi_ep_ch_ctx_type ch_type;
+ uint32_t err_indx;
+ uint64_t rbase;
+ uint64_t rlen;
+ uint64_t rp;
+ uint64_t wp;
+} __packed;
+
+/* Event context interrupt moderation */
+enum mhi_ep_evt_ctx_int_mod_timer {
+ MHI_EP_EVT_INT_MODERATION_DISABLED
+};
+
+/* Event ring type */
+enum mhi_ep_evt_ctx_event_ring_type {
+ MHI_EP_EVT_TYPE_DEFAULT,
+ MHI_EP_EVT_TYPE_VALID,
+ MHI_EP_EVT_RESERVED
+};
+
+#if 0
+/* Event ring context type */
+#define EV_CTX_RESERVED_MASK GENMASK(7, 0)
+#define EV_CTX_INTMODC_MASK GENMASK(15, 8)
+#define EV_CTX_INTMODC_SHIFT 8
+#define EV_CTX_INTMODT_MASK GENMASK(31, 16)
+#define EV_CTX_INTMODT_SHIFT 16
+struct mhi_ep_ev_ctx {
+ __u32 intmod;
+ __u32 ertype;
+ __u32 msivec;
+
+ __u64 rbase __packed __aligned(4);
+ __u64 rlen __packed __aligned(4);
+ __u64 rp __packed __aligned(4);
+ __u64 wp __packed __aligned(4);
+};
+
+/* Command context */
+struct mhi_ep_cmd_ctx {
+ __u32 reserved0;
+ __u32 reserved1;
+ __u32 reserved2;
+
+ __u64 rbase __packed __aligned(4);
+ __u64 rlen __packed __aligned(4);
+ __u64 rp __packed __aligned(4);
+ __u64 wp __packed __aligned(4);
+};
+
+/* generic context */
+struct mhi_ep_gen_ctx {
+ __u32 reserved0;
+ __u32 reserved1;
+ __u32 reserved2;
+
+ __u64 rbase __packed __aligned(4);
+ __u64 rlen __packed __aligned(4);
+ __u64 rp __packed __aligned(4);
+ __u64 wp __packed __aligned(4);
+};
+#endif
+
+/* Event ring context type */
+struct mhi_ep_ev_ctx {
+ uint32_t res1:16;
+ enum mhi_ep_evt_ctx_int_mod_timer intmodt:16;
+ enum mhi_ep_evt_ctx_event_ring_type ertype;
+ uint32_t msivec;
+ uint64_t rbase;
+ uint64_t rlen;
+ uint64_t rp;
+ uint64_t wp;
+} __packed;
+
+/* Command context */
+struct mhi_ep_cmd_ctx {
+ uint32_t res1;
+ uint32_t res2;
+ uint32_t res3;
+ uint64_t rbase;
+ uint64_t rlen;
+ uint64_t rp;
+ uint64_t wp;
+} __packed;
+
+/* generic context */
+struct mhi_ep_gen_ctx {
+ uint32_t res1;
+ uint32_t res2;
+ uint32_t res3;
+ uint64_t rbase;
+ uint64_t rlen;
+ uint64_t rp;
+ uint64_t wp;
+} __packed;
+
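+/* Ring element type IDs used in transfer, command and event ring elements */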
+enum mhi_ep_ring_element_type_id {
+ MHI_EP_RING_EL_INVALID = 0,
+ MHI_EP_RING_EL_NOOP = 1,
+ MHI_EP_RING_EL_TRANSFER = 2,
+ MHI_EP_RING_EL_RESET = 16,
+ MHI_EP_RING_EL_STOP = 17,
+ MHI_EP_RING_EL_START = 18,
+ MHI_EP_RING_EL_MHI_STATE_CHG = 32,
+ MHI_EP_RING_EL_CMD_COMPLETION_EVT = 33,
+ MHI_EP_RING_EL_TRANSFER_COMPLETION_EVENT = 34,
+ MHI_EP_RING_EL_EE_STATE_CHANGE_NOTIFY = 64,
+ MHI_EP_RING_EL_UNDEF
+};
+
+enum mhi_ep_ring_state {
+ RING_STATE_UINT = 0,
+ RING_STATE_IDLE,
+ RING_STATE_PENDING,
+};
+
+enum mhi_ep_ring_type {
+ RING_TYPE_CMD = 0,
+ RING_TYPE_ER,
+ RING_TYPE_CH,
+ RING_TYPE_INVALID,
+};
+
+/* Transfer ring element */
+struct mhi_ep_transfer_ring_element {
+ u64 data_buf_ptr;
+ u32 len:16;
+ u32 res1:16;
+ u32 chain:1;
+ u32 res2:7;
+ u32 ieob:1;
+ u32 ieot:1;
+ u32 bei:1;
+ u32 res3:5;
+ enum mhi_ep_ring_element_type_id type:8;
+ u32 res4:8;
+} __packed;
+
+/* Command ring element */
+/* Command ring No op command */
+struct mhi_ep_cmd_ring_op {
+ u64 res1;
+ u32 res2;
+ u32 res3:16;
+ enum mhi_ep_ring_element_type_id type:8;
+ u32 chid:8;
+} __packed;
+
+/* Command ring reset channel command */
+struct mhi_ep_cmd_ring_reset_channel_cmd {
+ u64 res1;
+ u32 res2;
+ u32 res3:16;
+ enum mhi_ep_ring_element_type_id type:8;
+ u32 chid:8;
+} __packed;
+
+/* Command ring stop channel command */
+struct mhi_ep_cmd_ring_stop_channel_cmd {
+ u64 res1;
+ u32 res2;
+ u32 res3:16;
+ enum mhi_ep_ring_element_type_id type:8;
+ u32 chid:8;
+} __packed;
+
+/* Command ring start channel command */
+struct mhi_ep_cmd_ring_start_channel_cmd {
+ u64 res1;
+ u32 seqnum;
+ u32 reliable:1;
+ u32 res2:15;
+ enum mhi_ep_ring_element_type_id type:8;
+ u32 chid:8;
+} __packed;
+
+enum mhi_ep_cmd_completion_code {
+ MHI_CMD_COMPL_CODE_INVALID = 0,
+ MHI_CMD_COMPL_CODE_SUCCESS = 1,
+ MHI_CMD_COMPL_CODE_EOT = 2,
+ MHI_CMD_COMPL_CODE_OVERFLOW = 3,
+ MHI_CMD_COMPL_CODE_EOB = 4,
+ MHI_CMD_COMPL_CODE_UNDEFINED = 16,
+ MHI_CMD_COMPL_CODE_RING_EL = 17,
+ MHI_CMD_COMPL_CODE_RES
+};
+
+/* Event ring elements */
+/* Transfer completion event */
+struct mhi_ep_event_ring_transfer_completion {
+ u64 ptr;
+ u32 len:16;
+ u32 res1:8;
+ enum mhi_ep_cmd_completion_code code:8;
+ u32 res2:16;
+ enum mhi_ep_ring_element_type_id type:8;
+ u32 chid:8;
+} __packed;
+
+/* Command completion event */
+struct mhi_ep_event_ring_cmd_completion {
+ u64 ptr;
+ u32 res1:24;
+ enum mhi_ep_cmd_completion_code code:8;
+ u32 res2:16;
+ enum mhi_ep_ring_element_type_id type:8;
+ u32 res3:8;
+} __packed;
+
+/**
+ * enum mhi_ep_event_type - MHI state change events
+ * @MHI_EP_EVENT_CTRL_TRIG: CTRL register change event.
+ *			    Not supported, for future use
+ * @MHI_EP_EVENT_M0_STATE: M0 state change event
+ * @MHI_EP_EVENT_M1_STATE: M1 state change event. Not supported, for future use
+ * @MHI_EP_EVENT_M2_STATE: M2 state change event. Not supported, for future use
+ * @MHI_EP_EVENT_M3_STATE: M3 state change event
+ * @MHI_EP_EVENT_HW_ACC_WAKEUP: Pending data on IPA, initiate Host wakeup
+ * @MHI_EP_EVENT_CORE_WAKEUP: MHI core initiated Host wakeup
+ */
+enum mhi_ep_event_type {
+ MHI_EP_EVENT_CTRL_TRIG,
+ MHI_EP_EVENT_M0_STATE,
+ MHI_EP_EVENT_M1_STATE,
+ MHI_EP_EVENT_M2_STATE,
+ MHI_EP_EVENT_M3_STATE,
+ MHI_EP_EVENT_HW_ACC_WAKEUP,
+ MHI_EP_EVENT_CORE_WAKEUP,
+ MHI_EP_EVENT_MAX
+};
+
+enum mhi_ep_state {
+ MHI_EP_RESET_STATE = 0,
+ MHI_EP_READY_STATE,
+ MHI_EP_M0_STATE,
+ MHI_EP_M1_STATE,
+ MHI_EP_M2_STATE,
+ MHI_EP_M3_STATE,
+ MHI_EP_MAX_STATE,
+ MHI_EP_SYSERR_STATE = 0xff
+};
+
+enum mhi_ep_pcie_state {
+ MHI_EP_PCIE_LINK_DISABLE,
+ MHI_EP_PCIE_D0_STATE,
+ MHI_EP_PCIE_D3_HOT_STATE,
+ MHI_EP_PCIE_D3_COLD_STATE,
+};
+
+enum mhi_ep_pcie_event {
+ MHI_EP_PCIE_EVENT_INVALID = 0,
+ MHI_EP_PCIE_EVENT_PM_D0 = 0x1,
+ MHI_EP_PCIE_EVENT_PM_D3_HOT = 0x2,
+ MHI_EP_PCIE_EVENT_PM_D3_COLD = 0x4,
+ MHI_EP_PCIE_EVENT_PM_RST_DEAST = 0x8,
+ MHI_EP_PCIE_EVENT_LINKDOWN = 0x10,
+ MHI_EP_PCIE_EVENT_LINKUP = 0x20,
+ MHI_EP_PCIE_EVENT_MHI_A7 = 0x40,
+ MHI_EP_PCIE_EVENT_MMIO_WRITE = 0x80,
+ MHI_EP_PCIE_EVENT_L1SUB_TIMEOUT = 0x100,
+ MHI_EP_PCIE_EVENT_L1SUB_TIMEOUT_EXIT = 0x200,
+};
+
+/* MHI state change event */
+struct mhi_ep_event_ring_state_change {
+ u64 ptr;
+ u32 res1:24;
+ enum mhi_ep_state mhistate:8;
+ u32 res2:16;
+ enum mhi_ep_ring_element_type_id type:8;
+ u32 res3:8;
+} __packed;
+
+enum mhi_ep_execenv {
+ MHI_EP_SBL_EE = 1,
+ MHI_EP_AMSS_EE = 2,
+ MHI_EP_UNRESERVED
+};
+
+/* EE state change event */
+struct mhi_ep_event_ring_ee_state_change {
+ u64 ptr;
+ u32 res1:24;
+ enum mhi_ep_execenv execenv:8;
+ u32 res2:16;
+ enum mhi_ep_ring_element_type_id type:8;
+ u32 res3:8;
+} __packed;
+
+/* Generic cmd to parse common details like type and channel id */
+struct mhi_ep_ring_generic {
+ u64 ptr;
+ u32 res1:24;
+ enum mhi_ep_state mhistate:8;
+ u32 res2:16;
+ enum mhi_ep_ring_element_type_id type:8;
+ u32 chid:8;
+} __packed;
+
+/* Possible ring element types */
+union mhi_ep_ring_element_type {
+ struct mhi_ep_cmd_ring_op cmd_no_op;
+ struct mhi_ep_cmd_ring_reset_channel_cmd cmd_reset;
+ struct mhi_ep_cmd_ring_stop_channel_cmd cmd_stop;
+ struct mhi_ep_cmd_ring_start_channel_cmd cmd_start;
+ struct mhi_ep_transfer_ring_element tre;
+ struct mhi_ep_event_ring_transfer_completion evt_tr_comp;
+ struct mhi_ep_event_ring_cmd_completion evt_cmd_comp;
+ struct mhi_ep_event_ring_state_change evt_state_change;
+ struct mhi_ep_event_ring_ee_state_change evt_ee_state;
+ struct mhi_ep_ring_generic generic;
+};
+
+/* Transfer ring element type */
+union mhi_ep_ring_ctx {
+ struct mhi_ep_cmd_ctx cmd;
+ struct mhi_ep_ev_ctx ev;
+ struct mhi_ep_ch_ctx ch;
+ struct mhi_ep_gen_ctx generic;
+};
+
+enum mhi_ep_tr_compl_evt_type {
+ SEND_EVENT_BUFFER,
+ SEND_EVENT_RD_OFFSET,
+ SEND_MSI
+};
+
+struct mhi_ep_ring {
+ enum mhi_ep_ring_type type;
+ enum mhi_ep_ring_state state;
+
+ u32 ch_id;
+ u32 db_offset_h;
+ u32 db_offset_l;
+ size_t rd_offset;
+ size_t wr_offset;
+ size_t ring_size;
+ struct list_head list;
+ struct mhi_ep_cntrl *mhi_cntrl;
+
+ /*
+ * Lock to prevent race in updating event ring
+ * which is shared by multiple channels
+ */
+ struct mutex event_lock;
+ /* Physical address of the cached ring copy on the device side */
+ dma_addr_t ring_cache_dma_handle;
+ /* Device VA of read pointer array (used only for event rings) */
+ u64 *evt_rp_cache;
+ /* PA of the read pointer array (used only for event rings) */
+ dma_addr_t evt_rp_cache_dma_handle;
+ /* Ring type - cmd, event, transfer ring and its rp/wp... */
+ union mhi_ep_ring_ctx *ring_ctx;
+ /* ring_ctx_shadow -> tracking ring_ctx in the host */
+ union mhi_ep_ring_ctx *ring_ctx_shadow;
+ int (*ring_cb)(struct mhi_ep_ring *ring, union mhi_ep_ring_element_type *el);
+ /* device virtual address location of the cached host ring ctx data */
+ union mhi_ep_ring_element_type *ring_cache;
+ /* Copy of the host ring */
+ union mhi_ep_ring_element_type *ring_shadow;
+ phys_addr_t ring_shadow_phys;
+};
+
+struct mhi_ep_cmd {
+ struct mhi_ep_ring ring;
+};
+
+struct mhi_ep_event {
+ struct mhi_ep_ring ring;
+ spinlock_t lock;
+};
+
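+/*
+ * Advance the cached read offset of the ring by one element, wrapping at the
+ * end of the ring. Note: the rd_offset argument is currently unused.
+ */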
+static inline void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring,
+ size_t rd_offset)
+{
+ ring->rd_offset++;
+ if (ring->rd_offset == ring->ring_size)
+ ring->rd_offset = 0;
+}
+
+/* trace information planned to use for read/write */
+#define TRACE_DATA_MAX 128
+#define MHI_EP_DATA_MAX 512
+
+#define MHI_EP_MMIO_RANGE 0xb80
+#define MHI_EP_MMIO_OFFSET 0x100
+
+struct ring_cache_req {
+ struct completion *done;
+ void *context;
+};
+
+struct event_req {
+ union mhi_ep_ring_element_type *tr_events;
+ /*
+ * Start index of the completion event buffer segment
+ * to be flushed to host
+ */
+ u32 start;
+ u32 num_events;
+ dma_addr_t dma;
+ u32 dma_len;
+ dma_addr_t event_rd_dma;
+ void *context;
+ enum mhi_ep_tr_compl_evt_type event_type;
+ u32 event_ring;
+ void (*client_cb)(void *req);
+ void (*rd_offset_cb)(void *req);
+ void (*msi_cb)(void *req);
+ struct list_head list;
+ u32 flush_num;
+};
+
+/**
+ * struct mhi_ep_sm - MHI state manager context information
+ * @mhi_cntrl: MHI controller the state machine belongs to
+ * @lock: mutex protecting MHI state transitions
+ * @sm_wq: workqueue for state change events
+ * @sm_work: work item that processes state change events
+ * @state: MHI M state of the MHI device
+ * @d_state: EP-PCIe D state of the MHI device
+ */
+struct mhi_ep_sm {
+ struct mhi_ep_cntrl *mhi_cntrl;
+ struct mutex lock;
+ struct workqueue_struct *sm_wq;
+ struct work_struct sm_work;
+ enum mhi_ep_state state;
+ enum mhi_ep_pcie_state d_state;
+};
+
+struct mhi_ep_chan {
+ char *name;
+ u32 chan;
+ struct mhi_ep_ring ring;
+ struct mhi_ep_device *mhi_dev;
+ enum mhi_ep_ch_ctx_state state;
+ enum dma_data_direction dir;
+ struct mutex lock;
+
+ /* Channel specific callbacks */
+ void (*xfer_cb)(struct mhi_ep_device *mhi_dev, struct mhi_result *result);
+
+ bool configured;
+ bool skip_td;
+ /* current TRE being processed */
+ uint64_t tre_loc;
+ /* current TRE size */
+ uint32_t tre_size;
+ /* tre bytes left to read/write */
+ uint32_t tre_bytes_left;
+
+ /* TODO */
+ void __iomem *tre_buf;
+ phys_addr_t tre_phys;
+};
+
+/* MHI Ring related functions */
+
+int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, union mhi_ep_ring_element_type *el);
+int mhi_ep_process_tre_ring(struct mhi_ep_ring *ring, union mhi_ep_ring_element_type *el);
+void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id);
+/**
+ * mhi_ep_ring_start() - Fetches the respective transfer ring's context from
+ * the host and updates the write offset.
+ * @mhi_cntrl: MHI controller structure.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ * @ctx: Transfer ring context of type mhi_ep_ring_ctx.
+ */
+int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, union mhi_ep_ring_ctx *ctx);
+
+/**
+ * mhi_ep_update_wr_offset() - Check for any updates in the write offset.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ */
+int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring);
+
+/**
+ * mhi_ep_process_ring() - Update the Write pointer, fetch the ring elements
+ * and invoke the clients callback.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ */
+int mhi_ep_process_ring(struct mhi_ep_ring *ring);
+
+/**
+ * mhi_ep_process_ring_element() - Fetch the ring elements and invoke the
+ * clients callback.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ * @offset: Offset index into the respective ring's cache element.
+ */
+int mhi_ep_process_ring_element(struct mhi_ep_ring *ring, size_t offset);
+
+/**
+ * mhi_ep_ring_add_element() - Copy the element to the respective transfer rings
+ * read pointer and increment the index.
+ * @ring: Ring for the respective context - Channel/Event/Command.
+ * @element: Transfer ring element to be copied to the host memory.
+ */
+int mhi_ep_ring_add_element(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
+ union mhi_ep_ring_element_type *element,
+ struct event_req *ereq, int evt_offset);
+
+/* MMIO related functions */
+
+/**
+ * mhi_ep_mmio_read() - Generic MHI MMIO register read API.
+ * @mhi: MHI device structure.
+ * @offset: MHI address offset from base.
+ * @regval: Pointer the register value is stored to.
+ */
+void mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 *regval);
+
+/**
+ * mhi_ep_mmio_write() - Generic MHI MMIO register write API.
+ * @mhi: MHI device structure.
+ * @offset: MHI address offset from base.
+ * @val: Value to be written to the register offset.
+ */
+void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val);
+
+/**
+ * mhi_ep_mmio_masked_write() - Generic MHI MMIO register write masked API.
+ * @mhi: MHI device structure.
+ * @offset: MHI address offset from base.
+ * @mask: Register field mask.
+ * @shift: Shift value
+ * @val: Value to be written to the register offset.
+ */
+void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset,
+ u32 mask, u32 shift, u32 val);
+
+/**
+ * mhi_ep_mmio_masked_read() - Generic MHI MMIO register read masked API.
+ * @dev: MHI device structure.
+ * @offset: MHI address offset from base.
+ * @mask: Register field mask.
+ * @shift: Register field mask shift value.
+ * @regval: Pointer the register value is stored to.
+ */
+int mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset,
+ u32 mask, u32 shift, u32 *regval);
+
+/**
+ * mhi_ep_mmio_enable_ctrl_interrupt() - Enable Control interrupt.
+ * @mhi: MHI device structure.
+ */
+void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_mmio_disable_ctrl_interrupt() - Disable Control interrupt.
+ * @mhi: MHI device structure.
+ */
+void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_mmio_read_ctrl_status_interrupt() - Read Control interrupt status.
+ * @mhi: MHI device structure.
+ */
+void mhi_ep_mmio_read_ctrl_status_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_mmio_enable_cmdb_interrupt() - Enable Command doorbell interrupt.
+ * @mhi: MHI device structure.
+ */
+void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_mmio_disable_cmdb_interrupt() - Disable Command doorbell interrupt.
+ * @mhi: MHI device structure.
+ */
+void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_mmio_read_cmdb_status_interrupt() - Read Command doorbell status.
+ * @mhi: MHI device structure.
+ */
+void mhi_ep_mmio_read_cmdb_status_interrupt(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_mmio_enable_chdb_a7() - Enable Channel doorbell for a given
+ * channel id.
+ * @mhi: MHI device structure.
+ * @chdb_id: Channel id number.
+ */
+void mhi_ep_mmio_enable_chdb_a7(struct mhi_ep_cntrl *mhi_cntrl, u32 chdb_id);
+/**
+ * mhi_ep_mmio_disable_chdb_a7() - Disable Channel doorbell for a given
+ * channel id.
+ * @mhi: MHI device structure.
+ * @chdb_id: Channel id number.
+ */
+void mhi_ep_mmio_disable_chdb_a7(struct mhi_ep_cntrl *mhi_cntrl, u32 chdb_id);
+
+/**
+ * mhi_ep_mmio_enable_erdb_a7() - Enable Event ring doorbell for a given
+ * event ring id.
+ * @mhi: MHI device structure.
+ * @erdb_id: Event ring id number.
+ */
+void mhi_ep_mmio_enable_erdb_a7(struct mhi_ep_cntrl *mhi_cntrl, u32 erdb_id);
+
+/**
+ * mhi_ep_mmio_disable_erdb_a7() - Disable Event ring doorbell for a given
+ * event ring id.
+ * @mhi: MHI device structure.
+ * @erdb_id: Event ring id number.
+ */
+void mhi_ep_mmio_disable_erdb_a7(struct mhi_ep_cntrl *mhi_cntrl, u32 erdb_id);
+
+/**
+ * mhi_ep_mmio_enable_chdb_interrupts() - Enable all Channel doorbell
+ * interrupts.
+ * @mhi: MHI device structure.
+ */
+void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_mmio_mask_chdb_interrupts() - Mask all Channel doorbell
+ * interrupts.
+ * @mhi: MHI device structure.
+ */
+void mhi_ep_mmio_mask_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_mmio_read_chdb_status_interrupts() - Read all Channel doorbell
+ * interrupts.
+ * @mhi: MHI device structure.
+ */
+void mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_mmio_enable_erdb_interrupts() - Enable all Event doorbell
+ * interrupts.
+ * @mhi: MHI device structure.
+ */
+void mhi_ep_mmio_enable_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_mmio_mask_erdb_interrupts() - Mask all Event doorbell
+ * interrupts.
+ * @mhi: MHI device structure.
+ */
+void mhi_ep_mmio_mask_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_mmio_read_erdb_status_interrupts() - Read all Event doorbell
+ * interrupts.
+ * @mhi: MHI device structure.
+ */
+void mhi_ep_mmio_read_erdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_mmio_mask_interrupts() - Mask all MHI interrupts.
+ * @mhi: MHI device structure.
+ */
+void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_mmio_clear_interrupts() - Clear all doorbell interrupts.
+ * @mhi: MHI device structure.
+ */
+void mhi_ep_mmio_clear_interrupts(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_mmio_get_chc_base() - Fetch the Channel ring context base address.
+ * @mhi: MHI device structure.
+ */
+void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_mmio_get_erc_base() - Fetch the Event ring context base address.
+ * @mhi: MHI device structure.
+ */
+void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_mmio_get_crc_base() - Fetch the Command ring context base address.
+ * @mhi: MHI device structure.
+ */
+void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_mmio_get_ch_db() - Fetch the Write offset of the Channel ring ID.
+ * @ring: Channel ring for which to fetch the doorbell write offset.
+ * @wr_offset: Pointer of the write offset to be written to.
+ */
+void mhi_ep_mmio_get_ch_db(struct mhi_ep_ring *ring, u64 *wr_offset);
+
+/**
+ * mhi_ep_mmio_get_er_db() - Fetch the Write offset of the Event ring ID.
+ * @ring: Event ring for which to fetch the doorbell write offset.
+ * @wr_offset: Pointer of the write offset to be written to.
+ */
+void mhi_ep_mmio_get_er_db(struct mhi_ep_ring *ring, u64 *wr_offset);
+
+/**
+ * mhi_ep_mmio_get_cmd_db() - Fetch the Write offset of the Command ring.
+ * @ring: Command ring for which to fetch the doorbell write offset.
+ * @wr_offset: Pointer of the write offset to be written to.
+ */
+void mhi_ep_mmio_get_cmd_db(struct mhi_ep_ring *ring, u64 *wr_offset);
+
+/**
+ * mhi_ep_mmio_set_env() - Write the Execution Environment.
+ * @mhi: MHI device structure.
+ * @value: Value of the EXEC ENV.
+ */
+void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value);
+
+/**
+ * mhi_ep_mmio_clear_reset() - Clear the reset bit
+ * @mhi: MHI device structure.
+ */
+void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_mmio_reset() - Reset the MMIO done as part of initialization.
+ * @mhi: MHI device structure.
+ */
+void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_get_mhi_addr() - Fetches the Data and Control region from the Host.
+ * @mhi: MHI device structure.
+ */
+void mhi_ep_get_mhi_addr(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_mmio_get_mhi_state() - Fetches the MHI state such as M0/M1/M2/M3.
+ * @mhi: MHI device structure.
+ * @state: Pointer of type mhi_ep_state
+ * @mhi_reset: MHI device reset from host.
+ */
+void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ep_state *state,
+ bool *mhi_reset);
+
+/**
+ * mhi_ep_mmio_init() - Initializes the MMIO and reads the number of event
+ *		rings, supported number of channels, and the offsets to the
+ *		Channel and Event doorbell registers from the host.
+ * @mhi: MHI device structure.
+ */
+void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_mmio_update_ner() - Update the number of event rings (NER) programmed by
+ * the host.
+ * @mhi: MHI device structure.
+ */
+void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_restore_mmio() - Restores the MMIO when MHI device comes out of M3.
+ * @mhi: MHI device structure.
+ */
+void mhi_ep_restore_mmio(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_backup_mmio() - Backup MMIO before a MHI transition to M3.
+ * @mhi: MHI device structure.
+ */
+void mhi_ep_backup_mmio(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_dump_mmio() - Memory dump of the MMIO region for debug.
+ * @mhi: MHI device structure.
+ */
+void mhi_ep_dump_mmio(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_send_state_change_event() - Send state change event to the host
+ * such as M0/M1/M2/M3.
+ * @mhi: MHI device structure.
+ * @state: MHI state of type mhi_ep_state
+ */
+int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl,
+ enum mhi_ep_state state);
+/**
+ * mhi_ep_send_ee_event() - Send Execution environment state change
+ * event to the host.
+ * @mhi: MHI device structure.
+ * @state: MHI state of type mhi_ep_execenv
+ */
+int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl,
+ enum mhi_ep_execenv exec_env);
+/**
+ * mhi_ep_syserr() - System error when unexpected events are received.
+ * @mhi: MHI device structure.
+ */
+int mhi_ep_syserr(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_suspend() - MHI device suspend to stop channel processing at the
+ * Transfer ring boundary, update the channel state to suspended.
+ * @mhi: MHI device structure.
+ */
+int mhi_ep_suspend(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_resume() - MHI device resume to update the channel state to running.
+ * @mhi: MHI device structure.
+ */
+int mhi_ep_resume(struct mhi_ep_cntrl *mhi_cntrl);
+
+/**
+ * mhi_ep_trigger_hw_acc_wakeup() - Notify the state machine that there is HW
+ *		accelerated data to be sent and prevent MHI suspend.
+ * @mhi: MHI device structure.
+ */
+int mhi_ep_trigger_hw_acc_wakeup(struct mhi_ep_cntrl *mhi_cntrl);
+
+int mhi_ep_sm_init(struct mhi_ep_cntrl *mhi_cntrl);
+int mhi_ep_sm_exit(struct mhi_ep_cntrl *mhi_cntrl);
+int mhi_ep_sm_set_ready(struct mhi_ep_cntrl *mhi_cntrl);
+int mhi_ep_notify_sm_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ep_event_type event);
+int mhi_ep_sm_get_mhi_state(enum mhi_ep_state *state);
+
+#endif
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
new file mode 100644
index 000000000000..6a30fcbd5d8e
--- /dev/null
+++ b/drivers/bus/mhi/ep/main.c
@@ -0,0 +1,1247 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI Endpoint Function Driver for MHI device
+ *
+ * Copyright (C) 2021 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+#include <linux/io.h>
+#include <linux/mhi_ep.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci_ids.h>
+#include <linux/random.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/vmalloc.h>
+
+#include "internal.h"
+
+#define MHI_EP_BAR_NUM 0
+#define MHI_EP_MSI_COUNT 4
+#define MHI_EP_VERSION 0x1000000
+
+/* Wait time on the device for Host to set M0 state */
+#define MHI_EP_M0_MAX_CNT 30
+/* Wait time before suspend/resume is complete */
+#define MHI_SUSPEND_MIN 100
+#define MHI_SUSPEND_TIMEOUT 600
+/* Wait time on the device for Host to set BHI_INTVEC */
+#define MHI_BHI_INTVEC_MAX_CNT 200
+#define MHI_BHI_INTVEC_WAIT_MS 50
+#define MHI_MASK_CH_EV_LEN 32
+#define MHI_RING_CMD_ID 0
+
+#define MHI_MMIO_CTRL_INT_STATUS_A7_MSK 0x1
+#define MHI_MMIO_CTRL_CRDB_STATUS_MSK 0x2
+
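+/* Combine the lower and upper 32-bit halves read from MMIO into a 64-bit host address */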
+#define HOST_ADDR(lsb, msb) ((lsb) | ((u64)(msb) << 32))
+
+int mhi_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id);
+
+int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 evnt_ring,
+ union mhi_ep_ring_element_type *el)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ep_ring *ring = &mhi_cntrl->mhi_event[evnt_ring].ring;
+ union mhi_ep_ring_ctx *ctx;
+ int ret = 0;
+
+ mutex_lock(&mhi_cntrl->event_lock);
+
+ ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[evnt_ring];
+ if (ring->state == RING_STATE_UINT) {
+		dev_dbg(dev, "Initializing ring (%d)\n", ring->type);
+ ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx);
+ if (ret) {
+ dev_err(dev,
+ "error starting event ring %d\n", evnt_ring);
+			mutex_unlock(&mhi_cntrl->event_lock);
+ return ret;
+ }
+ }
+
+ /* add the ring element */
+ ret = mhi_ep_ring_add_element(mhi_cntrl, ring, el, NULL, 0);
+ if (ret) {
+ dev_err(dev, "Error adding ring element\n");
+ mutex_unlock(&mhi_cntrl->event_lock);
+ return ret;
+ }
+
+ /*
+ * rp update in host memory should be flushed
+ * before sending a MSI to the host
+ */
+ wmb();
+
+ mutex_unlock(&mhi_cntrl->event_lock);
+
+ dev_dbg(dev, "evnt ptr : 0x%llx\n", el->evt_tr_comp.ptr);
+ dev_dbg(dev, "evnt len : 0x%x\n", el->evt_tr_comp.len);
+ dev_dbg(dev, "evnt code :0x%x\n", el->evt_tr_comp.code);
+ dev_dbg(dev, "evnt type :0x%x\n", el->evt_tr_comp.type);
+ dev_dbg(dev, "evnt chid :0x%x\n", el->evt_tr_comp.chid);
+
+ mhi_cntrl->raise_irq(mhi_cntrl);
+
+ return 0;
+}
+
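+/* Send a transfer completion event for the given channel ring to its associated event ring */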
+static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_ring *ring, uint32_t len,
+ enum mhi_ep_cmd_completion_code code)
+{
+ union mhi_ep_ring_element_type event = {};
+ u32 er_index;
+
+ er_index = mhi_cntrl->ch_ctx_cache[ring->ch_id].err_indx;
+ event.evt_tr_comp.chid = ring->ch_id;
+ event.evt_tr_comp.type =
+ MHI_EP_RING_EL_TRANSFER_COMPLETION_EVENT;
+ event.evt_tr_comp.len = len;
+ event.evt_tr_comp.code = code;
+ event.evt_tr_comp.ptr = ring->ring_ctx->generic.rbase +
+ ring->rd_offset * sizeof(struct mhi_ep_transfer_ring_element);
+
+ return mhi_ep_send_event(mhi_cntrl, er_index, &event);
+}
+
+int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl,
+ enum mhi_ep_state state)
+{
+ union mhi_ep_ring_element_type event = {};
+
+ event.evt_state_change.type = MHI_EP_RING_EL_MHI_STATE_CHG;
+ event.evt_state_change.mhistate = state;
+
+ return mhi_ep_send_event(mhi_cntrl, 0, &event);
+}
+EXPORT_SYMBOL(mhi_ep_send_state_change_event);
+
+int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ep_execenv exec_env)
+{
+ union mhi_ep_ring_element_type event = {};
+
+ event.evt_ee_state.type = MHI_EP_RING_EL_EE_STATE_CHANGE_NOTIFY;
+ event.evt_ee_state.execenv = exec_env;
+
+ return mhi_ep_send_event(mhi_cntrl, 0, &event);
+}
+EXPORT_SYMBOL(mhi_ep_send_ee_event);
+
+static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl,
+ enum mhi_ep_cmd_completion_code code)
+{
+ union mhi_ep_ring_element_type event = {};
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ if (code > MHI_CMD_COMPL_CODE_RES) {
+ dev_err(dev,
+ "Invalid cmd compl code: %d\n", code);
+ return -EINVAL;
+ }
+
+ /* send the command completion event to the host */
+ event.evt_cmd_comp.ptr = mhi_cntrl->cmd_ctx_cache->rbase
+ + (mhi_cntrl->mhi_cmd->ring.rd_offset *
+ (sizeof(union mhi_ep_ring_element_type)));
+	dev_dbg(dev, "evt cmd comp ptr: 0x%llx\n",
+		event.evt_cmd_comp.ptr);
+ event.evt_cmd_comp.type = MHI_EP_RING_EL_CMD_COMPLETION_EVT;
+ event.evt_cmd_comp.code = code;
+ return mhi_ep_send_event(mhi_cntrl, 0, &event);
+}
+
+int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, union mhi_ep_ring_element_type *el)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ep_ring *ch_ring, *evt_ring;
+ struct mhi_ep_chan *chan;
+ union mhi_ep_ring_ctx *evt_ctx;
+ u32 ch_id = 0;
+ u32 evnt_ring_idx;
+ int ret;
+
+ ch_id = el->generic.chid;
+
+ switch (el->generic.type) {
+ case MHI_EP_RING_EL_START:
+		dev_dbg(dev, "received start cmd for channel %d\n", ch_id);
+ ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring;
+ /* Initialize and configure the corresponding channel ring */
+ ret = mhi_ep_ring_start(mhi_cntrl, ch_ring,
+ (union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]);
+ if (ret) {
+ dev_err(dev,
+ "start ring failed for ch %d\n", ch_id);
+ ret = mhi_ep_send_cmd_comp_event(mhi_cntrl,
+ MHI_CMD_COMPL_CODE_UNDEFINED);
+ if (ret)
+ dev_err(dev,
+ "Error with compl event\n");
+ return ret;
+ }
+
+ chan = &mhi_cntrl->mhi_chan[ch_id];
+ chan->state = MHI_EP_CH_STATE_ENABLED;
+
+ /* enable DB for event ring */
+ mhi_ep_mmio_enable_chdb_a7(mhi_cntrl, ch_id);
+
+ evnt_ring_idx = mhi_cntrl->ch_ctx_cache[ch_id].err_indx;
+ evt_ring = &mhi_cntrl->mhi_event[evnt_ring_idx].ring;
+ evt_ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[evnt_ring_idx];
+ if (evt_ring->state == RING_STATE_UINT) {
+ ret = mhi_ep_ring_start(mhi_cntrl, evt_ring, evt_ctx);
+ if (ret) {
+ dev_err(dev,
+ "error starting event ring %d\n",
+ mhi_cntrl->ch_ctx_cache[ch_id].err_indx);
+ return ret;
+ }
+ }
+ // mhi_ep_alloc_evt_buf_evt_req(mhi_cntrl, &mhi_cntrl->ch[ch_id],
+ // evt_ring);
+
+ mhi_cntrl->ch_ctx_cache[ch_id].ch_state = MHI_EP_CH_STATE_RUNNING;
+
+ ret = mhi_ep_send_cmd_comp_event(mhi_cntrl,
+ MHI_CMD_COMPL_CODE_SUCCESS);
+ if (ret) {
+ pr_err("Error sending command completion event\n");
+ return ret;
+ }
+
+ /* Create MHI device for the UL channel */
+ if (!(ch_id % 2)) {
+ ret = mhi_create_device(mhi_cntrl, ch_id);
+ if (ret) {
+ pr_err("Error creating device\n");
+ return ret;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int mhi_ep_check_tre_bytes_left(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_ring *ring,
+ union mhi_ep_ring_element_type *el)
+{
+ struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+ bool td_done = 0;
+
+ /*
+ * A full TRE worth of data was consumed.
+ * Check if we are at a TD boundary.
+ */
+ if (mhi_chan->tre_bytes_left == 0) {
+ if (el->tre.chain) {
+ if (el->tre.ieob)
+ mhi_ep_send_completion_event(mhi_cntrl,
+ ring, el->tre.len, MHI_CMD_COMPL_CODE_EOB);
+ } else {
+ if (el->tre.ieot)
+ mhi_ep_send_completion_event(mhi_cntrl,
+ ring, el->tre.len, MHI_CMD_COMPL_CODE_EOT);
+ td_done = 1;
+ }
+ mhi_ep_ring_inc_index(ring, ring->rd_offset);
+ mhi_chan->tre_bytes_left = 0;
+ mhi_chan->tre_loc = 0;
+ }
+
+ return td_done;
+}
+
+static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
+ struct mhi_ep_ring *ring,
+ struct mhi_result *result,
+ u32 len)
+{
+ struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ union mhi_ep_ring_element_type *el;
+ size_t bytes_to_read, addr_offset;
+ uint64_t read_from_loc;
+ ssize_t bytes_read = 0;
+ size_t write_to_loc;
+ uint32_t usr_buf_remaining;
+ bool td_done = 0;
+ int ret;
+
+ usr_buf_remaining = len;
+
+ mutex_lock(&mhi_chan->lock);
+ do {
+ el = &ring->ring_cache[ring->rd_offset];
+
+ if (mhi_chan->tre_loc) {
+ bytes_to_read = min(usr_buf_remaining,
+ mhi_chan->tre_bytes_left);
+ dev_dbg(dev, "remaining buffered data size %d", mhi_chan->tre_bytes_left);
+ } else {
+ if (ring->rd_offset == ring->wr_offset) {
+ dev_dbg(dev, "nothing to read, returning\n");
+ ret = 0;
+ goto exit;
+ }
+
+ mhi_chan->tre_loc = el->tre.data_buf_ptr;
+ mhi_chan->tre_size = el->tre.len;
+ mhi_chan->tre_bytes_left = mhi_chan->tre_size;
+
+			bytes_to_read = min(usr_buf_remaining, mhi_chan->tre_size);
+ }
+
+ bytes_read += bytes_to_read;
+ addr_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
+ read_from_loc = mhi_chan->tre_loc + addr_offset;
+ write_to_loc = (size_t) result->buf_addr + (len - usr_buf_remaining);
+ mhi_chan->tre_bytes_left -= bytes_to_read;
+
+ if (!mhi_chan->tre_buf) {
+ mhi_chan->tre_buf = mhi_cntrl->alloc_addr(mhi_cntrl, &mhi_chan->tre_phys, bytes_to_read);
+ if (!mhi_chan->tre_buf) {
+ dev_err(dev, "Failed to allocate TRE buffer\n");
+				ret = -ENOMEM;
+				goto exit;
+ }
+ }
+
+ ret = mhi_cntrl->map_addr(mhi_cntrl, mhi_chan->tre_phys, read_from_loc, bytes_to_read);
+ if (ret) {
+ dev_err(dev, "Failed to map TRE buffer\n");
+ goto err_tre_free;
+ }
+
+		dev_dbg(dev, "Reading %zu bytes from channel: %d\n", bytes_to_read, ring->ch_id);
+ memcpy_fromio((void *)write_to_loc, mhi_chan->tre_buf, bytes_to_read);
+
+ mhi_cntrl->unmap_addr(mhi_cntrl, mhi_chan->tre_phys);
+
+ usr_buf_remaining -= bytes_to_read;
+ td_done = mhi_ep_check_tre_bytes_left(mhi_cntrl, ring, el);
+	} while (usr_buf_remaining && !td_done);
+
+ result->bytes_xferd = bytes_read;
+
+ mutex_unlock(&mhi_chan->lock);
+
+ return 0;
+
+err_tre_free:
+ mhi_cntrl->free_addr(mhi_cntrl, mhi_chan->tre_phys, mhi_chan->tre_buf, bytes_to_read);
+exit:
+ mutex_unlock(&mhi_chan->lock);
+
+ return ret;
+}
+
+int mhi_ep_process_tre_ring(struct mhi_ep_ring *ring, union mhi_ep_ring_element_type *el)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ep_chan *mhi_chan;
+ struct mhi_result result = {};
+ u32 len = MHI_NET_DEFAULT_MTU;
+ int ret;
+
+ if (ring->ch_id > mhi_cntrl->max_chan) {
+ dev_err(dev, "Invalid channel ring id: %d\n", ring->ch_id);
+ return -EINVAL;
+ }
+
+ dev_dbg(dev, "Processing TRE ring for channel: %d\n", ring->ch_id);
+
+ mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id];
+
+ if (ring->ch_id % 2) {
+ /* DL channel */
+ result.dir = mhi_chan->dir;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ } else {
+ /* UL channel */
+ result.buf_addr = kzalloc(len, GFP_KERNEL);
+ if (!result.buf_addr)
+			return -ENOMEM;
+
+ ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len);
+ if (ret) {
+ dev_err(dev, "Failed to read channel: %d\n", ring->ch_id);
+			kfree(result.buf_addr);
+			return ret;
+ }
+
+ result.dir = mhi_chan->dir;
+ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
+ kfree(result.buf_addr);
+ }
+
+ return 0;
+}
+
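+/*
+ * Cache the channel, event and command context structures shared by the host
+ * locally on the device and start the command ring.
+ */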
+static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ phys_addr_t ch_ctx_cache_phys, ev_ctx_cache_phys, cmd_ctx_cache_phys;
+ size_t ch_ctx_host_size, ev_ctx_host_size, cmd_ctx_host_size;
+ int ret = 0;
+
+ /* Update the number of event rings (NER) programmed by the host */
+ mhi_ep_mmio_update_ner(mhi_cntrl);
+
+ ch_ctx_host_size = sizeof(struct mhi_ep_ch_ctx) *
+ mhi_cntrl->max_chan;
+ ev_ctx_host_size = sizeof(struct mhi_ep_ev_ctx) *
+ mhi_cntrl->event_rings;
+ cmd_ctx_host_size = sizeof(struct mhi_ep_cmd_ctx);
+ dev_dbg(dev, "Number of Event rings: %d, HW Event rings: %d\n",
+ mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings);
+
+ /* Get the channel context base pointer from host */
+ mhi_ep_mmio_get_chc_base(mhi_cntrl);
+
+ mhi_cntrl->ch_ctx_cache = mhi_cntrl->alloc_addr(mhi_cntrl, &ch_ctx_cache_phys,
+ ch_ctx_host_size);
+ if (!mhi_cntrl->ch_ctx_cache) {
+ dev_err(dev, "Failed to allocate ch_ctx_cache address\n");
+ return -ENOMEM;
+ }
+
+ ret = mhi_cntrl->map_addr(mhi_cntrl, ch_ctx_cache_phys,
+ mhi_cntrl->ch_ctx_host_pa, ch_ctx_host_size);
+ if (ret) {
+ dev_err(dev, "Failed to map ch_ctx_cache address\n");
+ goto err_ch_ctx;
+ }
+
+ /* Get the event context base pointer from host */
+ mhi_ep_mmio_get_erc_base(mhi_cntrl);
+
+ mhi_cntrl->ev_ctx_cache = mhi_cntrl->alloc_addr(mhi_cntrl, &ev_ctx_cache_phys,
+ ev_ctx_host_size);
+ if (!mhi_cntrl->ev_ctx_cache) {
+ dev_err(dev, "Failed to allocate ev_ctx_cache address\n");
+ ret = -ENOMEM;
+ goto err_ch_ctx_map;
+ }
+
+ ret = mhi_cntrl->map_addr(mhi_cntrl, ev_ctx_cache_phys,
+ mhi_cntrl->ev_ctx_host_pa, ev_ctx_host_size);
+ if (ret) {
+ dev_err(dev, "Failed to map ev_ctx_cache address\n");
+ goto err_ev_ctx;
+ }
+
+ /* Get the command context base pointer from host */
+ mhi_ep_mmio_get_crc_base(mhi_cntrl);
+
+ mhi_cntrl->cmd_ctx_cache = mhi_cntrl->alloc_addr(mhi_cntrl, &cmd_ctx_cache_phys,
+ cmd_ctx_host_size);
+ if (!mhi_cntrl->cmd_ctx_cache) {
+ dev_err(dev, "Failed to allocate cmd_ctx_cache address\n");
+ ret = -ENOMEM;
+ goto err_ev_ctx_map;
+ }
+
+ ret = mhi_cntrl->map_addr(mhi_cntrl, cmd_ctx_cache_phys,
+ mhi_cntrl->cmd_ctx_host_pa, cmd_ctx_host_size);
+ if (ret) {
+ dev_err(dev, "Failed to map address\n");
+ goto err_cmd_ctx;
+ }
+
+ dev_dbg(dev,
+ "cmd ring_base:0x%llx, rp:0x%llx, wp:0x%llx\n",
+ mhi_cntrl->cmd_ctx_cache->rbase,
+ mhi_cntrl->cmd_ctx_cache->rp,
+ mhi_cntrl->cmd_ctx_cache->wp);
+ dev_dbg(dev,
+ "ev ring_base:0x%llx, rp:0x%llx, wp:0x%llx\n",
+ mhi_cntrl->ev_ctx_cache->rbase,
+ mhi_cntrl->ev_ctx_cache->rp,
+ mhi_cntrl->ev_ctx_cache->wp);
+
+ /* Initialize command ring */
+ ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring,
+ (union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache);
+ if (ret) {
+ dev_err(dev, "Failed to start the MHI ring\n");
+ goto err_cmd_ctx_map;
+ }
+
+ return ret;
+
+err_cmd_ctx_map:
+ mhi_cntrl->unmap_addr(mhi_cntrl, cmd_ctx_cache_phys);
+
+err_cmd_ctx:
+ mhi_cntrl->free_addr(mhi_cntrl, cmd_ctx_cache_phys, mhi_cntrl->cmd_ctx_cache,
+ cmd_ctx_host_size);
+
+err_ev_ctx_map:
+ mhi_cntrl->unmap_addr(mhi_cntrl, ev_ctx_cache_phys);
+
+err_ev_ctx:
+ mhi_cntrl->free_addr(mhi_cntrl, ev_ctx_cache_phys, mhi_cntrl->ev_ctx_cache,
+ ev_ctx_host_size);
+
+err_ch_ctx_map:
+ mhi_cntrl->unmap_addr(mhi_cntrl, ch_ctx_cache_phys);
+
+err_ch_ctx:
+ mhi_cntrl->free_addr(mhi_cntrl, ch_ctx_cache_phys, mhi_cntrl->ch_ctx_cache,
+ ch_ctx_host_size);
+
+ return ret;
+}
+
+static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_enable_chdb_interrupts(mhi_cntrl);
+ mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl);
+ mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl);
+
+ enable_irq(mhi_cntrl->irq);
+}
+
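+/*
+ * Wait for the host to move MHI to M0, then cache the host configuration and
+ * enable the MHI doorbell and control interrupts.
+ */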
+static void mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_ep_state state;
+ u32 max_cnt = 0;
+ int ret, i;
+ bool mhi_reset;
+
+ /* Initialize command, channel and event rings */
+ mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0);
+ for (i = 0; i < mhi_cntrl->max_chan; i++)
+ mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i);
+ for (i = 0; i < mhi_cntrl->event_rings; i++) {
+ mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i);
+ }
+
+ /* Check if host has set M0 state */
+ mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
+ if (mhi_reset) {
+ mhi_ep_mmio_clear_reset(mhi_cntrl);
+ dev_dbg(dev, "Cleared reset before waiting for M0\n");
+ }
+
+ /* Wait for Host to set the M0 state if not done */
+ while (state != MHI_EP_M0_STATE && max_cnt < MHI_SUSPEND_TIMEOUT) {
+ msleep(MHI_SUSPEND_MIN);
+ mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
+ if (mhi_reset) {
+ mhi_ep_mmio_clear_reset(mhi_cntrl);
+ dev_dbg(dev, "Host initiated reset while waiting for M0\n");
+ }
+ max_cnt++;
+ }
+
+ if (state == MHI_EP_M0_STATE) {
+ ret = mhi_ep_cache_host_cfg(mhi_cntrl);
+ if (ret) {
+ dev_err(dev, "Failed to cache the host config\n");
+ return;
+ }
+
+ /* TODO: Check if this is necessary */
+ mhi_ep_mmio_set_env(mhi_cntrl, MHI_EP_AMSS_EE);
+ } else {
+ dev_err(dev, "MHI device failed to enter M0\n");
+ return;
+ }
+
+ mhi_ep_enable_int(mhi_cntrl);
+}
+
+static void mhi_ep_process_ring_pending(struct work_struct *work)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = container_of(work,
+ struct mhi_ep_cntrl, ring_work);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ep_ring *ring;
+ struct list_head *cp, *q;
+ int rc = 0;
+
+ mutex_lock(&mhi_cntrl->lock);
+ rc = mhi_ep_process_ring(&mhi_cntrl->mhi_cmd->ring);
+ if (rc) {
+ dev_err(dev, "error processing command ring\n");
+ goto exit;
+ }
+
+ list_for_each_safe(cp, q, &mhi_cntrl->process_ring_list) {
+ ring = list_entry(cp, struct mhi_ep_ring, list);
+ list_del(cp);
+ rc = mhi_ep_process_ring(ring);
+ if (rc) {
+ dev_err(dev,
+ "error processing channel ring: %d\n", ring->ch_id);
+ goto exit;
+ }
+
+ /* Enable channel interrupt */
+ mhi_ep_mmio_enable_chdb_a7(mhi_cntrl, ring->ch_id);
+ }
+
+exit:
+ mutex_unlock(&mhi_cntrl->lock);
+ return;
+}
+
+static int mhi_ep_get_event(enum mhi_ep_state state, enum mhi_ep_event_type *event)
+{
+ switch (state) {
+ case MHI_EP_M0_STATE:
+ *event = MHI_EP_EVENT_M0_STATE;
+ break;
+ case MHI_EP_M1_STATE:
+ *event = MHI_EP_EVENT_M1_STATE;
+ break;
+ case MHI_EP_M2_STATE:
+ *event = MHI_EP_EVENT_M2_STATE;
+ break;
+ case MHI_EP_M3_STATE:
+ *event = MHI_EP_EVENT_M3_STATE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
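+/*
+ * For each channel whose doorbell was rung, mark its ring as pending and
+ * schedule the ring processing work.
+ */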
+static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl,
+ u32 chintr_value, uint32_t ch_num)
+{
+ struct mhi_ep_ring *ring;
+
+ for (; chintr_value; ch_num++, chintr_value >>= 1) {
+ if (chintr_value & 1) {
+ ring = &mhi_cntrl->mhi_chan[ch_num].ring;
+ ring->state = RING_STATE_PENDING;
+ list_add(&ring->list, &mhi_cntrl->process_ring_list);
+ /*
+ * Disable the channel interrupt here and enable it once
+ * the current interrupt got serviced
+ */
+ mhi_ep_mmio_disable_chdb_a7(mhi_cntrl, ch_num);
+ queue_work(mhi_cntrl->ring_wq, &mhi_cntrl->ring_work);
+ }
+ }
+}
+
+static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u32 chintr_value = 0, ch_num = 0;
+ int i;
+
+ mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl);
+
+ dev_dbg(dev, "Checking for channel db");
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
+ ch_num = i * MHI_MASK_CH_EV_LEN;
+ /* Process channel status whose mask is enabled */
+ chintr_value = (mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask);
+ if (chintr_value) {
+ dev_dbg(dev,
+ "processing id: %d, ch interrupt 0x%x\n",
+ i, chintr_value);
+ mhi_ep_queue_channel_db(mhi_cntrl, chintr_value, ch_num);
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_A7_n(i),
+ mhi_cntrl->chdb[i].status);
+ }
+ }
+}
+
+/*
+ * Interrupt handler that services interrupts raised by the host writing to
+ * MHICTRL and Command ring doorbell (CRDB) registers
+ */
+static void mhi_ep_chdb_ctrl_handler(struct work_struct *work)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = container_of(work,
+ struct mhi_ep_cntrl, chdb_ctrl_work);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_ep_state state;
+ enum mhi_ep_event_type event = 0;
+ u32 int_value = 0;
+ int ret = 0;
+ bool mhi_reset;
+
+ mutex_lock(&mhi_cntrl->lock);
+
+ /* Acknowledge the interrupts */
+ mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS_A7, &int_value);
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR_A7, int_value);
+
+ /* Check for cntrl interrupts */
+ if (int_value & MHI_MMIO_CTRL_INT_STATUS_A7_MSK) {
+ dev_dbg(dev, "Processing ctrl interrupt with : %d\n", int_value);
+
+ mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset);
+
+ /* TODO: Check for MHI host reset */
+
+ ret = mhi_ep_get_event(state, &event);
+ if (ret) {
+ dev_err(dev, "Unsupported state :%d\n", state);
+ goto fail;
+ }
+
+ ret = mhi_ep_notify_sm_event(mhi_cntrl, event);
+ if (ret) {
+ dev_err(dev, "error sending SM event\n");
+ goto fail;
+ }
+ }
+
+ /* Check for cmd db interrupts */
+ if (int_value & MHI_MMIO_CTRL_CRDB_STATUS_MSK) {
+ dev_dbg(dev,
+ "processing cmd db interrupt with %d\n", int_value);
+ /* TODO Mark pending ring */
+ queue_work(mhi_cntrl->ring_wq, &mhi_cntrl->ring_work);
+ }
+
+ /* Check for channel interrupts */
+ mhi_ep_check_channel_interrupt(mhi_cntrl);
+
+fail:
+ mutex_unlock(&mhi_cntrl->lock);
+ enable_irq(mhi_cntrl->irq);
+}
+
+static irqreturn_t mhi_ep_irq(int irq, void *data)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = data;
+
+ disable_irq_nosync(irq);
+ schedule_work(&mhi_cntrl->chdb_ctrl_work);
+
+ return IRQ_HANDLED;
+}
+
+void mhi_ep_hw_init(struct work_struct *work)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, init_work);
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ int ret;
+
+ /* Mask all interrupts until the state machine is ready */
+ mhi_ep_mmio_mask_interrupts(mhi_cntrl);
+
+ mhi_ep_mmio_init(mhi_cntrl);
+ dev_dbg(dev, "Number of Event rings: %d, HW Event rings: %d\n",
+ mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings);
+
+ mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)),
+ GFP_KERNEL);
+ if (!mhi_cntrl->mhi_event)
+ return;
+
+ /* TODO: Initialize lock for all event rings */
+ spin_lock_init(&mhi_cntrl->mhi_event[0].lock);
+
+	/* Init state machine */
+	ret = mhi_ep_sm_init(mhi_cntrl);
+	if (ret)
+		goto err_free_event;
+
+	/* All set, notify the host */
+	ret = mhi_ep_sm_set_ready(mhi_cntrl);
+	if (ret)
+		goto err_free_event;
+
+	irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN);
+	ret = devm_request_irq(dev, mhi_cntrl->irq, mhi_ep_irq,
+			       IRQF_TRIGGER_HIGH, "doorbell_irq", mhi_cntrl);
+	if (ret) {
+		dev_err(dev, "Failed to request Doorbell IRQ\n");
+		goto err_free_event;
+	}
+
+	mhi_ep_enable(mhi_cntrl);
+
+	dev_dbg(dev, "Power on setup success\n");
+
+	return;
+
+err_free_event:
+	kfree(mhi_cntrl->mhi_event);
+}
+
+static void skip_to_next_td(struct mhi_ep_chan *mhi_chan, struct mhi_ep_ring *ring)
+{
+ union mhi_ep_ring_element_type *el;
+ uint32_t td_boundary_reached = 0;
+
+ mhi_chan->skip_td = 1;
+ el = &ring->ring_cache[ring->rd_offset];
+ while (ring->rd_offset != ring->wr_offset) {
+ if (td_boundary_reached) {
+ mhi_chan->skip_td = 0;
+ break;
+ }
+ if (!el->tre.chain)
+ td_boundary_reached = 1;
+ mhi_ep_ring_inc_index(ring, ring->rd_offset);
+ el = &ring->ring_cache[ring->rd_offset];
+ }
+}
+
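+/*
+ * Queue an SKB towards the host on a DL (device to host) channel. The loop
+ * below walks the channel's transfer ring, copies the SKB payload into the
+ * host buffer described by each TRE (mapped through the controller's
+ * alloc_addr/map_addr callbacks) and sends a completion event with an
+ * EOT/EOB/OVERFLOW code depending on how much of the data fits in the TRE.
+ */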
+int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir,
+ struct sk_buff *skb, size_t len, enum mhi_flags mflags)
+{
+ struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan :
+ mhi_dev->ul_chan;
+ struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ep_ring *ring;
+ union mhi_ep_ring_element_type *el;
+ enum mhi_ep_cmd_completion_code code = MHI_CMD_COMPL_CODE_INVALID;
+ int ret;
+ u32 tre_len;
+ u64 write_to_loc, skip_tres = 0;
+ size_t read_from_loc;
+ uint32_t usr_buf_remaining;
+ size_t usr_buf_offset = 0;
+ size_t bytes_to_write = 0;
+ size_t bytes_written = 0;
+
+ if (dir == DMA_TO_DEVICE)
+ return -EINVAL;
+
+ usr_buf_remaining = len;
+ ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring;
+
+ if (mhi_chan->skip_td)
+ skip_to_next_td(mhi_chan, ring);
+
+ do {
+ if (ring->rd_offset == ring->wr_offset) {
+ dev_err(dev, "TRE not available!\n");
+ return -EINVAL;
+ }
+
+ el = &ring->ring_cache[ring->rd_offset];
+ tre_len = el->tre.len;
+ if (skb->len > tre_len) {
+ dev_err(dev, "Buffer size is too big to queue!\n");
+ return -ENOMEM;
+ }
+
+ bytes_to_write = min(usr_buf_remaining, tre_len);
+ usr_buf_offset = skb->len - bytes_to_write;
+ read_from_loc = (size_t) skb->data;
+ write_to_loc = el->tre.data_buf_ptr;
+
+ if (!mhi_chan->tre_buf) {
+ mhi_chan->tre_buf = mhi_cntrl->alloc_addr(mhi_cntrl, &mhi_chan->tre_phys, bytes_to_write);
+ if (!mhi_chan->tre_buf) {
+ dev_err(dev, "Failed to allocate TRE buffer\n");
+ return -ENOMEM;
+ }
+ }
+
+ ret = mhi_cntrl->map_addr(mhi_cntrl, mhi_chan->tre_phys, write_to_loc, bytes_to_write);
+ if (ret) {
+ dev_err(dev, "Failed to map TRE buffer\n");
+ goto err_tre_free;
+ }
+
+		dev_dbg(dev, "Writing to: %llx\n", el->tre.data_buf_ptr);
+		dev_dbg(dev, "Writing %zu bytes to chan: %d\n", bytes_to_write, ring->ch_id);
+ memcpy_toio(mhi_chan->tre_buf, (void *)read_from_loc, bytes_to_write);
+
+ /* TODO: See if we can return bytes_written */
+ bytes_written += bytes_to_write;
+ usr_buf_remaining -= bytes_to_write;
+
+ if (usr_buf_remaining) {
+ if (!el->tre.chain)
+ code = MHI_CMD_COMPL_CODE_OVERFLOW;
+ else if (el->tre.ieob)
+ code = MHI_CMD_COMPL_CODE_EOB;
+ } else {
+ if (el->tre.chain)
+ skip_tres = 1;
+ code = MHI_CMD_COMPL_CODE_EOT;
+ }
+
+		dev_dbg(dev, "Sending completion code: %d\n", code);
+ /* TODO: Handle the completion code properly */
+ ret = mhi_ep_send_completion_event(mhi_cntrl, ring,
+ bytes_to_write, code);
+ if (ret) {
+			dev_err(dev, "Error sending completion event for channel: %d\n", ring->ch_id);
+ goto err_tre_unmap;
+ }
+
+ mhi_ep_ring_inc_index(ring, ring->rd_offset);
+
+ mhi_cntrl->unmap_addr(mhi_cntrl, mhi_chan->tre_phys);
+ } while (!skip_tres && usr_buf_remaining);
+
+ if (skip_tres)
+ skip_to_next_td(mhi_chan, ring);
+
+ return 0;
+
+err_tre_unmap:
+ mhi_cntrl->unmap_addr(mhi_cntrl, mhi_chan->tre_phys);
+err_tre_free:
+ mhi_cntrl->free_addr(mhi_cntrl, mhi_chan->tre_phys, mhi_chan->tre_buf, tre_len);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mhi_ep_queue_skb);
+
+void mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ queue_work(mhi_cntrl->init_wq, &mhi_cntrl->init_work);
+}
+
+static void mhi_ep_release_device(struct device *dev)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+
+ kfree(mhi_dev);
+}
+
+struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct mhi_ep_device *mhi_dev;
+ struct device *dev;
+
+ mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
+ if (!mhi_dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev = &mhi_dev->dev;
+ device_initialize(dev);
+ dev->bus = &mhi_ep_bus_type;
+ dev->release = mhi_ep_release_device;
+
+ if (mhi_cntrl->mhi_dev) {
+ /* for MHI client devices, parent is the MHI controller device */
+ dev->parent = &mhi_cntrl->mhi_dev->dev;
+ } else {
+ /* for MHI controller device, parent is the bus device (e.g. pci device) */
+ dev->parent = mhi_cntrl->cntrl_dev;
+ }
+
+ mhi_dev->mhi_cntrl = mhi_cntrl;
+
+ return mhi_dev;
+}
+
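+/*
+ * Create a client device for a channel pair. Channels are registered here in
+ * UL/DL pairs, so a single mhi_ep_device covers two consecutive channel
+ * entries (ch_id and ch_id + 1) and both directions point back to the same
+ * device.
+ */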
+int mhi_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id)
+{
+ struct mhi_ep_device *mhi_dev;
+ struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id];
+ int ret;
+
+ mhi_dev = mhi_ep_alloc_device(mhi_cntrl);
+ if (IS_ERR(mhi_dev))
+ return PTR_ERR(mhi_dev);
+
+ mhi_dev->dev_type = MHI_DEVICE_XFER;
+
+ /* Configure primary channel */
+ if (mhi_chan->dir == DMA_TO_DEVICE) {
+ mhi_dev->ul_chan = mhi_chan;
+ mhi_dev->ul_chan_id = mhi_chan->chan;
+ } else {
+ mhi_dev->dl_chan = mhi_chan;
+ mhi_dev->dl_chan_id = mhi_chan->chan;
+ }
+
+ get_device(&mhi_dev->dev);
+ mhi_chan->mhi_dev = mhi_dev;
+
+ /* Configure secondary channel as well */
+ mhi_chan++;
+ if (mhi_chan->dir == DMA_TO_DEVICE) {
+ mhi_dev->ul_chan = mhi_chan;
+ mhi_dev->ul_chan_id = mhi_chan->chan;
+ } else {
+ mhi_dev->dl_chan = mhi_chan;
+ mhi_dev->dl_chan_id = mhi_chan->chan;
+ }
+
+ get_device(&mhi_dev->dev);
+ mhi_chan->mhi_dev = mhi_dev;
+
+ /* Channel name is same for both UL and DL */
+ mhi_dev->name = mhi_chan->name;
+ dev_set_name(&mhi_dev->dev, "%s_%s",
+ dev_name(&mhi_cntrl->mhi_dev->dev),
+ mhi_dev->name);
+
+ ret = device_add(&mhi_dev->dev);
+ if (ret)
+ put_device(&mhi_dev->dev);
+
+ return ret;
+}
+
+static int parse_ch_cfg(struct mhi_ep_cntrl *mhi_cntrl,
+ const struct mhi_ep_cntrl_config *config)
+{
+ const struct mhi_ep_channel_config *ch_cfg;
+ struct device *dev = mhi_cntrl->cntrl_dev;
+ u32 chan, i;
+
+ mhi_cntrl->max_chan = config->max_channels;
+
+	/*
+	 * The allocation of MHI channels can exceed 32KB in some scenarios,
+	 * so vzalloc is used here to avoid possible allocation failures for
+	 * such large, physically non-contiguous allocations
+	 */
+	mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan *
+				      sizeof(*mhi_cntrl->mhi_chan));
+ if (!mhi_cntrl->mhi_chan)
+ return -ENOMEM;
+
+ /* We allocate max_channels and then only populate the defined channels */
+ for (i = 0; i < config->num_channels; i++) {
+ struct mhi_ep_chan *mhi_chan;
+
+ ch_cfg = &config->ch_cfg[i];
+
+ chan = ch_cfg->num;
+ if (chan >= mhi_cntrl->max_chan) {
+ dev_err(dev, "Channel %d not available\n", chan);
+ goto error_chan_cfg;
+ }
+
+ mhi_chan = &mhi_cntrl->mhi_chan[chan];
+ mhi_chan->name = ch_cfg->name;
+ mhi_chan->chan = chan;
+ mhi_chan->dir = ch_cfg->dir;
+ mutex_init(&mhi_chan->lock);
+
+		/* Bi-directional and directionless channels are not supported */
+ if (mhi_chan->dir == DMA_BIDIRECTIONAL || mhi_chan->dir == DMA_NONE) {
+ dev_err(dev, "Invalid channel configuration\n");
+ goto error_chan_cfg;
+ }
+
+ mhi_chan->configured = true;
+ }
+
+ return 0;
+
+error_chan_cfg:
+	vfree(mhi_cntrl->mhi_chan);
+
+ return -EINVAL;
+}
+
+static int parse_config(struct mhi_ep_cntrl *mhi_cntrl,
+ const struct mhi_ep_cntrl_config *config)
+{
+ int ret;
+
+ ret = parse_ch_cfg(mhi_cntrl, config);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Allocate channel and command rings here. The event rings will be allocated
+ * in mhi_ep_prepare_for_power_up() as it is set by the host.
+ */
+int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
+ const struct mhi_ep_cntrl_config *config)
+{
+ struct mhi_ep_device *mhi_dev;
+ int ret;
+
+ if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq)
+ return -EINVAL;
+
+ ret = parse_config(mhi_cntrl, config);
+ if (ret)
+ return ret;
+
+	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd),
+				     GFP_KERNEL);
+ if (!mhi_cntrl->mhi_cmd) {
+ ret = -ENOMEM;
+ goto err_free_ch;
+ }
+
+ INIT_WORK(&mhi_cntrl->ring_work, mhi_ep_process_ring_pending);
+ INIT_WORK(&mhi_cntrl->chdb_ctrl_work, mhi_ep_chdb_ctrl_handler);
+ INIT_WORK(&mhi_cntrl->init_work, mhi_ep_hw_init);
+
+ mhi_cntrl->ring_wq = alloc_ordered_workqueue("mhi_ep_ring_wq",
+ WQ_HIGHPRI);
+ if (!mhi_cntrl->ring_wq) {
+ ret = -ENOMEM;
+ goto err_free_cmd;
+ }
+
+ mhi_cntrl->init_wq = alloc_ordered_workqueue("mhi_ep_init_wq", WQ_HIGHPRI);
+ if (!mhi_cntrl->init_wq) {
+ ret = -ENOMEM;
+ goto err_destroy_ring_wq;
+ }
+
+ INIT_LIST_HEAD(&mhi_cntrl->process_ring_list);
+ mutex_init(&mhi_cntrl->lock);
+ mutex_init(&mhi_cntrl->event_lock);
+
+ /* Set MHI version and AMSS EE before link up */
+ mhi_ep_mmio_write(mhi_cntrl, MHIVER, config->mhi_version);
+ mhi_ep_mmio_set_env(mhi_cntrl, MHI_EP_AMSS_EE);
+
+ /* Register controller with MHI bus */
+ mhi_dev = mhi_ep_alloc_device(mhi_cntrl);
+ if (IS_ERR(mhi_dev)) {
+ dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
+ ret = PTR_ERR(mhi_dev);
+ goto err_destroy_init_wq;
+ }
+
+ mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
+ dev_set_name(&mhi_dev->dev, "sdx55");
+ mhi_dev->name = dev_name(&mhi_dev->dev);
+
+ ret = device_add(&mhi_dev->dev);
+ if (ret)
+ goto err_release_dev;
+
+ mhi_cntrl->mhi_dev = mhi_dev;
+
+ dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n");
+
+ return 0;
+
+err_release_dev:
+ put_device(&mhi_dev->dev);
+err_destroy_init_wq:
+ destroy_workqueue(mhi_cntrl->init_wq);
+err_destroy_ring_wq:
+ destroy_workqueue(mhi_cntrl->ring_wq);
+err_free_cmd:
+ kfree(mhi_cntrl->mhi_cmd);
+err_free_ch:
+ vfree(mhi_cntrl->mhi_chan);
+
+ return ret;
+}
+
+static int mhi_ep_driver_probe(struct device *dev)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+ struct device_driver *drv = dev->driver;
+ struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv);
+ struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan;
+ struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan;
+
+ if (ul_chan)
+ ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
+
+ if (dl_chan)
+ dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
+
+ return mhi_drv->probe(mhi_dev, mhi_dev->id);
+}
+
+static int mhi_ep_driver_remove(struct device *dev)
+{
+ return 0;
+}
+
+int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner)
+{
+ struct device_driver *driver = &mhi_drv->driver;
+
+ if (!mhi_drv->probe || !mhi_drv->remove)
+ return -EINVAL;
+
+ driver->bus = &mhi_ep_bus_type;
+ driver->owner = owner;
+ driver->probe = mhi_ep_driver_probe;
+ driver->remove = mhi_ep_driver_remove;
+
+ return driver_register(driver);
+}
+EXPORT_SYMBOL_GPL(__mhi_ep_driver_register);
+
+void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv)
+{
+ driver_unregister(&mhi_drv->driver);
+}
+EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister);
+
+static int mhi_ep_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+
+ return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
+ mhi_dev->name);
+}
+
+static int mhi_ep_match(struct device *dev, struct device_driver *drv)
+{
+ struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+ struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv);
+ const struct mhi_device_id *id;
+
+ /*
+ * If the device is a controller type then there is no client driver
+ * associated with it
+ */
+ if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
+ return 0;
+
+ for (id = mhi_drv->id_table; id->chan[0]; id++)
+ if (!strcmp(mhi_dev->name, id->chan)) {
+ mhi_dev->id = id;
+ return 1;
+ }
+
+ return 0;
+}
+
+struct bus_type mhi_ep_bus_type = {
+ .name = "mhi_ep",
+ .dev_name = "mhi_ep",
+ .match = mhi_ep_match,
+ .uevent = mhi_ep_uevent,
+};
+
+static int __init mhi_ep_init(void)
+{
+ return bus_register(&mhi_ep_bus_type);
+}
+
+static void __exit mhi_ep_exit(void)
+{
+ bus_unregister(&mhi_ep_bus_type);
+}
+
+postcore_initcall(mhi_ep_init);
+module_exit(mhi_ep_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MHI Device Implementation");
diff --git a/drivers/bus/mhi/ep/mmio.c b/drivers/bus/mhi/ep/mmio.c
new file mode 100644
index 000000000000..6758574e8912
--- /dev/null
+++ b/drivers/bus/mhi/ep/mmio.c
@@ -0,0 +1,386 @@
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mhi_ep.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+
+#include "internal.h"
+
+void mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 *regval)
+{
+ *regval = readl(mhi_cntrl->mmio + offset);
+}
+EXPORT_SYMBOL(mhi_ep_mmio_read);
+
+void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val)
+{
+ writel(val, mhi_cntrl->mmio + offset);
+}
+EXPORT_SYMBOL(mhi_ep_mmio_write);
+
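+/*
+ * Read-modify-write helper: the bits covered by @mask are cleared and @val is
+ * shifted into that field. For example, clearing the RESET bit in MHICTRL
+ * (as done in mhi_ep_mmio_clear_reset() below) looks like:
+ *
+ *	mhi_ep_mmio_masked_write(mhi_cntrl, MHICTRL, MHICTRL_RESET_MASK,
+ *				 MHICTRL_RESET_SHIFT, 0);
+ */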
+void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 mask,
+ u32 shift, u32 val)
+{
+ u32 regval;
+
+ mhi_ep_mmio_read(mhi_cntrl, offset, &regval);
+ regval &= ~mask;
+	regval |= ((val << shift) & mask);
+ mhi_ep_mmio_write(mhi_cntrl, offset, regval);
+}
+EXPORT_SYMBOL(mhi_ep_mmio_masked_write);
+
+int mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset,
+ u32 mask, u32 shift, u32 *regval)
+{
+ mhi_ep_mmio_read(dev, offset, regval);
+ *regval &= mask;
+ *regval >>= shift;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_ep_mmio_masked_read);
+
+void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ep_state *state,
+ bool *mhi_reset)
+{
+ u32 regval;
+
+ mhi_ep_mmio_read(mhi_cntrl, MHICTRL, &regval);
+ *state = FIELD_GET(MHICTRL_MHISTATE_MASK, regval);
+ *mhi_reset = !!FIELD_GET(MHICTRL_RESET_MASK, regval);
+}
+EXPORT_SYMBOL(mhi_ep_mmio_get_mhi_state);
+
+static void mhi_ep_mmio_mask_set_chdb_int_a7(struct mhi_ep_cntrl *mhi_cntrl,
+ u32 chdb_id, bool enable)
+{
+ u32 chid_mask, chid_idx, chid_shft, val = 0;
+
+ chid_shft = chdb_id % 32;
+ chid_mask = BIT(chid_shft);
+ chid_idx = chdb_id / 32;
+
+ if (chid_idx >= MHI_MASK_ROWS_CH_EV_DB)
+ return;
+
+ if (enable)
+ val = 1;
+
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CHDB_INT_MASK_A7_n(chid_idx),
+ chid_mask, chid_shft, val);
+ mhi_ep_mmio_read(mhi_cntrl, MHI_CHDB_INT_MASK_A7_n(chid_idx),
+ &mhi_cntrl->chdb[chid_idx].mask);
+}
+
+void mhi_ep_mmio_enable_chdb_a7(struct mhi_ep_cntrl *mhi_cntrl, u32 chdb_id)
+{
+ mhi_ep_mmio_mask_set_chdb_int_a7(mhi_cntrl, chdb_id, true);
+}
+EXPORT_SYMBOL(mhi_ep_mmio_enable_chdb_a7);
+
+void mhi_ep_mmio_disable_chdb_a7(struct mhi_ep_cntrl *mhi_cntrl, u32 chdb_id)
+{
+ mhi_ep_mmio_mask_set_chdb_int_a7(mhi_cntrl, chdb_id, false);
+}
+EXPORT_SYMBOL(mhi_ep_mmio_disable_chdb_a7);
+
+static void mhi_ep_mmio_set_erdb_int_a7(struct mhi_ep_cntrl *mhi_cntrl,
+ u32 erdb_ch_id, bool enable)
+{
+ u32 erdb_id_shft, erdb_id_mask, erdb_id_idx, val = 0;
+
+ erdb_id_shft = erdb_ch_id % 32;
+ erdb_id_mask = BIT(erdb_id_shft);
+ erdb_id_idx = erdb_ch_id / 32;
+
+ if (erdb_id_idx >= MHI_MASK_ROWS_CH_EV_DB)
+ return;
+
+ if (enable)
+ val = 1;
+
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_ERDB_INT_MASK_A7_n(erdb_id_idx),
+ erdb_id_mask, erdb_id_shft, val);
+}
+
+void mhi_ep_mmio_enable_erdb_a7(struct mhi_ep_cntrl *mhi_cntrl, u32 erdb_id)
+{
+ mhi_ep_mmio_set_erdb_int_a7(mhi_cntrl, erdb_id, true);
+}
+EXPORT_SYMBOL(mhi_ep_mmio_enable_erdb_a7);
+
+void mhi_ep_mmio_disable_erdb_a7(struct mhi_ep_cntrl *mhi_cntrl, u32 erdb_id)
+{
+ mhi_ep_mmio_set_erdb_int_a7(mhi_cntrl, erdb_id, false);
+}
+EXPORT_SYMBOL(mhi_ep_mmio_disable_erdb_a7);
+
+static void mhi_ep_mmio_set_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable)
+{
+ u32 val = 0, i = 0;
+
+ if (enable)
+ val = MHI_CHDB_INT_MASK_A7_n_EN_ALL;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) {
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_MASK_A7_n(i), val);
+ mhi_cntrl->chdb[i].mask = val;
+ }
+}
+
+void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, true);
+}
+EXPORT_SYMBOL(mhi_ep_mmio_enable_chdb_interrupts);
+
+void mhi_ep_mmio_mask_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, false);
+}
+EXPORT_SYMBOL(mhi_ep_mmio_mask_chdb_interrupts);
+
+void mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 i;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++)
+ mhi_ep_mmio_read(mhi_cntrl, MHI_CHDB_INT_STATUS_A7_n(i),
+ &mhi_cntrl->chdb[i].status);
+}
+EXPORT_SYMBOL(mhi_ep_mmio_read_chdb_status_interrupts);
+
+static void mhi_ep_mmio_set_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable)
+{
+ u32 val = 0, i;
+
+ if (enable)
+ val = MHI_ERDB_INT_MASK_A7_n_EN_ALL;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++)
+ mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_MASK_A7_n(i), val);
+}
+
+void mhi_ep_mmio_enable_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_set_erdb_interrupts(mhi_cntrl, true);
+}
+EXPORT_SYMBOL(mhi_ep_mmio_enable_erdb_interrupts);
+
+void mhi_ep_mmio_mask_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_set_erdb_interrupts(mhi_cntrl, false);
+}
+EXPORT_SYMBOL(mhi_ep_mmio_mask_erdb_interrupts);
+
+void mhi_ep_mmio_read_erdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 i;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++)
+ mhi_ep_mmio_read(mhi_cntrl, MHI_ERDB_INT_STATUS_A7_n(i),
+ &mhi_cntrl->evdb[i].status);
+}
+EXPORT_SYMBOL(mhi_ep_mmio_read_erdb_status_interrupts);
+
+void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK_A7,
+ MHI_CTRL_MHICTRL_MASK,
+ MHI_CTRL_MHICTRL_SHFT, 1);
+}
+EXPORT_SYMBOL(mhi_ep_mmio_enable_ctrl_interrupt);
+
+void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK_A7,
+ MHI_CTRL_MHICTRL_MASK,
+ MHI_CTRL_MHICTRL_SHFT, 0);
+}
+EXPORT_SYMBOL(mhi_ep_mmio_disable_ctrl_interrupt);
+
+void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK_A7,
+ MHI_CTRL_CRDB_MASK,
+ MHI_CTRL_CRDB_SHFT, 1);
+}
+EXPORT_SYMBOL(mhi_ep_mmio_enable_cmdb_interrupt);
+
+void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK_A7,
+ MHI_CTRL_CRDB_MASK,
+ MHI_CTRL_CRDB_SHFT, 0);
+}
+EXPORT_SYMBOL(mhi_ep_mmio_disable_cmdb_interrupt);
+
+void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_disable_ctrl_interrupt(mhi_cntrl);
+ mhi_ep_mmio_disable_cmdb_interrupt(mhi_cntrl);
+ mhi_ep_mmio_mask_chdb_interrupts(mhi_cntrl);
+ mhi_ep_mmio_mask_erdb_interrupts(mhi_cntrl);
+}
+EXPORT_SYMBOL(mhi_ep_mmio_mask_interrupts);
+
+void mhi_ep_mmio_clear_interrupts(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 i = 0;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++)
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_A7_n(i),
+ MHI_CHDB_INT_CLEAR_A7_n_CLEAR_ALL);
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++)
+ mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_CLEAR_A7_n(i),
+ MHI_ERDB_INT_CLEAR_A7_n_CLEAR_ALL);
+
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR_A7,
+ MHI_CTRL_INT_MMIO_WR_CLEAR |
+ MHI_CTRL_INT_CRDB_CLEAR |
+ MHI_CTRL_INT_CRDB_MHICTRL_CLEAR);
+}
+EXPORT_SYMBOL(mhi_ep_mmio_clear_interrupts);
+
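+/*
+ * The host programs the channel/event/command context base addresses as two
+ * 32-bit MMIO registers (HIGHER/LOWER). The helpers below stitch them back
+ * together into a single 64-bit host physical address.
+ */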
+void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 ccabap_value = 0;
+
+ mhi_ep_mmio_read(mhi_cntrl, CCABAP_HIGHER, &ccabap_value);
+ mhi_cntrl->ch_ctx_host_pa = ccabap_value;
+ mhi_cntrl->ch_ctx_host_pa <<= 32;
+
+ mhi_ep_mmio_read(mhi_cntrl, CCABAP_LOWER, &ccabap_value);
+ mhi_cntrl->ch_ctx_host_pa |= ccabap_value;
+}
+EXPORT_SYMBOL(mhi_ep_mmio_get_chc_base);
+
+void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 ecabap_value = 0;
+
+ mhi_ep_mmio_read(mhi_cntrl, ECABAP_HIGHER, &ecabap_value);
+ mhi_cntrl->ev_ctx_host_pa = ecabap_value;
+ mhi_cntrl->ev_ctx_host_pa <<= 32;
+
+ mhi_ep_mmio_read(mhi_cntrl, ECABAP_LOWER, &ecabap_value);
+ mhi_cntrl->ev_ctx_host_pa |= ecabap_value;
+}
+EXPORT_SYMBOL(mhi_ep_mmio_get_erc_base);
+
+void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 crcbap_value = 0;
+
+ mhi_ep_mmio_read(mhi_cntrl, CRCBAP_HIGHER, &crcbap_value);
+ mhi_cntrl->cmd_ctx_host_pa = crcbap_value;
+ mhi_cntrl->cmd_ctx_host_pa <<= 32;
+
+ mhi_ep_mmio_read(mhi_cntrl, CRCBAP_LOWER, &crcbap_value);
+ mhi_cntrl->cmd_ctx_host_pa |= crcbap_value;
+}
+EXPORT_SYMBOL(mhi_ep_mmio_get_crc_base);
+
+void mhi_ep_mmio_get_ch_db(struct mhi_ep_ring *ring, u64 *wr_offset)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ u32 value = 0;
+
+ mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_h, &value);
+ *wr_offset = value;
+ *wr_offset <<= 32;
+
+ mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_l, &value);
+
+ *wr_offset |= value;
+}
+EXPORT_SYMBOL(mhi_ep_mmio_get_ch_db);
+
+void mhi_ep_mmio_get_er_db(struct mhi_ep_ring *ring, u64 *wr_offset)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ u32 value = 0;
+
+ mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_h, &value);
+ *wr_offset = value;
+ *wr_offset <<= 32;
+
+ mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_l, &value);
+
+ *wr_offset |= value;
+}
+EXPORT_SYMBOL(mhi_ep_mmio_get_er_db);
+
+void mhi_ep_mmio_get_cmd_db(struct mhi_ep_ring *ring, u64 *wr_offset)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ u32 value = 0;
+
+ mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_h, &value);
+ *wr_offset = value;
+ *wr_offset <<= 32;
+
+ mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_l, &value);
+ *wr_offset |= value;
+}
+EXPORT_SYMBOL(mhi_ep_mmio_get_cmd_db);
+
+void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value)
+{
+ mhi_ep_mmio_write(mhi_cntrl, BHI_EXECENV, value);
+}
+EXPORT_SYMBOL(mhi_ep_mmio_set_env);
+
+void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHICTRL, MHICTRL_RESET_MASK,
+ MHICTRL_RESET_SHIFT, 0);
+}
+EXPORT_SYMBOL(mhi_ep_mmio_clear_reset);
+
+void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ mhi_ep_mmio_write(mhi_cntrl, MHICTRL, 0);
+ mhi_ep_mmio_write(mhi_cntrl, MHISTATUS, 0);
+ mhi_ep_mmio_clear_interrupts(mhi_cntrl);
+}
+EXPORT_SYMBOL(mhi_ep_mmio_reset);
+
+void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl)
+{
+	u32 mhi_cfg;
+
+ mhi_ep_mmio_read(mhi_cntrl, MHIREGLEN, &mhi_cntrl->reg_len);
+ mhi_ep_mmio_read(mhi_cntrl, CHDBOFF, &mhi_cntrl->chdb_offset);
+ mhi_ep_mmio_read(mhi_cntrl, ERDBOFF, &mhi_cntrl->erdb_offset);
+
+ mhi_ep_mmio_read(mhi_cntrl, MHICFG, &mhi_cfg);
+ mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, mhi_cfg);
+ mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, mhi_cfg);
+
+ mhi_ep_mmio_reset(mhi_cntrl);
+}
+EXPORT_SYMBOL(mhi_ep_mmio_init);
+
+void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl)
+{
+	u32 mhi_cfg;
+
+ mhi_ep_mmio_read(mhi_cntrl, MHICFG, &mhi_cfg);
+ mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, mhi_cfg);
+ mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, mhi_cfg);
+}
+EXPORT_SYMBOL(mhi_ep_mmio_update_ner);
diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c
new file mode 100644
index 000000000000..74f4f26c662f
--- /dev/null
+++ b/drivers/bus/mhi/ep/ring.c
@@ -0,0 +1,366 @@
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mhi_ep.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+
+#include "internal.h"
+
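+/*
+ * Convert a host ring pointer (bus address within the ring) into an element
+ * offset relative to the ring base: offset = (ptr - rbase) / element_size.
+ */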
+static size_t mhi_ep_ring_addr2ofst(struct mhi_ep_ring *ring, u64 p)
+{
+ u64 rbase;
+
+ rbase = ring->ring_ctx->generic.rbase;
+
+ return (p - rbase) / sizeof(union mhi_ep_ring_element_type);
+}
+
+static u32 mhi_ep_ring_num_elems(struct mhi_ep_ring *ring)
+{
+ return ring->ring_ctx->generic.rlen /
+ sizeof(union mhi_ep_ring_element_type);
+}
+
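+/*
+ * Copy the host-resident ring elements between the locally cached write
+ * offset and @end into ring->ring_cache. The host ring is temporarily mapped
+ * through the controller's alloc_addr/map_addr callbacks and the copy
+ * handles wrap-around at the end of the ring.
+ */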
+int mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ size_t start, copy_size;
+ union mhi_ep_ring_element_type *ring_shadow;
+ phys_addr_t ring_shadow_phys;
+ size_t size = ring->ring_size * sizeof(union mhi_ep_ring_element_type);
+ int ret;
+
+ /* No need to cache the ring if wp is unmodified */
+	if (ring->wr_offset == end) {
+		dev_dbg(dev,
+			"nothing to cache for ring (%d), local wr_offset %d, new wr_offset %zu\n",
+			ring->type, ring->wr_offset, end);
+		return 0;
+	}
+
+ /* No need to cache event rings */
+ if (ring->type == RING_TYPE_ER) {
+ dev_dbg(dev,
+ "not caching event ring\n");
+ return 0;
+ }
+
+ start = ring->wr_offset;
+
+ /* Map the host ring */
+ ring_shadow = mhi_cntrl->alloc_addr(mhi_cntrl, &ring_shadow_phys,
+ size);
+ if (!ring_shadow) {
+ dev_err(dev, "failed to allocate ring_shadow\n");
+ return -ENOMEM;
+ }
+
+ ret = mhi_cntrl->map_addr(mhi_cntrl, ring_shadow_phys,
+ ring->ring_ctx->generic.rbase, size);
+ if (ret) {
+		dev_err(dev, "failed to map ring_shadow\n");
+ goto err_ring_free;
+ }
+
+ if (start < end) {
+ copy_size = (end - start) * sizeof(union mhi_ep_ring_element_type);
+ memcpy_fromio(&ring->ring_cache[start], &ring_shadow[start], copy_size);
+ } else {
+ copy_size = (ring->ring_size - start) * sizeof(union mhi_ep_ring_element_type);
+ memcpy_fromio(&ring->ring_cache[start], &ring_shadow[start], copy_size);
+		if (end)
+			memcpy_fromio(&ring->ring_cache[0], ring_shadow,
+				      end * sizeof(union mhi_ep_ring_element_type));
+ }
+
+	dev_dbg(dev, "Caching ring (%d) start %zu end %zu size %zu\n",
+		ring->type, start, end, copy_size);
+
+ mhi_cntrl->unmap_addr(mhi_cntrl, ring_shadow_phys);
+ mhi_cntrl->free_addr(mhi_cntrl, ring_shadow_phys, ring_shadow, size);
+
+ return 0;
+err_ring_free:
+	mhi_cntrl->free_addr(mhi_cntrl, ring_shadow_phys, ring_shadow, size);
+
+ return ret;
+}
+
+int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ u64 wr_offset = 0;
+ size_t new_wr_offset = 0;
+ int ret;
+
+ switch (ring->type) {
+ case RING_TYPE_CMD:
+ mhi_ep_mmio_get_cmd_db(ring, &wr_offset);
+		dev_dbg(dev, "ring type (%d) wr_offset from db 0x%llx\n",
+			ring->type, wr_offset);
+ break;
+ case RING_TYPE_ER:
+ mhi_ep_mmio_get_er_db(ring, &wr_offset);
+ break;
+ case RING_TYPE_CH:
+ mhi_ep_mmio_get_ch_db(ring, &wr_offset);
+		dev_dbg(dev, "ring %d wr_offset from db 0x%llx\n",
+			ring->type, wr_offset);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ new_wr_offset = mhi_ep_ring_addr2ofst(ring, wr_offset);
+
+ ret = mhi_ep_cache_ring(ring, new_wr_offset);
+ if (ret)
+ return ret;
+
+ ring->wr_offset = new_wr_offset;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_ep_update_wr_offset);
+
+int mhi_ep_process_ring_element(struct mhi_ep_ring *ring, size_t offset)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ union mhi_ep_ring_element_type *el;
+ int ret = -EINVAL;
+
+ /* get the element and invoke the respective callback */
+ el = &ring->ring_cache[offset];
+
+ if (ring->ring_cb)
+ ret = ring->ring_cb(ring, el);
+ else
+ dev_err(dev, "No callback registered for ring\n");
+
+ return ret;
+}
+EXPORT_SYMBOL(mhi_ep_process_ring_element);
+
+int mhi_ep_process_ring(struct mhi_ep_ring *ring)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ union mhi_ep_ring_element_type *el;
+ int rc = 0;
+
+ dev_dbg(dev,
+ "Before wr update ring_type (%d) element (%d) with wr:%d\n",
+ ring->type, ring->rd_offset, ring->wr_offset);
+
+ rc = mhi_ep_update_wr_offset(ring);
+ if (rc) {
+ dev_err(dev, "Error updating write-offset for ring\n");
+ return rc;
+ }
+
+ /* TODO see if can be deleted */
+ /* get the element and invoke the respective callback */
+ el = &ring->ring_cache[ring->wr_offset];
+
+ if (ring->type == RING_TYPE_CH) {
+ /* notify the clients that there are elements in the ring */
+ dev_dbg(dev, "processing channel ring element!");
+ rc = mhi_ep_process_ring_element(ring, ring->rd_offset);
+		if (rc)
+			dev_err(dev, "Error processing channel ring element\n");
+ return rc;
+ }
+
+ while (ring->rd_offset != ring->wr_offset) {
+ rc = mhi_ep_process_ring_element(ring, ring->rd_offset);
+ if (rc) {
+ dev_err(dev,
+ "Error processing ring element (%d)\n",
+ ring->rd_offset);
+ return rc;
+ }
+
+ dev_dbg(dev, "Processing ring rd_offset:%d, wr_offset:%d\n",
+ ring->rd_offset, ring->wr_offset);
+ mhi_ep_ring_inc_index(ring, ring->rd_offset);
+ }
+
+	if (ring->rd_offset != ring->wr_offset) {
+ dev_err(dev, "Error with the rd offset/wr offset\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_ep_process_ring);
+
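+/*
+ * Add an element to a host ring. Free space in the circular ring is derived
+ * from the cached read/write offsets, the local read offset and the context
+ * read pointer (rp) are advanced, and the element is then copied into the
+ * mapped host ring memory.
+ */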
+/* TODO See if we can avoid passing mhi_cntrl */
+int mhi_ep_ring_add_element(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
+ union mhi_ep_ring_element_type *element,
+ struct event_req *ereq, int size)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ size_t old_offset = 0;
+ union mhi_ep_ring_element_type *ring_shadow;
+ phys_addr_t ring_shadow_phys;
+ size_t ring_size = ring->ring_size * sizeof(union mhi_ep_ring_element_type);
+ u32 num_elem = 1;
+ u32 num_free_elem;
+ int ret;
+
+ ret = mhi_ep_update_wr_offset(ring);
+ if (ret) {
+ dev_err(dev, "Error updating write pointer\n");
+ return ret;
+ }
+
+ if (ereq)
+ num_elem = size / (sizeof(union mhi_ep_ring_element_type));
+
+ if (ring->rd_offset < ring->wr_offset)
+ num_free_elem = ring->wr_offset - ring->rd_offset - 1;
+ else
+ num_free_elem = ring->ring_size - ring->rd_offset +
+ ring->wr_offset - 1;
+
+ if (num_free_elem < num_elem) {
+ dev_err(dev, "No space to add %d elem in ring\n",
+ num_elem);
+ return -EINVAL;
+ }
+
+ old_offset = ring->rd_offset;
+
+	if (ereq) {
+		ring->rd_offset += num_elem;
+		if (ring->rd_offset >= ring->ring_size)
+			ring->rd_offset -= ring->ring_size;
+	} else {
+		mhi_ep_ring_inc_index(ring, ring->rd_offset);
+	}
+
+ dev_dbg(dev,
+ "Writing %d elements, ring old 0x%x, new 0x%x\n",
+ num_elem, old_offset, ring->rd_offset);
+
+ /* Update rp */
+ ring->ring_ctx->generic.rp = (ring->rd_offset *
+ sizeof(union mhi_ep_ring_element_type)) +
+ ring->ring_ctx->generic.rbase;
+
+ /* Map the host ring */
+ ring_shadow = mhi_cntrl->alloc_addr(mhi_cntrl, &ring_shadow_phys, ring_size);
+ if (!ring_shadow) {
+ dev_err(dev, "failed to allocate ring_shadow\n");
+ return -ENOMEM;
+ }
+
+ ret = mhi_cntrl->map_addr(mhi_cntrl, ring_shadow_phys,
+ ring->ring_ctx->generic.rbase, ring_size);
+ if (ret) {
+		dev_err(dev, "failed to map ring_shadow\n");
+ goto err_ring_free;
+ }
+
+ /* Copy the element to ring */
+ if (!ereq)
+ memcpy_toio(&ring_shadow[old_offset], element,
+ sizeof(union mhi_ep_ring_element_type));
+
+ mhi_cntrl->unmap_addr(mhi_cntrl, ring_shadow_phys);
+ mhi_cntrl->free_addr(mhi_cntrl, ring_shadow_phys, ring_shadow, ring_size);
+
+ /* TODO: Adding multiple ring elements */
+
+ return 0;
+err_ring_free:
+ mhi_cntrl->free_addr(mhi_cntrl, ring_shadow_phys, ring_shadow, ring_size);
+
+ return ret;
+}
+EXPORT_SYMBOL(mhi_ep_ring_add_element);
+
+void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type,
+ u32 id)
+{
+ ring->state = RING_STATE_UINT;
+ ring->type = type;
+ if (ring->type == RING_TYPE_CMD) {
+ ring->ring_cb = mhi_ep_process_cmd_ring;
+ ring->db_offset_h = CRDB_HIGHER;
+ ring->db_offset_l = CRDB_LOWER;
+ } else if (ring->type == RING_TYPE_CH) {
+ ring->ring_cb = mhi_ep_process_tre_ring;
+ ring->db_offset_h = CHDB_HIGHER_n(id);
+ ring->db_offset_l = CHDB_LOWER_n(id);
+ ring->ch_id = id;
+ } else if (ring->type == RING_TYPE_ER) {
+ ring->db_offset_h = ERDB_HIGHER_n(id);
+ ring->db_offset_l = ERDB_LOWER_n(id);
+ }
+}
+
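+/*
+ * Attach a ring to its host context: derive the ring size and the local
+ * read/write offsets from the context rbase/rp/wp, allocate the local ring
+ * cache and populate it with the current host ring contents.
+ */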
+/* TODO See if we can avoid passing mhi_cntrl */
+int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring,
+ union mhi_ep_ring_ctx *ctx)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ size_t wr_offset = 0;
+ int ret;
+
+ ring->ring_ctx = ctx;
+ ring->mhi_cntrl = mhi_cntrl;
+ dev_dbg(dev, "rbase: %llx", ring->ring_ctx->generic.rbase);
+ ring->ring_size = mhi_ep_ring_num_elems(ring);
+
+ /* During init, both rp and wp are equal */
+ ring->rd_offset = mhi_ep_ring_addr2ofst(ring,
+ ring->ring_ctx->generic.rp);
+ ring->wr_offset = mhi_ep_ring_addr2ofst(ring,
+ ring->ring_ctx->generic.rp);
+ ring->state = RING_STATE_IDLE;
+
+ wr_offset = mhi_ep_ring_addr2ofst(ring,
+ ring->ring_ctx->generic.wp);
+
+ if (!ring->ring_cache) {
+ ring->ring_cache = kcalloc(ring->ring_size,
+ sizeof(union mhi_ep_ring_element_type),
+ GFP_KERNEL);
+ if (!ring->ring_cache) {
+ dev_err(dev, "Failed to allocate ring cache\n");
+ return -ENOMEM;
+ }
+ }
+
+	/*
+	 * Event rings are written by the device, so only command and channel
+	 * rings need to be cached here (mhi_ep_cache_ring() skips event rings
+	 * on its own as well)
+	 */
+	if (ring->type != RING_TYPE_ER) {
+		ret = mhi_ep_cache_ring(ring, wr_offset);
+		if (ret)
+			return ret;
+	}
+
+ ring->wr_offset = wr_offset;
+
+	dev_dbg(dev, "ctx ring_base:0x%llx, rp:0x%llx, wp:0x%llx\n",
+		ring->ring_ctx->generic.rbase,
+		ring->ring_ctx->generic.rp,
+		ring->ring_ctx->generic.wp);
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_ep_ring_start);
diff --git a/drivers/bus/mhi/ep/sm.c b/drivers/bus/mhi/ep/sm.c
new file mode 100644
index 000000000000..5d7b5d84a26e
--- /dev/null
+++ b/drivers/bus/mhi/ep/sm.c
@@ -0,0 +1,436 @@
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/mhi_ep.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+
+#include "internal.h"
+
+static const char *mhi_sm_dev_event_str(enum mhi_ep_event_type state)
+{
+ const char *str;
+
+ switch (state) {
+ case MHI_EP_EVENT_CTRL_TRIG:
+ str = "MHI_EP_EVENT_CTRL_TRIG";
+ break;
+ case MHI_EP_EVENT_M0_STATE:
+ str = "MHI_EP_EVENT_M0_STATE";
+ break;
+ case MHI_EP_EVENT_M1_STATE:
+ str = "MHI_EP_EVENT_M1_STATE";
+ break;
+ case MHI_EP_EVENT_M2_STATE:
+ str = "MHI_EP_EVENT_M2_STATE";
+ break;
+ case MHI_EP_EVENT_M3_STATE:
+ str = "MHI_EP_EVENT_M3_STATE";
+ break;
+ case MHI_EP_EVENT_HW_ACC_WAKEUP:
+ str = "MHI_EP_EVENT_HW_ACC_WAKEUP";
+ break;
+ case MHI_EP_EVENT_CORE_WAKEUP:
+ str = "MHI_EP_EVENT_CORE_WAKEUP";
+ break;
+ default:
+ str = "INVALID MHI_EP_EVENT";
+ }
+
+ return str;
+}
+
+static const char *mhi_sm_mstate_str(enum mhi_ep_state state)
+{
+ const char *str;
+
+ switch (state) {
+ case MHI_EP_RESET_STATE:
+ str = "RESET";
+ break;
+ case MHI_EP_READY_STATE:
+ str = "READY";
+ break;
+ case MHI_EP_M0_STATE:
+ str = "M0";
+ break;
+ case MHI_EP_M1_STATE:
+ str = "M1";
+ break;
+ case MHI_EP_M2_STATE:
+ str = "M2";
+ break;
+ case MHI_EP_M3_STATE:
+ str = "M3";
+ break;
+ case MHI_EP_SYSERR_STATE:
+ str = "SYSTEM ERROR";
+ break;
+ default:
+ str = "INVALID";
+ break;
+ }
+
+ return str;
+}
+
+static const char *mhi_sm_dstate_str(enum mhi_ep_pcie_state state)
+{
+ const char *str;
+
+ switch (state) {
+ case MHI_EP_PCIE_LINK_DISABLE:
+ str = "EP_PCIE_LINK_DISABLE";
+ break;
+ case MHI_EP_PCIE_D0_STATE:
+ str = "D0_STATE";
+ break;
+ case MHI_EP_PCIE_D3_HOT_STATE:
+ str = "D3_HOT_STATE";
+ break;
+ case MHI_EP_PCIE_D3_COLD_STATE:
+ str = "D3_COLD_STATE";
+ break;
+ default:
+ str = "INVALID D-STATE";
+ break;
+ }
+
+ return str;
+}
+
+static inline const char *mhi_sm_pcie_event_str(enum mhi_ep_pcie_event event)
+{
+ const char *str;
+
+ switch (event) {
+ case MHI_EP_PCIE_EVENT_LINKDOWN:
+ str = "EP_PCIE_LINKDOWN_EVENT";
+ break;
+ case MHI_EP_PCIE_EVENT_LINKUP:
+ str = "EP_PCIE_LINKUP_EVENT";
+ break;
+ case MHI_EP_PCIE_EVENT_PM_D3_HOT:
+ str = "EP_PCIE_PM_D3_HOT_EVENT";
+ break;
+ case MHI_EP_PCIE_EVENT_PM_D3_COLD:
+ str = "EP_PCIE_PM_D3_COLD_EVENT";
+ break;
+ case MHI_EP_PCIE_EVENT_PM_RST_DEAST:
+ str = "EP_PCIE_PM_RST_DEAST_EVENT";
+ break;
+ case MHI_EP_PCIE_EVENT_PM_D0:
+ str = "EP_PCIE_PM_D0_EVENT";
+ break;
+ case MHI_EP_PCIE_EVENT_MHI_A7:
+ str = "EP_PCIE_MHI_A7";
+ break;
+ default:
+ str = "INVALID_PCIE_EVENT";
+ break;
+ }
+
+ return str;
+}
+
+static void mhi_ep_sm_mmio_set_status(struct mhi_ep_cntrl *mhi_cntrl,
+ enum mhi_ep_state state)
+{
+ struct mhi_ep_sm *sm = mhi_cntrl->sm;
+
+ switch (state) {
+ case MHI_EP_READY_STATE:
+ dev_dbg(&mhi_cntrl->mhi_dev->dev, "set MHISTATUS to READY mode\n");
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHISTATUS,
+ MHISTATUS_READY_MASK,
+ MHISTATUS_READY_SHIFT, 1);
+
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHISTATUS,
+ MHISTATUS_MHISTATE_MASK,
+ MHISTATUS_MHISTATE_SHIFT, state);
+ break;
+ case MHI_EP_SYSERR_STATE:
+ dev_dbg(&mhi_cntrl->mhi_dev->dev, "set MHISTATUS to SYSTEM ERROR mode\n");
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHISTATUS,
+ MHISTATUS_SYSERR_MASK,
+ MHISTATUS_SYSERR_SHIFT, 1);
+
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHISTATUS,
+ MHISTATUS_MHISTATE_MASK,
+ MHISTATUS_MHISTATE_SHIFT, state);
+ break;
+ case MHI_EP_M1_STATE:
+ case MHI_EP_M2_STATE:
+ dev_err(&mhi_cntrl->mhi_dev->dev, "Not supported state, can't set MHISTATUS to %s\n",
+ mhi_sm_mstate_str(state));
+ return;
+ case MHI_EP_M0_STATE:
+ case MHI_EP_M3_STATE:
+ dev_dbg(&mhi_cntrl->mhi_dev->dev, "set MHISTATUS.MHISTATE to %s state\n",
+ mhi_sm_mstate_str(state));
+ mhi_ep_mmio_masked_write(mhi_cntrl, MHISTATUS,
+ MHISTATUS_MHISTATE_MASK,
+ MHISTATUS_MHISTATE_SHIFT, state);
+ break;
+ default:
+ dev_err(&mhi_cntrl->mhi_dev->dev, "Invalid mhi state: 0x%x state", state);
+ return;
+ }
+
+ sm->state = state;
+}
+
+/**
+ * mhi_sm_is_legal_event_on_state() - Determine if MHI state transition is valid
+ * @curr_state: current MHI state
+ * @event: MHI state change event
+ *
+ * Determine according to MHI state management if the state change event
+ * is valid on the current mhi state.
+ * Note: The decision doesn't take into account M1 and M2 states.
+ *
+ * Return: true: transition is valid
+ * false: transition is not valid
+ */
+static bool mhi_sm_is_legal_event_on_state(struct mhi_ep_sm *sm,
+ enum mhi_ep_state curr_state,
+ enum mhi_ep_event_type event)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = sm->mhi_cntrl;
+ bool res;
+
+ switch (event) {
+ case MHI_EP_EVENT_M0_STATE:
+ res = (sm->d_state == MHI_EP_PCIE_D0_STATE &&
+ curr_state != MHI_EP_RESET_STATE);
+ break;
+ case MHI_EP_EVENT_M3_STATE:
+ case MHI_EP_EVENT_HW_ACC_WAKEUP:
+ case MHI_EP_EVENT_CORE_WAKEUP:
+ res = (curr_state == MHI_EP_M3_STATE ||
+ curr_state == MHI_EP_M0_STATE);
+ break;
+ default:
+ dev_err(&mhi_cntrl->mhi_dev->dev, "Received invalid event: %s\n",
+ mhi_sm_dev_event_str(event));
+ res = false;
+ break;
+ }
+
+ return res;
+}
+
+/**
+ * mhi_sm_change_to_M0() - switch to M0 state.
+ *
+ * Switch MHI-device state to M0, if possible according to MHI state machine.
+ * Notify the MHI-host on the transition. If MHI is suspended, resume MHI.
+ *
+ * Return: 0: success
+ * negative: failure
+ */
+static int mhi_sm_change_to_M0(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct mhi_ep_sm *sm = mhi_cntrl->sm;
+ enum mhi_ep_state old_state;
+ int ret;
+
+ old_state = sm->state;
+
+ switch (old_state) {
+ case MHI_EP_M0_STATE:
+ dev_dbg(&mhi_cntrl->mhi_dev->dev, "Nothing to do, already in M0 state\n");
+ return 0;
+ case MHI_EP_M3_STATE:
+ case MHI_EP_READY_STATE:
+ break;
+ default:
+ dev_err(&mhi_cntrl->mhi_dev->dev, "unexpected old_state: %s\n",
+ mhi_sm_mstate_str(old_state));
+ return -EINVAL;
+ }
+
+ mhi_ep_sm_mmio_set_status(mhi_cntrl, MHI_EP_M0_STATE);
+
+	/* Tell the host that the device has moved to M0 */
+ if (old_state == MHI_EP_M3_STATE) {
+ /* TODO: Resume MHI */
+#if 0
+ res = mhi_ep_resume(sm);
+ if (res) {
+ dev_err(&mhi_cntrl->mhi_dev->dev, "Failed resuming mhi core, returned %d",
+ res);
+ goto exit;
+ }
+#endif
+ }
+
+ ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_EP_M0_STATE);
+ if (ret) {
+ dev_err(&mhi_cntrl->mhi_dev->dev, "Failed sending M0 state change event to host: %d\n", ret);
+ return ret;
+ }
+
+ if (old_state == MHI_EP_READY_STATE) {
+ /* Allow the host to process state change event */
+ mdelay(1);
+
+ /* Tell the host the EE */
+ ret = mhi_ep_send_ee_event(mhi_cntrl, 2);
+ if (ret) {
+ dev_err(&mhi_cntrl->mhi_dev->dev, "Failed sending EE event to host: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void mhi_ep_sm_handle_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ep_event_type event)
+{
+ struct mhi_ep_sm *sm = mhi_cntrl->sm;
+	int ret = 0;
+
+ mutex_lock(&sm->lock);
+ dev_dbg(&mhi_cntrl->mhi_dev->dev, "Start handling %s event, current states: %s & %s\n",
+ mhi_sm_dev_event_str(event),
+ mhi_sm_mstate_str(sm->state),
+ mhi_sm_dstate_str(sm->d_state));
+
+ /* TODO: Check for syserr before handling the event */
+
+ if (!mhi_sm_is_legal_event_on_state(sm, sm->state, event)) {
+ dev_err(&mhi_cntrl->mhi_dev->dev, "Event %s illegal in current MHI states: %s and %s\n",
+ mhi_sm_dev_event_str(event),
+ mhi_sm_mstate_str(sm->state),
+ mhi_sm_dstate_str(sm->d_state));
+ /* TODO: Transition to syserr */
+ goto unlock_and_exit;
+ }
+
+ switch (event) {
+ case MHI_EP_EVENT_M0_STATE:
+ ret = mhi_sm_change_to_M0(mhi_cntrl);
+ if (ret)
+ dev_err(&mhi_cntrl->mhi_dev->dev, "Failed switching to M0 state\n");
+ break;
+ case MHI_EP_EVENT_M3_STATE:
+// ret = mhi_sm_change_to_M3();
+ if (ret)
+ dev_err(&mhi_cntrl->mhi_dev->dev, "Failed switching to M3 state\n");
+// mhi_ep_pm_relax();
+ break;
+ case MHI_EP_EVENT_HW_ACC_WAKEUP:
+ case MHI_EP_EVENT_CORE_WAKEUP:
+// ret = mhi_sm_wakeup_host(event);
+ if (ret)
+ dev_err(&mhi_cntrl->mhi_dev->dev, "Failed to wakeup MHI host\n");
+ break;
+ case MHI_EP_EVENT_CTRL_TRIG:
+ case MHI_EP_EVENT_M1_STATE:
+ case MHI_EP_EVENT_M2_STATE:
+ dev_err(&mhi_cntrl->mhi_dev->dev, "Error: %s event is not supported\n",
+ mhi_sm_dev_event_str(event));
+ break;
+ default:
+ dev_err(&mhi_cntrl->mhi_dev->dev, "Error: Invalid event, 0x%x", event);
+ break;
+ }
+
+unlock_and_exit:
+ mutex_unlock(&sm->lock);
+}
+
+int mhi_ep_notify_sm_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ep_event_type event)
+{
+ int ret;
+
+ switch (event) {
+ case MHI_EP_EVENT_M0_STATE:
+// sm->stats.m0_event_cnt++;
+ break;
+ case MHI_EP_EVENT_M3_STATE:
+// sm->stats.m3_event_cnt++;
+ break;
+ case MHI_EP_EVENT_HW_ACC_WAKEUP:
+// sm->stats.hw_acc_wakeup_event_cnt++;
+ break;
+ case MHI_EP_EVENT_CORE_WAKEUP:
+// sm->stats.mhi_core_wakeup_event_cnt++;
+ break;
+ case MHI_EP_EVENT_CTRL_TRIG:
+ case MHI_EP_EVENT_M1_STATE:
+ case MHI_EP_EVENT_M2_STATE:
+ dev_err(&mhi_cntrl->mhi_dev->dev, "Received unsupported event: %s\n",
+ mhi_sm_dev_event_str(event));
+		ret = -ENOTSUPP;
+		goto exit;
+ default:
+ dev_err(&mhi_cntrl->mhi_dev->dev, "Received invalid event: %d\n", event);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+	/* TODO: This could be deferred to the SM workqueue */
+ mhi_ep_sm_handle_event(mhi_cntrl, event);
+
+ return 0;
+
+exit:
+ return ret;
+}
+EXPORT_SYMBOL(mhi_ep_notify_sm_event);
+
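+/*
+ * Move MHISTATUS to READY. The transition is only legal while the host has
+ * left MHISTATE in RESET and the READY bit clear, so both fields are checked
+ * before the status register is updated.
+ */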
+int mhi_ep_sm_set_ready(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct mhi_ep_sm *sm = mhi_cntrl->sm;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+	u32 state, is_ready;
+
+ mutex_lock(&sm->lock);
+
+ /* Ensure that the MHISTATUS is set to RESET by host */
+ mhi_ep_mmio_masked_read(mhi_cntrl, MHISTATUS, MHISTATUS_MHISTATE_MASK,
+ MHISTATUS_MHISTATE_SHIFT, &state);
+ mhi_ep_mmio_masked_read(mhi_cntrl, MHISTATUS, MHISTATUS_READY_MASK,
+ MHISTATUS_READY_SHIFT, &is_ready);
+
+ if (state != MHI_EP_RESET_STATE || is_ready) {
+ dev_err(dev, "READY transition failed. MHI host not in RESET state\n");
+ mutex_unlock(&sm->lock);
+ return -EFAULT;
+ }
+
+ mhi_ep_sm_mmio_set_status(mhi_cntrl, MHI_EP_READY_STATE);
+
+ mutex_unlock(&sm->lock);
+ return 0;
+}
+EXPORT_SYMBOL(mhi_ep_sm_set_ready);
+
+int mhi_ep_sm_init(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_ep_sm *sm;
+
+	sm = devm_kzalloc(dev, sizeof(*sm), GFP_KERNEL);
+ if (!sm)
+ return -ENOMEM;
+
+ sm->sm_wq = alloc_workqueue("mhi_ep_sm_wq", WQ_HIGHPRI | WQ_UNBOUND, 1);
+ if (!sm->sm_wq) {
+ dev_err(dev, "Failed to create SM workqueue\n");
+ return -ENOMEM;
+ }
+
+ sm->mhi_cntrl = mhi_cntrl;
+ sm->state = MHI_EP_RESET_STATE;
+ sm->d_state = MHI_EP_PCIE_D0_STATE;
+ mutex_init(&sm->lock);
+ mhi_cntrl->sm = sm;
+
+ return 0;
+}
+EXPORT_SYMBOL(mhi_ep_sm_init);
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 76c0a63a3f64..49bcbe18a575 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -180,6 +180,16 @@ config PCIE_QCOM
PCIe controller uses the DesignWare core plus Qualcomm-specific
hardware wrappers.
+config PCIE_QCOM_EP
+ tristate "Qualcomm PCIe controller - Endpoint mode"
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ depends on PCI_ENDPOINT
+ select PCIE_DW_EP
+ help
+ Say Y here to enable support for the PCIe controllers on Qualcomm SoCs
+ to work in endpoint mode. The PCIe controller uses the DesignWare core
+ plus Qualcomm-specific hardware wrappers.
+
config PCIE_ARMADA_8K
bool "Marvell Armada-8K PCIe controller"
depends on ARCH_MVEBU || COMPILE_TEST
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index 73244409792c..8ba7b67f5e50 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o
obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
obj-$(CONFIG_PCI_LAYERSCAPE_EP) += pci-layerscape-ep.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
+obj-$(CONFIG_PCIE_QCOM_EP) += pcie-qcom-ep.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
obj-$(CONFIG_PCIE_ROCKCHIP_DW_HOST) += pcie-dw-rockchip.o
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
new file mode 100644
index 000000000000..c50783c00a5f
--- /dev/null
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -0,0 +1,711 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm PCIe Endpoint controller driver
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Author: Siddartha Mohanadoss <smohanad@codeaurora.org
+ *
+ * Copyright (c) 2021, Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mfd/syscon.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include "pcie-designware.h"
+
+/* PARF registers */
+#define PARF_SYS_CTRL 0x00
+#define PARF_DB_CTRL 0x10
+#define PARF_PM_CTRL 0x20
+#define PARF_MHI_BASE_ADDR_LOWER 0x178
+#define PARF_MHI_BASE_ADDR_UPPER 0x17c
+#define PARF_DEBUG_INT_EN 0x190
+#define PARF_AXI_MSTR_RD_HALT_NO_WRITES 0x1a4
+#define PARF_AXI_MSTR_WR_ADDR_HALT 0x1a8
+#define PARF_Q2A_FLUSH 0x1ac
+#define PARF_LTSSM 0x1b0
+#define PARF_CFG_BITS 0x210
+#define PARF_INT_ALL_STATUS 0x224
+#define PARF_INT_ALL_CLEAR 0x228
+#define PARF_INT_ALL_MASK 0x22c
+#define PARF_SLV_ADDR_MSB_CTRL 0x2c0
+#define PARF_DBI_BASE_ADDR 0x350
+#define PARF_DBI_BASE_ADDR_HI 0x354
+#define PARF_SLV_ADDR_SPACE_SIZE 0x358
+#define PARF_SLV_ADDR_SPACE_SIZE_HI 0x35c
+#define PARF_ATU_BASE_ADDR 0x634
+#define PARF_ATU_BASE_ADDR_HI 0x638
+#define PARF_SRIS_MODE 0x644
+#define PARF_DEVICE_TYPE 0x1000
+#define PARF_BDF_TO_SID_CFG 0x2c00
+
+/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */
+#define PARF_INT_ALL_LINK_DOWN BIT(1)
+#define PARF_INT_ALL_BME BIT(2)
+#define PARF_INT_ALL_PM_TURNOFF BIT(3)
+#define PARF_INT_ALL_DEBUG BIT(4)
+#define PARF_INT_ALL_LTR BIT(5)
+#define PARF_INT_ALL_MHI_Q6 BIT(6)
+#define PARF_INT_ALL_MHI_A7 BIT(7)
+#define PARF_INT_ALL_DSTATE_CHANGE BIT(8)
+#define PARF_INT_ALL_L1SUB_TIMEOUT BIT(9)
+#define PARF_INT_ALL_MMIO_WRITE BIT(10)
+#define PARF_INT_ALL_CFG_WRITE BIT(11)
+#define PARF_INT_ALL_BRIDGE_FLUSH_N BIT(12)
+#define PARF_INT_ALL_LINK_UP BIT(13)
+#define PARF_INT_ALL_AER_LEGACY BIT(14)
+#define PARF_INT_ALL_PLS_ERR BIT(15)
+#define PARF_INT_ALL_PME_LEGACY BIT(16)
+#define PARF_INT_ALL_PLS_PME BIT(17)
+
+/* PARF_BDF_TO_SID_CFG register fields */
+#define PARF_BDF_TO_SID_BYPASS BIT(0)
+
+/* PARF_DEBUG_INT_EN register fields */
+#define PARF_DEBUG_INT_PM_DSTATE_CHANGE BIT(1)
+#define PARF_DEBUG_INT_CFG_BUS_MASTER_EN BIT(2)
+#define PARF_DEBUG_INT_RADM_PM_TURNOFF BIT(3)
+
+/* PARF_DEVICE_TYPE register fields */
+#define PARF_DEVICE_TYPE_EP 0x0
+
+/* PARF_PM_CTRL register fields */
+#define PARF_PM_CTRL_REQ_EXIT_L1 BIT(1)
+#define PARF_PM_CTRL_READY_ENTR_L23 BIT(2)
+#define PARF_PM_CTRL_REQ_NOT_ENTR_L1 BIT(5)
+
+/* PARF_AXI_MSTR_RD_HALT_NO_WRITES register fields */
+#define PARF_AXI_MSTR_RD_HALT_NO_WRITE_EN BIT(0)
+
+/* PARF_AXI_MSTR_WR_ADDR_HALT register fields */
+#define PARF_AXI_MSTR_WR_ADDR_HALT_EN BIT(31)
+
+/* PARF_Q2A_FLUSH register fields */
+#define PARF_Q2A_FLUSH_EN BIT(16)
+
+/* PARF_SYS_CTRL register fields */
+#define PARF_SYS_CTRL_AUX_PWR_DET BIT(4)
+#define PARF_SYS_CTRL_CORE_CLK_CGC_DIS BIT(6)
+#define PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE BIT(11)
+
+/* PARF_DB_CTRL register fields */
+#define PARF_DB_CTRL_INSR_DBNCR_BLOCK BIT(0)
+#define PARF_DB_CTRL_RMVL_DBNCR_BLOCK BIT(1)
+#define PARF_DB_CTRL_DBI_WKP_BLOCK BIT(4)
+#define PARF_DB_CTRL_SLV_WKP_BLOCK BIT(5)
+#define PARF_DB_CTRL_MST_WKP_BLOCK BIT(6)
+
+/* PARF_CFG_BITS register fields */
+#define PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN BIT(1)
+
+/* ELBI registers */
+#define ELBI_SYS_STTS 0x08
+
+/* DBI registers */
+#define DBI_CON_STATUS 0x44
+
+/* DBI register fields */
+#define DBI_CON_STATUS_POWER_STATE_MASK GENMASK(1, 0)
+
+#define XMLH_LINK_UP 0x400
+#define CORE_RESET_TIME_US_MIN 1000
+#define CORE_RESET_TIME_US_MAX 1005
+#define WAKE_DELAY_US 2000 /* 2 ms */
+
+#define to_pcie_ep(x) dev_get_drvdata((x)->dev)
+
+enum qcom_pcie_ep_link_status {
+ QCOM_PCIE_EP_LINK_DISABLED,
+ QCOM_PCIE_EP_LINK_ENABLED,
+ QCOM_PCIE_EP_LINK_UP,
+ QCOM_PCIE_EP_LINK_DOWN,
+};
+
+static struct clk_bulk_data qcom_pcie_ep_clks[] = {
+ { .id = "cfg" },
+ { .id = "aux" },
+ { .id = "bus_master" },
+ { .id = "bus_slave" },
+ { .id = "ref" },
+ { .id = "sleep" },
+ { .id = "slave_q2a" },
+};
+
+struct qcom_pcie_ep {
+ struct dw_pcie pci;
+
+ void __iomem *parf;
+ void __iomem *elbi;
+ struct regmap *perst_map;
+ struct resource *mmio_res;
+
+ struct reset_control *core_reset;
+ struct gpio_desc *reset;
+ struct gpio_desc *wake;
+ struct phy *phy;
+
+ u32 perst_en;
+ u32 perst_sep_en;
+
+ enum qcom_pcie_ep_link_status link_status;
+ int global_irq;
+ int perst_irq;
+};
+
+static int qcom_pcie_ep_core_reset(struct qcom_pcie_ep *pcie_ep)
+{
+ struct dw_pcie *pci = &pcie_ep->pci;
+ struct device *dev = pci->dev;
+ int ret;
+
+ ret = reset_control_assert(pcie_ep->core_reset);
+ if (ret) {
+ dev_err(dev, "Cannot assert core reset\n");
+ return ret;
+ }
+
+ usleep_range(CORE_RESET_TIME_US_MIN, CORE_RESET_TIME_US_MAX);
+
+ ret = reset_control_deassert(pcie_ep->core_reset);
+ if (ret) {
+ dev_err(dev, "Cannot de-assert core reset\n");
+ return ret;
+ }
+
+ usleep_range(CORE_RESET_TIME_US_MIN, CORE_RESET_TIME_US_MAX);
+
+ return 0;
+}
+
+/*
+ * Delatch PERST_EN and PERST_SEPARATION_ENABLE with TCSR to avoid
+ * device reset during host reboot and hibernation. The driver is
+ * expected to handle this situation.
+ */
+static void qcom_pcie_ep_configure_tcsr(struct qcom_pcie_ep *pcie_ep)
+{
+ regmap_write(pcie_ep->perst_map, pcie_ep->perst_en, 0);
+ regmap_write(pcie_ep->perst_map, pcie_ep->perst_sep_en, 0);
+}
+
+static int qcom_pcie_dw_link_up(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+ u32 reg;
+
+ reg = readl_relaxed(pcie_ep->elbi + ELBI_SYS_STTS);
+
+ return reg & XMLH_LINK_UP;
+}
+
+static int qcom_pcie_dw_start_link(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+
+ enable_irq(pcie_ep->perst_irq);
+
+ return 0;
+}
+
+static void qcom_pcie_dw_stop_link(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+
+ disable_irq(pcie_ep->perst_irq);
+}
+
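+/*
+ * PERST# deassert handler: bring up the clocks, core reset and PHY, then
+ * program the PARF wrapper (endpoint mode, L1 entry, halt/debounce settings
+ * and interrupt masks), complete the DWC endpoint init, expose the MMIO
+ * region through the MHI BASE registers and finally enable the LTSSM so
+ * link training can start.
+ */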
+static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+ struct device *dev = pci->dev;
+ u32 val, offset;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(qcom_pcie_ep_clks), qcom_pcie_ep_clks);
+ if (ret)
+ return ret;
+
+ ret = qcom_pcie_ep_core_reset(pcie_ep);
+ if (ret)
+ goto err_disable_clk;
+
+ ret = phy_init(pcie_ep->phy);
+ if (ret)
+ goto err_disable_clk;
+
+ ret = phy_power_on(pcie_ep->phy);
+ if (ret)
+ goto err_phy_exit;
+
+ /* Assert WAKE# to RC to indicate device is ready */
+ gpiod_set_value_cansleep(pcie_ep->wake, 1);
+ usleep_range(WAKE_DELAY_US, WAKE_DELAY_US + 500);
+ gpiod_set_value_cansleep(pcie_ep->wake, 0);
+
+ qcom_pcie_ep_configure_tcsr(pcie_ep);
+
+ /* Disable BDF to SID mapping */
+ val = readl_relaxed(pcie_ep->parf + PARF_BDF_TO_SID_CFG);
+ val |= PARF_BDF_TO_SID_BYPASS;
+ writel_relaxed(val, pcie_ep->parf + PARF_BDF_TO_SID_CFG);
+
+ /* Enable debug IRQ */
+ val = readl_relaxed(pcie_ep->parf + PARF_DEBUG_INT_EN);
+ val |= PARF_DEBUG_INT_RADM_PM_TURNOFF | PARF_DEBUG_INT_CFG_BUS_MASTER_EN |
+ PARF_DEBUG_INT_PM_DSTATE_CHANGE;
+ writel_relaxed(val, pcie_ep->parf + PARF_DEBUG_INT_EN);
+
+ /* Configure PCIe to endpoint mode */
+ writel_relaxed(PARF_DEVICE_TYPE_EP, pcie_ep->parf + PARF_DEVICE_TYPE);
+
+ /* Allow entering L1 state */
+ val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
+ val &= ~PARF_PM_CTRL_REQ_NOT_ENTR_L1;
+ writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL);
+
+ /* Read halts write */
+ val = readl_relaxed(pcie_ep->parf + PARF_AXI_MSTR_RD_HALT_NO_WRITES);
+ val &= ~PARF_AXI_MSTR_RD_HALT_NO_WRITE_EN;
+ writel_relaxed(val, pcie_ep->parf + PARF_AXI_MSTR_RD_HALT_NO_WRITES);
+
+ /* Write after write halt */
+ val = readl_relaxed(pcie_ep->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
+ val |= PARF_AXI_MSTR_WR_ADDR_HALT_EN;
+ writel_relaxed(val, pcie_ep->parf + PARF_AXI_MSTR_WR_ADDR_HALT);
+
+ /* Q2A flush disable */
+ val = readl_relaxed(pcie_ep->parf + PARF_Q2A_FLUSH);
+ val &= ~PARF_Q2A_FLUSH_EN;
+ writel_relaxed(val, pcie_ep->parf + PARF_Q2A_FLUSH);
+
+ /* Disable DBI Wakeup, core clock CGC and enable AUX power */
+ val = readl_relaxed(pcie_ep->parf + PARF_SYS_CTRL);
+ val |= PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE | PARF_SYS_CTRL_CORE_CLK_CGC_DIS |
+ PARF_SYS_CTRL_AUX_PWR_DET;
+ writel_relaxed(val, pcie_ep->parf + PARF_SYS_CTRL);
+
+ /* Disable the debouncers */
+ val = readl_relaxed(pcie_ep->parf + PARF_DB_CTRL);
+ val |= PARF_DB_CTRL_INSR_DBNCR_BLOCK | PARF_DB_CTRL_RMVL_DBNCR_BLOCK |
+ PARF_DB_CTRL_DBI_WKP_BLOCK | PARF_DB_CTRL_SLV_WKP_BLOCK |
+ PARF_DB_CTRL_MST_WKP_BLOCK;
+ writel_relaxed(val, pcie_ep->parf + PARF_DB_CTRL);
+
+ /* Request to exit from L1SS for MSI and LTR MSG */
+ val = readl_relaxed(pcie_ep->parf + PARF_CFG_BITS);
+ val |= PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN;
+ writel_relaxed(val, pcie_ep->parf + PARF_CFG_BITS);
+
+ dw_pcie_dbi_ro_wr_en(pci);
+
+ /* Set the L0s Exit Latency to 2us-4us = 0x6 */
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_L0SEL;
+ val |= FIELD_PREP(PCI_EXP_LNKCAP_L0SEL, 0x6);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, val);
+
+	/* Set the L1 Exit Latency to 32us-64us = 0x6 */
+ offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP);
+ val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP);
+ val &= ~PCI_EXP_LNKCAP_L1EL;
+ val |= FIELD_PREP(PCI_EXP_LNKCAP_L1EL, 0x6);
+ dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, val);
+
+ dw_pcie_dbi_ro_wr_dis(pci);
+
+ writel_relaxed(0, pcie_ep->parf + PARF_INT_ALL_MASK);
+ val = PARF_INT_ALL_LINK_DOWN | PARF_INT_ALL_BME | PARF_INT_ALL_PM_TURNOFF |
+ PARF_INT_ALL_DSTATE_CHANGE | PARF_INT_ALL_LINK_UP;
+ writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_MASK);
+
+ ret = dw_pcie_ep_init_complete(&pcie_ep->pci.ep);
+ if (ret) {
+ dev_err(dev, "Failed to complete initialization: %d\n", ret);
+ goto err_phy_power_off;
+ }
+
+ /*
+ * The physical address of the MMIO region which is exposed as the BAR
+ * should be written to MHI BASE registers.
+ */
+ writel_relaxed(pcie_ep->mmio_res->start, pcie_ep->parf + PARF_MHI_BASE_ADDR_LOWER);
+ writel_relaxed(0, pcie_ep->parf + PARF_MHI_BASE_ADDR_UPPER);
+
+ dw_pcie_ep_init_notify(&pcie_ep->pci.ep);
+
+ /* Enable LTSSM */
+ val = readl_relaxed(pcie_ep->parf + PARF_LTSSM);
+ val |= BIT(8);
+ writel_relaxed(val, pcie_ep->parf + PARF_LTSSM);
+
+ return 0;
+
+err_phy_power_off:
+ phy_power_off(pcie_ep->phy);
+err_phy_exit:
+ phy_exit(pcie_ep->phy);
+err_disable_clk:
+ clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks), qcom_pcie_ep_clks);
+
+ return ret;
+}
+
+static void qcom_pcie_perst_assert(struct dw_pcie *pci)
+{
+ struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci);
+ struct device *dev = pci->dev;
+
+ if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED) {
+ dev_dbg(dev, "Link is already disabled\n");
+ return;
+ }
+
+ phy_power_off(pcie_ep->phy);
+ phy_exit(pcie_ep->phy);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks), qcom_pcie_ep_clks);
+ pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED;
+}
+
+/* Common DWC controller ops */
+static const struct dw_pcie_ops pci_ops = {
+ .link_up = qcom_pcie_dw_link_up,
+ .start_link = qcom_pcie_dw_start_link,
+ .stop_link = qcom_pcie_dw_stop_link,
+};
+
+static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev,
+ struct qcom_pcie_ep *pcie_ep)
+{
+ struct device *dev = &pdev->dev;
+ struct dw_pcie *pci = &pcie_ep->pci;
+ struct device_node *syscon;
+ struct resource *res;
+ int ret;
+
+ pcie_ep->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
+ if (IS_ERR(pcie_ep->parf))
+ return PTR_ERR(pcie_ep->parf);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+ pci->dbi_base2 = pci->dbi_base;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
+ pcie_ep->elbi = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pcie_ep->elbi))
+ return PTR_ERR(pcie_ep->elbi);
+
+ pcie_ep->mmio_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmio");
+
+ syscon = of_parse_phandle(dev->of_node, "qcom,perst-regs", 0);
+ if (!syscon) {
+ dev_err(dev, "Failed to parse qcom,perst-regs\n");
+ return -EINVAL;
+ }
+
+ pcie_ep->perst_map = syscon_node_to_regmap(syscon);
+ of_node_put(syscon);
+ if (IS_ERR(pcie_ep->perst_map))
+ return PTR_ERR(pcie_ep->perst_map);
+
+ ret = of_property_read_u32_index(dev->of_node, "qcom,perst-regs",
+ 1, &pcie_ep->perst_en);
+ if (ret < 0) {
+ dev_err(dev, "No Perst Enable offset in syscon\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32_index(dev->of_node, "qcom,perst-regs",
+ 2, &pcie_ep->perst_sep_en);
+ if (ret < 0) {
+ dev_err(dev, "No Perst Separation Enable offset in syscon\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_pcie_ep_get_resources(struct platform_device *pdev,
+ struct qcom_pcie_ep *pcie_ep)
+{
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = qcom_pcie_ep_get_io_resources(pdev, pcie_ep);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to get io resources %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(qcom_pcie_ep_clks),
+ qcom_pcie_ep_clks);
+ if (ret)
+ return ret;
+
+ pcie_ep->core_reset = devm_reset_control_get_exclusive(dev, "core");
+ if (IS_ERR(pcie_ep->core_reset))
+ return PTR_ERR(pcie_ep->core_reset);
+
+ pcie_ep->reset = devm_gpiod_get(dev, "reset", GPIOD_IN);
+ if (IS_ERR(pcie_ep->reset))
+ return PTR_ERR(pcie_ep->reset);
+
+ pcie_ep->wake = devm_gpiod_get_optional(dev, "wake", GPIOD_OUT_LOW);
+ if (IS_ERR(pcie_ep->wake))
+ return PTR_ERR(pcie_ep->wake);
+
+ pcie_ep->phy = devm_phy_optional_get(&pdev->dev, "pciephy");
+ if (IS_ERR(pcie_ep->phy))
+ ret = PTR_ERR(pcie_ep->phy);
+
+ return ret;
+}
+
+/* TODO: Notify clients about PCIe state change */
+static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data)
+{
+ struct qcom_pcie_ep *pcie_ep = data;
+ struct dw_pcie *pci = &pcie_ep->pci;
+ struct device *dev = pci->dev;
+ u32 status = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_STATUS);
+ u32 mask = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_MASK);
+ u32 dstate, val;
+
+ writel_relaxed(status, pcie_ep->parf + PARF_INT_ALL_CLEAR);
+ status &= mask;
+
+ if (FIELD_GET(PARF_INT_ALL_LINK_DOWN, status)) {
+ dev_dbg(dev, "Received Linkdown event\n");
+ pcie_ep->link_status = QCOM_PCIE_EP_LINK_DOWN;
+ } else if (FIELD_GET(PARF_INT_ALL_BME, status)) {
+ dev_dbg(dev, "Received BME event. Link is enabled!\n");
+ pcie_ep->link_status = QCOM_PCIE_EP_LINK_ENABLED;
+ pci_epc_bme_notify(pci->ep.epc);
+ } else if (FIELD_GET(PARF_INT_ALL_PM_TURNOFF, status)) {
+ dev_dbg(dev, "Received PM Turn-off event! Entering L23\n");
+ val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
+ val |= PARF_PM_CTRL_READY_ENTR_L23;
+ writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL);
+ } else if (FIELD_GET(PARF_INT_ALL_DSTATE_CHANGE, status)) {
+ dstate = dw_pcie_readl_dbi(pci, DBI_CON_STATUS) &
+ DBI_CON_STATUS_POWER_STATE_MASK;
+ dev_dbg(dev, "Received D%d state event\n", dstate);
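+ /* Host moved the device to D3; request exit from L1 */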
+ if (dstate == 3) {
+ val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL);
+ val |= PARF_PM_CTRL_REQ_EXIT_L1;
+ writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL);
+ }
+ } else if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) {
+ dev_dbg(dev, "Received Linkup event. Enumeration complete!\n");
+ dw_pcie_ep_linkup(&pci->ep);
+ pcie_ep->link_status = QCOM_PCIE_EP_LINK_UP;
+ } else {
+ dev_dbg(dev, "Received unknown event: %d\n", status);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qcom_pcie_ep_perst_irq_thread(int irq, void *data)
+{
+ struct qcom_pcie_ep *pcie_ep = data;
+ struct dw_pcie *pci = &pcie_ep->pci;
+ struct device *dev = pci->dev;
+ u32 perst;
+
+ perst = gpiod_get_value(pcie_ep->reset);
+ if (perst) {
+ dev_dbg(dev, "PERST asserted by host. Shutting down the PCIe link!\n");
+ qcom_pcie_perst_assert(pci);
+ } else {
+ dev_dbg(dev, "PERST de-asserted by host. Starting link training!\n");
+ qcom_pcie_perst_deassert(pci);
+ }
+
+ irq_set_irq_type(gpiod_to_irq(pcie_ep->reset),
+ (perst ? IRQF_TRIGGER_HIGH : IRQF_TRIGGER_LOW));
+
+ return IRQ_HANDLED;
+}
+
+static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
+ struct qcom_pcie_ep *pcie_ep)
+{
+ int irq, ret;
+
+ irq = platform_get_irq_byname(pdev, "global");
+ if (irq < 0) {
+ dev_err(&pdev->dev, "Failed to get Global IRQ\n");
+ return irq;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+ qcom_pcie_ep_global_irq_thread,
+ IRQF_ONESHOT,
+ "global_irq", pcie_ep);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request Global IRQ\n");
+ return ret;
+ }
+
+ pcie_ep->perst_irq = gpiod_to_irq(pcie_ep->reset);
+ irq_set_status_flags(pcie_ep->perst_irq, IRQ_NOAUTOEN);
+ ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->perst_irq, NULL,
+ qcom_pcie_ep_perst_irq_thread,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "perst_irq", pcie_ep);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request PERST IRQ\n");
+ disable_irq(irq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int qcom_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
+ enum pci_epc_irq_type type, u16 interrupt_num)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+
+ switch (type) {
+ case PCI_EPC_IRQ_LEGACY:
+ return dw_pcie_ep_raise_legacy_irq(ep, func_no);
+ case PCI_EPC_IRQ_MSI:
+ return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
+ default:
+ dev_err(pci->dev, "Unknown IRQ type\n");
+ return -EINVAL;
+ }
+}
+
+static const struct pci_epc_features qcom_pcie_epc_features = {
+ .linkup_notifier = true,
+ .core_init_notifier = true,
+ .msi_capable = true,
+ .msix_capable = false,
+};
+
+static const struct pci_epc_features *
+qcom_pcie_epc_get_features(struct dw_pcie_ep *pci_ep)
+{
+ return &qcom_pcie_epc_features;
+}
+
+static void qcom_pcie_ep_init(struct dw_pcie_ep *ep)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ enum pci_barno bar;
+
+ for (bar = BAR_0; bar <= BAR_5; bar++)
+ dw_pcie_ep_reset_bar(pci, bar);
+}
+
+static struct dw_pcie_ep_ops pci_ep_ops = {
+ .ep_init = qcom_pcie_ep_init,
+ .raise_irq = qcom_pcie_ep_raise_irq,
+ .get_features = qcom_pcie_epc_get_features,
+};
+
+static int qcom_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct qcom_pcie_ep *pcie_ep;
+ int ret;
+
+ pcie_ep = devm_kzalloc(dev, sizeof(*pcie_ep), GFP_KERNEL);
+ if (!pcie_ep)
+ return -ENOMEM;
+
+ pcie_ep->pci.dev = dev;
+ pcie_ep->pci.ops = &pci_ops;
+ pcie_ep->pci.ep.ops = &pci_ep_ops;
+ platform_set_drvdata(pdev, pcie_ep);
+
+ ret = qcom_pcie_ep_get_resources(pdev, pcie_ep);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(qcom_pcie_ep_clks), qcom_pcie_ep_clks);
+ if (ret)
+ return ret;
+
+ ret = qcom_pcie_ep_core_reset(pcie_ep);
+ if (ret)
+ goto err_disable_clk;
+
+ ret = phy_init(pcie_ep->phy);
+ if (ret)
+ goto err_disable_clk;
+
+ /* PHY needs to be powered on for dw_pcie_ep_init() */
+ ret = phy_power_on(pcie_ep->phy);
+ if (ret)
+ goto err_phy_exit;
+
+ ret = dw_pcie_ep_init(&pcie_ep->pci.ep);
+ if (ret) {
+ dev_err(dev, "Failed to initialize endpoint: %d\n", ret);
+ goto err_phy_power_off;
+ }
+
+ ret = qcom_pcie_ep_enable_irq_resources(pdev, pcie_ep);
+ if (ret)
+ goto err_phy_power_off;
+
+ return 0;
+
+err_phy_power_off:
+ phy_power_off(pcie_ep->phy);
+err_phy_exit:
+ phy_exit(pcie_ep->phy);
+err_disable_clk:
+ clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks), qcom_pcie_ep_clks);
+
+ return ret;
+}
+
+static int qcom_pcie_ep_remove(struct platform_device *pdev)
+{
+ struct qcom_pcie_ep *pcie_ep = platform_get_drvdata(pdev);
+
+ if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED)
+ return 0;
+
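+ /* Forward only when the source and destination node IDs differ by more than one */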
+ phy_power_off(pcie_ep->phy);
+ phy_exit(pcie_ep->phy);
+ clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks), qcom_pcie_ep_clks);
+
+ return 0;
+}
+
+static const struct of_device_id qcom_pcie_ep_match[] = {
+ { .compatible = "qcom,sdx55-pcie-ep", },
+ { }
+};
+
+static struct platform_driver qcom_pcie_ep_driver = {
+ .probe = qcom_pcie_ep_probe,
+ .remove = qcom_pcie_ep_remove,
+ .driver = {
+ .name = "qcom-pcie-ep",
+ .of_match_table = qcom_pcie_ep_match,
+ },
+};
+builtin_platform_driver(qcom_pcie_ep_driver);
+
+MODULE_AUTHOR("Siddartha Mohanadoss <smohanad@codeaurora.org>");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm PCIe Endpoint controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig
index 5f1242ca2f4e..edc17505e883 100644
--- a/drivers/pci/endpoint/functions/Kconfig
+++ b/drivers/pci/endpoint/functions/Kconfig
@@ -25,3 +25,12 @@ config PCI_EPF_NTB
device tree.
If in doubt, say "N" to disable Endpoint NTB driver.
+
+config PCI_EPF_MHI
+ tristate "PCI Endpoint function driver for MHI bus"
+ depends on PCI_ENDPOINT && MHI_BUS_EP
+ help
+ Enable this configuration option to enable the PCI Endpoint
+ function driver for the MHI (Modem Host Interface) device.
+
+ If in doubt, say "N" to disable Endpoint driver for MHI device.
diff --git a/drivers/pci/endpoint/functions/Makefile b/drivers/pci/endpoint/functions/Makefile
index 96ab932a537a..eee99b2e9103 100644
--- a/drivers/pci/endpoint/functions/Makefile
+++ b/drivers/pci/endpoint/functions/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_PCI_EPF_TEST) += pci-epf-test.o
obj-$(CONFIG_PCI_EPF_NTB) += pci-epf-ntb.o
+obj-$(CONFIG_PCI_EPF_MHI) += pci-epf-mhi.o
diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c
new file mode 100644
index 000000000000..cd63507bb0ff
--- /dev/null
+++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI EPF driver for MHI Endpoint device
+ *
+ * Copyright (C) 2021 Linaro Ltd.
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+ */
+
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci_ids.h>
+#include <linux/random.h>
+#include <linux/of_address.h>
+#include <linux/pci_regs.h>
+#include <linux/platform_device.h>
+
+#include <linux/mhi_ep.h>
+#include <linux/pci-epc.h>
+#include <linux/pci-epf.h>
+
+#define MHI_VERSION_1_0 0x01000000
+
+struct pci_epf_mhi_ep_info {
+ const struct mhi_ep_cntrl_config *config;
+ struct pci_epf_header *epf_header;
+ enum pci_barno bar_num;
+ u32 epf_flags;
+ u32 msi_count;
+};
+
+#define MHI_EP_CHANNEL_CONFIG_UL(ch_num, ch_name) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .dir = DMA_TO_DEVICE, \
+ }
+
+#define MHI_EP_CHANNEL_CONFIG_DL(ch_num, ch_name) \
+ { \
+ .num = ch_num, \
+ .name = ch_name, \
+ .dir = DMA_FROM_DEVICE, \
+ }
+
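+/* From the endpoint's view, UL channels (DMA_TO_DEVICE) carry host-to-device
+ * traffic and DL channels (DMA_FROM_DEVICE) carry device-to-host traffic.
+ */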
+static const struct mhi_ep_channel_config mhi_v1_channels[] = {
+ MHI_EP_CHANNEL_CONFIG_UL(0, "LOOPBACK"),
+ MHI_EP_CHANNEL_CONFIG_DL(1, "LOOPBACK"),
+ MHI_EP_CHANNEL_CONFIG_UL(2, "SAHARA"),
+ MHI_EP_CHANNEL_CONFIG_DL(3, "SAHARA"),
+ MHI_EP_CHANNEL_CONFIG_UL(4, "DIAG"),
+ MHI_EP_CHANNEL_CONFIG_DL(5, "DIAG"),
+ MHI_EP_CHANNEL_CONFIG_UL(6, "SSR"),
+ MHI_EP_CHANNEL_CONFIG_DL(7, "SSR"),
+ MHI_EP_CHANNEL_CONFIG_UL(8, "QDSS"),
+ MHI_EP_CHANNEL_CONFIG_DL(9, "QDSS"),
+ MHI_EP_CHANNEL_CONFIG_UL(10, "EFS"),
+ MHI_EP_CHANNEL_CONFIG_DL(11, "EFS"),
+ MHI_EP_CHANNEL_CONFIG_UL(12, "MBIM"),
+ MHI_EP_CHANNEL_CONFIG_DL(13, "MBIM"),
+ MHI_EP_CHANNEL_CONFIG_UL(14, "QMI"),
+ MHI_EP_CHANNEL_CONFIG_DL(15, "QMI"),
+ MHI_EP_CHANNEL_CONFIG_UL(16, "QMI"),
+ MHI_EP_CHANNEL_CONFIG_DL(17, "QMI"),
+ MHI_EP_CHANNEL_CONFIG_UL(18, "IP-CTRL-1"),
+ MHI_EP_CHANNEL_CONFIG_DL(19, "IP-CTRL-1"),
+ MHI_EP_CHANNEL_CONFIG_UL(20, "IPCR"),
+ MHI_EP_CHANNEL_CONFIG_DL(21, "IPCR"),
+ MHI_EP_CHANNEL_CONFIG_UL(32, "DUN"),
+ MHI_EP_CHANNEL_CONFIG_DL(33, "DUN"),
+ MHI_EP_CHANNEL_CONFIG_UL(36, "IP_SW0"),
+ MHI_EP_CHANNEL_CONFIG_DL(37, "IP_SW0"),
+};
+
+static const struct mhi_ep_cntrl_config mhi_v1_config = {
+ .max_channels = 128,
+ .num_channels = ARRAY_SIZE(mhi_v1_channels),
+ .ch_cfg = mhi_v1_channels,
+ .mhi_version = MHI_VERSION_1_0,
+};
+
+static struct pci_epf_header sdx55_header = {
+ .vendorid = 0x17cb,
+ .deviceid = 0x0306,
+ .revid = 0x0,
+ .progif_code = 0x0,
+ .subclass_code = 0x0,
+ .baseclass_code = 0xff,
+ .cache_line_size = 0x10,
+ .subsys_vendor_id = 0x0,
+ .subsys_id = 0x0,
+};
+
+static const struct pci_epf_mhi_ep_info sdx55_info = {
+ .config = &mhi_v1_config,
+ .epf_header = &sdx55_header,
+ .bar_num = BAR_0,
+ .epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
+ .msi_count = 4,
+};
+
+struct pci_epf_mhi {
+ struct mhi_ep_cntrl mhi_cntrl;
+ struct pci_epf *epf;
+ const struct pci_epf_mhi_ep_info *info;
+ void __iomem *mmio;
+ resource_size_t mmio_phys;
+ u32 mmio_size;
+ int irq;
+};
+
+static void __iomem *pci_epf_mhi_alloc_addr(struct mhi_ep_cntrl *mhi_cntrl,
+ phys_addr_t *phys_addr, size_t size)
+{
+ struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl);
+ struct pci_epc *epc = epf_mhi->epf->epc;
+
+ return pci_epc_mem_alloc_addr(epc, phys_addr, size);
+}
+
+static void pci_epf_mhi_free_addr(struct mhi_ep_cntrl *mhi_cntrl,
+ phys_addr_t phys_addr, void __iomem *virt_addr, size_t size)
+{
+ struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl);
+ struct pci_epc *epc = epf_mhi->epf->epc;
+
+ pci_epc_mem_free_addr(epc, phys_addr, virt_addr, size);
+}
+
+static int pci_epf_mhi_map_addr(struct mhi_ep_cntrl *mhi_cntrl,
+ phys_addr_t phys_addr, u64 pci_addr, size_t size)
+{
+ struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl);
+ struct pci_epf *epf = epf_mhi->epf;
+ struct pci_epc *epc = epf->epc;
+
+ return pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr, pci_addr, size);
+}
+
+static void pci_epf_mhi_unmap_addr(struct mhi_ep_cntrl *mhi_cntrl, phys_addr_t phys_addr)
+{
+ struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl);
+ struct pci_epf *epf = epf_mhi->epf;
+ struct pci_epc *epc = epf->epc;
+
+ pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr);
+}
+
+static void pci_epf_mhi_raise_irq(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl);
+ struct pci_epf *epf = epf_mhi->epf;
+ struct pci_epc *epc = epf->epc;
+
+ pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, PCI_EPC_IRQ_MSI, 1);
+}
+
+static int pci_epf_mhi_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct pci_epf *epf = container_of(nb, struct pci_epf, nb);
+ struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+ struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
+ const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
+ struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num];
+ struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
+ int ret;
+ u32 dstate;
+
+ switch (val) {
+ case CORE_INIT:
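+ /*
+ * Program the config space header, expose the MMIO region through
+ * the BAR, configure MSI and then register the MHI EP controller.
+ */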
+ ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, info->epf_header);
+ if (ret) {
+ dev_err(dev, "Configuration header write failed: %d\n", ret);
+ return NOTIFY_BAD;
+ }
+
+ epf_bar->phys_addr = epf_mhi->mmio_phys;
+ epf_bar->size = epf_mhi->mmio_size;
+ epf_bar->barno = info->bar_num;
+ epf_bar->flags = info->epf_flags;
+ ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
+ if (ret) {
+ dev_err(dev, "Failed to set BAR0: %d\n", ret);
+ return NOTIFY_BAD;
+ }
+
+ ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no, order_base_2(info->msi_count));
+ if (ret) {
+ dev_err(dev, "MSI configuration failed: %d\n", ret);
+ return NOTIFY_BAD;
+ }
+
+ mhi_cntrl->mmio = epf_mhi->mmio;
+ mhi_cntrl->irq = epf_mhi->irq;
+
+ /* Assign the struct dev of PCI EP as MHI controller device */
+ mhi_cntrl->cntrl_dev = epc->dev.parent;
+ mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq;
+ mhi_cntrl->alloc_addr = pci_epf_mhi_alloc_addr;
+ mhi_cntrl->free_addr = pci_epf_mhi_free_addr;
+ mhi_cntrl->map_addr = pci_epf_mhi_map_addr;
+ mhi_cntrl->unmap_addr = pci_epf_mhi_unmap_addr;
+
+ ret = mhi_ep_register_controller(mhi_cntrl, info->config);
+ if (ret) {
+ dev_err(dev, "Failed to register MHI EP controller\n");
+ return NOTIFY_BAD;
+ }
+
+ break;
+ case LINK_UP:
+ break;
+ case BME:
+ mhi_ep_power_up(mhi_cntrl);
+ break;
+ case D_STATE:
+ dstate = (u32)(uintptr_t)data;
+
+ if (dstate == 0)
+ dev_info(dev, "Received D0 event\n");
+ break;
+ default:
+ dev_err(dev, "Invalid MHI device notifier event: %lu\n", val);
+ return NOTIFY_BAD;
+ }
+
+ return NOTIFY_OK;
+}
+
+static int pci_epf_mhi_bind(struct pci_epf *epf)
+{
+ struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+ struct pci_epc *epc = epf->epc;
+ struct platform_device *pdev;
+ struct device *dev = &epf->dev;
+ struct resource *res;
+ int ret;
+
+ if (WARN_ON_ONCE(!epc))
+ return -EINVAL;
+
+ pdev = to_platform_device(epc->dev.parent);
+
+ /* Get MMIO physical and virtual address from controller device */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmio");
+ if (!res)
+ return -ENODEV;
+
+ epf_mhi->mmio_phys = res->start;
+ epf_mhi->mmio_size = resource_size(res);
+
+ epf_mhi->mmio = devm_ioremap_wc(dev, epf_mhi->mmio_phys, epf_mhi->mmio_size);
+ if (!epf_mhi->mmio)
+ return -ENOMEM;
+
+ ret = platform_get_irq_byname(pdev, "doorbell");
+ if (ret < 0) {
+ dev_err(dev, "Failed to get Doorbell IRQ\n");
+ return ret;
+ }
+
+ epf_mhi->irq = ret;
+ epf->nb.notifier_call = pci_epf_mhi_notifier;
+ pci_epc_register_notifier(epc, &epf->nb);
+
+ return 0;
+}
+
+static void pci_epf_mhi_unbind(struct pci_epf *epf)
+{
+ struct pci_epc *epc = epf->epc;
+ struct pci_epf_bar *epf_bar = &epf->bar[0];
+
+ pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
+}
+
+static int pci_epf_mhi_probe(struct pci_epf *epf)
+{
+ const struct pci_epf_mhi_ep_info *info =
+ (const struct pci_epf_mhi_ep_info *)epf->driver->id_table->driver_data;
+ struct pci_epf_mhi *epf_mhi;
+ struct device *dev = &epf->dev;
+
+ epf_mhi = devm_kzalloc(dev, sizeof(*epf_mhi), GFP_KERNEL);
+ if (!epf_mhi)
+ return -ENOMEM;
+
+ epf_mhi->info = info;
+ epf_mhi->epf = epf;
+ epf_set_drvdata(epf, epf_mhi);
+
+ return 0;
+}
+
+static const struct pci_epf_device_id pci_epf_mhi_ids[] = {
+ {
+ .name = "pci_epf_mhi", .driver_data = (kernel_ulong_t) &sdx55_info,
+ },
+ {},
+};
+
+static struct pci_epf_ops pci_epf_mhi_ops = {
+ .unbind = pci_epf_mhi_unbind,
+ .bind = pci_epf_mhi_bind,
+};
+
+static struct pci_epf_driver pci_epf_mhi_driver = {
+ .driver.name = "pci_epf_mhi",
+ .probe = pci_epf_mhi_probe,
+ .id_table = pci_epf_mhi_ids,
+ .ops = &pci_epf_mhi_ops,
+ .owner = THIS_MODULE,
+};
+
+static int __init pci_epf_mhi_init(void)
+{
+ int ret;
+
+ ret = pci_epf_register_driver(&pci_epf_mhi_driver);
+ if (ret) {
+ pr_err("Failed to register PCI EPF MHI driver: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(pci_epf_mhi_init);
+
+static void __exit pci_epf_mhi_exit(void)
+{
+ pci_epf_unregister_driver(&pci_epf_mhi_driver);
+}
+module_exit(pci_epf_mhi_exit);
+
+MODULE_DESCRIPTION("PCI EPF driver for MHI Endpoint device");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index ecbb0fb3b653..5e7e0845de82 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -698,6 +698,23 @@ void pci_epc_linkup(struct pci_epc *epc)
EXPORT_SYMBOL_GPL(pci_epc_linkup);
/**
+ * pci_epc_linkdown() - Notify the EPF device that EPC device has dropped the
+ * connection with the Root Complex.
+ * @epc: the EPC device which has dropped the link with the host
+ *
+ * Invoke to Notify the EPF device that the EPC device has dropped the
+ * connection with the Root Complex.
+ */
+void pci_epc_linkdown(struct pci_epc *epc)
+{
+ if (!epc || IS_ERR(epc))
+ return;
+
+ atomic_notifier_call_chain(&epc->notifier, LINK_DOWN, NULL);
+}
+EXPORT_SYMBOL_GPL(pci_epc_linkdown);
+
+/**
* pci_epc_init_notify() - Notify the EPF device that EPC device's core
* initialization is completed.
 * @epc: the EPC device whose core initialization is completed
@@ -715,6 +732,78 @@ void pci_epc_init_notify(struct pci_epc *epc)
EXPORT_SYMBOL_GPL(pci_epc_init_notify);
/**
+ * pci_epc_bme_notify() - Notify the EPF device that the EPC device has received
+ * the BME event from the Root complex
+ * @epc: the EPC device that received the BME event
+ *
+ * Invoke to Notify the EPF device that the EPC device has received the Bus
+ * Master Enable (BME) event from the Root complex
+ */
+void pci_epc_bme_notify(struct pci_epc *epc)
+{
+ if (!epc || IS_ERR(epc))
+ return;
+
+ atomic_notifier_call_chain(&epc->notifier, BME, NULL);
+}
+EXPORT_SYMBOL_GPL(pci_epc_bme_notify);
+
+/**
+ * pci_epc_pme_notify() - Notify the EPF device that the EPC device has received
+ * the PME from the Root complex
+ * @epc: the EPC device that received the PME
+ * @data: Data for the PME notifier
+ *
+ * Invoke to Notify the EPF device that the EPC device has received the Power
+ * Management Event (PME) from the Root complex
+ */
+void pci_epc_pme_notify(struct pci_epc *epc, void *data)
+{
+ if (!epc || IS_ERR(epc))
+ return;
+
+ atomic_notifier_call_chain(&epc->notifier, PME, data);
+}
+EXPORT_SYMBOL_GPL(pci_epc_pme_notify);
+
+/**
+ * pci_epc_d_state_notify() - Notify the EPF device that the EPC device has
+ * received the Device State event from Root complex
+ * @epc: the EPC device that received the Device State event
+ * @data: Data for the D_STATE notifier
+ *
+ * Invoke to notify the EPF device that the EPC device has received the Device
+ * State (D_STATE) event from the Root complex
+ */
+void pci_epc_d_state_notify(struct pci_epc *epc, void *data)
+{
+ if (!epc || IS_ERR(epc))
+ return;
+
+ atomic_notifier_call_chain(&epc->notifier, D_STATE, data);
+}
+EXPORT_SYMBOL_GPL(pci_epc_d_state_notify);
+
+/**
+ * pci_epc_custom_notify() - Notify the EPF device that the EPC device has
+ * received the custom events from the Root complex
+ * @epc: EPC device that received the custom event
+ * @data: Data for the CUSTOM notifier
+ *
+ * Invoke to notify the EPF device that the EPC device has received the Custom
+ * event from the Root complex. The custom event is EPC/vendor specific and is
+ * shared with the EPF device.
+ */
+void pci_epc_custom_notify(struct pci_epc *epc, void *data)
+{
+ if (!epc || IS_ERR(epc))
+ return;
+
+ atomic_notifier_call_chain(&epc->notifier, CUSTOM, data);
+}
+EXPORT_SYMBOL_GPL(pci_epc_custom_notify);
+
+/**
* pci_epc_destroy() - destroy the EPC device
* @epc: the EPC device that has to be destroyed
*
diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h
new file mode 100644
index 000000000000..ca1b3c26f05f
--- /dev/null
+++ b/include/linux/mhi_ep.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2021, Linaro Ltd.
+ *
+ */
+#ifndef _MHI_EP_H_
+#define _MHI_EP_H_
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/mhi.h>
+
+struct mhi_ep_chan;
+struct mhi_ep_cmd;
+struct mhi_ep_event;
+struct mhi_ep_cmd_ctx;
+struct mhi_ep_ev_ctx;
+struct mhi_ep_ch_ctx;
+
+struct mhi_ep_channel_config {
+ char *name;
+ u32 num;
+ u32 num_elements;
+ enum dma_data_direction dir;
+};
+
+struct mhi_ep_cntrl_config {
+ u32 max_channels;
+ u32 num_channels;
+ const struct mhi_ep_channel_config *ch_cfg;
+ u32 mhi_version;
+};
+
+struct mhi_ep_interrupt_state {
+ u32 mask;
+ u32 status;
+};
+
+struct mhi_ep_cntrl {
+ struct device *cntrl_dev;
+ struct mhi_ep_device *mhi_dev;
+ void __iomem *mmio;
+ int irq;
+
+ u32 max_chan;
+ struct mhi_ep_chan *mhi_chan;
+ struct mhi_ep_cmd *mhi_cmd;
+ struct mhi_ep_event *mhi_event;
+ struct mhi_ep_sm *sm;
+
+ /* Host control base information */
+ struct mhi_ep_ch_ctx *ch_ctx_cache;
+ struct mhi_ep_ev_ctx *ev_ctx_cache;
+ struct mhi_ep_cmd_ctx *cmd_ctx_cache;
+
+ u64 ch_ctx_host_pa;
+ u64 ev_ctx_host_pa;
+ u64 cmd_ctx_host_pa;
+
+ struct workqueue_struct *init_wq;
+ struct workqueue_struct *ring_wq;
+ struct work_struct init_work;
+ struct work_struct chdb_ctrl_work;
+ struct work_struct ring_work;
+
+ struct list_head process_ring_list;
+
+ struct mutex lock;
+ struct mutex event_lock;
+
+ /* CHDB and EVDB device interrupt state */
+ struct mhi_ep_interrupt_state chdb[4];
+ struct mhi_ep_interrupt_state evdb[4];
+
+ u32 reg_len;
+ u32 version;
+ u32 event_rings;
+ u32 hw_event_rings;
+ u32 channels;
+ u32 chdb_offset;
+ u32 erdb_offset;
+
+ void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl);
+ void __iomem *(*alloc_addr)(struct mhi_ep_cntrl *mhi_cntrl,
+ phys_addr_t *phys_addr, size_t size);
+ void (*free_addr)(struct mhi_ep_cntrl *mhi_cntrl,
+ phys_addr_t phys_addr, void __iomem *virt_addr, size_t size);
+ int (*map_addr)(struct mhi_ep_cntrl *mhi_cntrl,
+ phys_addr_t phys_addr, u64 pci_addr, size_t size);
+ void (*unmap_addr)(struct mhi_ep_cntrl *mhi_cntrl,
+ phys_addr_t phys_addr);
+};
+
+struct mhi_ep_device {
+ struct mhi_ep_cntrl *mhi_cntrl;
+ const struct mhi_device_id *id;
+ const char *name;
+ struct device dev;
+ struct mhi_ep_chan *ul_chan;
+ struct mhi_ep_chan *dl_chan;
+ enum mhi_device_type dev_type;
+ int ul_chan_id;
+ int dl_chan_id;
+};
+
+struct mhi_ep_driver {
+ const struct mhi_device_id *id_table;
+ struct device_driver driver;
+ int (*probe)(struct mhi_ep_device *mhi_ep,
+ const struct mhi_device_id *id);
+ void (*remove)(struct mhi_ep_device *mhi_ep);
+ void (*ul_xfer_cb)(struct mhi_ep_device *mhi_dev,
+ struct mhi_result *result);
+ void (*dl_xfer_cb)(struct mhi_ep_device *mhi_dev,
+ struct mhi_result *result);
+};
+
+#define to_mhi_ep_device(dev) container_of(dev, struct mhi_ep_device, dev)
+#define to_mhi_ep_driver(drv) container_of(drv, struct mhi_ep_driver, driver)
+
+/*
+ * module_mhi_ep_driver() - Helper macro for drivers that don't do
+ * anything special other than using default mhi_ep_driver_register() and
+ * mhi_ep_driver_unregister(). This eliminates a lot of boilerplate.
+ * Each module may only use this macro once.
+ */
+#define module_mhi_ep_driver(mhi_drv) \
+ module_driver(mhi_drv, mhi_ep_driver_register, \
+ mhi_ep_driver_unregister)
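+
+/*
+ * Usage sketch (hypothetical foo_* names), mirroring the qrtr mhi-ep
+ * client added later in this series:
+ *
+ * static struct mhi_ep_driver foo_driver = {
+ * .probe = foo_probe,
+ * .remove = foo_remove,
+ * .ul_xfer_cb = foo_ul_callback,
+ * .dl_xfer_cb = foo_dl_callback,
+ * .id_table = foo_id_table,
+ * .driver = { .name = "foo" },
+ * };
+ * module_mhi_ep_driver(foo_driver);
+ */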
+
+/*
+ * Macro to avoid include chaining to get THIS_MODULE
+ */
+#define mhi_ep_driver_register(mhi_drv) \
+ __mhi_ep_driver_register(mhi_drv, THIS_MODULE)
+
+int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner);
+void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv);
+
+int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
+ const struct mhi_ep_cntrl_config *config);
+void mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl);
+
+int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir,
+ struct sk_buff *skb, size_t len, enum mhi_flags mflags);
+
+#endif
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index a48778e1a4ee..a9fe577bb551 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -209,7 +209,12 @@ void pci_epc_destroy(struct pci_epc *epc);
int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
enum pci_epc_interface_type type);
void pci_epc_linkup(struct pci_epc *epc);
+void pci_epc_linkdown(struct pci_epc *epc);
void pci_epc_init_notify(struct pci_epc *epc);
+void pci_epc_bme_notify(struct pci_epc *epc);
+void pci_epc_pme_notify(struct pci_epc *epc, void *data);
+void pci_epc_d_state_notify(struct pci_epc *epc, void *data);
+void pci_epc_custom_notify(struct pci_epc *epc, void *data);
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
enum pci_epc_interface_type type);
int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index 009a07147c61..da360659c322 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -20,6 +20,11 @@ enum pci_epc_interface_type;
enum pci_notify_event {
CORE_INIT,
LINK_UP,
+ LINK_DOWN,
+ BME,
+ PME,
+ D_STATE,
+ CUSTOM,
};
enum pci_barno {
diff --git a/net/qrtr/Kconfig b/net/qrtr/Kconfig
index b4020b84760f..7d843e5dd88a 100644
--- a/net/qrtr/Kconfig
+++ b/net/qrtr/Kconfig
@@ -35,4 +35,11 @@ config QRTR_MHI
Say Y here to support MHI based ipcrouter channels. MHI is the
transport used for communicating to external modems.
+config QRTR_MHI_EP
+ tristate "MHI Endpoint IPC Router channels"
+ depends on MHI_BUS_EP
+ help
+ Say Y here to support MHI Endpoint based ipcrouter channels. MHI
+ Endpoint is the transport used on the device side to communicate
+ with the PCIe host.
+
endif # QRTR
diff --git a/net/qrtr/Makefile b/net/qrtr/Makefile
index 1b1411d158a7..49db0e1dba66 100644
--- a/net/qrtr/Makefile
+++ b/net/qrtr/Makefile
@@ -7,3 +7,5 @@ obj-$(CONFIG_QRTR_TUN) += qrtr-tun.o
qrtr-tun-y := tun.o
obj-$(CONFIG_QRTR_MHI) += qrtr-mhi.o
qrtr-mhi-y := mhi.o
+obj-$(CONFIG_QRTR_MHI_EP) += qrtr-mhi-ep.o
+qrtr-mhi-ep-y := mhi-ep.o
diff --git a/net/qrtr/mhi-ep.c b/net/qrtr/mhi-ep.c
new file mode 100644
index 000000000000..3837179d5025
--- /dev/null
+++ b/net/qrtr/mhi-ep.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/mhi_ep.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+
+#include "qrtr.h"
+
+struct qrtr_mhi_dev {
+ struct qrtr_endpoint ep;
+ struct mhi_ep_device *mhi_dev;
+ struct device *dev;
+ struct completion out_tre;
+ struct mutex out_lock;
+};
+
+/* Callback from host to notify available buffers to queue */
+static void qcom_mhi_qrtr_dl_callback(struct mhi_ep_device *mhi_dev,
+ struct mhi_result *mhi_res)
+{
+ struct qrtr_mhi_dev *qdev = dev_get_drvdata(&mhi_dev->dev);
+
+ mutex_lock(&qdev->out_lock);
+ complete_all(&qdev->out_tre);
+ mutex_unlock(&qdev->out_lock);
+}
+
+static void qcom_mhi_qrtr_ul_callback(struct mhi_ep_device *mhi_dev,
+ struct mhi_result *mhi_res)
+{
+ struct qrtr_mhi_dev *qdev = dev_get_drvdata(&mhi_dev->dev);
+ int rc;
+
+ /* TODO */
+ if (!qdev)
+ return;
+
+ rc = qrtr_endpoint_post(&qdev->ep, mhi_res->buf_addr,
+ mhi_res->bytes_xferd);
+ if (rc == -EINVAL)
+ dev_err(qdev->dev, "invalid ipcrouter packet\n");
+}
+
+/* Send data over MHI */
+static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
+{
+ struct qrtr_mhi_dev *qdev = container_of(ep, struct qrtr_mhi_dev, ep);
+ int rc;
+
+ if (skb->sk)
+ sock_hold(skb->sk);
+
+ rc = skb_linearize(skb);
+ if (rc)
+ goto free_skb;
+
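+ /*
+ * Wait until the host has queued a DL buffer, which is signalled
+ * from qcom_mhi_qrtr_dl_callback().
+ */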
+ wait_for_completion(&qdev->out_tre);
+
+ rc = mhi_ep_queue_skb(qdev->mhi_dev, DMA_FROM_DEVICE, skb, skb->len,
+ MHI_EOT);
+ if (rc)
+ goto free_skb;
+
+ reinit_completion(&qdev->out_tre);
+ consume_skb(skb);
+
+ return rc;
+
+free_skb:
+ if (skb->sk)
+ sock_put(skb->sk);
+ kfree_skb(skb);
+
+ return rc;
+}
+
+static int qcom_mhi_qrtr_probe(struct mhi_ep_device *mhi_dev,
+ const struct mhi_device_id *id)
+{
+ struct qrtr_mhi_dev *qdev;
+ int rc;
+
+ /* start channels */
+// rc = mhi_ep_prepare_for_transfer(mhi_dev);
+// if (rc)
+// return rc;
+
+ qdev = devm_kzalloc(&mhi_dev->dev, sizeof(*qdev), GFP_KERNEL);
+ if (!qdev)
+ return -ENOMEM;
+
+ qdev->mhi_dev = mhi_dev;
+ qdev->dev = &mhi_dev->dev;
+ init_completion(&qdev->out_tre);
+ mutex_init(&qdev->out_lock);
+ qdev->ep.xmit = qcom_mhi_qrtr_send;
+
+ dev_set_drvdata(&mhi_dev->dev, qdev);
+ rc = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NID_AUTO);
+ if (rc)
+ return rc;
+
+ dev_dbg(qdev->dev, "Qualcomm MHI QRTR driver probed\n");
+
+ return 0;
+}
+
+static void qcom_mhi_qrtr_remove(struct mhi_ep_device *mhi_dev)
+{
+ struct qrtr_mhi_dev *qdev = dev_get_drvdata(&mhi_dev->dev);
+
+ qrtr_endpoint_unregister(&qdev->ep);
+// mhi_unprepare_from_transfer(mhi_dev);
+ dev_set_drvdata(&mhi_dev->dev, NULL);
+}
+
+static const struct mhi_device_id qcom_mhi_qrtr_id_table[] = {
+ { .chan = "IPCR" },
+ {}
+};
+MODULE_DEVICE_TABLE(mhi, qcom_mhi_qrtr_id_table);
+
+static struct mhi_ep_driver qcom_mhi_qrtr_driver = {
+ .probe = qcom_mhi_qrtr_probe,
+ .remove = qcom_mhi_qrtr_remove,
+ .dl_xfer_cb = qcom_mhi_qrtr_dl_callback,
+ .ul_xfer_cb = qcom_mhi_qrtr_ul_callback,
+ .id_table = qcom_mhi_qrtr_id_table,
+ .driver = {
+ .name = "qcom_mhi_qrtr",
+ },
+};
+
+module_mhi_ep_driver(qcom_mhi_qrtr_driver);
+
+MODULE_AUTHOR("Chris Lew <clew@codeaurora.org>");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm IPC-Router MHI interface driver");
+MODULE_LICENSE("GPL v2");
diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c
index 1990d496fcfc..e25fd4d1d701 100644
--- a/net/qrtr/ns.c
+++ b/net/qrtr/ns.c
@@ -189,9 +189,11 @@ static void lookup_notify(struct sockaddr_qrtr *to, struct qrtr_server *srv,
static int announce_servers(struct sockaddr_qrtr *sq)
{
+ struct radix_tree_iter node_iter;
struct radix_tree_iter iter;
struct qrtr_server *srv;
struct qrtr_node *node;
+ void __rcu **node_slot;
void __rcu **slot;
int ret;
@@ -200,25 +202,37 @@ static int announce_servers(struct sockaddr_qrtr *sq)
return 0;
rcu_read_lock();
- /* Announce the list of servers registered in this node */
- radix_tree_for_each_slot(slot, &node->servers, &iter, 0) {
- srv = radix_tree_deref_slot(slot);
- if (!srv)
+
+ /* Iterate through all neighbor nodes */
+ radix_tree_for_each_slot(node_slot, &nodes, &node_iter, 0) {
+ node = radix_tree_deref_slot(node_slot);
+ if (!node)
continue;
- if (radix_tree_deref_retry(srv)) {
- slot = radix_tree_iter_retry(&iter);
+ if (radix_tree_deref_retry(node)) {
+ node_slot = radix_tree_iter_retry(&node_iter);
continue;
}
- slot = radix_tree_iter_resume(slot, &iter);
- rcu_read_unlock();
+ node_slot = radix_tree_iter_resume(node_slot, &node_iter);
- ret = service_announce_new(sq, srv);
- if (ret < 0) {
- pr_err("failed to announce new service\n");
- return ret;
- }
+ /* Announce the list of servers registered in this node */
+ radix_tree_for_each_slot(slot, &node->servers, &iter, 0) {
+ srv = radix_tree_deref_slot(slot);
+ if (!srv)
+ continue;
+ if (radix_tree_deref_retry(srv)) {
+ slot = radix_tree_iter_retry(&iter);
+ continue;
+ }
+ slot = radix_tree_iter_resume(slot, &iter);
- rcu_read_lock();
+ rcu_read_unlock();
+ ret = service_announce_new(sq, srv);
+ if (ret < 0) {
+ pr_err("failed to announce new service\n");
+ return ret;
+ }
+ rcu_read_lock();
+ }
}
rcu_read_unlock();
@@ -775,10 +789,8 @@ int qrtr_ns_init(void)
}
qrtr_ns.workqueue = alloc_workqueue("qrtr_ns_handler", WQ_UNBOUND, 1);
- if (!qrtr_ns.workqueue) {
- ret = -ENOMEM;
+ if (!qrtr_ns.workqueue)
goto err_sock;
- }
qrtr_ns.sock->sk->sk_data_ready = qrtr_ns_data_ready;
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index ec2322529727..384d6e3eca07 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -97,7 +97,7 @@ static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
return container_of(sk, struct qrtr_sock, sk);
}
-static unsigned int qrtr_local_nid = 1;
+static unsigned int qrtr_local_nid = 2;
/* for node ids */
static RADIX_TREE(qrtr_nodes, GFP_ATOMIC);
@@ -132,6 +132,12 @@ struct qrtr_node {
struct sk_buff_head rx_queue;
struct list_head item;
+ struct work_struct read_data;
+ struct work_struct say_hello;
+ struct workqueue_struct *wq;
+ struct task_struct *task;
+
+ atomic_t hello_sent;
};
/**
@@ -343,6 +349,15 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
size_t len = skb->len;
int rc, confirm_rx;
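+ /* Don't transmit anything before our HELLO goes out, and never send HELLO twice */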
+ if (!atomic_read(&node->hello_sent) && type != QRTR_TYPE_HELLO) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+ if (atomic_read(&node->hello_sent) && type == QRTR_TYPE_HELLO) {
+ kfree_skb(skb);
+ return 0;
+ }
+
confirm_rx = qrtr_tx_wait(node, to->sq_node, to->sq_port, type);
if (confirm_rx < 0) {
kfree_skb(skb);
@@ -376,6 +391,10 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
kfree_skb(skb);
mutex_unlock(&node->ep_lock);
}
+
+ if (!rc && type == QRTR_TYPE_HELLO)
+ atomic_inc(&node->hello_sent);
+
/* Need to ensure that a subsequent message carries the otherwise lost
* confirm_rx flag if we dropped this one */
if (rc && confirm_rx)
@@ -420,6 +439,137 @@ static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
spin_unlock_irqrestore(&qrtr_nodes_lock, flags);
}
+static bool qrtr_must_forward(struct qrtr_node *src,
+ struct qrtr_node *dst, u32 type)
+{
+ /* Node structure is not maintained for local processor.
+ * Hence src is null in that case.
+ */
+ if (!src)
+ return true;
+
+ if (!dst)
+ return false;
+
+ if (type == QRTR_TYPE_HELLO || type == QRTR_TYPE_RESUME_TX)
+ return false;
+
+ if (dst == src || dst->nid == QRTR_EP_NID_AUTO)
+ return false;
+
+ if (abs(dst->nid - src->nid) > 1)
+ return true;
+
+ return false;
+}
+
+static void qrtr_fwd_ctrl_pkt(struct sk_buff *skb)
+{
+ struct qrtr_cb *cb = (struct qrtr_cb *)skb->cb;
+ struct radix_tree_iter iter;
+ struct qrtr_node *node;
+ struct qrtr_node *src;
+ void __rcu **slot;
+
+ mutex_lock(&qrtr_node_lock);
+ src = qrtr_node_lookup(cb->src_node);
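+ /* Clone the control packet to every other node that must see it */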
+ radix_tree_for_each_slot(slot, &qrtr_nodes, &iter, 0) {
+ struct sockaddr_qrtr from;
+ struct sockaddr_qrtr to;
+ struct sk_buff *skbn;
+
+ node = *slot;
+
+ if (!qrtr_must_forward(src, node, cb->type))
+ continue;
+
+ skbn = skb_clone(skb, GFP_KERNEL);
+ if (!skbn)
+ break;
+
+ from.sq_family = AF_QIPCRTR;
+ from.sq_node = cb->src_node;
+ from.sq_port = cb->src_port;
+
+ to.sq_family = AF_QIPCRTR;
+ to.sq_node = node->nid;
+ to.sq_port = QRTR_PORT_CTRL;
+
+ qrtr_node_enqueue(node, skbn, cb->type, &from, &to);
+ }
+ mutex_unlock(&qrtr_node_lock);
+ qrtr_node_release(src);
+}
+
+static void qrtr_fwd_pkt(struct sk_buff *skb, struct qrtr_cb *cb)
+{
+ struct sockaddr_qrtr from = {AF_QIPCRTR, cb->src_node, cb->src_port};
+ struct sockaddr_qrtr to = {AF_QIPCRTR, cb->dst_node, cb->dst_port};
+ struct qrtr_node *node;
+
+ node = qrtr_node_lookup(cb->dst_node);
+ if (!node) {
+ kfree_skb(skb);
+ return;
+ }
+
+ qrtr_node_enqueue(node, skb, cb->type, &from, &to);
+ qrtr_node_release(node);
+}
+
+/* Handle and route a received packet.
+ *
+ * This will auto-reply with resume-tx packet as necessary.
+ */
+static void qrtr_node_rx_work(struct work_struct *work)
+{
+ struct qrtr_node *node = container_of(work, struct qrtr_node,
+ read_data);
+ struct qrtr_ctrl_pkt *pkt;
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&node->rx_queue)) != NULL) {
+ struct qrtr_sock *ipc;
+ struct qrtr_cb *cb;
+
+ cb = (struct qrtr_cb *)skb->cb;
+ qrtr_node_assign(node, cb->src_node);
+
+ if (cb->type != QRTR_TYPE_DATA)
+ qrtr_fwd_ctrl_pkt(skb);
+
+ if (cb->type == QRTR_TYPE_NEW_SERVER && skb->len >= sizeof(*pkt)) {
+ /* Remote node endpoint can bridge other distant nodes */
+ pkt = (struct qrtr_ctrl_pkt *)skb->data;
+ qrtr_node_assign(node, le32_to_cpu(pkt->server.node));
+ }
+
+ if (cb->type == QRTR_TYPE_RESUME_TX) {
+ if (cb->dst_node != qrtr_local_nid) {
+ qrtr_fwd_pkt(skb, cb);
+ continue;
+ }
+ qrtr_tx_resume(node, skb);
+ consume_skb(skb);
+ } else if (cb->dst_node != qrtr_local_nid &&
+ cb->type == QRTR_TYPE_DATA) {
+ qrtr_fwd_pkt(skb, cb);
+ } else {
+ ipc = qrtr_port_lookup(cb->dst_port);
+ if (!ipc) {
+ kfree_skb(skb);
+ continue;
+ }
+
+ if (sock_queue_rcv_skb(&ipc->sk, skb))
+ kfree_skb(skb);
+
+ qrtr_port_put(ipc);
+ }
+ }
+}
+
/**
* qrtr_endpoint_post() - post incoming data
* @ep: endpoint handle
@@ -433,10 +583,9 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
struct qrtr_node *node = ep->node;
const struct qrtr_hdr_v1 *v1;
const struct qrtr_hdr_v2 *v2;
- struct qrtr_sock *ipc;
struct sk_buff *skb;
struct qrtr_cb *cb;
- size_t size;
+ unsigned int size;
unsigned int ver;
size_t hdrlen;
@@ -502,33 +651,8 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
skb_put_data(skb, data + hdrlen, size);
- qrtr_node_assign(node, cb->src_node);
-
- if (cb->type == QRTR_TYPE_NEW_SERVER) {
- /* Remote node endpoint can bridge other distant nodes */
- const struct qrtr_ctrl_pkt *pkt;
-
- if (size < sizeof(*pkt))
- goto err;
-
- pkt = data + hdrlen;
- qrtr_node_assign(node, le32_to_cpu(pkt->server.node));
- }
-
- if (cb->type == QRTR_TYPE_RESUME_TX) {
- qrtr_tx_resume(node, skb);
- } else {
- ipc = qrtr_port_lookup(cb->dst_port);
- if (!ipc)
- goto err;
-
- if (sock_queue_rcv_skb(&ipc->sk, skb)) {
- qrtr_port_put(ipc);
- goto err;
- }
-
- qrtr_port_put(ipc);
- }
+ skb_queue_tail(&node->rx_queue, skb);
+ queue_work(node->wq, &node->read_data);
return 0;
@@ -565,6 +689,33 @@ static struct sk_buff *qrtr_alloc_ctrl_packet(struct qrtr_ctrl_pkt **pkt,
return skb;
}
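+/* Send the initial HELLO control packet to the remote processor */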
+static void qrtr_hello_work(struct work_struct *work)
+{
+ struct sockaddr_qrtr from = {AF_QIPCRTR, 0, QRTR_PORT_CTRL};
+ struct sockaddr_qrtr to = {AF_QIPCRTR, 0, QRTR_PORT_CTRL};
+ struct qrtr_ctrl_pkt *pkt;
+ struct qrtr_node *node;
+ struct qrtr_sock *ctrl;
+ struct sk_buff *skb;
+
+ ctrl = qrtr_port_lookup(QRTR_PORT_CTRL);
+ if (!ctrl)
+ return;
+
+ skb = qrtr_alloc_ctrl_packet(&pkt, GFP_KERNEL);
+ if (!skb) {
+ qrtr_port_put(ctrl);
+ return;
+ }
+
+ node = container_of(work, struct qrtr_node, say_hello);
+ pkt->cmd = cpu_to_le32(QRTR_TYPE_HELLO);
+ from.sq_node = qrtr_local_nid;
+
+ qrtr_node_enqueue(node, skb, QRTR_TYPE_HELLO, &from, &to);
+ qrtr_port_put(ctrl);
+}
+
/**
* qrtr_endpoint_register() - register a new endpoint
* @ep: endpoint to register
@@ -590,9 +741,16 @@ int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid)
node->nid = QRTR_EP_NID_AUTO;
node->ep = ep;
+ INIT_WORK(&node->read_data, qrtr_node_rx_work);
+ INIT_WORK(&node->say_hello, qrtr_hello_work);
+ node->wq = alloc_ordered_workqueue("qrtr_wq", 0);
+ if (!node->wq)
+ return -ENOMEM;
+
INIT_RADIX_TREE(&node->qrtr_tx_flow, GFP_KERNEL);
mutex_init(&node->qrtr_tx_lock);
+ atomic_set(&node->hello_sent, 0);
qrtr_node_assign(node, nid);
mutex_lock(&qrtr_node_lock);
@@ -600,6 +758,8 @@ int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int nid)
mutex_unlock(&qrtr_node_lock);
ep->node = node;
+ queue_work(node->wq, &node->say_hello);
+
return 0;
}
EXPORT_SYMBOL_GPL(qrtr_endpoint_register);
@@ -757,7 +917,7 @@ static void qrtr_reset_ports(void)
xa_for_each_start(&qrtr_ports, index, ipc, 1) {
sock_hold(&ipc->sk);
ipc->sk.sk_err = ENETRESET;
- sk_error_report(&ipc->sk);
+ ipc->sk.sk_error_report(&ipc->sk);
sock_put(&ipc->sk);
}
rcu_read_unlock();
@@ -795,6 +955,17 @@ static int __qrtr_bind(struct socket *sock,
if (port == QRTR_PORT_CTRL)
qrtr_reset_ports();
+ if (port == QRTR_PORT_CTRL) {
+ struct radix_tree_iter iter;
+ struct qrtr_node *node;
+ void __rcu **slot;
+
+ radix_tree_for_each_slot(slot, &qrtr_nodes, &iter, 0) {
+ node = *slot;
+ atomic_set(&node->hello_sent, 0);
+ }
+ }
+
return 0;
}