author    | Linaro CI <ci_notify@linaro.org> | 2021-11-03 07:03:41 +0000
committer | Linaro CI <ci_notify@linaro.org> | 2021-11-03 07:03:41 +0000
commit    | 3b79077ab98f5a9ee6c3575f17d93cf6153231fc (patch)
tree      | a1a3d0d5f27c34f619641f06f42080e7b46b2f3b
parent    | cd905c5661afe7f0d6450a162e4e8f15dd30fe09 (diff)
parent    | 818885b4d7cc9eb282ee4bf463ce8159bf508145 (diff)
Merge remote-tracking branch 'sdx55-drivers/tracking-qcomlt-sdx55-drivers' into integration-linux-qcomlt
27 files changed, 4997 insertions, 3 deletions
diff --git a/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml b/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml new file mode 100644 index 000000000000..9fe6d1cef767 --- /dev/null +++ b/Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml @@ -0,0 +1,158 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/pci/qcom,pcie-ep.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm PCIe Endpoint Controller binding + +maintainers: + - Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> + +allOf: + - $ref: "pci-ep.yaml#" + +properties: + compatible: + const: qcom,sdx55-pcie-ep + + reg: + items: + - description: Qualcomm specific PARF configuration registers + - description: Designware PCIe registers + - description: External local bus interface registers + - description: Address Translation Unit (ATU) registers + - description: Memory region used to map remote RC address space + - description: BAR memory region + + reg-names: + items: + - const: parf + - const: dbi + - const: elbi + - const: atu + - const: addr_space + - const: mmio + + clocks: + items: + - description: PCIe Auxiliary clock + - description: PCIe CFG AHB clock + - description: PCIe Master AXI clock + - description: PCIe Slave AXI clock + - description: PCIe Slave Q2A AXI clock + - description: PCIe Sleep clock + - description: PCIe Reference clock + + clock-names: + items: + - const: aux + - const: cfg + - const: bus_master + - const: bus_slave + - const: slave_q2a + - const: sleep + - const: ref + + qcom,perst-regs: + description: Reference to a syscon representing TCSR followed by the two + offsets within syscon for Perst enable and Perst separation + enable registers + $ref: "/schemas/types.yaml#/definitions/phandle-array" + items: + minItems: 3 + maxItems: 3 + + interrupts: + items: + - description: PCIe Global interrupt + - description: PCIe Doorbell interrupt + + interrupt-names: + items: + - const: global + - const: doorbell + + reset-gpios: + description: GPIO that is being used as PERST# input signal + maxItems: 1 + + wake-gpios: + description: GPIO that is being used as WAKE# output signal + maxItems: 1 + + resets: + maxItems: 1 + + reset-names: + const: core + + power-domains: + maxItems: 1 + + phys: + maxItems: 1 + + phy-names: + const: pciephy + + num-lanes: + default: 2 + +required: + - compatible + - reg + - reg-names + - clocks + - clock-names + - qcom,perst-regs + - interrupts + - interrupt-names + - reset-gpios + - resets + - reset-names + - power-domains + +unevaluatedProperties: false + +examples: + - | + #include <dt-bindings/clock/qcom,gcc-sdx55.h> + #include <dt-bindings/gpio/gpio.h> + #include <dt-bindings/interrupt-controller/arm-gic.h> + pcie_ep: pcie-ep@40000000 { + compatible = "qcom,sdx55-pcie-ep"; + reg = <0x01c00000 0x3000>, + <0x40000000 0xf1d>, + <0x40000f20 0xc8>, + <0x40001000 0x1000>, + <0x40002000 0x1000>, + <0x01c03000 0x3000>; + reg-names = "parf", "dbi", "elbi", "atu", "addr_space", + "mmio"; + + clocks = <&gcc GCC_PCIE_AUX_CLK>, + <&gcc GCC_PCIE_CFG_AHB_CLK>, + <&gcc GCC_PCIE_MSTR_AXI_CLK>, + <&gcc GCC_PCIE_SLV_AXI_CLK>, + <&gcc GCC_PCIE_SLV_Q2A_AXI_CLK>, + <&gcc GCC_PCIE_SLEEP_CLK>, + <&gcc GCC_PCIE_0_CLKREF_CLK>; + clock-names = "aux", "cfg", "bus_master", "bus_slave", + "slave_q2a", "sleep", "ref"; + + qcom,perst-regs = <&tcsr 0xb258 0xb270>; + + interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "global", "doorbell"; + 
reset-gpios = <&tlmm 57 GPIO_ACTIVE_LOW>; + wake-gpios = <&tlmm 53 GPIO_ACTIVE_LOW>; + resets = <&gcc GCC_PCIE_BCR>; + reset-names = "core"; + power-domains = <&gcc PCIE_GDSC>; + phys = <&pcie0_lane>; + phy-names = "pciephy"; + max-link-speed = <3>; + num-lanes = <2>; + }; diff --git a/MAINTAINERS b/MAINTAINERS index 3b79fd441dde..05c0ffb1bf64 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -14613,7 +14613,15 @@ M: Stanimir Varbanov <svarbanov@mm-sol.com> L: linux-pci@vger.kernel.org L: linux-arm-msm@vger.kernel.org S: Maintained -F: drivers/pci/controller/dwc/*qcom* +F: drivers/pci/controller/dwc/pcie-qcom.c + +PCIE ENDPOINT DRIVER FOR QUALCOMM +M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> +L: linux-pci@vger.kernel.org +L: linux-arm-msm@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/pci/qcom,pcie-ep.yaml +F: drivers/pci/controller/dwc/pcie-qcom-ep.c PCIE DRIVER FOR ROCKCHIP M: Shawn Lin <shawn.lin@rock-chips.com> diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile index 52c2f35a26a9..16da51130d1a 100644 --- a/drivers/bus/Makefile +++ b/drivers/bus/Makefile @@ -39,4 +39,4 @@ obj-$(CONFIG_VEXPRESS_CONFIG) += vexpress-config.o obj-$(CONFIG_DA8XX_MSTPRI) += da8xx-mstpri.o # MHI -obj-$(CONFIG_MHI_BUS) += mhi/ +obj-y += mhi/ diff --git a/drivers/bus/mhi/Kconfig b/drivers/bus/mhi/Kconfig index da5cd0c9fc62..c0c89966a1b4 100644 --- a/drivers/bus/mhi/Kconfig +++ b/drivers/bus/mhi/Kconfig @@ -6,13 +6,24 @@ # config MHI_BUS - tristate "Modem Host Interface (MHI) bus" + tristate "Modem Host Interface (MHI) bus host implementation" help Bus driver for MHI protocol. Modem Host Interface (MHI) is a communication protocol used by the host processors to control and communicate with modem devices over a high speed peripheral bus or shared memory. +config MHI_BUS_EP + tristate "Modem Host Interface (MHI) bus Endpoint implementation" + help + Bus driver for MHI protocol. Modem Host Interface (MHI) is a + communication protocol used by the host processors to control + and communicate with modem devices over a high speed peripheral + bus or shared memory. + + MHI_BUS_EP implements the MHI protocol for the endpoint devices + like SDX55 modem connected to the host machine over PCIe. 
+ config MHI_BUS_DEBUG bool "Debugfs support for the MHI bus" depends on MHI_BUS && DEBUG_FS diff --git a/drivers/bus/mhi/Makefile b/drivers/bus/mhi/Makefile index 0a2d778d6fb4..dc242a71ac67 100644 --- a/drivers/bus/mhi/Makefile +++ b/drivers/bus/mhi/Makefile @@ -1,6 +1,9 @@ # core layer obj-y += core/ +# endpoint layer +obj-y += ep/ + obj-$(CONFIG_MHI_BUS_PCI_GENERIC) += mhi_pci_generic.o mhi_pci_generic-y += pci_generic.o diff --git a/drivers/bus/mhi/ep/Makefile b/drivers/bus/mhi/ep/Makefile new file mode 100644 index 000000000000..41f660b4af7d --- /dev/null +++ b/drivers/bus/mhi/ep/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_MHI_BUS_EP) += mhi_ep.o + +mhi_ep-y := main.o mmio.o sm.o ring.o diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h new file mode 100644 index 000000000000..f4484b8b5eb8 --- /dev/null +++ b/drivers/bus/mhi/ep/internal.h @@ -0,0 +1,413 @@ +#ifndef _MHI_EP_INTERNAL_ +#define _MHI_EP_INTERNAL_ + +#include <linux/bitfield.h> +#include <linux/mhi.h> + +extern struct bus_type mhi_ep_bus_type; + +/* MHI register definition */ +#define MHI_CTRL_INT_STATUS_A7 0x4 +#define MHI_CTRL_INT_STATUS_A7_MSK BIT(0) +#define MHI_CTRL_INT_STATUS_CRDB_MSK BIT(1) +#define MHI_CHDB_INT_STATUS_A7_n(n) (0x28 + 0x4 * (n)) +#define MHI_ERDB_INT_STATUS_A7_n(n) (0x38 + 0x4 * (n)) + +#define MHI_CTRL_INT_CLEAR_A7 0x4c +#define MHI_CTRL_INT_MMIO_WR_CLEAR BIT(2) +#define MHI_CTRL_INT_CRDB_CLEAR BIT(1) +#define MHI_CTRL_INT_CRDB_MHICTRL_CLEAR BIT(0) + +#define MHI_CHDB_INT_CLEAR_A7_n(n) (0x70 + 0x4 * (n)) +#define MHI_CHDB_INT_CLEAR_A7_n_CLEAR_ALL GENMASK(31, 0) +#define MHI_ERDB_INT_CLEAR_A7_n(n) (0x80 + 0x4 * (n)) +#define MHI_ERDB_INT_CLEAR_A7_n_CLEAR_ALL GENMASK(31, 0) + +#define MHI_CTRL_INT_MASK_A7 0x94 +#define MHI_CTRL_INT_MASK_A7_MASK_MASK GENMASK(1, 0) +#define MHI_CTRL_MHICTRL_MASK BIT(0) +#define MHI_CTRL_MHICTRL_SHFT 0 +#define MHI_CTRL_CRDB_MASK BIT(1) +#define MHI_CTRL_CRDB_SHFT 1 + +#define MHI_CHDB_INT_MASK_A7_n(n) (0xb8 + 0x4 * (n)) +#define MHI_CHDB_INT_MASK_A7_n_EN_ALL GENMASK(31, 0) +#define MHI_ERDB_INT_MASK_A7_n(n) (0xc8 + 0x4 * (n)) +#define MHI_ERDB_INT_MASK_A7_n_EN_ALL GENMASK(31, 0) + +#define MHIREGLEN 0x100 +#define MHIVER 0x108 + +#define MHICFG 0x110 +#define MHICFG_NHWER_MASK GENMASK(31, 24) +#define MHICFG_NER_MASK GENMASK(23, 16) +#define MHICFG_RESERVED_BITS15_8_MASK GENMASK(15, 8) +#define MHICFG_NCH_MASK GENMASK(7, 0) + +#define CHDBOFF 0x118 +#define ERDBOFF 0x120 +#define BHIOFF 0x128 +#define DEBUGOFF 0x130 + +#define MHICTRL 0x138 +#define MHICTRL_MHISTATE_MASK GENMASK(15, 8) +#define MHICTRL_RESET_MASK BIT(1) +#define MHICTRL_RESET_SHIFT 1 + +#define MHISTATUS 0x148 +#define MHISTATUS_MHISTATE_MASK GENMASK(15, 8) +#define MHISTATUS_MHISTATE_SHIFT 8 +#define MHISTATUS_SYSERR_MASK BIT(2) +#define MHISTATUS_SYSERR_SHIFT 2 +#define MHISTATUS_READY_MASK BIT(0) +#define MHISTATUS_READY_SHIFT 0 + +#define CCABAP_LOWER 0x158 +#define CCABAP_HIGHER 0x15C +#define ECABAP_LOWER 0x160 +#define ECABAP_HIGHER 0x164 +#define CRCBAP_LOWER 0x168 +#define CRCBAP_HIGHER 0x16C +#define CRDB_LOWER 0x170 +#define CRDB_HIGHER 0x174 +#define MHICTRLBASE_LOWER 0x180 +#define MHICTRLBASE_HIGHER 0x184 +#define MHICTRLLIMIT_LOWER 0x188 +#define MHICTRLLIMIT_HIGHER 0x18C +#define MHIDATABASE_LOWER 0x198 +#define MHIDATABASE_HIGHER 0x19C +#define MHIDATALIMIT_LOWER 0x1A0 +#define MHIDATALIMIT_HIGHER 0x1A4 +#define CHDB_LOWER_n(n) (0x400 + 0x8 * (n)) +#define CHDB_HIGHER_n(n) (0x404 + 0x8 * (n)) +#define ERDB_LOWER_n(n) (0x800 + 0x8 * (n)) +#define ERDB_HIGHER_n(n) 
(0x804 + 0x8 * (n)) +#define BHI_INTVEC 0x220 +#define BHI_EXECENV 0x228 +#define BHI_IMGTXDB 0x218 + +#define NR_OF_CMD_RINGS 1 +#define NUM_EVENT_RINGS 128 +#define NUM_HW_EVENT_RINGS 2 +#define NUM_CHANNELS 128 +#define HW_CHANNEL_BASE 100 +#define NUM_HW_CHANNELS 15 +#define HW_CHANNEL_END 110 +#define MHI_ENV_VALUE 2 +#define MHI_MASK_ROWS_CH_EV_DB 4 +#define TRB_MAX_DATA_SIZE 8192 +#define MHI_CTRL_STATE 100 + +/* Channel state */ +enum mhi_ch_state { + MHI_CH_STATE_DISABLED, + MHI_CH_STATE_ENABLED, + MHI_CH_STATE_RUNNING, + MHI_CH_STATE_SUSPENDED, + MHI_CH_STATE_STOP, + MHI_CH_STATE_ERROR, +}; + +#define CHAN_CTX_CHSTATE_MASK GENMASK(7, 0) +#define CHAN_CTX_CHSTATE_SHIFT 0 +#define CHAN_CTX_BRSTMODE_MASK GENMASK(9, 8) +#define CHAN_CTX_BRSTMODE_SHIFT 8 +#define CHAN_CTX_POLLCFG_MASK GENMASK(15, 10) +#define CHAN_CTX_POLLCFG_SHIFT 10 +#define CHAN_CTX_RESERVED_MASK GENMASK(31, 16) +struct mhi_chan_ctx { + __u32 chcfg; + __u32 chtype; + __u32 erindex; + + __u64 rbase __packed __aligned(4); + __u64 rlen __packed __aligned(4); + __u64 rp __packed __aligned(4); + __u64 wp __packed __aligned(4); +}; + +/* Event ring context type */ +#define EV_CTX_RESERVED_MASK GENMASK(7, 0) +#define EV_CTX_INTMODC_MASK GENMASK(15, 8) +#define EV_CTX_INTMODC_SHIFT 8 +#define EV_CTX_INTMODT_MASK GENMASK(31, 16) +#define EV_CTX_INTMODT_SHIFT 16 +struct mhi_event_ctx { + __u32 intmod; + __u32 ertype; + __u32 msivec; + + __u64 rbase __packed __aligned(4); + __u64 rlen __packed __aligned(4); + __u64 rp __packed __aligned(4); + __u64 wp __packed __aligned(4); +}; + +/* Command context */ +struct mhi_cmd_ctx { + __u32 reserved0; + __u32 reserved1; + __u32 reserved2; + + __u64 rbase __packed __aligned(4); + __u64 rlen __packed __aligned(4); + __u64 rp __packed __aligned(4); + __u64 wp __packed __aligned(4); +}; + +/* Generic context */ +struct mhi_generic_ctx { + __u32 reserved0; + __u32 reserved1; + __u32 reserved2; + + __u64 rbase __packed __aligned(4); + __u64 rlen __packed __aligned(4); + __u64 rp __packed __aligned(4); + __u64 wp __packed __aligned(4); +}; + +enum mhi_cmd_type { + MHI_CMD_NOP = 1, + MHI_CMD_RESET_CHAN = 16, + MHI_CMD_STOP_CHAN = 17, + MHI_CMD_START_CHAN = 18, +}; + +/* Command Ring Element macros */ + +/* No operation command */ +#define MHI_EP_CRE_NOOP_PTR (0) +#define MHI_EP_CRE_NOOP_DWORD0 (0) +#define MHI_EP_CRE_NOOP_DWORD1 (MHI_CMD_NOP << 16) + +/* Channel reset command */ +#define MHI_EP_CRE_RESET_PTR (0) +#define MHI_EP_CRE_RESET_DWORD0 (0) +#define MHI_EP_CRE_RESET_DWORD1(chid) ((chid << 24) | \ + (MHI_CMD_RESET_CHAN << 16)) + +/* Channel stop command */ +#define MHI_EP_CRE_STOP_PTR (0) +#define MHI_EP_CRE_STOP_DWORD0 (0) +#define MHI_EP_CRE_STOP_DWORD1(chid) ((chid << 24) | \ + (MHI_CMD_STOP_CHAN << 16)) + +/* Channel start command */ +#define MHI_EP_CRE_START_PTR (0) +#define MHI_EP_CRE_START_DWORD0 (0) +#define MHI_EP_CRE_START_DWORD1(chid) ((chid << 24) | \ + (MHI_CMD_START_CHAN << 16)) + +#define MHI_EP_CRE_GET_CHID(cre) (((cre)->dword[1] >> 24) & 0xff) +#define MHI_EP_CRE_GET_TYPE(cre) (((cre)->dword[1] >> 16) & 0xff) + +/* Event Ring Element macros */ + +/* Transfer completion event */ +#define MHI_EP_ERE_TR_PTR 0 +#define MHI_EP_ERE_TR_DWORD0(code, len) ((code << 24) | len) +#define MHI_EP_ERE_TR_DWORD1(chid, type) ((chid << 24) | (type << 16)) + +/* State change event */ +#define MHI_EP_ERE_SC_PTR 0 +#define MHI_EP_ERE_SC_DWORD0(state) (state << 24) +#define MHI_EP_ERE_SC_DWORD1(type) (type << 16) + +/* EE event */ +#define MHI_EP_ERE_EE_PTR 0 +#define 
MHI_EP_ERE_EE_DWORD0(ee) (ee << 24) +#define MHI_EP_ERE_EE_DWORD1(type) (type << 16) + +/* Command Completion event */ +#define MHI_EP_ERE_CC_PTR(ptr) (ptr) +#define MHI_EP_ERE_CC_DWORD0(code) (code << 24) +#define MHI_EP_ERE_CC_DWORD1(type) (type << 16) + +/* Transfer Ring Element macros */ +#define MHI_EP_TRE_PTR(ptr) (ptr) +#define MHI_EP_TRE_DWORD0(len) (len & MHI_MAX_MTU) +#define MHI_EP_TRE_DWORD1(bei, ieot, ieob, chain) ((2 << 16) | (bei << 10) \ + | (ieot << 9) | (ieob << 8) | chain) +#define MHI_EP_TRE_GET_PTR(tre) ((tre)->ptr) +#define MHI_EP_TRE_GET_LEN(tre) ((tre)->dword[0] & 0xffff) +#define MHI_EP_TRE_GET_CHAIN(tre) FIELD_GET(BIT(0), (tre)->dword[1]) +#define MHI_EP_TRE_GET_IEOB(tre) FIELD_GET(BIT(8), (tre)->dword[1]) +#define MHI_EP_TRE_GET_IEOT(tre) FIELD_GET(BIT(9), (tre)->dword[1]) +#define MHI_EP_TRE_GET_BEI(tre) FIELD_GET(BIT(10), (tre)->dword[1]) + +enum mhi_pkt_type { + MHI_PKT_TYPE_INVALID = 0x0, + MHI_PKT_TYPE_NOOP_CMD = 0x1, + MHI_PKT_TYPE_TRANSFER = 0x2, + MHI_PKT_TYPE_COALESCING = 0x8, + MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10, + MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11, + MHI_PKT_TYPE_START_CHAN_CMD = 0x12, + MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20, + MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21, + MHI_PKT_TYPE_TX_EVENT = 0x22, + MHI_PKT_TYPE_RSC_TX_EVENT = 0x28, + MHI_PKT_TYPE_EE_EVENT = 0x40, + MHI_PKT_TYPE_TSYNC_EVENT = 0x48, + MHI_PKT_TYPE_BW_REQ_EVENT = 0x50, + MHI_PKT_TYPE_STALE_EVENT, /* internal event */ +}; + +/* MHI transfer completion events */ +enum mhi_ev_ccs { + MHI_EV_CC_INVALID = 0x0, + MHI_EV_CC_SUCCESS = 0x1, + MHI_EV_CC_EOT = 0x2, /* End of transfer event */ + MHI_EV_CC_OVERFLOW = 0x3, + MHI_EV_CC_EOB = 0x4, /* End of block event */ + MHI_EV_CC_OOB = 0x5, /* Out of block event */ + MHI_EV_CC_DB_MODE = 0x6, + MHI_EV_CC_UNDEFINED_ERR = 0x10, + MHI_EV_CC_BAD_TRE = 0x11, +}; + +enum mhi_ep_ring_state { + RING_STATE_UINT = 0, + RING_STATE_IDLE, +}; + +enum mhi_ep_ring_type { + RING_TYPE_CMD = 0, + RING_TYPE_ER, + RING_TYPE_CH, + RING_TYPE_INVALID, +}; + +enum mhi_ep_execenv { + MHI_EP_SBL_EE = 1, + MHI_EP_AMSS_EE = 2, + MHI_EP_UNRESERVED +}; + +struct mhi_ep_ring_element { + u64 ptr; + u32 dword[2]; +}; + +/* Transfer ring element type */ +union mhi_ep_ring_ctx { + struct mhi_cmd_ctx cmd; + struct mhi_event_ctx ev; + struct mhi_chan_ctx ch; + struct mhi_generic_ctx generic; +}; + +struct mhi_ep_ring { + struct list_head list; + struct mhi_ep_cntrl *mhi_cntrl; + int (*ring_cb)(struct mhi_ep_ring *ring, struct mhi_ep_ring_element *el); + union mhi_ep_ring_ctx *ring_ctx; + struct mhi_ep_ring_element *ring_cache; + enum mhi_ep_ring_type type; + enum mhi_ep_ring_state state; + size_t rd_offset; + size_t wr_offset; + size_t ring_size; + u32 db_offset_h; + u32 db_offset_l; + u32 ch_id; +}; + +struct mhi_ep_cmd { + struct mhi_ep_ring ring; +}; + +struct mhi_ep_event { + struct mhi_ep_ring ring; +}; + +extern const char * const mhi_ep_state_str[MHI_STATE_MAX]; +#define TO_MHI_STATE_STR(state) ((state >= MHI_STATE_MAX || \ + !mhi_ep_state_str[state]) ? \ + "INVALID_STATE" : mhi_ep_state_str[state]) + +extern const char * const mhi_ep_link_state_str[LINK_STATE_MAX]; +#define TO_LINK_STATE_STR(state) ((state >= LINK_STATE_MAX || \ + !mhi_ep_link_state_str[state]) ? 
\ + "INVALID_STATE" : mhi_ep_link_state_str[state]) + +struct mhi_ep_state_transition { + struct list_head node; + enum mhi_state state; +}; + +struct mhi_ep_chan { + char *name; + struct mhi_ep_device *mhi_dev; + struct mhi_ep_ring ring; + struct mutex lock; + void (*xfer_cb)(struct mhi_ep_device *mhi_dev, struct mhi_result *result); + enum mhi_ch_state state; + enum dma_data_direction dir; + u64 tre_loc; + u32 tre_size; + u32 tre_bytes_left; + u32 chan; + bool skip_td; +}; + +/* MHI Ring related functions */ +int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ep_ring_element *el); +int mhi_ep_process_tre_ring(struct mhi_ep_ring *ring, struct mhi_ep_ring_element *el); +void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id); +void mhi_ep_ring_stop(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring); +size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr); +int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, union mhi_ep_ring_ctx *ctx); +int mhi_ep_process_ring(struct mhi_ep_ring *ring); +int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ep_ring_element *element, + int evt_offset); +void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring); + +/* MMIO related functions */ +void mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 *regval); +void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val); +void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, + u32 mask, u32 shift, u32 val); +int mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset, + u32 mask, u32 shift, u32 *regval); +void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_enable_chdb_a7(struct mhi_ep_cntrl *mhi_cntrl, u32 chdb_id); +void mhi_ep_mmio_disable_chdb_a7(struct mhi_ep_cntrl *mhi_cntrl, u32 chdb_id); +void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_ch_db(struct mhi_ep_ring *ring, u64 *wr_offset); +void mhi_ep_mmio_get_er_db(struct mhi_ep_ring *ring, u64 *wr_offset); +void mhi_ep_mmio_get_cmd_db(struct mhi_ep_ring *ring, u64 *wr_offset); +void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value); +void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state, + bool *mhi_reset); +void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl); + +/* MHI EP core functions */ +int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, + enum mhi_state state); +int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ep_execenv exec_env); +bool mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state cur_mhi_state, + enum mhi_state mhi_state); +int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum 
mhi_state mhi_state); +int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl); +int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl); +int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl); +int mhi_ep_suspend(struct mhi_ep_cntrl *mhi_cntrl); +int mhi_ep_resume(struct mhi_ep_cntrl *mhi_cntrl); + +#endif diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c new file mode 100644 index 000000000000..46440c876769 --- /dev/null +++ b/drivers/bus/mhi/ep/main.c @@ -0,0 +1,1684 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * MHI Bus Endpoint stack + * + * Copyright (C) 2021 Linaro Ltd. + * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> + */ + +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/dma-direction.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/mhi_ep.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include "internal.h" + +/* Wait time before suspend/resume is complete */ +#define MHI_SUSPEND_MIN 100 +#define MHI_SUSPEND_TIMEOUT 600 +#define MHI_MASK_CH_EV_LEN 32 + +static DEFINE_IDA(mhi_ep_cntrl_ida); + +static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id); +static int mhi_ep_destroy_device(struct device *dev, void *data); + +int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 event_ring, + struct mhi_ep_ring_element *event) +{ + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_event[event_ring].ring; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + union mhi_ep_ring_ctx *ctx; + int ret; + + mutex_lock(&mhi_cntrl->event_lock); + ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[event_ring]; + if (ring->state == RING_STATE_UINT) { + ret = mhi_ep_ring_start(mhi_cntrl, ring, ctx); + if (ret) { + dev_err(dev, "Error starting event ring (%d)\n", event_ring); + goto err_unlock; + } + } + + /* Add element to the primary event ring (0) */ + ret = mhi_ep_ring_add_element(ring, event, 0); + if (ret) { + dev_err(dev, "Error adding element to event ring (%d)\n", event_ring); + goto err_unlock; + } + + /* Ensure that the ring pointer gets updated in host memory before triggering MSI */ + wmb(); + + mutex_unlock(&mhi_cntrl->event_lock); + + /* Raise MSI to host */ + mhi_cntrl->raise_irq(mhi_cntrl); + + return 0; + +err_unlock: + mutex_unlock(&mhi_cntrl->event_lock); + + return ret; +} + +static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, + struct mhi_ep_ring *ring, u32 len, + enum mhi_ev_ccs code) +{ + struct mhi_ep_ring_element event = {}; + u32 er_index, tmp; + + er_index = mhi_cntrl->ch_ctx_cache[ring->ch_id].erindex; + event.ptr = ring->ring_ctx->generic.rbase + + ring->rd_offset * sizeof(struct mhi_ep_ring_element); + + tmp = event.dword[0]; + tmp |= MHI_EP_ERE_TR_DWORD0(code, len); + event.dword[0] = tmp; + + tmp = event.dword[1]; + tmp |= MHI_EP_ERE_TR_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT); + event.dword[1] = tmp; + + return mhi_ep_send_event(mhi_cntrl, er_index, &event); +} + +int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, + enum mhi_state state) +{ + struct mhi_ep_ring_element event = {}; + u32 tmp; + + tmp = event.dword[0]; + tmp |= MHI_EP_ERE_SC_DWORD0(state); + event.dword[0] = tmp; + + tmp = event.dword[1]; + tmp |= MHI_EP_ERE_SC_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT); + event.dword[1] = tmp; + + return 
mhi_ep_send_event(mhi_cntrl, 0, &event); +} + +int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ep_execenv exec_env) +{ + struct mhi_ep_ring_element event = {}; + u32 tmp; + + tmp = event.dword[0]; + tmp |= MHI_EP_ERE_EE_DWORD0(exec_env); + event.dword[0] = tmp; + + tmp = event.dword[1]; + tmp |= MHI_EP_ERE_SC_DWORD1(MHI_PKT_TYPE_EE_EVENT); + event.dword[1] = tmp; + + return mhi_ep_send_event(mhi_cntrl, 0, &event); +} + +static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, + enum mhi_ev_ccs code) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ep_ring_element event = {}; + u32 tmp; + + if (code > MHI_EV_CC_BAD_TRE) { + dev_err(dev, "Invalid command completion code: %d\n", code); + return -EINVAL; + } + + event.ptr = mhi_cntrl->cmd_ctx_cache->rbase + + (mhi_cntrl->mhi_cmd->ring.rd_offset * + (sizeof(struct mhi_ep_ring_element))); + + tmp = event.dword[0]; + tmp |= MHI_EP_ERE_CC_DWORD0(code); + event.dword[0] = tmp; + + tmp = event.dword[1]; + tmp |= MHI_EP_ERE_CC_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT); + event.dword[1] = tmp; + + return mhi_ep_send_event(mhi_cntrl, 0, &event); +} + +/* + * We don't need to do anything special other than setting the MHI SYS_ERR + * state. The host issue will reset all contexts and issue MHI RESET so that we + * could also recover from error state. + */ +void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + /* If MHI EP is not enabled, nothing to do */ + if (!mhi_cntrl->is_enabled) + return; + + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_SYS_ERR); + if (ret) + return; + + /* Signal host that the device went to SYS_ERR state */ + ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_SYS_ERR); + if (ret) + dev_err(dev, "Failed sending SYS_ERR state change event: %d\n", ret); +} + +void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ep_chan *mhi_chan; + u32 tmp; + int i; + + for (i = 0; i < mhi_cntrl->max_chan; i++) { + mhi_chan = &mhi_cntrl->mhi_chan[i]; + + if (!mhi_chan->mhi_dev) + continue; + + mutex_lock(&mhi_chan->lock); + /* Skip if the channel is not currently running */ + tmp = mhi_cntrl->ch_ctx_cache[i].chcfg; + if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) { + mutex_unlock(&mhi_chan->lock); + continue; + } + + dev_dbg(dev, "Suspending channel (%d)\n", i); + /* Set channel state to SUSPENDED */ + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= (MHI_CH_STATE_SUSPENDED << CHAN_CTX_CHSTATE_SHIFT); + mhi_cntrl->ch_ctx_cache[i].chcfg = tmp; + mutex_unlock(&mhi_chan->lock); + } +} + +void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ep_chan *mhi_chan; + u32 tmp; + int i; + + for (i = 0; i < mhi_cntrl->max_chan; i++) { + mhi_chan = &mhi_cntrl->mhi_chan[i]; + + if (!mhi_chan->mhi_dev) + continue; + + mutex_lock(&mhi_chan->lock); + /* Skip if the channel is not currently suspended */ + tmp = mhi_cntrl->ch_ctx_cache[i].chcfg; + if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) { + mutex_unlock(&mhi_chan->lock); + continue; + } + + dev_dbg(dev, "Resuming channel (%d)\n", i); + /* Set channel state to RUNNING */ + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= (MHI_CH_STATE_RUNNING << CHAN_CTX_CHSTATE_SHIFT); + mhi_cntrl->ch_ctx_cache[i].chcfg = tmp; + mutex_unlock(&mhi_chan->lock); + } +} + +int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct 
mhi_ep_ring_element *el) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ep_ring *ch_ring, *event_ring; + union mhi_ep_ring_ctx *event_ctx; + struct mhi_result result = {}; + struct mhi_ep_chan *mhi_chan; + u32 event_ring_idx, tmp; + u32 ch_id; + int ret; + + ch_id = MHI_EP_CRE_GET_CHID(el); + mhi_chan = &mhi_cntrl->mhi_chan[ch_id]; + ch_ring = &mhi_cntrl->mhi_chan[ch_id].ring; + + switch (MHI_EP_CRE_GET_TYPE(el)) { + case MHI_PKT_TYPE_START_CHAN_CMD: + dev_dbg(dev, "Received START command for channel (%d)\n", ch_id); + + mutex_lock(&mhi_chan->lock); + /* Initialize and configure the corresponding channel ring */ + if (ch_ring->state == RING_STATE_UINT) { + ret = mhi_ep_ring_start(mhi_cntrl, ch_ring, + (union mhi_ep_ring_ctx *)&mhi_cntrl->ch_ctx_cache[ch_id]); + if (ret) { + dev_err(dev, "Failed to start ring for channel (%d)\n", ch_id); + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, + MHI_EV_CC_UNDEFINED_ERR); + if (ret) + dev_err(dev, "Error sending completion event: %d\n", + MHI_EV_CC_UNDEFINED_ERR); + + goto err_unlock; + } + } + + /* Enable DB for the channel */ + mhi_ep_mmio_enable_chdb_a7(mhi_cntrl, ch_id); + + mutex_lock(&mhi_cntrl->event_lock); + event_ring_idx = mhi_cntrl->ch_ctx_cache[ch_id].erindex; + event_ring = &mhi_cntrl->mhi_event[event_ring_idx].ring; + event_ctx = (union mhi_ep_ring_ctx *)&mhi_cntrl->ev_ctx_cache[event_ring_idx]; + if (event_ring->state == RING_STATE_UINT) { + ret = mhi_ep_ring_start(mhi_cntrl, event_ring, event_ctx); + if (ret) { + dev_err(dev, "Error starting event ring: %d\n", + mhi_cntrl->ch_ctx_cache[ch_id].erindex); + mutex_unlock(&mhi_cntrl->event_lock); + goto err_unlock; + } + } + + mutex_unlock(&mhi_cntrl->event_lock); + + /* Set channel state to RUNNING */ + mhi_chan->state = MHI_CH_STATE_RUNNING; + tmp = mhi_cntrl->ch_ctx_cache[ch_id].chcfg; + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= (MHI_CH_STATE_RUNNING << CHAN_CTX_CHSTATE_SHIFT); + mhi_cntrl->ch_ctx_cache[ch_id].chcfg = tmp; + + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); + if (ret) { + dev_err(dev, "Error sending command completion event: %d\n", + MHI_EV_CC_SUCCESS); + goto err_unlock; + } + + mutex_unlock(&mhi_chan->lock); + + /* + * Create MHI device only during UL channel start. Since the MHI + * channels operate in a pair, we'll associate both UL and DL + * channels to the same device. + * + * We also need to check for mhi_dev != NULL because, the host + * will issue START_CHAN command during resume and we don't + * destroy the device during suspend. 
+ */ + if (!(ch_id % 2) && !mhi_chan->mhi_dev) { + ret = mhi_ep_create_device(mhi_cntrl, ch_id); + if (ret) { + dev_err(dev, "Error creating device for channel (%d)\n", ch_id); + return ret; + } + } + + break; + case MHI_PKT_TYPE_STOP_CHAN_CMD: + dev_dbg(dev, "Received STOP command for channel (%d)\n", ch_id); + if (ch_ring->state == RING_STATE_UINT) { + dev_err(dev, "Channel (%d) not opened\n", ch_id); + return -ENODEV; + } + + mutex_lock(&mhi_chan->lock); + /* Disable DB for the channel */ + mhi_ep_mmio_disable_chdb_a7(mhi_cntrl, ch_id); + + /* Set the local value of the transfer ring read pointer to the channel context */ + ch_ring->rd_offset = mhi_ep_ring_addr2offset(ch_ring, + ch_ring->ring_ctx->generic.rp); + + /* Send channel disconnect status to client drivers */ + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + /* Set channel state to STOP */ + mhi_chan->state = MHI_CH_STATE_STOP; + tmp = mhi_cntrl->ch_ctx_cache[ch_id].chcfg; + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= (MHI_CH_STATE_STOP << CHAN_CTX_CHSTATE_SHIFT); + mhi_cntrl->ch_ctx_cache[ch_id].chcfg = tmp; + + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); + if (ret) { + dev_err(dev, "Error sending command completion event: %d\n", + MHI_EV_CC_SUCCESS); + goto err_unlock; + } + + mutex_unlock(&mhi_chan->lock); + break; + case MHI_PKT_TYPE_RESET_CHAN_CMD: + dev_dbg(dev, "Received STOP command for channel (%d)\n", ch_id); + if (ch_ring->state == RING_STATE_UINT) { + dev_err(dev, "Channel (%d) not opened\n", ch_id); + return -ENODEV; + } + + mutex_lock(&mhi_chan->lock); + /* Stop and reset the transfer ring */ + mhi_ep_ring_stop(mhi_cntrl, ch_ring); + + /* Send channel disconnect status to client driver */ + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + + /* Set channel state to DISABLED */ + mhi_chan->state = MHI_CH_STATE_DISABLED; + tmp = mhi_cntrl->ch_ctx_cache[ch_id].chcfg; + tmp &= ~CHAN_CTX_CHSTATE_MASK; + tmp |= (MHI_CH_STATE_DISABLED << CHAN_CTX_CHSTATE_SHIFT); + mhi_cntrl->ch_ctx_cache[ch_id].chcfg = tmp; + + ret = mhi_ep_send_cmd_comp_event(mhi_cntrl, MHI_EV_CC_SUCCESS); + if (ret) { + dev_err(dev, "Error sending command completion event: %d\n", + MHI_EV_CC_SUCCESS); + goto err_unlock; + } + mutex_unlock(&mhi_chan->lock); + break; + default: + break; + } + + return 0; + +err_unlock: + mutex_unlock(&mhi_chan->lock); + + return ret; +} + +static int mhi_ep_check_tre_bytes_left(struct mhi_ep_cntrl *mhi_cntrl, + struct mhi_ep_ring *ring, + struct mhi_ep_ring_element *el) +{ + struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; + bool td_done = 0; + + /* A full TRE worth of data was consumed. 
Check if we are at a TD boundary */ + if (mhi_chan->tre_bytes_left == 0) { + if (MHI_EP_TRE_GET_CHAIN(el)) { + if (MHI_EP_TRE_GET_IEOB(el)) + mhi_ep_send_completion_event(mhi_cntrl, + ring, MHI_EP_TRE_GET_LEN(el), MHI_EV_CC_EOB); + } else { + if (MHI_EP_TRE_GET_IEOT(el)) + mhi_ep_send_completion_event(mhi_cntrl, + ring, MHI_EP_TRE_GET_LEN(el), MHI_EV_CC_EOT); + td_done = 1; + } + + mhi_ep_ring_inc_index(ring); + mhi_chan->tre_bytes_left = 0; + mhi_chan->tre_loc = 0; + } + + return td_done; +} + +static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, + struct mhi_ep_ring *ring, + struct mhi_result *result, + u32 len) +{ + struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + size_t bytes_to_read, addr_offset; + struct mhi_ep_ring_element *el; + ssize_t bytes_read = 0; + u32 buf_remaining; + void __iomem *tre_buf; + phys_addr_t tre_phys; + void *write_to_loc; + u64 read_from_loc; + bool td_done = 0; + int ret; + + buf_remaining = len; + + do { + /* Don't process the transfer ring if the channel is not in RUNNING state */ + if (mhi_chan->state != MHI_CH_STATE_RUNNING) + return -ENODEV; + + el = &ring->ring_cache[ring->rd_offset]; + + if (mhi_chan->tre_loc) { + bytes_to_read = min(buf_remaining, + mhi_chan->tre_bytes_left); + dev_dbg(dev, "TRE bytes remaining: %d", mhi_chan->tre_bytes_left); + } else { + if (mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE)) + /* Nothing to do */ + return 0; + + mhi_chan->tre_loc = MHI_EP_TRE_GET_PTR(el); + mhi_chan->tre_size = MHI_EP_TRE_GET_LEN(el); + mhi_chan->tre_bytes_left = mhi_chan->tre_size; + + bytes_to_read = min(buf_remaining, mhi_chan->tre_size); + } + + bytes_read += bytes_to_read; + addr_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left; + read_from_loc = mhi_chan->tre_loc + addr_offset; + write_to_loc = result->buf_addr + (len - buf_remaining); + mhi_chan->tre_bytes_left -= bytes_to_read; + + tre_buf = mhi_cntrl->alloc_addr(mhi_cntrl, &tre_phys, bytes_to_read); + if (!tre_buf) { + dev_err(dev, "Failed to allocate TRE buffer\n"); + return -ENOMEM; + } + + ret = mhi_cntrl->map_addr(mhi_cntrl, tre_phys, read_from_loc, bytes_to_read); + if (ret) { + dev_err(dev, "Failed to map TRE buffer\n"); + goto err_tre_free; + } + + dev_dbg(dev, "Reading %d bytes from channel (%d)", bytes_to_read, ring->ch_id); + memcpy_fromio(write_to_loc, tre_buf, bytes_to_read); + + mhi_cntrl->unmap_addr(mhi_cntrl, tre_phys); + mhi_cntrl->free_addr(mhi_cntrl, tre_phys, tre_buf, bytes_to_read); + + buf_remaining -= bytes_to_read; + td_done = mhi_ep_check_tre_bytes_left(mhi_cntrl, ring, el); + } while (buf_remaining && !td_done); + + result->bytes_xferd = bytes_read; + + return bytes_read; + +err_tre_free: + mhi_cntrl->free_addr(mhi_cntrl, tre_phys, tre_buf, bytes_to_read); + + return ret; +} + +int mhi_ep_process_tre_ring(struct mhi_ep_ring *ring, struct mhi_ep_ring_element *el) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_result result = {}; + u32 len = MHI_EP_DEFAULT_MTU; + struct mhi_ep_chan *mhi_chan; + int ret = 0; + + if (ring->ch_id > mhi_cntrl->max_chan) { + dev_err(dev, "Invalid channel ring (%d)\n", ring->ch_id); + return -EINVAL; + } + + mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; + + /* + * Bail out if transfer callback is not registered for the channel. + * This is most likely due to the client driver not loaded at this point. 
+ */ + if (!mhi_chan->xfer_cb) { + dev_err(dev, "No client available for channel (%d)\n", ring->ch_id); + return -ENODEV; + } + + dev_dbg(dev, "Processing TRE ring for channel (%d)\n", ring->ch_id); + + mutex_lock(&mhi_chan->lock); + if (ring->ch_id % 2) { + /* DL channel */ + result.dir = mhi_chan->dir; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } else { + /* UL channel */ + while (1) { + result.buf_addr = kzalloc(len, GFP_KERNEL); + if (!result.buf_addr) { + ret = -ENOMEM; + goto err_unlock; + } + + ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len); + if (ret < 0) { + dev_err(dev, "Failed to read channel (%d)\n", ring->ch_id); + kfree(result.buf_addr); + break; + } else if (ret == 0) { + /* No more data to read */ + kfree(result.buf_addr); + break; + } + + result.dir = mhi_chan->dir; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + kfree(result.buf_addr); + } + } + +err_unlock: + mutex_unlock(&mhi_chan->lock); + + return ret; +} + +static int mhi_ep_cache_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + /* Update the number of event rings (NER) programmed by the host */ + mhi_ep_mmio_update_ner(mhi_cntrl); + + dev_dbg(dev, "Number of Event rings: %d, HW Event rings: %d\n", + mhi_cntrl->event_rings, mhi_cntrl->hw_event_rings); + + mhi_cntrl->ch_ctx_host_size = sizeof(struct mhi_chan_ctx) * + mhi_cntrl->max_chan; + mhi_cntrl->ev_ctx_host_size = sizeof(struct mhi_event_ctx) * + mhi_cntrl->event_rings; + mhi_cntrl->cmd_ctx_host_size = sizeof(struct mhi_cmd_ctx); + + /* Get the channel context base pointer from host */ + mhi_ep_mmio_get_chc_base(mhi_cntrl); + + /* Allocate memory for caching host channel context */ + mhi_cntrl->ch_ctx_cache = mhi_cntrl->alloc_addr(mhi_cntrl, &mhi_cntrl->ch_ctx_cache_phys, + mhi_cntrl->ch_ctx_host_size); + if (!mhi_cntrl->ch_ctx_cache) { + dev_err(dev, "Failed to allocate ch_ctx_cache memory\n"); + return -ENOMEM; + } + + /* Map the host channel context */ + ret = mhi_cntrl->map_addr(mhi_cntrl, mhi_cntrl->ch_ctx_cache_phys, + mhi_cntrl->ch_ctx_host_pa, mhi_cntrl->ch_ctx_host_size); + if (ret) { + dev_err(dev, "Failed to map ch_ctx_cache\n"); + goto err_ch_ctx; + } + + /* Get the event context base pointer from host */ + mhi_ep_mmio_get_erc_base(mhi_cntrl); + + /* Allocate memory for caching host event context */ + mhi_cntrl->ev_ctx_cache = mhi_cntrl->alloc_addr(mhi_cntrl, &mhi_cntrl->ev_ctx_cache_phys, + mhi_cntrl->ev_ctx_host_size); + if (!mhi_cntrl->ev_ctx_cache) { + dev_err(dev, "Failed to allocate ev_ctx_cache memory\n"); + ret = -ENOMEM; + goto err_ch_ctx_map; + } + + /* Map the host event context */ + ret = mhi_cntrl->map_addr(mhi_cntrl, mhi_cntrl->ev_ctx_cache_phys, + mhi_cntrl->ev_ctx_host_pa, mhi_cntrl->ev_ctx_host_size); + if (ret) { + dev_err(dev, "Failed to map ev_ctx_cache\n"); + goto err_ev_ctx; + } + + /* Get the command context base pointer from host */ + mhi_ep_mmio_get_crc_base(mhi_cntrl); + + /* Allocate memory for caching host command context */ + mhi_cntrl->cmd_ctx_cache = mhi_cntrl->alloc_addr(mhi_cntrl, &mhi_cntrl->cmd_ctx_cache_phys, + mhi_cntrl->cmd_ctx_host_size); + if (!mhi_cntrl->cmd_ctx_cache) { + dev_err(dev, "Failed to allocate cmd_ctx_cache memory\n"); + ret = -ENOMEM; + goto err_ev_ctx_map; + } + + /* Map the host command context */ + ret = mhi_cntrl->map_addr(mhi_cntrl, mhi_cntrl->cmd_ctx_cache_phys, + mhi_cntrl->cmd_ctx_host_pa, mhi_cntrl->cmd_ctx_host_size); + if (ret) { + dev_err(dev, "Failed to map cmd_ctx_cache\n"); + goto err_cmd_ctx; + } + + /* 
Initialize command ring */ + ret = mhi_ep_ring_start(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring, + (union mhi_ep_ring_ctx *)mhi_cntrl->cmd_ctx_cache); + if (ret) { + dev_err(dev, "Failed to start the command ring\n"); + goto err_cmd_ctx_map; + } + + return ret; + +err_cmd_ctx_map: + mhi_cntrl->unmap_addr(mhi_cntrl, mhi_cntrl->cmd_ctx_cache_phys); + +err_cmd_ctx: + mhi_cntrl->free_addr(mhi_cntrl, mhi_cntrl->cmd_ctx_cache_phys, + mhi_cntrl->cmd_ctx_cache, mhi_cntrl->cmd_ctx_host_size); + +err_ev_ctx_map: + mhi_cntrl->unmap_addr(mhi_cntrl, mhi_cntrl->ev_ctx_cache_phys); + +err_ev_ctx: + mhi_cntrl->free_addr(mhi_cntrl, mhi_cntrl->ev_ctx_cache_phys, + mhi_cntrl->ev_ctx_cache, mhi_cntrl->ev_ctx_host_size); + +err_ch_ctx_map: + mhi_cntrl->unmap_addr(mhi_cntrl, mhi_cntrl->ch_ctx_cache_phys); + +err_ch_ctx: + mhi_cntrl->free_addr(mhi_cntrl, mhi_cntrl->ch_ctx_cache_phys, + mhi_cntrl->ch_ctx_cache, mhi_cntrl->ch_ctx_host_size); + + return ret; +} + +static void mhi_ep_free_host_cfg(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_cntrl->unmap_addr(mhi_cntrl, mhi_cntrl->cmd_ctx_cache_phys); + mhi_cntrl->free_addr(mhi_cntrl, mhi_cntrl->cmd_ctx_cache_phys, + mhi_cntrl->cmd_ctx_cache, mhi_cntrl->cmd_ctx_host_size); + mhi_cntrl->unmap_addr(mhi_cntrl, mhi_cntrl->ev_ctx_cache_phys); + mhi_cntrl->free_addr(mhi_cntrl, mhi_cntrl->ev_ctx_cache_phys, + mhi_cntrl->ev_ctx_cache, mhi_cntrl->ev_ctx_host_size); + mhi_cntrl->unmap_addr(mhi_cntrl, mhi_cntrl->ch_ctx_cache_phys); + mhi_cntrl->free_addr(mhi_cntrl, mhi_cntrl->ch_ctx_cache_phys, + mhi_cntrl->ch_ctx_cache, mhi_cntrl->ch_ctx_host_size); +} + +static void mhi_ep_enable_int(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_enable_chdb_interrupts(mhi_cntrl); + mhi_ep_mmio_enable_ctrl_interrupt(mhi_cntrl); + mhi_ep_mmio_enable_cmdb_interrupt(mhi_cntrl); +} + +static int mhi_ep_enable(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state state; + u32 max_cnt = 0; + bool mhi_reset; + int ret; + + /* Wait for Host to set the M0 state */ + do { + msleep(MHI_SUSPEND_MIN); + mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset); + if (mhi_reset) { + /* Clear the MHI reset if host is in reset state */ + mhi_ep_mmio_clear_reset(mhi_cntrl); + dev_dbg(dev, "Host initiated reset while waiting for M0\n"); + } + max_cnt++; + } while (state != MHI_STATE_M0 && max_cnt < MHI_SUSPEND_TIMEOUT); + + if (state == MHI_STATE_M0) { + ret = mhi_ep_cache_host_cfg(mhi_cntrl); + if (ret) { + dev_err(dev, "Failed to cache host config\n"); + return ret; + } + + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EP_AMSS_EE); + } else { + dev_err(dev, "Host failed to enter M0\n"); + return -ETIMEDOUT; + } + + /* Enable all interrupts now */ + mhi_ep_enable_int(mhi_cntrl); + + return 0; +} + +static void mhi_ep_ring_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = container_of(work, + struct mhi_ep_cntrl, ring_work); + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ep_ring *ring; + struct list_head *cp, *q; + unsigned long flags; + int ret = 0; + + /* Process the command ring first */ + ret = mhi_ep_process_ring(&mhi_cntrl->mhi_cmd->ring); + if (ret) { + dev_err(dev, "Error processing command ring\n"); + goto err_unlock; + } + + spin_lock_irqsave(&mhi_cntrl->transition_lock, flags); + /* Process the channel rings now */ + list_for_each_safe(cp, q, &mhi_cntrl->ring_transition_list) { + ring = list_entry(cp, struct mhi_ep_ring, list); + list_del(cp); + ret = mhi_ep_process_ring(ring); + if (ret) { + dev_err(dev, "Error processing channel ring: 
%d\n", ring->ch_id); + goto err_unlock; + } + + /* Re-enable channel interrupt */ + mhi_ep_mmio_enable_chdb_a7(mhi_cntrl, ring->ch_id); + } + +err_unlock: + spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags); +} + +static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, + u32 ch_int, u32 ch_idx) +{ + struct mhi_ep_ring *ring; + + for (; ch_int; ch_idx++, ch_int >>= 1) { + if (ch_int & 1) { + ring = &mhi_cntrl->mhi_chan[ch_idx].ring; + + spin_lock(&mhi_cntrl->transition_lock); + list_add(&ring->list, &mhi_cntrl->ring_transition_list); + spin_unlock(&mhi_cntrl->transition_lock); + /* + * Disable the channel interrupt here and enable it once + * the current interrupt got serviced + */ + mhi_ep_mmio_disable_chdb_a7(mhi_cntrl, ch_idx); + queue_work(mhi_cntrl->ring_wq, &mhi_cntrl->ring_work); + } + } +} + +static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + u32 ch_int, ch_idx; + int i; + + mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl); + + for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) { + ch_idx = i * MHI_MASK_CH_EV_LEN; + + /* Only process channel interrupt if the mask is enabled */ + ch_int = (mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask); + if (ch_int) { + dev_dbg(dev, "Processing channel doorbell interrupt\n"); + mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx); + mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_A7_n(i), + mhi_cntrl->chdb[i].status); + } + } +} + +void mhi_ep_state_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, state_work); + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ep_state_transition *itr, *tmp; + unsigned long flags; + LIST_HEAD(head); + int ret; + + spin_lock_irqsave(&mhi_cntrl->transition_lock, flags); + list_splice_tail_init(&mhi_cntrl->st_transition_list, &head); + spin_unlock_irqrestore(&mhi_cntrl->transition_lock, flags); + + list_for_each_entry_safe(itr, tmp, &head, node) { + list_del(&itr->node); + dev_dbg(dev, "Handling MHI state transition to %s\n", + TO_MHI_STATE_STR(itr->state)); + + switch (itr->state) { + case MHI_STATE_M0: + ret = mhi_ep_set_m0_state(mhi_cntrl); + if (ret) + dev_err(dev, "Failed to transition to M0 state\n"); + break; + case MHI_STATE_M3: + ret = mhi_ep_set_m3_state(mhi_cntrl); + if (ret) + dev_err(dev, "Failed to transition to M3 state\n"); + break; + default: + dev_err(dev, "Invalid MHI state transition: %d", itr->state); + break; + } + } +} + +static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl, + enum mhi_state state) +{ + struct mhi_ep_state_transition *item = kmalloc(sizeof(*item), GFP_ATOMIC); + + item->state = state; + spin_lock(&mhi_cntrl->transition_lock); + list_add_tail(&item->node, &mhi_cntrl->st_transition_list); + spin_unlock(&mhi_cntrl->transition_lock); + + queue_work(mhi_cntrl->state_wq, &mhi_cntrl->state_work); +} + +static void mhi_ep_abort_transfer(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct mhi_ep_ring *ch_ring, *ev_ring; + struct mhi_result result = {}; + struct mhi_ep_chan *mhi_chan; + int i; + + /* Stop all the channels */ + for (i = 0; i < mhi_cntrl->max_chan; i++) { + ch_ring = &mhi_cntrl->mhi_chan[i].ring; + if (ch_ring->state == RING_STATE_UINT) + continue; + + mhi_chan = &mhi_cntrl->mhi_chan[i]; + mutex_lock(&mhi_chan->lock); + /* Send channel disconnect status to client drivers */ + if (mhi_chan->xfer_cb) { + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + 
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } + + /* Set channel state to DISABLED */ + mhi_chan->state = MHI_CH_STATE_DISABLED; + mutex_unlock(&mhi_chan->lock); + } + + flush_workqueue(mhi_cntrl->ring_wq); + flush_workqueue(mhi_cntrl->state_wq); + + /* Destroy devices associated with all channels */ + device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_ep_destroy_device); + + /* Stop and reset the transfer rings */ + for (i = 0; i < mhi_cntrl->max_chan; i++) { + ch_ring = &mhi_cntrl->mhi_chan[i].ring; + if (ch_ring->state == RING_STATE_UINT) + continue; + + mhi_chan = &mhi_cntrl->mhi_chan[i]; + mutex_lock(&mhi_chan->lock); + mhi_ep_ring_stop(mhi_cntrl, ch_ring); + mutex_unlock(&mhi_chan->lock); + } + + /* Stop and reset the event rings */ + for (i = 0; i < mhi_cntrl->event_rings; i++) { + ev_ring = &mhi_cntrl->mhi_event[i].ring; + if (ev_ring->state == RING_STATE_UINT) + continue; + + mutex_lock(&mhi_cntrl->event_lock); + mhi_ep_ring_stop(mhi_cntrl, ev_ring); + mutex_unlock(&mhi_cntrl->event_lock); + } + + /* Stop and reset the command ring */ + mhi_ep_ring_stop(mhi_cntrl, &mhi_cntrl->mhi_cmd->ring); + + mhi_ep_free_host_cfg(mhi_cntrl); + mhi_ep_mmio_mask_interrupts(mhi_cntrl); + + mhi_cntrl->is_enabled = false; +} + +/* + * Interrupt handler that services interrupts raised by the host writing to + * MHICTRL and Command ring doorbell (CRDB) registers for state change and + * channel interrupts. + */ +static irqreturn_t mhi_ep_irq(int irq, void *data) +{ + struct mhi_ep_cntrl *mhi_cntrl = data; + + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state state; + u32 int_value = 0; + bool mhi_reset; + + /* Acknowledge the interrupts */ + mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS_A7, &int_value); + mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR_A7, int_value); + + /* Check for ctrl interrupt */ + if (FIELD_GET(MHI_CTRL_INT_STATUS_A7_MSK, int_value)) { + dev_dbg(dev, "Processing ctrl interrupt\n"); + + mhi_ep_mmio_get_mhi_state(mhi_cntrl, &state, &mhi_reset); + + if (mhi_reset) { + dev_info(dev, "Host triggered MHI reset!\n"); + disable_irq_nosync(mhi_cntrl->irq); + schedule_work(&mhi_cntrl->reset_work); + return IRQ_HANDLED; + } + + mhi_ep_process_ctrl_interrupt(mhi_cntrl, state); + } + + /* Check for cmd doorbell interrupt */ + if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value)) { + dev_dbg(dev, "Processing command doorbell interrupt\n"); + queue_work(mhi_cntrl->ring_wq, &mhi_cntrl->ring_work); + } + + /* Check for channel interrupts */ + mhi_ep_check_channel_interrupt(mhi_cntrl); + + return IRQ_HANDLED; +} + +void mhi_ep_reset_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, reset_work); + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state cur_state; + int ret; + + mhi_ep_abort_transfer(mhi_cntrl); + + spin_lock_bh(&mhi_cntrl->state_lock); + /* Reset MMIO to signal host that the MHI_RESET is completed in endpoint */ + mhi_ep_mmio_reset(mhi_cntrl); + cur_state = mhi_cntrl->mhi_state; + spin_unlock_bh(&mhi_cntrl->state_lock); + + /* + * Only proceed further if the reset is due to SYS_ERR. The host will + * issue reset during shutdown also and we don't need to do re-init in + * that case. 
+ */ + if (cur_state == MHI_STATE_SYS_ERR) { + mhi_ep_mmio_init(mhi_cntrl); + + /* Set AMSS EE before signaling ready state */ + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EP_AMSS_EE); + + /* All set, notify the host that we are ready */ + ret = mhi_ep_set_ready_state(mhi_cntrl); + if (ret) + return; + + dev_dbg(dev, "READY state notification sent to the host\n"); + + ret = mhi_ep_enable(mhi_cntrl); + if (ret) { + dev_err(dev, "Failed to enable MHI endpoint: %d\n", ret); + return; + } + + enable_irq(mhi_cntrl->irq); + } +} + +void mhi_ep_init_worker(struct work_struct *work) +{ + struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, init_work); + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret, i; + + /* + * Mask all interrupts until the state machine is ready. Interrupts will + * be enabled later with mhi_ep_enable(). + */ + mhi_ep_mmio_mask_interrupts(mhi_cntrl); + mhi_ep_mmio_init(mhi_cntrl); + + mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)), + GFP_KERNEL); + if (!mhi_cntrl->mhi_event) + return; + + /* Initialize command, channel and event rings */ + mhi_ep_ring_init(&mhi_cntrl->mhi_cmd->ring, RING_TYPE_CMD, 0); + for (i = 0; i < mhi_cntrl->max_chan; i++) + mhi_ep_ring_init(&mhi_cntrl->mhi_chan[i].ring, RING_TYPE_CH, i); + for (i = 0; i < mhi_cntrl->event_rings; i++) + mhi_ep_ring_init(&mhi_cntrl->mhi_event[i].ring, RING_TYPE_ER, i); + + spin_lock_bh(&mhi_cntrl->state_lock); + mhi_cntrl->mhi_state = MHI_STATE_RESET; + spin_unlock_bh(&mhi_cntrl->state_lock); + + /* Set AMSS EE before signaling ready state */ + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EP_AMSS_EE); + + /* All set, notify the host that we are ready */ + ret = mhi_ep_set_ready_state(mhi_cntrl); + if (ret) + goto err_free_event; + + dev_dbg(dev, "READY state notification sent to the host\n"); + + ret = mhi_ep_enable(mhi_cntrl); + if (ret) { + dev_err(dev, "Failed to enable MHI endpoint: %d\n", ret); + goto err_free_event; + } + + enable_irq(mhi_cntrl->irq); + + return; + +err_free_event: + kfree(mhi_cntrl->mhi_event); +} + +static void skip_to_next_td(struct mhi_ep_chan *mhi_chan, struct mhi_ep_ring *ring) +{ + struct mhi_ep_ring_element *el; + u32 td_boundary_reached = 0; + + mhi_chan->skip_td = 1; + el = &ring->ring_cache[ring->rd_offset]; + while (ring->rd_offset != ring->wr_offset) { + if (td_boundary_reached) { + mhi_chan->skip_td = 0; + break; + } + + if (!MHI_EP_TRE_GET_CHAIN(el)) + td_boundary_reached = 1; + + mhi_ep_ring_inc_index(ring); + el = &ring->ring_cache[ring->rd_offset]; + } +} + +bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir) +{ + struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? mhi_dev->dl_chan : + mhi_dev->ul_chan; + struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; + + return !!(ring->rd_offset == ring->wr_offset); +} +EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty); + +int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir, + struct sk_buff *skb, size_t len, enum mhi_flags mflags) +{ + struct mhi_ep_chan *mhi_chan = (dir == DMA_FROM_DEVICE) ? 
mhi_dev->dl_chan : + mhi_dev->ul_chan; + size_t usr_buf_offset, bytes_to_write; + struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_ev_ccs code = MHI_EV_CC_INVALID; + u64 write_to_loc, skip_tre = 0; + struct mhi_ep_ring_element *el; + struct mhi_ep_ring *ring; + u32 buf_remaining; + void *read_from_loc; + void __iomem *tre_buf; + phys_addr_t tre_phys; + u32 tre_len; + int ret = 0; + + if (dir == DMA_TO_DEVICE) + return -EINVAL; + + buf_remaining = len; + ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; + + mutex_lock(&mhi_chan->lock); + if (mhi_chan->skip_td) + skip_to_next_td(mhi_chan, ring); + + do { + /* Don't process the transfer ring if the channel is not in RUNNING state */ + if (mhi_chan->state != MHI_CH_STATE_RUNNING) { + dev_err(dev, "Channel (%d) not available", mhi_chan->chan); + ret = -ENODEV; + goto err_exit; + } + + if (mhi_ep_queue_is_empty(mhi_dev, dir)) { + dev_err(dev, "TRE not available!\n"); + ret = -EINVAL; + goto err_exit; + } + + el = &ring->ring_cache[ring->rd_offset]; + tre_len = MHI_EP_TRE_GET_LEN(el); + if (skb->len > tre_len) { + dev_err(dev, "Buffer size (%d) is too large for TRE length (%d)!\n", + skb->len, tre_len); + ret = -ENOMEM; + goto err_exit; + } + + bytes_to_write = min(buf_remaining, tre_len); + usr_buf_offset = skb->len - bytes_to_write; + read_from_loc = skb->data; + write_to_loc = MHI_EP_TRE_GET_PTR(el); + + tre_buf = mhi_cntrl->alloc_addr(mhi_cntrl, &tre_phys, bytes_to_write); + if (!tre_buf) { + dev_err(dev, "Failed to allocate TRE buffer\n"); + ret = -ENOMEM; + goto err_exit; + } + + ret = mhi_cntrl->map_addr(mhi_cntrl, tre_phys, write_to_loc, bytes_to_write); + if (ret) { + dev_err(dev, "Failed to map TRE buffer\n"); + goto err_tre_free; + } + + dev_dbg(dev, "Writing %d bytes to channel (%d)", bytes_to_write, ring->ch_id); + memcpy_toio(tre_buf, read_from_loc, bytes_to_write); + + mhi_cntrl->unmap_addr(mhi_cntrl, tre_phys); + mhi_cntrl->free_addr(mhi_cntrl, tre_phys, tre_buf, bytes_to_write); + + buf_remaining -= bytes_to_write; + if (buf_remaining) { + if (!MHI_EP_TRE_GET_CHAIN(el)) + code = MHI_EV_CC_OVERFLOW; + else if (MHI_EP_TRE_GET_IEOB(el)) + code = MHI_EV_CC_EOB; + } else { + if (MHI_EP_TRE_GET_CHAIN(el)) + skip_tre = 1; + code = MHI_EV_CC_EOT; + } + + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, bytes_to_write, code); + if (ret) { + dev_err(dev, "Error sending completion event for channel (%d)\n", + ring->ch_id); + goto err_exit; + } + + mhi_ep_ring_inc_index(ring); + } while (!skip_tre && buf_remaining); + + if (skip_tre) + skip_to_next_td(mhi_chan, ring); + + mutex_unlock(&mhi_chan->lock); + + return 0; + +err_tre_free: + mhi_cntrl->free_addr(mhi_cntrl, tre_phys, tre_buf, bytes_to_write); +err_exit: + mutex_unlock(&mhi_chan->lock); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_ep_queue_skb); + +void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl) +{ + if (mhi_cntrl->is_enabled) + mhi_ep_abort_transfer(mhi_cntrl); + disable_irq(mhi_cntrl->irq); +} +EXPORT_SYMBOL_GPL(mhi_ep_power_down); + +void mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl) +{ + schedule_work(&mhi_cntrl->init_work); + mhi_cntrl->is_enabled = true; +} +EXPORT_SYMBOL_GPL(mhi_ep_power_up); + +static void mhi_ep_release_device(struct device *dev) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + + /* + * We need to set the mhi_chan->mhi_dev to NULL here since the MHI + * devices for the channels will only get created if the mhi_dev + * associated with it is NULL. 
+ */ + if (mhi_dev->ul_chan) + mhi_dev->ul_chan->mhi_dev = NULL; + + if (mhi_dev->dl_chan) + mhi_dev->dl_chan->mhi_dev = NULL; + + kfree(mhi_dev); +} + +struct mhi_ep_device *mhi_ep_alloc_device(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct mhi_ep_device *mhi_dev; + struct device *dev; + + mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL); + if (!mhi_dev) + return ERR_PTR(-ENOMEM); + + dev = &mhi_dev->dev; + device_initialize(dev); + dev->bus = &mhi_ep_bus_type; + dev->release = mhi_ep_release_device; + + if (mhi_cntrl->mhi_dev) { + /* for MHI client devices, parent is the MHI controller device */ + dev->parent = &mhi_cntrl->mhi_dev->dev; + } else { + /* for MHI controller device, parent is the bus device (e.g. PCI EPF) */ + dev->parent = mhi_cntrl->cntrl_dev; + } + + mhi_dev->mhi_cntrl = mhi_cntrl; + + return mhi_dev; +} + +static int mhi_ep_create_device(struct mhi_ep_cntrl *mhi_cntrl, u32 ch_id) +{ + struct mhi_ep_device *mhi_dev; + struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ch_id]; + int ret; + + mhi_dev = mhi_ep_alloc_device(mhi_cntrl); + if (IS_ERR(mhi_dev)) + return PTR_ERR(mhi_dev); + + mhi_dev->dev_type = MHI_DEVICE_XFER; + + /* Configure primary channel */ + if (mhi_chan->dir == DMA_TO_DEVICE) { + mhi_dev->ul_chan = mhi_chan; + mhi_dev->ul_chan_id = mhi_chan->chan; + } else { + mhi_dev->dl_chan = mhi_chan; + mhi_dev->dl_chan_id = mhi_chan->chan; + } + + get_device(&mhi_dev->dev); + mhi_chan->mhi_dev = mhi_dev; + + /* Configure secondary channel as well */ + mhi_chan++; + if (mhi_chan->dir == DMA_TO_DEVICE) { + mhi_dev->ul_chan = mhi_chan; + mhi_dev->ul_chan_id = mhi_chan->chan; + } else { + mhi_dev->dl_chan = mhi_chan; + mhi_dev->dl_chan_id = mhi_chan->chan; + } + + get_device(&mhi_dev->dev); + mhi_chan->mhi_dev = mhi_dev; + + /* Channel name is same for both UL and DL */ + mhi_dev->name = mhi_chan->name; + dev_set_name(&mhi_dev->dev, "%s_%s", + dev_name(&mhi_cntrl->mhi_dev->dev), + mhi_dev->name); + + ret = device_add(&mhi_dev->dev); + if (ret) + put_device(&mhi_dev->dev); + + return ret; +} + +static int mhi_ep_destroy_device(struct device *dev, void *data) +{ + struct mhi_ep_device *mhi_dev; + struct mhi_ep_cntrl *mhi_cntrl; + struct mhi_ep_chan *ul_chan, *dl_chan; + + if (dev->bus != &mhi_ep_bus_type) + return 0; + + mhi_dev = to_mhi_ep_device(dev); + mhi_cntrl = mhi_dev->mhi_cntrl; + + /* Only destroy devices created for channels */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + ul_chan = mhi_dev->ul_chan; + dl_chan = mhi_dev->dl_chan; + + if (ul_chan) + put_device(&ul_chan->mhi_dev->dev); + + if (dl_chan) + put_device(&dl_chan->mhi_dev->dev); + + dev_dbg(&mhi_cntrl->mhi_dev->dev, "Destroying device for chan:%s\n", + mhi_dev->name); + + /* Notify the client and remove the device from MHI bus */ + device_del(dev); + put_device(dev); + + return 0; +} + +static int parse_ch_cfg(struct mhi_ep_cntrl *mhi_cntrl, + const struct mhi_ep_cntrl_config *config) +{ + const struct mhi_ep_channel_config *ch_cfg; + struct device *dev = mhi_cntrl->cntrl_dev; + u32 chan, i; + int ret = -EINVAL; + + mhi_cntrl->max_chan = config->max_channels; + + /* + * Allocate max_channels supported by the MHI endpoint and populate + * only the defined channels + */ + mhi_cntrl->mhi_chan = kcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan), + GFP_KERNEL); + if (!mhi_cntrl->mhi_chan) + return -ENOMEM; + + for (i = 0; i < config->num_channels; i++) { + struct mhi_ep_chan *mhi_chan; + + ch_cfg = &config->ch_cfg[i]; + + chan = ch_cfg->num; + if (chan >= mhi_cntrl->max_chan) { + 
dev_err(dev, "Channel %d not available\n", chan); + goto error_chan_cfg; + } + + mhi_chan = &mhi_cntrl->mhi_chan[chan]; + mhi_chan->name = ch_cfg->name; + mhi_chan->chan = chan; + mhi_chan->dir = ch_cfg->dir; + mutex_init(&mhi_chan->lock); + + /* Bi-directional and direction less channels are not supported */ + if (mhi_chan->dir == DMA_BIDIRECTIONAL || mhi_chan->dir == DMA_NONE) { + dev_err(dev, "Invalid channel configuration\n"); + goto error_chan_cfg; + } + } + + return 0; + +error_chan_cfg: + kfree(mhi_cntrl->mhi_chan); + + return ret; +} + +/* + * Allocate channel and command rings here. Event rings will be allocated + * in mhi_ep_init_worker() as the config comes from the host. + */ +int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, + const struct mhi_ep_cntrl_config *config) +{ + struct mhi_ep_device *mhi_dev; + int ret; + + if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq) + return -EINVAL; + + ret = parse_ch_cfg(mhi_cntrl, config); + if (ret) + return ret; + + mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS, sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL); + if (!mhi_cntrl->mhi_cmd) { + ret = -ENOMEM; + goto err_free_ch; + } + + INIT_WORK(&mhi_cntrl->ring_work, mhi_ep_ring_worker); + INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker); + INIT_WORK(&mhi_cntrl->init_work, mhi_ep_init_worker); + INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker); + + mhi_cntrl->ring_wq = alloc_ordered_workqueue("mhi_ep_ring_wq", WQ_HIGHPRI); + if (!mhi_cntrl->ring_wq) { + ret = -ENOMEM; + goto err_free_cmd; + } + + mhi_cntrl->state_wq = alloc_ordered_workqueue("mhi_ep_state_wq", WQ_HIGHPRI); + if (!mhi_cntrl->state_wq) { + ret = -ENOMEM; + goto err_destroy_ring_wq; + } + + INIT_LIST_HEAD(&mhi_cntrl->ring_transition_list); + INIT_LIST_HEAD(&mhi_cntrl->st_transition_list); + spin_lock_init(&mhi_cntrl->transition_lock); + spin_lock_init(&mhi_cntrl->state_lock); + mutex_init(&mhi_cntrl->event_lock); + + /* Set MHI version and AMSS EE before link up */ + mhi_ep_mmio_write(mhi_cntrl, MHIVER, config->mhi_version); + mhi_ep_mmio_set_env(mhi_cntrl, MHI_EP_AMSS_EE); + + /* Set controller index */ + mhi_cntrl->index = ida_alloc(&mhi_ep_cntrl_ida, GFP_KERNEL); + if (mhi_cntrl->index < 0) { + ret = mhi_cntrl->index; + goto err_destroy_state_wq; + } + + irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN); + ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH, + "doorbell_irq", mhi_cntrl); + if (ret) { + dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ: %d\n", ret); + goto err_ida_free; + } + + /* Allocate the controller device */ + mhi_dev = mhi_ep_alloc_device(mhi_cntrl); + if (IS_ERR(mhi_dev)) { + dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n"); + ret = PTR_ERR(mhi_dev); + goto err_free_irq; + } + + mhi_dev->dev_type = MHI_DEVICE_CONTROLLER; + dev_set_name(&mhi_dev->dev, "mhi_ep%d", mhi_cntrl->index); + mhi_dev->name = dev_name(&mhi_dev->dev); + + ret = device_add(&mhi_dev->dev); + if (ret) + goto err_release_dev; + + mhi_cntrl->mhi_dev = mhi_dev; + + dev_dbg(&mhi_dev->dev, "MHI EP Controller registered\n"); + + return 0; + +err_release_dev: + put_device(&mhi_dev->dev); +err_free_irq: + free_irq(mhi_cntrl->irq, mhi_cntrl); +err_ida_free: + ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); +err_destroy_state_wq: + destroy_workqueue(mhi_cntrl->state_wq); +err_destroy_ring_wq: + destroy_workqueue(mhi_cntrl->ring_wq); +err_free_cmd: + kfree(mhi_cntrl->mhi_cmd); +err_free_ch: + kfree(mhi_cntrl->mhi_chan); + + return ret; +} 
+EXPORT_SYMBOL_GPL(mhi_ep_register_controller); + +/* + * It is expected that the controller drivers will power down the MHI EP stack + * using "mhi_ep_power_down()" before calling this function to unregister themselves. + */ +void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct mhi_ep_device *mhi_dev = mhi_cntrl->mhi_dev; + + destroy_workqueue(mhi_cntrl->state_wq); + destroy_workqueue(mhi_cntrl->ring_wq); + + free_irq(mhi_cntrl->irq, mhi_cntrl); + + kfree(mhi_cntrl->mhi_cmd); + kfree(mhi_cntrl->mhi_event); + kfree(mhi_cntrl->mhi_chan); + + device_del(&mhi_dev->dev); + put_device(&mhi_dev->dev); + + ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); +} +EXPORT_SYMBOL_GPL(mhi_ep_unregister_controller); + +static int mhi_ep_driver_probe(struct device *dev) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver); + struct mhi_ep_chan *ul_chan = mhi_dev->ul_chan; + struct mhi_ep_chan *dl_chan = mhi_dev->dl_chan; + + if (ul_chan) + ul_chan->xfer_cb = mhi_drv->ul_xfer_cb; + + if (dl_chan) + dl_chan->xfer_cb = mhi_drv->dl_xfer_cb; + + return mhi_drv->probe(mhi_dev, mhi_dev->id); +} + +static int mhi_ep_driver_remove(struct device *dev) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(dev->driver); + struct mhi_result result = {}; + struct mhi_ep_chan *mhi_chan; + int dir; + + /* Skip if it is a controller device */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + /* Disconnect the channels associated with the driver */ + for (dir = 0; dir < 2; dir++) { + mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; + + if (!mhi_chan) + continue; + + mutex_lock(&mhi_chan->lock); + /* Send channel disconnect status to the client driver */ + if (mhi_chan->xfer_cb) { + result.transaction_status = -ENOTCONN; + result.bytes_xferd = 0; + mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); + } + + /* Set channel state to DISABLED */ + mhi_chan->state = MHI_CH_STATE_DISABLED; + mhi_chan->xfer_cb = NULL; + mutex_unlock(&mhi_chan->lock); + } + + /* Remove the client driver now */ + mhi_drv->remove(mhi_dev); + + return 0; +} + +int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner) +{ + struct device_driver *driver = &mhi_drv->driver; + + if (!mhi_drv->probe || !mhi_drv->remove) + return -EINVAL; + + driver->bus = &mhi_ep_bus_type; + driver->owner = owner; + driver->probe = mhi_ep_driver_probe; + driver->remove = mhi_ep_driver_remove; + + return driver_register(driver); +} +EXPORT_SYMBOL_GPL(__mhi_ep_driver_register); + +void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv) +{ + driver_unregister(&mhi_drv->driver); +} +EXPORT_SYMBOL_GPL(mhi_ep_driver_unregister); + +static int mhi_ep_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + + return add_uevent_var(env, "MODALIAS=" MHI_EP_DEVICE_MODALIAS_FMT, + mhi_dev->name); +} + +static int mhi_ep_match(struct device *dev, struct device_driver *drv) +{ + struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev); + struct mhi_ep_driver *mhi_drv = to_mhi_ep_driver(drv); + const struct mhi_device_id *id; + + /* + * If the device is a controller type then there is no client driver + * associated with it + */ + if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) + return 0; + + for (id = mhi_drv->id_table; id->chan[0]; id++) + if (!strcmp(mhi_dev->name, id->chan)) { + mhi_dev->id = id; + return 1; + } + + return 0; +}; + 
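Editorial note, not part of the patch: mhi_ep_register_controller() defined earlier in this file expects the controller driver (in practice the PCIe endpoint function driver layered on top of pcie-qcom-ep) to have filled in the MMIO base, the doorbell IRQ and the host address-space accessor callbacks before registering, and to call mhi_ep_power_up() only once the link is usable. A minimal sketch of that probe-time sequence follows; the foo_* names are illustrative placeholders, not symbols introduced by this series:

	/* Fragment of a hypothetical controller driver probe */
	struct mhi_ep_cntrl *mhi_cntrl = &foo->mhi_cntrl;
	int ret;

	mhi_cntrl->cntrl_dev = dev;
	mhi_cntrl->mmio = foo->mmio_base;	/* BAR-backed MMIO region */
	mhi_cntrl->irq = foo->doorbell_irq;
	mhi_cntrl->alloc_addr = foo_alloc_addr;	/* host address-space helpers */
	mhi_cntrl->map_addr = foo_map_addr;
	mhi_cntrl->unmap_addr = foo_unmap_addr;
	mhi_cntrl->free_addr = foo_free_addr;

	ret = mhi_ep_register_controller(mhi_cntrl, &foo_mhi_config);
	if (ret)
		return ret;

	/* Start the MHI EP stack once the host side of the link is ready */
	mhi_ep_power_up(mhi_cntrl);
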
+struct bus_type mhi_ep_bus_type = { + .name = "mhi_ep", + .dev_name = "mhi_ep", + .match = mhi_ep_match, + .uevent = mhi_ep_uevent, +}; + +static int __init mhi_ep_init(void) +{ + return bus_register(&mhi_ep_bus_type); +} + +static void __exit mhi_ep_exit(void) +{ + bus_unregister(&mhi_ep_bus_type); +} + +postcore_initcall(mhi_ep_init); +module_exit(mhi_ep_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MHI Bus Endpoint stack"); +MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>"); diff --git a/drivers/bus/mhi/ep/mmio.c b/drivers/bus/mhi/ep/mmio.c new file mode 100644 index 000000000000..c53c62b8094e --- /dev/null +++ b/drivers/bus/mhi/ep/mmio.c @@ -0,0 +1,304 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Linaro Ltd. + * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> + */ + +#include <linux/bitfield.h> +#include <linux/io.h> +#include <linux/mhi_ep.h> + +#include "internal.h" + +void mhi_ep_mmio_read(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 *regval) +{ + *regval = readl(mhi_cntrl->mmio + offset); +} + +void mhi_ep_mmio_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 val) +{ + writel(val, mhi_cntrl->mmio + offset); +} + +void mhi_ep_mmio_masked_write(struct mhi_ep_cntrl *mhi_cntrl, u32 offset, u32 mask, + u32 shift, u32 val) +{ + u32 regval; + + mhi_ep_mmio_read(mhi_cntrl, offset, ®val); + regval &= ~mask; + regval |= ((val << shift) & mask); + mhi_ep_mmio_write(mhi_cntrl, offset, regval); +} + +int mhi_ep_mmio_masked_read(struct mhi_ep_cntrl *dev, u32 offset, + u32 mask, u32 shift, u32 *regval) +{ + mhi_ep_mmio_read(dev, offset, regval); + *regval &= mask; + *regval >>= shift; + + return 0; +} + +void mhi_ep_mmio_get_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state *state, + bool *mhi_reset) +{ + u32 regval; + + mhi_ep_mmio_read(mhi_cntrl, MHICTRL, ®val); + *state = FIELD_GET(MHICTRL_MHISTATE_MASK, regval); + *mhi_reset = !!FIELD_GET(MHICTRL_RESET_MASK, regval); +} + +static void mhi_ep_mmio_mask_set_chdb_int_a7(struct mhi_ep_cntrl *mhi_cntrl, + u32 chdb_id, bool enable) +{ + u32 chid_mask, chid_idx, chid_shft, val = 0; + + chid_shft = chdb_id % 32; + chid_mask = BIT(chid_shft); + chid_idx = chdb_id / 32; + + if (chid_idx >= MHI_MASK_ROWS_CH_EV_DB) + return; + + if (enable) + val = 1; + + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CHDB_INT_MASK_A7_n(chid_idx), + chid_mask, chid_shft, val); + mhi_ep_mmio_read(mhi_cntrl, MHI_CHDB_INT_MASK_A7_n(chid_idx), + &mhi_cntrl->chdb[chid_idx].mask); +} + +void mhi_ep_mmio_enable_chdb_a7(struct mhi_ep_cntrl *mhi_cntrl, u32 chdb_id) +{ + mhi_ep_mmio_mask_set_chdb_int_a7(mhi_cntrl, chdb_id, true); +} + +void mhi_ep_mmio_disable_chdb_a7(struct mhi_ep_cntrl *mhi_cntrl, u32 chdb_id) +{ + mhi_ep_mmio_mask_set_chdb_int_a7(mhi_cntrl, chdb_id, false); +} + +static void mhi_ep_mmio_set_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable) +{ + u32 val = 0, i = 0; + + if (enable) + val = MHI_CHDB_INT_MASK_A7_n_EN_ALL; + + for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) { + mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_MASK_A7_n(i), val); + mhi_cntrl->chdb[i].mask = val; + } +} + +void mhi_ep_mmio_enable_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, true); +} + +void mhi_ep_mmio_mask_chdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_set_chdb_interrupts(mhi_cntrl, false); +} + +void mhi_ep_mmio_read_chdb_status_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 i; + + for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) 
+ mhi_ep_mmio_read(mhi_cntrl, MHI_CHDB_INT_STATUS_A7_n(i), + &mhi_cntrl->chdb[i].status); +} + +static void mhi_ep_mmio_set_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl, bool enable) +{ + u32 val = 0, i; + + if (enable) + val = MHI_ERDB_INT_MASK_A7_n_EN_ALL; + + for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) + mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_MASK_A7_n(i), val); +} + +void mhi_ep_mmio_mask_erdb_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_set_erdb_interrupts(mhi_cntrl, false); +} + +void mhi_ep_mmio_enable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK_A7, + MHI_CTRL_MHICTRL_MASK, + MHI_CTRL_MHICTRL_SHFT, 1); +} + +void mhi_ep_mmio_disable_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK_A7, + MHI_CTRL_MHICTRL_MASK, + MHI_CTRL_MHICTRL_SHFT, 0); +} + +void mhi_ep_mmio_enable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK_A7, + MHI_CTRL_CRDB_MASK, + MHI_CTRL_CRDB_SHFT, 1); +} + +void mhi_ep_mmio_disable_cmdb_interrupt(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHI_CTRL_INT_MASK_A7, + MHI_CTRL_CRDB_MASK, + MHI_CTRL_CRDB_SHFT, 0); +} + +void mhi_ep_mmio_mask_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_disable_ctrl_interrupt(mhi_cntrl); + mhi_ep_mmio_disable_cmdb_interrupt(mhi_cntrl); + mhi_ep_mmio_mask_chdb_interrupts(mhi_cntrl); + mhi_ep_mmio_mask_erdb_interrupts(mhi_cntrl); +} + +void mhi_ep_mmio_clear_interrupts(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 i = 0; + + for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) + mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_A7_n(i), + MHI_CHDB_INT_CLEAR_A7_n_CLEAR_ALL); + + for (i = 0; i < MHI_MASK_ROWS_CH_EV_DB; i++) + mhi_ep_mmio_write(mhi_cntrl, MHI_ERDB_INT_CLEAR_A7_n(i), + MHI_ERDB_INT_CLEAR_A7_n_CLEAR_ALL); + + mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR_A7, + MHI_CTRL_INT_MMIO_WR_CLEAR | + MHI_CTRL_INT_CRDB_CLEAR | + MHI_CTRL_INT_CRDB_MHICTRL_CLEAR); +} + +void mhi_ep_mmio_get_chc_base(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 ccabap_value = 0; + + mhi_ep_mmio_read(mhi_cntrl, CCABAP_HIGHER, &ccabap_value); + mhi_cntrl->ch_ctx_host_pa = ccabap_value; + mhi_cntrl->ch_ctx_host_pa <<= 32; + + mhi_ep_mmio_read(mhi_cntrl, CCABAP_LOWER, &ccabap_value); + mhi_cntrl->ch_ctx_host_pa |= ccabap_value; +} + +void mhi_ep_mmio_get_erc_base(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 ecabap_value = 0; + + mhi_ep_mmio_read(mhi_cntrl, ECABAP_HIGHER, &ecabap_value); + mhi_cntrl->ev_ctx_host_pa = ecabap_value; + mhi_cntrl->ev_ctx_host_pa <<= 32; + + mhi_ep_mmio_read(mhi_cntrl, ECABAP_LOWER, &ecabap_value); + mhi_cntrl->ev_ctx_host_pa |= ecabap_value; +} + +void mhi_ep_mmio_get_crc_base(struct mhi_ep_cntrl *mhi_cntrl) +{ + u32 crcbap_value = 0; + + mhi_ep_mmio_read(mhi_cntrl, CRCBAP_HIGHER, &crcbap_value); + mhi_cntrl->cmd_ctx_host_pa = crcbap_value; + mhi_cntrl->cmd_ctx_host_pa <<= 32; + + mhi_ep_mmio_read(mhi_cntrl, CRCBAP_LOWER, &crcbap_value); + mhi_cntrl->cmd_ctx_host_pa |= crcbap_value; +} + +void mhi_ep_mmio_get_ch_db(struct mhi_ep_ring *ring, u64 *wr_ptr) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + u32 value = 0; + + mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_h, &value); + *wr_ptr = value; + *wr_ptr <<= 32; + + mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_l, &value); + + *wr_ptr |= value; +} + +void mhi_ep_mmio_get_er_db(struct mhi_ep_ring *ring, u64 *wr_ptr) +{ + struct mhi_ep_cntrl *mhi_cntrl = 
ring->mhi_cntrl; + u32 value = 0; + + mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_h, &value); + *wr_ptr = value; + *wr_ptr <<= 32; + + mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_l, &value); + + *wr_ptr |= value; +} + +void mhi_ep_mmio_get_cmd_db(struct mhi_ep_ring *ring, u64 *wr_ptr) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + u32 value = 0; + + mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_h, &value); + *wr_ptr = value; + *wr_ptr <<= 32; + + mhi_ep_mmio_read(mhi_cntrl, ring->db_offset_l, &value); + *wr_ptr |= value; +} + +void mhi_ep_mmio_set_env(struct mhi_ep_cntrl *mhi_cntrl, u32 value) +{ + mhi_ep_mmio_write(mhi_cntrl, BHI_EXECENV, value); +} + +void mhi_ep_mmio_clear_reset(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_masked_write(mhi_cntrl, MHICTRL, MHICTRL_RESET_MASK, + MHICTRL_RESET_SHIFT, 0); +} + +void mhi_ep_mmio_reset(struct mhi_ep_cntrl *mhi_cntrl) +{ + mhi_ep_mmio_write(mhi_cntrl, MHICTRL, 0); + mhi_ep_mmio_write(mhi_cntrl, MHISTATUS, 0); + mhi_ep_mmio_clear_interrupts(mhi_cntrl); +} + +void mhi_ep_mmio_init(struct mhi_ep_cntrl *mhi_cntrl) +{ + int mhi_cfg = 0; + + mhi_ep_mmio_read(mhi_cntrl, MHIREGLEN, &mhi_cntrl->reg_len); + mhi_ep_mmio_read(mhi_cntrl, CHDBOFF, &mhi_cntrl->chdb_offset); + mhi_ep_mmio_read(mhi_cntrl, ERDBOFF, &mhi_cntrl->erdb_offset); + + mhi_ep_mmio_read(mhi_cntrl, MHICFG, &mhi_cfg); + mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, mhi_cfg); + mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, mhi_cfg); + + mhi_ep_mmio_reset(mhi_cntrl); +} + +void mhi_ep_mmio_update_ner(struct mhi_ep_cntrl *mhi_cntrl) +{ + int mhi_cfg = 0; + + mhi_ep_mmio_read(mhi_cntrl, MHICFG, &mhi_cfg); + mhi_cntrl->event_rings = FIELD_GET(MHICFG_NER_MASK, mhi_cfg); + mhi_cntrl->hw_event_rings = FIELD_GET(MHICFG_NHWER_MASK, mhi_cfg); +} diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c new file mode 100644 index 000000000000..d3e0bbd45187 --- /dev/null +++ b/drivers/bus/mhi/ep/ring.c @@ -0,0 +1,317 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Linaro Ltd. 
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> + */ + +#include <linux/mhi_ep.h> +#include "internal.h" + +size_t mhi_ep_ring_addr2offset(struct mhi_ep_ring *ring, u64 ptr) +{ + u64 rbase; + + rbase = ring->ring_ctx->generic.rbase; + + return (ptr - rbase) / sizeof(struct mhi_ep_ring_element); +} + +static u32 mhi_ep_ring_num_elems(struct mhi_ep_ring *ring) +{ + return ring->ring_ctx->generic.rlen / sizeof(struct mhi_ep_ring_element); +} + +void mhi_ep_ring_inc_index(struct mhi_ep_ring *ring) +{ + ring->rd_offset++; + if (ring->rd_offset == ring->ring_size) + ring->rd_offset = 0; +} + +int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + size_t start, copy_size; + struct mhi_ep_ring_element *ring_shadow; + phys_addr_t ring_shadow_phys; + size_t size = ring->ring_size * sizeof(struct mhi_ep_ring_element); + int ret; + + /* No need to cache event rings */ + if (ring->type == RING_TYPE_ER) + return 0; + + /* No need to cache the ring if write pointer is unmodified */ + if (ring->wr_offset == end) + return 0; + + start = ring->wr_offset; + + /* Allocate memory for host ring */ + ring_shadow = mhi_cntrl->alloc_addr(mhi_cntrl, &ring_shadow_phys, + size); + if (!ring_shadow) { + dev_err(dev, "Failed to allocate memory for ring_shadow\n"); + return -ENOMEM; + } + + /* Map host ring */ + ret = mhi_cntrl->map_addr(mhi_cntrl, ring_shadow_phys, + ring->ring_ctx->generic.rbase, size); + if (ret) { + dev_err(dev, "Failed to map ring_shadow\n\n"); + goto err_ring_free; + } + + dev_dbg(dev, "Caching ring: start %d end %d size %d", start, end, copy_size); + + if (start < end) { + copy_size = (end - start) * sizeof(struct mhi_ep_ring_element); + memcpy_fromio(&ring->ring_cache[start], &ring_shadow[start], copy_size); + } else { + copy_size = (ring->ring_size - start) * sizeof(struct mhi_ep_ring_element); + memcpy_fromio(&ring->ring_cache[start], &ring_shadow[start], copy_size); + if (end) + memcpy_fromio(&ring->ring_cache[0], &ring_shadow[0], + end * sizeof(struct mhi_ep_ring_element)); + } + + /* Now unmap and free host ring */ + mhi_cntrl->unmap_addr(mhi_cntrl, ring_shadow_phys); + mhi_cntrl->free_addr(mhi_cntrl, ring_shadow_phys, ring_shadow, size); + + return 0; + +err_ring_free: + mhi_cntrl->free_addr(mhi_cntrl, ring_shadow_phys, &ring_shadow, size); + + return ret; +} + +int mhi_ep_cache_ring(struct mhi_ep_ring *ring, u64 wr_ptr) +{ + size_t wr_offset; + int ret; + + wr_offset = mhi_ep_ring_addr2offset(ring, wr_ptr); + + /* Cache the host ring till write offset */ + ret = __mhi_ep_cache_ring(ring, wr_offset); + if (ret) + return ret; + + ring->wr_offset = wr_offset; + + return 0; +} + +int mhi_ep_update_wr_offset(struct mhi_ep_ring *ring) +{ + u64 wr_ptr; + + switch (ring->type) { + case RING_TYPE_CMD: + mhi_ep_mmio_get_cmd_db(ring, &wr_ptr); + break; + case RING_TYPE_ER: + mhi_ep_mmio_get_er_db(ring, &wr_ptr); + break; + case RING_TYPE_CH: + mhi_ep_mmio_get_ch_db(ring, &wr_ptr); + break; + default: + return -EINVAL; + } + + return mhi_ep_cache_ring(ring, wr_ptr); +} + +int mhi_ep_process_ring_element(struct mhi_ep_ring *ring, size_t offset) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ep_ring_element *el; + int ret = -ENODEV; + + /* Get the element and invoke the respective callback */ + el = &ring->ring_cache[offset]; + + if (ring->ring_cb) + ret = ring->ring_cb(ring, el); + else + dev_err(dev, 
"No callback registered for ring\n"); + + return ret; +} + +int mhi_ep_process_ring(struct mhi_ep_ring *ring) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret = 0; + + /* Event rings should not be processed */ + if (ring->type == RING_TYPE_ER) + return -EINVAL; + + dev_dbg(dev, "Processing ring of type: %d\n", ring->type); + + /* Update the write offset for the ring */ + ret = mhi_ep_update_wr_offset(ring); + if (ret) { + dev_err(dev, "Error updating write offset for ring\n"); + return ret; + } + + /* Process channel ring first */ + if (ring->type == RING_TYPE_CH) { + ret = mhi_ep_process_ring_element(ring, ring->rd_offset); + if (ret) + dev_err(dev, "Error processing ch ring element: %d\n", ring->rd_offset); + + return ret; + } + + /* Process command ring now */ + while (ring->rd_offset != ring->wr_offset) { + ret = mhi_ep_process_ring_element(ring, ring->rd_offset); + if (ret) { + dev_err(dev, "Error processing cmd ring element: %d\n", ring->rd_offset); + return ret; + } + + mhi_ep_ring_inc_index(ring); + } + + if (ring->rd_offset != ring->wr_offset) { + dev_err(dev, "Error processing the ring\n"); + return -EINVAL; + } + + return 0; +} + +/* TODO: Support for adding multiple ring elements to the ring */ +int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ep_ring_element *element, int size) +{ + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ep_ring_element *ring_shadow; + size_t ring_size = ring->ring_size * sizeof(struct mhi_ep_ring_element); + phys_addr_t ring_shadow_phys; + size_t old_offset = 0; + u32 num_free_elem; + int ret; + + ret = mhi_ep_update_wr_offset(ring); + if (ret) { + dev_err(dev, "Error updating write pointer\n"); + return ret; + } + + if (ring->rd_offset < ring->wr_offset) + num_free_elem = (ring->wr_offset - ring->rd_offset) - 1; + else + num_free_elem = ((ring->ring_size - ring->rd_offset) + ring->wr_offset) - 1; + + /* Check if there is space in ring for adding at least an element */ + if (num_free_elem < 1) { + dev_err(dev, "No space left in the ring\n"); + return -ENOSPC; + } + + old_offset = ring->rd_offset; + mhi_ep_ring_inc_index(ring); + + dev_dbg(dev, "Adding an element to ring at offset (%d)\n", ring->rd_offset); + + /* Update rp in ring context */ + ring->ring_ctx->generic.rp = (ring->rd_offset * sizeof(struct mhi_ep_ring_element)) + + ring->ring_ctx->generic.rbase; + + /* Allocate memory for host ring */ + ring_shadow = mhi_cntrl->alloc_addr(mhi_cntrl, &ring_shadow_phys, ring_size); + if (!ring_shadow) { + dev_err(dev, "failed to allocate ring_shadow\n"); + return -ENOMEM; + } + + /* Map host ring */ + ret = mhi_cntrl->map_addr(mhi_cntrl, ring_shadow_phys, + ring->ring_ctx->generic.rbase, ring_size); + if (ret) { + dev_err(dev, "failed to map ring_shadow\n\n"); + goto err_ring_free; + } + + /* Copy the element to ring */ + memcpy_toio(&ring_shadow[old_offset], element, sizeof(struct mhi_ep_ring_element)); + + /* Now unmap and free host ring */ + mhi_cntrl->unmap_addr(mhi_cntrl, ring_shadow_phys); + mhi_cntrl->free_addr(mhi_cntrl, ring_shadow_phys, ring_shadow, ring_size); + + return 0; + +err_ring_free: + mhi_cntrl->free_addr(mhi_cntrl, ring_shadow_phys, ring_shadow, ring_size); + + return ret; +} + +void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id) +{ + ring->state = RING_STATE_UINT; + ring->type = type; + if (ring->type == RING_TYPE_CMD) { + ring->ring_cb = 
mhi_ep_process_cmd_ring; + ring->db_offset_h = CRDB_HIGHER; + ring->db_offset_l = CRDB_LOWER; + } else if (ring->type == RING_TYPE_CH) { + ring->ring_cb = mhi_ep_process_tre_ring; + ring->db_offset_h = CHDB_HIGHER_n(id); + ring->db_offset_l = CHDB_LOWER_n(id); + ring->ch_id = id; + } else if (ring->type == RING_TYPE_ER) { + ring->db_offset_h = ERDB_HIGHER_n(id); + ring->db_offset_l = ERDB_LOWER_n(id); + } +} + +int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, + union mhi_ep_ring_ctx *ctx) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + ring->mhi_cntrl = mhi_cntrl; + ring->ring_ctx = ctx; + ring->ring_size = mhi_ep_ring_num_elems(ring); + + /* During ring init, both rp and wp are equal */ + ring->rd_offset = mhi_ep_ring_addr2offset(ring, ring->ring_ctx->generic.rp); + ring->wr_offset = mhi_ep_ring_addr2offset(ring, ring->ring_ctx->generic.rp); + ring->state = RING_STATE_IDLE; + + /* Allocate ring cache memory for holding the copy of host ring */ + ring->ring_cache = kcalloc(ring->ring_size, sizeof(struct mhi_ep_ring_element), + GFP_KERNEL); + if (!ring->ring_cache) + return -ENOMEM; + + ret = mhi_ep_cache_ring(ring, ring->ring_ctx->generic.wp); + if (ret) { + dev_err(dev, "Failed to cache ring\n"); + kfree(ring->ring_cache); + return ret; + } + + return 0; +} + +void mhi_ep_ring_stop(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring) +{ + ring->state = RING_STATE_UINT; + kfree(ring->ring_cache); +} diff --git a/drivers/bus/mhi/ep/sm.c b/drivers/bus/mhi/ep/sm.c new file mode 100644 index 000000000000..0038c69c6a1f --- /dev/null +++ b/drivers/bus/mhi/ep/sm.c @@ -0,0 +1,271 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Linaro Ltd. + * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> + */ + +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/mhi_ep.h> +#include "internal.h" + +const char * const mhi_ep_state_str[MHI_STATE_MAX] = { + [MHI_STATE_RESET] = "RESET", + [MHI_STATE_READY] = "READY", + [MHI_STATE_M0] = "M0", + [MHI_STATE_M1] = "M1", + [MHI_STATE_M2] = "M2", + [MHI_STATE_M3] = "M3", + [MHI_STATE_M3_FAST] = "M3 FAST", + [MHI_STATE_BHI] = "BHI", + [MHI_STATE_SYS_ERR] = "SYS ERROR", +}; + +const char * const mhi_ep_link_state_str[LINK_STATE_MAX] = { + [LINK_STATE_UP] = "LINK_UP", + [LINK_STATE_DOWN] = "LINK_DOWN", + [LINK_STATE_BME] = "BME", + [LINK_STATE_D0] = "D0", + [LINK_STATE_D3] = "D3", + [LINK_STATE_D3_COLD] = "D3_COLD", +}; + +bool __must_check mhi_ep_check_link_state(struct mhi_ep_cntrl *mhi_cntrl, + enum mhi_ep_link_state link_state, + enum mhi_ep_link_state cur_link_state, + enum mhi_state mhi_state) +{ + bool valid = false; + + switch (link_state) { + case LINK_STATE_UP: + case LINK_STATE_DOWN: + case LINK_STATE_BME: + /* Link up, Link down and BME states are allowed all the time */ + valid = true; + break; + case LINK_STATE_D0: + valid = ((cur_link_state == LINK_STATE_BME || + cur_link_state == LINK_STATE_UP || + cur_link_state == LINK_STATE_D3) && + (mhi_state == MHI_STATE_RESET || + mhi_state == MHI_STATE_M3)); + break; + case LINK_STATE_D3: + valid = (cur_link_state == LINK_STATE_BME || + (cur_link_state == LINK_STATE_D0 && + mhi_state == MHI_STATE_M3)); + break; + default: + break; + } + + return valid; +} + +int mhi_ep_set_link_state(struct mhi_ep_cntrl *mhi_cntrl, + enum mhi_ep_link_state link_state) +{ + enum mhi_ep_link_state cur_link_state = mhi_cntrl->link_state; + enum mhi_state cur_mhi_state; + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret = 0; + 
+ spin_lock(&mhi_cntrl->state_lock); + cur_mhi_state = mhi_cntrl->mhi_state; + if (!mhi_ep_check_link_state(mhi_cntrl, link_state, cur_link_state, cur_mhi_state)) { + dev_err(dev, "MHI link state change to %s from %s:%s is not allowed!\n", + TO_LINK_STATE_STR(link_state), + TO_LINK_STATE_STR(mhi_cntrl->link_state), + TO_MHI_STATE_STR(mhi_cntrl->mhi_state)); + mhi_ep_handle_syserr(mhi_cntrl); + ret = -EACCES; + goto err_unlock; + } + + mhi_cntrl->link_state = link_state; + +err_unlock: + spin_unlock(&mhi_cntrl->state_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(mhi_ep_set_link_state); + +bool __must_check mhi_ep_check_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, + enum mhi_state cur_mhi_state, + enum mhi_state mhi_state) +{ + enum mhi_ep_link_state link_state = mhi_cntrl->link_state; + bool valid = false; + + switch (mhi_state) { + case MHI_STATE_READY: + valid = ((link_state == LINK_STATE_BME) && + cur_mhi_state == MHI_STATE_RESET); + break; + case MHI_STATE_M0: + valid = ((link_state == LINK_STATE_BME || + link_state == LINK_STATE_D0) && + (cur_mhi_state == MHI_STATE_READY || + cur_mhi_state == MHI_STATE_M3)); + break; + case MHI_STATE_M3: + valid = ((link_state == LINK_STATE_BME || + link_state == LINK_STATE_D0) && + cur_mhi_state == MHI_STATE_M0); + break; + case MHI_STATE_SYS_ERR: + /* Transition to SYS_ERR state is allowed all the time */ + valid = true; + break; + default: + break; + } + + return valid; +} + +int mhi_ep_set_mhi_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state mhi_state) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + + if (!mhi_ep_check_mhi_state(mhi_cntrl, mhi_cntrl->mhi_state, mhi_state)) { + dev_err(dev, "MHI state change to %s from %s:%s is not allowed!\n", + TO_MHI_STATE_STR(mhi_state), + TO_MHI_STATE_STR(mhi_cntrl->mhi_state), + TO_LINK_STATE_STR(mhi_cntrl->link_state)); + return -EACCES; + } + + switch (mhi_state) { + case MHI_STATE_READY: + mhi_ep_mmio_masked_write(mhi_cntrl, MHISTATUS, + MHISTATUS_READY_MASK, + MHISTATUS_READY_SHIFT, 1); + + mhi_ep_mmio_masked_write(mhi_cntrl, MHISTATUS, + MHISTATUS_MHISTATE_MASK, + MHISTATUS_MHISTATE_SHIFT, mhi_state); + break; + case MHI_STATE_SYS_ERR: + mhi_ep_mmio_masked_write(mhi_cntrl, MHISTATUS, + MHISTATUS_SYSERR_MASK, + MHISTATUS_SYSERR_SHIFT, 1); + + mhi_ep_mmio_masked_write(mhi_cntrl, MHISTATUS, + MHISTATUS_MHISTATE_MASK, + MHISTATUS_MHISTATE_SHIFT, mhi_state); + break; + case MHI_STATE_M1: + case MHI_STATE_M2: + dev_err(dev, "MHI state (%s) not supported\n", TO_MHI_STATE_STR(mhi_state)); + return -EINVAL; + case MHI_STATE_M0: + case MHI_STATE_M3: + mhi_ep_mmio_masked_write(mhi_cntrl, MHISTATUS, + MHISTATUS_MHISTATE_MASK, + MHISTATUS_MHISTATE_SHIFT, mhi_state); + break; + default: + dev_err(dev, "Invalid MHI state (%d)", mhi_state); + return -EINVAL; + } + + mhi_cntrl->mhi_state = mhi_state; + + return 0; +} + +int mhi_ep_set_m0_state(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state old_state; + int ret; + + /* If MHI is in M3, resume suspended channels */ + spin_lock_bh(&mhi_cntrl->state_lock); + old_state = mhi_cntrl->mhi_state; + if (old_state == MHI_STATE_M3) + mhi_ep_resume_channels(mhi_cntrl); + + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0); + if (ret) { + mhi_ep_handle_syserr(mhi_cntrl); + spin_unlock_bh(&mhi_cntrl->state_lock); + return ret; + } + + spin_unlock_bh(&mhi_cntrl->state_lock); + /* Signal host that the device moved to M0 */ + ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M0); + if (ret) { + dev_err(dev, "Failed 
sending M0 state change event: %d\n", ret); + return ret; + } + + if (old_state == MHI_STATE_READY) { + /* Allow the host to process state change event */ + mdelay(1); + + /* Send AMSS EE event to host */ + ret = mhi_ep_send_ee_event(mhi_cntrl, MHI_EP_AMSS_EE); + if (ret) { + dev_err(dev, "Failed sending AMSS EE event: %d\n", ret); + return ret; + } + } + + return 0; +} + +int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + int ret; + + spin_lock_bh(&mhi_cntrl->state_lock); + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M3); + if (ret) { + mhi_ep_handle_syserr(mhi_cntrl); + spin_unlock_bh(&mhi_cntrl->state_lock); + return ret; + } + + spin_unlock_bh(&mhi_cntrl->state_lock); + mhi_ep_suspend_channels(mhi_cntrl); + + /* Signal host that the device moved to M3 */ + ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3); + if (ret) { + dev_err(dev, "Failed sending M3 state change event: %d\n", ret); + return ret; + } + + return 0; +} + +int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + enum mhi_state mhi_state; + int ret, is_ready; + + spin_lock_bh(&mhi_cntrl->state_lock); + /* Ensure that the MHISTATUS is set to RESET by host */ + mhi_ep_mmio_masked_read(mhi_cntrl, MHISTATUS, MHISTATUS_MHISTATE_MASK, + MHISTATUS_MHISTATE_SHIFT, &mhi_state); + mhi_ep_mmio_masked_read(mhi_cntrl, MHISTATUS, MHISTATUS_READY_MASK, + MHISTATUS_READY_SHIFT, &is_ready); + + if (mhi_state != MHI_STATE_RESET || is_ready) { + dev_err(dev, "READY state transition failed. MHI host not in RESET state\n"); + spin_unlock_bh(&mhi_cntrl->state_lock); + return -EFAULT; + } + + ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_READY); + spin_unlock_bh(&mhi_cntrl->state_lock); + + return ret; +} diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index f37b1c56f7c4..1897293d2e87 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -436,6 +436,14 @@ config MHI_NET QCOM based WWAN modems for IP or QMAP/rmnet protocol (like SDX55). Say Y or M. +config MHI_EP_NET + tristate "MHI Endpoint network driver" + depends on MHI_BUS_EP + help + This is the network driver for MHI Endpoint bus providing network + interface to QCOM modems such as SDX55. + Say Y or M. + endif # NET_CORE config SUNGEM_PHY diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 739838623cf6..90145c0ffa2b 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -38,6 +38,7 @@ obj-$(CONFIG_NLMON) += nlmon.o obj-$(CONFIG_NET_VRF) += vrf.o obj-$(CONFIG_VSOCKMON) += vsockmon.o obj-$(CONFIG_MHI_NET) += mhi_net.o +obj-$(CONFIG_MHI_EP_NET) += mhi_ep_net.o # # Networking Drivers diff --git a/drivers/net/mhi_ep_net.c b/drivers/net/mhi_ep_net.c new file mode 100644 index 000000000000..da323b66a5e9 --- /dev/null +++ b/drivers/net/mhi_ep_net.c @@ -0,0 +1,376 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * MHI Endpoint Network driver + * + * Based on drivers/net/mhi_ep_net.c + * + * Copyright (c) 2021, Linaro Ltd. 
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org + */ + +#include <linux/if_arp.h> +#include <linux/mhi_ep.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/u64_stats_sync.h> + +#define MHI_NET_MIN_MTU ETH_MIN_MTU +#define MHI_NET_MAX_MTU 0xffff +#define MHI_EP_DEFAULT_MTU 0x4000 + +struct mhi_ep_net_stats { + u64_stats_t rx_packets; + u64_stats_t rx_bytes; + u64_stats_t rx_errors; + u64_stats_t tx_packets; + u64_stats_t tx_bytes; + u64_stats_t tx_errors; + u64_stats_t tx_dropped; + struct u64_stats_sync tx_syncp; + struct u64_stats_sync rx_syncp; +}; + +struct mhi_ep_net_dev { + struct mhi_ep_device *mdev; + struct net_device *ndev; + struct sk_buff *skbagg_head; + struct sk_buff *skbagg_tail; + struct mhi_ep_net_stats stats; + struct workqueue_struct *xmit_wq; + struct work_struct xmit_work; + struct sk_buff_head tx_buffers; + spinlock_t wrt_lock; +}; + +struct mhi_ep_device_info { + const char *netname; +}; + +static void mhi_ep_net_dev_process_queue_packets(struct work_struct *work) +{ + struct mhi_ep_net_dev *client = container_of(work, + struct mhi_ep_net_dev, xmit_work); + struct mhi_ep_device *mdev = client->mdev; + struct sk_buff *skb = NULL; + unsigned long flags = 0; + int ret; + + if (mhi_ep_queue_is_empty(mdev, DMA_FROM_DEVICE)) { + netif_stop_queue(client->ndev); + return; + } + + while (!skb_queue_empty(&client->tx_buffers)) { + spin_lock_irqsave(&client->wrt_lock, flags); + skb = skb_dequeue(&client->tx_buffers); + if (!skb) { + spin_unlock_irqrestore(&client->wrt_lock, flags); + return; + } + spin_unlock_irqrestore(&client->wrt_lock, flags); + + ret = mhi_ep_queue_skb(mdev, DMA_FROM_DEVICE, skb, skb->len, MHI_EOT); + if (ret) { + kfree(skb); + return; + } + + u64_stats_update_begin(&client->stats.tx_syncp); + u64_stats_inc(&client->stats.tx_packets); + u64_stats_update_end(&client->stats.tx_syncp); + + /* Check if queue is empty */ + if (mhi_ep_queue_is_empty(mdev, DMA_FROM_DEVICE)) { + netif_stop_queue(client->ndev); + break; + } + + consume_skb(skb); + } +} + +static int mhi_ndo_open(struct net_device *ndev) +{ + /* Carrier is established via out-of-band channel (e.g. 
qmi) */ + netif_carrier_on(ndev); + + netif_start_queue(ndev); + + return 0; +} + +static int mhi_ndo_stop(struct net_device *ndev) +{ + netif_stop_queue(ndev); + netif_carrier_off(ndev); + + return 0; +} + +static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct mhi_ep_net_dev *mhi_ep_netdev = netdev_priv(ndev); + unsigned long flags; + + spin_lock_irqsave(&mhi_ep_netdev->wrt_lock, flags); + skb_queue_tail(&mhi_ep_netdev->tx_buffers, skb); + spin_unlock_irqrestore(&mhi_ep_netdev->wrt_lock, flags); + + queue_work(mhi_ep_netdev->xmit_wq, + &mhi_ep_netdev->xmit_work); + + return NETDEV_TX_OK; +} + +static void mhi_ndo_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *stats) +{ + struct mhi_ep_net_dev *mhi_ep_netdev = netdev_priv(ndev); + unsigned int start; + + do { + start = u64_stats_fetch_begin_irq(&mhi_ep_netdev->stats.rx_syncp); + stats->rx_packets = u64_stats_read(&mhi_ep_netdev->stats.rx_packets); + stats->rx_bytes = u64_stats_read(&mhi_ep_netdev->stats.rx_bytes); + stats->rx_errors = u64_stats_read(&mhi_ep_netdev->stats.rx_errors); + } while (u64_stats_fetch_retry_irq(&mhi_ep_netdev->stats.rx_syncp, start)); + + do { + start = u64_stats_fetch_begin_irq(&mhi_ep_netdev->stats.tx_syncp); + stats->tx_packets = u64_stats_read(&mhi_ep_netdev->stats.tx_packets); + stats->tx_bytes = u64_stats_read(&mhi_ep_netdev->stats.tx_bytes); + stats->tx_errors = u64_stats_read(&mhi_ep_netdev->stats.tx_errors); + stats->tx_dropped = u64_stats_read(&mhi_ep_netdev->stats.tx_dropped); + } while (u64_stats_fetch_retry_irq(&mhi_ep_netdev->stats.tx_syncp, start)); +} + +static const struct net_device_ops mhi_ep_netdev_ops = { + .ndo_open = mhi_ndo_open, + .ndo_stop = mhi_ndo_stop, + .ndo_start_xmit = mhi_ndo_xmit, + .ndo_get_stats64 = mhi_ndo_get_stats64, +}; + +static void mhi_ep_net_setup(struct net_device *ndev) +{ + ndev->header_ops = NULL; /* No header */ + ndev->type = ARPHRD_RAWIP; + ndev->hard_header_len = 0; + ndev->addr_len = 0; + ndev->flags = IFF_POINTOPOINT | IFF_NOARP; + ndev->netdev_ops = &mhi_ep_netdev_ops; + ndev->mtu = MHI_EP_DEFAULT_MTU; + ndev->min_mtu = MHI_NET_MIN_MTU; + ndev->max_mtu = MHI_NET_MAX_MTU; + ndev->tx_queue_len = 1000; +} + +static struct sk_buff *mhi_ep_net_skb_agg(struct mhi_ep_net_dev *mhi_ep_netdev, + struct sk_buff *skb) +{ + struct sk_buff *head = mhi_ep_netdev->skbagg_head; + struct sk_buff *tail = mhi_ep_netdev->skbagg_tail; + + /* This is non-paged skb chaining using frag_list */ + if (!head) { + mhi_ep_netdev->skbagg_head = skb; + return skb; + } + + if (!skb_shinfo(head)->frag_list) + skb_shinfo(head)->frag_list = skb; + else + tail->next = skb; + + head->len += skb->len; + head->data_len += skb->len; + head->truesize += skb->truesize; + + mhi_ep_netdev->skbagg_tail = skb; + + return mhi_ep_netdev->skbagg_head; +} + +static void mhi_ep_net_ul_callback(struct mhi_ep_device *mhi_dev, + struct mhi_result *mhi_res) +{ + struct mhi_ep_net_dev *mhi_ep_netdev = dev_get_drvdata(&mhi_dev->dev); + struct net_device *ndev = mhi_ep_netdev->ndev; + struct sk_buff *skb; + + skb = netdev_alloc_skb(ndev, 8192); + if (!skb) { + u64_stats_update_begin(&mhi_ep_netdev->stats.rx_syncp); + u64_stats_inc(&mhi_ep_netdev->stats.rx_errors); + u64_stats_update_end(&mhi_ep_netdev->stats.rx_syncp); + } + + skb_copy_to_linear_data(skb, mhi_res->buf_addr, mhi_res->bytes_xferd); + skb->len = mhi_res->bytes_xferd; + skb->dev = mhi_ep_netdev->ndev; + + if (unlikely(mhi_res->transaction_status)) { + switch (mhi_res->transaction_status) { + case 
-EOVERFLOW: + /* Packet can not fit in one MHI buffer and has been + * split over multiple MHI transfers, do re-aggregation. + * That usually means the device side MTU is larger than + * the host side MTU/MRU. Since this is not optimal, + * print a warning (once). + */ + netdev_warn_once(mhi_ep_netdev->ndev, + "Fragmented packets received, fix MTU?\n"); + skb_put(skb, mhi_res->bytes_xferd); + mhi_ep_net_skb_agg(mhi_ep_netdev, skb); + break; + case -ENOTCONN: + /* MHI layer stopping/resetting the DL channel */ + dev_kfree_skb_any(skb); + return; + default: + /* Unknown error, simply drop */ + dev_kfree_skb_any(skb); + u64_stats_update_begin(&mhi_ep_netdev->stats.rx_syncp); + u64_stats_inc(&mhi_ep_netdev->stats.rx_errors); + u64_stats_update_end(&mhi_ep_netdev->stats.rx_syncp); + } + } else { + skb_put(skb, mhi_res->bytes_xferd); + + if (mhi_ep_netdev->skbagg_head) { + /* Aggregate the final fragment */ + skb = mhi_ep_net_skb_agg(mhi_ep_netdev, skb); + mhi_ep_netdev->skbagg_head = NULL; + } + + switch (skb->data[0] & 0xf0) { + case 0x40: + skb->protocol = htons(ETH_P_IP); + break; + case 0x60: + skb->protocol = htons(ETH_P_IPV6); + break; + default: + skb->protocol = htons(ETH_P_MAP); + break; + } + + u64_stats_update_begin(&mhi_ep_netdev->stats.rx_syncp); + u64_stats_inc(&mhi_ep_netdev->stats.rx_packets); + u64_stats_add(&mhi_ep_netdev->stats.rx_bytes, skb->len); + u64_stats_update_end(&mhi_ep_netdev->stats.rx_syncp); + netif_receive_skb(skb); + } +} + +static void mhi_ep_net_dl_callback(struct mhi_ep_device *mhi_dev, + struct mhi_result *mhi_res) +{ + struct mhi_ep_net_dev *mhi_ep_netdev = dev_get_drvdata(&mhi_dev->dev); + + if (unlikely(mhi_res->transaction_status == -ENOTCONN)) + return; + + /* Since we got enough buffers to queue, wake the queue if stopped */ + if (netif_queue_stopped(mhi_ep_netdev->ndev)) { + netif_wake_queue(mhi_ep_netdev->ndev); + queue_work(mhi_ep_netdev->xmit_wq, &mhi_ep_netdev->xmit_work); + } +} + +static int mhi_ep_net_newlink(struct mhi_ep_device *mhi_dev, struct net_device *ndev) +{ + struct mhi_ep_net_dev *mhi_ep_netdev; + int err; + + mhi_ep_netdev = netdev_priv(ndev); + + dev_set_drvdata(&mhi_dev->dev, mhi_ep_netdev); + mhi_ep_netdev->ndev = ndev; + mhi_ep_netdev->mdev = mhi_dev; + mhi_ep_netdev->skbagg_head = NULL; + + skb_queue_head_init(&mhi_ep_netdev->tx_buffers); + + u64_stats_init(&mhi_ep_netdev->stats.rx_syncp); + u64_stats_init(&mhi_ep_netdev->stats.tx_syncp); + + mhi_ep_netdev->xmit_wq = create_singlethread_workqueue("mhi_ep_net_xmit_wq"); + INIT_WORK(&mhi_ep_netdev->xmit_work, mhi_ep_net_dev_process_queue_packets); + err = register_netdev(ndev); + if (err) + return err; + + return 0; +} + +static void mhi_ep_net_dellink(struct mhi_ep_device *mhi_dev, struct net_device *ndev) +{ + struct mhi_ep_net_dev *mhi_ep_netdev = netdev_priv(ndev); + + destroy_workqueue(mhi_ep_netdev->xmit_wq); + unregister_netdev(ndev); + kfree_skb(mhi_ep_netdev->skbagg_head); + dev_set_drvdata(&mhi_dev->dev, NULL); +} + +static int mhi_ep_net_probe(struct mhi_ep_device *mhi_dev, + const struct mhi_device_id *id) +{ + const struct mhi_ep_device_info *info = (struct mhi_ep_device_info *)id->driver_data; + struct net_device *ndev; + int err; + + ndev = alloc_netdev(sizeof(struct mhi_ep_net_dev), info->netname, + NET_NAME_PREDICTABLE, mhi_ep_net_setup); + if (!ndev) + return -ENOMEM; + + SET_NETDEV_DEV(ndev, &mhi_dev->dev); + + err = mhi_ep_net_newlink(mhi_dev, ndev); + if (err) { + free_netdev(ndev); + return err; + } + + return 0; +} + +static void mhi_ep_net_remove(struct 
mhi_ep_device *mhi_dev) +{ + struct mhi_ep_net_dev *mhi_ep_netdev = dev_get_drvdata(&mhi_dev->dev); + + mhi_ep_net_dellink(mhi_dev, mhi_ep_netdev->ndev); +} + +static const struct mhi_ep_device_info mhi_swip0 = { + .netname = "mhi_swip%d", +}; + +static const struct mhi_device_id mhi_ep_net_id_table[] = { + /* Software data PATH (from modem CPU) */ + { .chan = "IP_SW0", .driver_data = (kernel_ulong_t)&mhi_swip0 }, + {} +}; +MODULE_DEVICE_TABLE(mhi, mhi_ep_net_id_table); + +static struct mhi_ep_driver mhi_ep_net_driver = { + .probe = mhi_ep_net_probe, + .remove = mhi_ep_net_remove, + .dl_xfer_cb = mhi_ep_net_dl_callback, + .ul_xfer_cb = mhi_ep_net_ul_callback, + .id_table = mhi_ep_net_id_table, + .driver = { + .name = "mhi_ep_net", + .owner = THIS_MODULE, + }, +}; + +module_mhi_ep_driver(mhi_ep_net_driver); + +MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>"); +MODULE_DESCRIPTION("Network over MHI"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index 76c0a63a3f64..49bcbe18a575 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig @@ -180,6 +180,16 @@ config PCIE_QCOM PCIe controller uses the DesignWare core plus Qualcomm-specific hardware wrappers. +config PCIE_QCOM_EP + tristate "Qualcomm PCIe controller - Endpoint mode" + depends on OF && (ARCH_QCOM || COMPILE_TEST) + depends on PCI_ENDPOINT + select PCIE_DW_EP + help + Say Y here to enable support for the PCIe controllers on Qualcomm SoCs + to work in endpoint mode. The PCIe controller uses the DesignWare core + plus Qualcomm-specific hardware wrappers. + config PCIE_ARMADA_8K bool "Marvell Armada-8K PCIe controller" depends on ARCH_MVEBU || COMPILE_TEST diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile index 73244409792c..8ba7b67f5e50 100644 --- a/drivers/pci/controller/dwc/Makefile +++ b/drivers/pci/controller/dwc/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o obj-$(CONFIG_PCI_LAYERSCAPE_EP) += pci-layerscape-ep.o obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o +obj-$(CONFIG_PCIE_QCOM_EP) += pcie-qcom-ep.o obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o obj-$(CONFIG_PCIE_ROCKCHIP_DW_HOST) += pcie-dw-rockchip.o diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 998b698f4085..0eda8236c125 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -83,6 +83,7 @@ void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar) for (func_no = 0; func_no < funcs; func_no++) __dw_pcie_ep_reset_bar(pci, func_no, bar, 0); } +EXPORT_SYMBOL_GPL(dw_pcie_ep_reset_bar); static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie_ep *ep, u8 func_no, u8 cap_ptr, u8 cap) @@ -485,6 +486,7 @@ int dw_pcie_ep_raise_legacy_irq(struct dw_pcie_ep *ep, u8 func_no) return -EINVAL; } +EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_legacy_irq); int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, u8 interrupt_num) @@ -536,6 +538,7 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, return 0; } +EXPORT_SYMBOL_GPL(dw_pcie_ep_raise_msi_irq); int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no, u16 interrupt_num) diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c new file mode 100644 index 000000000000..4f30a0621566 
--- /dev/null +++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c @@ -0,0 +1,712 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Qualcomm PCIe Endpoint controller driver + * + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * Author: Siddartha Mohanadoss <smohanad@codeaurora.org + * + * Copyright (c) 2021, Linaro Ltd. + * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/mfd/syscon.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/regmap.h> +#include <linux/reset.h> + +#include "pcie-designware.h" + +/* PARF registers */ +#define PARF_SYS_CTRL 0x00 +#define PARF_DB_CTRL 0x10 +#define PARF_PM_CTRL 0x20 +#define PARF_MHI_BASE_ADDR_LOWER 0x178 +#define PARF_MHI_BASE_ADDR_UPPER 0x17c +#define PARF_DEBUG_INT_EN 0x190 +#define PARF_AXI_MSTR_RD_HALT_NO_WRITES 0x1a4 +#define PARF_AXI_MSTR_WR_ADDR_HALT 0x1a8 +#define PARF_Q2A_FLUSH 0x1ac +#define PARF_LTSSM 0x1b0 +#define PARF_CFG_BITS 0x210 +#define PARF_INT_ALL_STATUS 0x224 +#define PARF_INT_ALL_CLEAR 0x228 +#define PARF_INT_ALL_MASK 0x22c +#define PARF_SLV_ADDR_MSB_CTRL 0x2c0 +#define PARF_DBI_BASE_ADDR 0x350 +#define PARF_DBI_BASE_ADDR_HI 0x354 +#define PARF_SLV_ADDR_SPACE_SIZE 0x358 +#define PARF_SLV_ADDR_SPACE_SIZE_HI 0x35c +#define PARF_ATU_BASE_ADDR 0x634 +#define PARF_ATU_BASE_ADDR_HI 0x638 +#define PARF_SRIS_MODE 0x644 +#define PARF_DEVICE_TYPE 0x1000 +#define PARF_BDF_TO_SID_CFG 0x2c00 + +/* PARF_INT_ALL_{STATUS/CLEAR/MASK} register fields */ +#define PARF_INT_ALL_LINK_DOWN BIT(1) +#define PARF_INT_ALL_BME BIT(2) +#define PARF_INT_ALL_PM_TURNOFF BIT(3) +#define PARF_INT_ALL_DEBUG BIT(4) +#define PARF_INT_ALL_LTR BIT(5) +#define PARF_INT_ALL_MHI_Q6 BIT(6) +#define PARF_INT_ALL_MHI_A7 BIT(7) +#define PARF_INT_ALL_DSTATE_CHANGE BIT(8) +#define PARF_INT_ALL_L1SUB_TIMEOUT BIT(9) +#define PARF_INT_ALL_MMIO_WRITE BIT(10) +#define PARF_INT_ALL_CFG_WRITE BIT(11) +#define PARF_INT_ALL_BRIDGE_FLUSH_N BIT(12) +#define PARF_INT_ALL_LINK_UP BIT(13) +#define PARF_INT_ALL_AER_LEGACY BIT(14) +#define PARF_INT_ALL_PLS_ERR BIT(15) +#define PARF_INT_ALL_PME_LEGACY BIT(16) +#define PARF_INT_ALL_PLS_PME BIT(17) + +/* PARF_BDF_TO_SID_CFG register fields */ +#define PARF_BDF_TO_SID_BYPASS BIT(0) + +/* PARF_DEBUG_INT_EN register fields */ +#define PARF_DEBUG_INT_PM_DSTATE_CHANGE BIT(1) +#define PARF_DEBUG_INT_CFG_BUS_MASTER_EN BIT(2) +#define PARF_DEBUG_INT_RADM_PM_TURNOFF BIT(3) + +/* PARF_DEVICE_TYPE register fields */ +#define PARF_DEVICE_TYPE_EP 0x0 + +/* PARF_PM_CTRL register fields */ +#define PARF_PM_CTRL_REQ_EXIT_L1 BIT(1) +#define PARF_PM_CTRL_READY_ENTR_L23 BIT(2) +#define PARF_PM_CTRL_REQ_NOT_ENTR_L1 BIT(5) + +/* PARF_AXI_MSTR_RD_HALT_NO_WRITES register fields */ +#define PARF_AXI_MSTR_RD_HALT_NO_WRITE_EN BIT(0) + +/* PARF_AXI_MSTR_WR_ADDR_HALT register fields */ +#define PARF_AXI_MSTR_WR_ADDR_HALT_EN BIT(31) + +/* PARF_Q2A_FLUSH register fields */ +#define PARF_Q2A_FLUSH_EN BIT(16) + +/* PARF_SYS_CTRL register fields */ +#define PARF_SYS_CTRL_AUX_PWR_DET BIT(4) +#define PARF_SYS_CTRL_CORE_CLK_CGC_DIS BIT(6) +#define PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE BIT(11) + +/* PARF_DB_CTRL register fields */ +#define PARF_DB_CTRL_INSR_DBNCR_BLOCK BIT(0) +#define PARF_DB_CTRL_RMVL_DBNCR_BLOCK BIT(1) +#define PARF_DB_CTRL_DBI_WKP_BLOCK BIT(4) +#define PARF_DB_CTRL_SLV_WKP_BLOCK BIT(5) +#define PARF_DB_CTRL_MST_WKP_BLOCK BIT(6) + +/* PARF_CFG_BITS register fields */ 
+#define PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN BIT(1) + +/* ELBI registers */ +#define ELBI_SYS_STTS 0x08 + +/* DBI registers */ +#define DBI_CON_STATUS 0x44 + +/* DBI register fields */ +#define DBI_CON_STATUS_POWER_STATE_MASK GENMASK(1, 0) + +#define XMLH_LINK_UP 0x400 +#define CORE_RESET_TIME_US_MIN 1000 +#define CORE_RESET_TIME_US_MAX 1005 +#define WAKE_DELAY_US 2000 /* 2 ms */ + +#define to_pcie_ep(x) dev_get_drvdata((x)->dev) + +enum qcom_pcie_ep_link_status { + QCOM_PCIE_EP_LINK_DISABLED, + QCOM_PCIE_EP_LINK_ENABLED, + QCOM_PCIE_EP_LINK_UP, + QCOM_PCIE_EP_LINK_DOWN, +}; + +static struct clk_bulk_data qcom_pcie_ep_clks[] = { + { .id = "cfg" }, + { .id = "aux" }, + { .id = "bus_master" }, + { .id = "bus_slave" }, + { .id = "ref" }, + { .id = "sleep" }, + { .id = "slave_q2a" }, +}; + +struct qcom_pcie_ep { + struct dw_pcie pci; + + void __iomem *parf; + void __iomem *elbi; + struct regmap *perst_map; + struct resource *mmio_res; + + struct reset_control *core_reset; + struct gpio_desc *reset; + struct gpio_desc *wake; + struct phy *phy; + + u32 perst_en; + u32 perst_sep_en; + + enum qcom_pcie_ep_link_status link_status; + int global_irq; + int perst_irq; +}; + +static int qcom_pcie_ep_core_reset(struct qcom_pcie_ep *pcie_ep) +{ + struct dw_pcie *pci = &pcie_ep->pci; + struct device *dev = pci->dev; + int ret; + + ret = reset_control_assert(pcie_ep->core_reset); + if (ret) { + dev_err(dev, "Cannot assert core reset\n"); + return ret; + } + + usleep_range(CORE_RESET_TIME_US_MIN, CORE_RESET_TIME_US_MAX); + + ret = reset_control_deassert(pcie_ep->core_reset); + if (ret) { + dev_err(dev, "Cannot de-assert core reset\n"); + return ret; + } + + usleep_range(CORE_RESET_TIME_US_MIN, CORE_RESET_TIME_US_MAX); + + return 0; +} + +/* + * Delatch PERST_EN and PERST_SEPARATION_ENABLE with TCSR to avoid + * device reset during host reboot and hibernation. The driver is + * expected to handle this situation. 
+ */ +static void qcom_pcie_ep_configure_tcsr(struct qcom_pcie_ep *pcie_ep) +{ + regmap_write(pcie_ep->perst_map, pcie_ep->perst_en, 0); + regmap_write(pcie_ep->perst_map, pcie_ep->perst_sep_en, 0); +} + +static int qcom_pcie_dw_link_up(struct dw_pcie *pci) +{ + struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci); + u32 reg; + + reg = readl_relaxed(pcie_ep->elbi + ELBI_SYS_STTS); + + return reg & XMLH_LINK_UP; +} + +static int qcom_pcie_dw_start_link(struct dw_pcie *pci) +{ + struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci); + + enable_irq(pcie_ep->perst_irq); + + return 0; +} + +static void qcom_pcie_dw_stop_link(struct dw_pcie *pci) +{ + struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci); + + disable_irq(pcie_ep->perst_irq); +} + +static int qcom_pcie_perst_deassert(struct dw_pcie *pci) +{ + struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci); + struct device *dev = pci->dev; + u32 val, offset; + int ret; + + ret = clk_bulk_prepare_enable(ARRAY_SIZE(qcom_pcie_ep_clks), qcom_pcie_ep_clks); + if (ret) + return ret; + + ret = qcom_pcie_ep_core_reset(pcie_ep); + if (ret) + goto err_disable_clk; + + ret = phy_init(pcie_ep->phy); + if (ret) + goto err_disable_clk; + + ret = phy_power_on(pcie_ep->phy); + if (ret) + goto err_phy_exit; + + /* Assert WAKE# to RC to indicate device is ready */ + gpiod_set_value_cansleep(pcie_ep->wake, 1); + usleep_range(WAKE_DELAY_US, WAKE_DELAY_US + 500); + gpiod_set_value_cansleep(pcie_ep->wake, 0); + + qcom_pcie_ep_configure_tcsr(pcie_ep); + + /* Disable BDF to SID mapping */ + val = readl_relaxed(pcie_ep->parf + PARF_BDF_TO_SID_CFG); + val |= PARF_BDF_TO_SID_BYPASS; + writel_relaxed(val, pcie_ep->parf + PARF_BDF_TO_SID_CFG); + + /* Enable debug IRQ */ + val = readl_relaxed(pcie_ep->parf + PARF_DEBUG_INT_EN); + val |= PARF_DEBUG_INT_RADM_PM_TURNOFF | PARF_DEBUG_INT_CFG_BUS_MASTER_EN | + PARF_DEBUG_INT_PM_DSTATE_CHANGE; + writel_relaxed(val, pcie_ep->parf + PARF_DEBUG_INT_EN); + + /* Configure PCIe to endpoint mode */ + writel_relaxed(PARF_DEVICE_TYPE_EP, pcie_ep->parf + PARF_DEVICE_TYPE); + + /* Allow entering L1 state */ + val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL); + val &= ~PARF_PM_CTRL_REQ_NOT_ENTR_L1; + writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL); + + /* Read halts write */ + val = readl_relaxed(pcie_ep->parf + PARF_AXI_MSTR_RD_HALT_NO_WRITES); + val &= ~PARF_AXI_MSTR_RD_HALT_NO_WRITE_EN; + writel_relaxed(val, pcie_ep->parf + PARF_AXI_MSTR_RD_HALT_NO_WRITES); + + /* Write after write halt */ + val = readl_relaxed(pcie_ep->parf + PARF_AXI_MSTR_WR_ADDR_HALT); + val |= PARF_AXI_MSTR_WR_ADDR_HALT_EN; + writel_relaxed(val, pcie_ep->parf + PARF_AXI_MSTR_WR_ADDR_HALT); + + /* Q2A flush disable */ + val = readl_relaxed(pcie_ep->parf + PARF_Q2A_FLUSH); + val &= ~PARF_Q2A_FLUSH_EN; + writel_relaxed(val, pcie_ep->parf + PARF_Q2A_FLUSH); + + /* Disable DBI Wakeup, core clock CGC and enable AUX power */ + val = readl_relaxed(pcie_ep->parf + PARF_SYS_CTRL); + val |= PARF_SYS_CTRL_SLV_DBI_WAKE_DISABLE | PARF_SYS_CTRL_CORE_CLK_CGC_DIS | + PARF_SYS_CTRL_AUX_PWR_DET; + writel_relaxed(val, pcie_ep->parf + PARF_SYS_CTRL); + + /* Disable the debouncers */ + val = readl_relaxed(pcie_ep->parf + PARF_DB_CTRL); + val |= PARF_DB_CTRL_INSR_DBNCR_BLOCK | PARF_DB_CTRL_RMVL_DBNCR_BLOCK | + PARF_DB_CTRL_DBI_WKP_BLOCK | PARF_DB_CTRL_SLV_WKP_BLOCK | + PARF_DB_CTRL_MST_WKP_BLOCK; + writel_relaxed(val, pcie_ep->parf + PARF_DB_CTRL); + + /* Request to exit from L1SS for MSI and LTR MSG */ + val = readl_relaxed(pcie_ep->parf + PARF_CFG_BITS); + val |= PARF_CFG_BITS_REQ_EXIT_L1SS_MSI_LTR_EN; + 
writel_relaxed(val, pcie_ep->parf + PARF_CFG_BITS); + + dw_pcie_dbi_ro_wr_en(pci); + + /* Set the L0s Exit Latency to 2us-4us = 0x6 */ + offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); + val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP); + val &= ~PCI_EXP_LNKCAP_L0SEL; + val |= FIELD_PREP(PCI_EXP_LNKCAP_L0SEL, 0x6); + dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, val); + + /* Set the L1 Exit Latency to be 32us-64 us = 0x6 */ + offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); + val = dw_pcie_readl_dbi(pci, offset + PCI_EXP_LNKCAP); + val &= ~PCI_EXP_LNKCAP_L1EL; + val |= FIELD_PREP(PCI_EXP_LNKCAP_L1EL, 0x6); + dw_pcie_writel_dbi(pci, offset + PCI_EXP_LNKCAP, val); + + dw_pcie_dbi_ro_wr_dis(pci); + + writel_relaxed(0, pcie_ep->parf + PARF_INT_ALL_MASK); + val = PARF_INT_ALL_LINK_DOWN | PARF_INT_ALL_BME | PARF_INT_ALL_PM_TURNOFF | + PARF_INT_ALL_DSTATE_CHANGE | PARF_INT_ALL_LINK_UP; + writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_MASK); + + ret = dw_pcie_ep_init_complete(&pcie_ep->pci.ep); + if (ret) { + dev_err(dev, "Failed to complete initialization: %d\n", ret); + goto err_phy_power_off; + } + + /* + * The physical address of the MMIO region which is exposed as the BAR + * should be written to MHI BASE registers. + */ + writel_relaxed(pcie_ep->mmio_res->start, pcie_ep->parf + PARF_MHI_BASE_ADDR_LOWER); + writel_relaxed(0, pcie_ep->parf + PARF_MHI_BASE_ADDR_UPPER); + + dw_pcie_ep_init_notify(&pcie_ep->pci.ep); + + /* Enable LTSSM */ + val = readl_relaxed(pcie_ep->parf + PARF_LTSSM); + val |= BIT(8); + writel_relaxed(val, pcie_ep->parf + PARF_LTSSM); + + return 0; + +err_phy_power_off: + phy_power_off(pcie_ep->phy); +err_phy_exit: + phy_exit(pcie_ep->phy); +err_disable_clk: + clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks), qcom_pcie_ep_clks); + + return ret; +} + +static void qcom_pcie_perst_assert(struct dw_pcie *pci) +{ + struct qcom_pcie_ep *pcie_ep = to_pcie_ep(pci); + struct device *dev = pci->dev; + + if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED) { + dev_dbg(dev, "Link is already disabled\n"); + return; + } + + phy_power_off(pcie_ep->phy); + phy_exit(pcie_ep->phy); + clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks), qcom_pcie_ep_clks); + pcie_ep->link_status = QCOM_PCIE_EP_LINK_DISABLED; +} + +/* Common DWC controller ops */ +static const struct dw_pcie_ops pci_ops = { + .link_up = qcom_pcie_dw_link_up, + .start_link = qcom_pcie_dw_start_link, + .stop_link = qcom_pcie_dw_stop_link, +}; + +static int qcom_pcie_ep_get_io_resources(struct platform_device *pdev, + struct qcom_pcie_ep *pcie_ep) +{ + struct device *dev = &pdev->dev; + struct dw_pcie *pci = &pcie_ep->pci; + struct device_node *syscon; + struct resource *res; + int ret; + + pcie_ep->parf = devm_platform_ioremap_resource_byname(pdev, "parf"); + if (IS_ERR(pcie_ep->parf)) + return PTR_ERR(pcie_ep->parf); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); + pci->dbi_base = devm_pci_remap_cfg_resource(dev, res); + if (IS_ERR(pci->dbi_base)) + return PTR_ERR(pci->dbi_base); + pci->dbi_base2 = pci->dbi_base; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi"); + pcie_ep->elbi = devm_pci_remap_cfg_resource(dev, res); + if (IS_ERR(pcie_ep->elbi)) + return PTR_ERR(pcie_ep->elbi); + + pcie_ep->mmio_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmio"); + + syscon = of_parse_phandle(dev->of_node, "qcom,perst-regs", 0); + if (!syscon) { + dev_err(dev, "Failed to parse qcom,perst-regs\n"); + return -EINVAL; + } + + pcie_ep->perst_map = 
syscon_node_to_regmap(syscon); + of_node_put(syscon); + if (IS_ERR(pcie_ep->perst_map)) + return PTR_ERR(pcie_ep->perst_map); + + ret = of_property_read_u32_index(dev->of_node, "qcom,perst-regs", + 1, &pcie_ep->perst_en); + if (ret < 0) { + dev_err(dev, "No Perst Enable offset in syscon\n"); + return ret; + } + + ret = of_property_read_u32_index(dev->of_node, "qcom,perst-regs", + 2, &pcie_ep->perst_sep_en); + if (ret < 0) { + dev_err(dev, "No Perst Separation Enable offset in syscon\n"); + return ret; + } + + return 0; +} + +static int qcom_pcie_ep_get_resources(struct platform_device *pdev, + struct qcom_pcie_ep *pcie_ep) +{ + struct device *dev = &pdev->dev; + int ret; + + ret = qcom_pcie_ep_get_io_resources(pdev, pcie_ep); + if (ret) { + dev_err(&pdev->dev, "Failed to get io resources %d\n", ret); + return ret; + } + + ret = devm_clk_bulk_get(dev, ARRAY_SIZE(qcom_pcie_ep_clks), + qcom_pcie_ep_clks); + if (ret) + return ret; + + pcie_ep->core_reset = devm_reset_control_get_exclusive(dev, "core"); + if (IS_ERR(pcie_ep->core_reset)) + return PTR_ERR(pcie_ep->core_reset); + + pcie_ep->reset = devm_gpiod_get(dev, "reset", GPIOD_IN); + if (IS_ERR(pcie_ep->reset)) + return PTR_ERR(pcie_ep->reset); + + pcie_ep->wake = devm_gpiod_get_optional(dev, "wake", GPIOD_OUT_LOW); + if (IS_ERR(pcie_ep->wake)) + return PTR_ERR(pcie_ep->wake); + + pcie_ep->phy = devm_phy_optional_get(&pdev->dev, "pciephy"); + if (IS_ERR(pcie_ep->phy)) + ret = PTR_ERR(pcie_ep->phy); + + return ret; +} + +/* TODO: Notify clients about PCIe state change */ +static irqreturn_t qcom_pcie_ep_global_irq_thread(int irq, void *data) +{ + struct qcom_pcie_ep *pcie_ep = data; + struct dw_pcie *pci = &pcie_ep->pci; + struct device *dev = pci->dev; + u32 status = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_STATUS); + u32 mask = readl_relaxed(pcie_ep->parf + PARF_INT_ALL_MASK); + u32 dstate, val; + + writel_relaxed(status, pcie_ep->parf + PARF_INT_ALL_CLEAR); + status &= mask; + + if (FIELD_GET(PARF_INT_ALL_LINK_DOWN, status)) { + dev_dbg(dev, "Received Linkdown event\n"); + pcie_ep->link_status = QCOM_PCIE_EP_LINK_DOWN; + } else if (FIELD_GET(PARF_INT_ALL_BME, status)) { + dev_dbg(dev, "Received BME event. Link is enabled!\n"); + pcie_ep->link_status = QCOM_PCIE_EP_LINK_ENABLED; + pci_epc_bme_notify(pci->ep.epc); + } else if (FIELD_GET(PARF_INT_ALL_PM_TURNOFF, status)) { + dev_dbg(dev, "Received PM Turn-off event! Entering L23\n"); + val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL); + val |= PARF_PM_CTRL_READY_ENTR_L23; + writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL); + } else if (FIELD_GET(PARF_INT_ALL_DSTATE_CHANGE, status)) { + dstate = dw_pcie_readl_dbi(pci, DBI_CON_STATUS) & + DBI_CON_STATUS_POWER_STATE_MASK; + dev_dbg(dev, "Received D%d state event\n", dstate); + if (dstate == 3) { + val = readl_relaxed(pcie_ep->parf + PARF_PM_CTRL); + val |= PARF_PM_CTRL_REQ_EXIT_L1; + writel_relaxed(val, pcie_ep->parf + PARF_PM_CTRL); + } + pci_epc_d_state_notify(pci->ep.epc, &dstate); + } else if (FIELD_GET(PARF_INT_ALL_LINK_UP, status)) { + dev_dbg(dev, "Received Linkup event. 
Enumeration complete!\n"); + dw_pcie_ep_linkup(&pci->ep); + pcie_ep->link_status = QCOM_PCIE_EP_LINK_UP; + } else { + dev_dbg(dev, "Received unknown event: %d\n", status); + } + + return IRQ_HANDLED; +} + +static irqreturn_t qcom_pcie_ep_perst_irq_thread(int irq, void *data) +{ + struct qcom_pcie_ep *pcie_ep = data; + struct dw_pcie *pci = &pcie_ep->pci; + struct device *dev = pci->dev; + u32 perst; + + perst = gpiod_get_value(pcie_ep->reset); + if (perst) { + dev_dbg(dev, "PERST asserted by host. Shutting down the PCIe link!\n"); + qcom_pcie_perst_assert(pci); + } else { + dev_dbg(dev, "PERST de-asserted by host. Starting link training!\n"); + qcom_pcie_perst_deassert(pci); + } + + irq_set_irq_type(gpiod_to_irq(pcie_ep->reset), + (perst ? IRQF_TRIGGER_HIGH : IRQF_TRIGGER_LOW)); + + return IRQ_HANDLED; +} + +static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev, + struct qcom_pcie_ep *pcie_ep) +{ + int irq, ret; + + irq = platform_get_irq_byname(pdev, "global"); + if (irq < 0) { + dev_err(&pdev->dev, "Failed to get Global IRQ\n"); + return irq; + } + + ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, + qcom_pcie_ep_global_irq_thread, + IRQF_ONESHOT, + "global_irq", pcie_ep); + if (ret) { + dev_err(&pdev->dev, "Failed to request Global IRQ\n"); + return ret; + } + + pcie_ep->perst_irq = gpiod_to_irq(pcie_ep->reset); + irq_set_status_flags(pcie_ep->perst_irq, IRQ_NOAUTOEN); + ret = devm_request_threaded_irq(&pdev->dev, pcie_ep->perst_irq, NULL, + qcom_pcie_ep_perst_irq_thread, + IRQF_TRIGGER_HIGH | IRQF_ONESHOT, + "perst_irq", pcie_ep); + if (ret) { + dev_err(&pdev->dev, "Failed to request PERST IRQ\n"); + disable_irq(irq); + return ret; + } + + return 0; +} + +static int qcom_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, + enum pci_epc_irq_type type, u16 interrupt_num) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + + switch (type) { + case PCI_EPC_IRQ_LEGACY: + return dw_pcie_ep_raise_legacy_irq(ep, func_no); + case PCI_EPC_IRQ_MSI: + return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); + default: + dev_err(pci->dev, "Unknown IRQ type\n"); + return -EINVAL; + } +} + +static const struct pci_epc_features qcom_pcie_epc_features = { + .linkup_notifier = true, + .core_init_notifier = true, + .msi_capable = true, + .msix_capable = false, +}; + +static const struct pci_epc_features * +qcom_pcie_epc_get_features(struct dw_pcie_ep *pci_ep) +{ + return &qcom_pcie_epc_features; +} + +static void qcom_pcie_ep_init(struct dw_pcie_ep *ep) +{ + struct dw_pcie *pci = to_dw_pcie_from_ep(ep); + enum pci_barno bar; + + for (bar = BAR_0; bar <= BAR_5; bar++) + dw_pcie_ep_reset_bar(pci, bar); +} + +static struct dw_pcie_ep_ops pci_ep_ops = { + .ep_init = qcom_pcie_ep_init, + .raise_irq = qcom_pcie_ep_raise_irq, + .get_features = qcom_pcie_epc_get_features, +}; + +static int qcom_pcie_ep_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct qcom_pcie_ep *pcie_ep; + int ret; + + pcie_ep = devm_kzalloc(dev, sizeof(*pcie_ep), GFP_KERNEL); + if (!pcie_ep) + return -ENOMEM; + + pcie_ep->pci.dev = dev; + pcie_ep->pci.ops = &pci_ops; + pcie_ep->pci.ep.ops = &pci_ep_ops; + platform_set_drvdata(pdev, pcie_ep); + + ret = qcom_pcie_ep_get_resources(pdev, pcie_ep); + if (ret) + return ret; + + ret = clk_bulk_prepare_enable(ARRAY_SIZE(qcom_pcie_ep_clks), qcom_pcie_ep_clks); + if (ret) + return ret; + + ret = qcom_pcie_ep_core_reset(pcie_ep); + if (ret) + goto err_disable_clk; + + ret = phy_init(pcie_ep->phy); + if (ret) + goto err_disable_clk; + + 
/* PHY needs to be powered on for dw_pcie_ep_init() */ + ret = phy_power_on(pcie_ep->phy); + if (ret) + goto err_phy_exit; + + ret = dw_pcie_ep_init(&pcie_ep->pci.ep); + if (ret) { + dev_err(dev, "Failed to initialize endpoint:%d\n", ret); + goto err_phy_power_off; + } + + ret = qcom_pcie_ep_enable_irq_resources(pdev, pcie_ep); + if (ret) + goto err_phy_power_off; + + return 0; + +err_phy_power_off: + phy_power_off(pcie_ep->phy); +err_phy_exit: + phy_exit(pcie_ep->phy); +err_disable_clk: + clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks), qcom_pcie_ep_clks); + + return ret; +} + +static int qcom_pcie_ep_remove(struct platform_device *pdev) +{ + struct qcom_pcie_ep *pcie_ep = platform_get_drvdata(pdev); + + if (pcie_ep->link_status == QCOM_PCIE_EP_LINK_DISABLED) + return 0; + + phy_power_off(pcie_ep->phy); + phy_exit(pcie_ep->phy); + clk_bulk_disable_unprepare(ARRAY_SIZE(qcom_pcie_ep_clks), qcom_pcie_ep_clks); + + return 0; +} + +static const struct of_device_id qcom_pcie_ep_match[] = { + { .compatible = "qcom,sdx55-pcie-ep", }, + { } +}; + +static struct platform_driver qcom_pcie_ep_driver = { + .probe = qcom_pcie_ep_probe, + .remove = qcom_pcie_ep_remove, + .driver = { + .name = "qcom-pcie-ep", + .of_match_table = qcom_pcie_ep_match, + }, +}; +builtin_platform_driver(qcom_pcie_ep_driver); + +MODULE_AUTHOR("Siddartha Mohanadoss <smohanad@codeaurora.org>"); +MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>"); +MODULE_DESCRIPTION("Qualcomm PCIe Endpoint controller driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/endpoint/functions/Kconfig b/drivers/pci/endpoint/functions/Kconfig index 5f1242ca2f4e..93497fb70e31 100644 --- a/drivers/pci/endpoint/functions/Kconfig +++ b/drivers/pci/endpoint/functions/Kconfig @@ -25,3 +25,13 @@ config PCI_EPF_NTB device tree. If in doubt, say "N" to disable Endpoint NTB driver. + +config PCI_EPF_MHI + tristate "PCI Endpoint driver for MHI bus" + depends on PCI_ENDPOINT && MHI_BUS_EP + help + Enable this configuration option to enable the PCI Endpoint + driver for Modem Host Interface (MHI) bus found in Qualcomm + modems such as SDX55. + + If in doubt, say "N" to disable Endpoint driver for MHI bus. diff --git a/drivers/pci/endpoint/functions/Makefile b/drivers/pci/endpoint/functions/Makefile index 96ab932a537a..eee99b2e9103 100644 --- a/drivers/pci/endpoint/functions/Makefile +++ b/drivers/pci/endpoint/functions/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_PCI_EPF_TEST) += pci-epf-test.o obj-$(CONFIG_PCI_EPF_NTB) += pci-epf-ntb.o +obj-$(CONFIG_PCI_EPF_MHI) += pci-epf-mhi.o diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c new file mode 100644 index 000000000000..5b7a0044feef --- /dev/null +++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c @@ -0,0 +1,405 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * PCI EPF driver for MHI Endpoint + * + * Copyright (C) 2021 Linaro Ltd. 
+ * Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> + */ + +#include <linux/delay.h> +#include <linux/dmaengine.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/pci_ids.h> +#include <linux/random.h> +#include <linux/of_address.h> +#include <linux/pci_regs.h> +#include <linux/platform_device.h> + +#include <linux/mhi_ep.h> +#include <linux/pci_regs.h> +#include <linux/pci-epc.h> +#include <linux/pci-epf.h> + +#define MHI_VERSION_1_0 0x01000000 + +struct pci_epf_mhi_ep_info { + const struct mhi_ep_cntrl_config *config; + struct pci_epf_header *epf_header; + enum pci_barno bar_num; + u32 epf_flags; + u32 msi_count; +}; + +#define MHI_EP_CHANNEL_CONFIG_UL(ch_num, ch_name) \ + { \ + .num = ch_num, \ + .name = ch_name, \ + .dir = DMA_TO_DEVICE, \ + } + +#define MHI_EP_CHANNEL_CONFIG_DL(ch_num, ch_name) \ + { \ + .num = ch_num, \ + .name = ch_name, \ + .dir = DMA_FROM_DEVICE, \ + } + +static const struct mhi_ep_channel_config mhi_v1_channels[] = { + MHI_EP_CHANNEL_CONFIG_UL(0, "LOOPBACK"), + MHI_EP_CHANNEL_CONFIG_DL(1, "LOOPBACK"), + MHI_EP_CHANNEL_CONFIG_UL(2, "SAHARA"), + MHI_EP_CHANNEL_CONFIG_DL(3, "SAHARA"), + MHI_EP_CHANNEL_CONFIG_UL(4, "DIAG"), + MHI_EP_CHANNEL_CONFIG_DL(5, "DIAG"), + MHI_EP_CHANNEL_CONFIG_UL(6, "SSR"), + MHI_EP_CHANNEL_CONFIG_DL(7, "SSR"), + MHI_EP_CHANNEL_CONFIG_UL(8, "QDSS"), + MHI_EP_CHANNEL_CONFIG_DL(9, "QDSS"), + MHI_EP_CHANNEL_CONFIG_UL(10, "EFS"), + MHI_EP_CHANNEL_CONFIG_DL(11, "EFS"), + MHI_EP_CHANNEL_CONFIG_UL(12, "MBIM"), + MHI_EP_CHANNEL_CONFIG_DL(13, "MBIM"), + MHI_EP_CHANNEL_CONFIG_UL(14, "QMI"), + MHI_EP_CHANNEL_CONFIG_DL(15, "QMI"), + MHI_EP_CHANNEL_CONFIG_UL(16, "QMI"), + MHI_EP_CHANNEL_CONFIG_DL(17, "QMI"), + MHI_EP_CHANNEL_CONFIG_UL(18, "IP-CTRL-1"), + MHI_EP_CHANNEL_CONFIG_DL(19, "IP-CTRL-1"), + MHI_EP_CHANNEL_CONFIG_UL(20, "IPCR"), + MHI_EP_CHANNEL_CONFIG_DL(21, "IPCR"), + MHI_EP_CHANNEL_CONFIG_UL(32, "DUN"), + MHI_EP_CHANNEL_CONFIG_DL(33, "DUN"), + MHI_EP_CHANNEL_CONFIG_UL(36, "IP_SW0"), + MHI_EP_CHANNEL_CONFIG_DL(37, "IP_SW0"), +}; + +static const struct mhi_ep_cntrl_config mhi_v1_config = { + .max_channels = 128, + .num_channels = ARRAY_SIZE(mhi_v1_channels), + .ch_cfg = mhi_v1_channels, + .mhi_version = MHI_VERSION_1_0, +}; + +static struct pci_epf_header sdx55_header = { + .vendorid = 0x17cb, + .deviceid = 0x0306, + .revid = 0x0, + .progif_code = 0x0, + .subclass_code = 0x0, + .baseclass_code = 0xff, + .cache_line_size = 0x10, + .subsys_vendor_id = 0x0, + .subsys_id = 0x0, +}; + +static const struct pci_epf_mhi_ep_info sdx55_info = { + .config = &mhi_v1_config, + .epf_header = &sdx55_header, + .bar_num = BAR_0, + .epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32, + .msi_count = 4, +}; + +struct pci_epf_mhi { + struct mhi_ep_cntrl mhi_cntrl; + struct pci_epf *epf; + const struct pci_epf_mhi_ep_info *info; + void __iomem *mmio; + resource_size_t mmio_phys; + bool enumerated; + bool mhi_registered; + u32 mmio_size; + int irq; +}; + +void __iomem *pci_epf_mhi_alloc_addr(struct mhi_ep_cntrl *mhi_cntrl, + phys_addr_t *phys_addr, size_t size) +{ + struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl); + struct pci_epc *epc = epf_mhi->epf->epc; + + return pci_epc_mem_alloc_addr(epc, phys_addr, size); +} + +void pci_epf_mhi_free_addr(struct mhi_ep_cntrl *mhi_cntrl, + phys_addr_t phys_addr, void __iomem *virt_addr, size_t size) +{ + struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl); + struct pci_epc *epc = epf_mhi->epf->epc; + + 
pci_epc_mem_free_addr(epc, phys_addr, virt_addr, size); +} + +inline int pci_epf_mhi_map_addr(struct mhi_ep_cntrl *mhi_cntrl, + phys_addr_t phys_addr, u64 pci_addr, size_t size) +{ + struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl); + struct pci_epf *epf = epf_mhi->epf; + struct pci_epc *epc = epf->epc; + + return pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr, pci_addr, size); +} + +void pci_epf_mhi_unmap_addr(struct mhi_ep_cntrl *mhi_cntrl, phys_addr_t phys_addr) +{ + struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl); + struct pci_epf *epf = epf_mhi->epf; + struct pci_epc *epc = epf->epc; + + pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr); +} + +void pci_epf_mhi_raise_irq(struct mhi_ep_cntrl *mhi_cntrl) +{ + struct pci_epf_mhi *epf_mhi = container_of(mhi_cntrl, struct pci_epf_mhi, mhi_cntrl); + struct pci_epf *epf = epf_mhi->epf; + struct pci_epc *epc = epf->epc; + + /* Using fixed MSI for now */ + pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, PCI_EPC_IRQ_MSI, 1); +} + +static int pci_epf_mhi_notifier(struct notifier_block *nb, unsigned long val, + void *data) +{ + struct pci_epf *epf = container_of(nb, struct pci_epf, nb); + struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf); + struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl; + const struct pci_epf_mhi_ep_info *info = epf_mhi->info; + struct pci_epf_bar *epf_bar = &epf->bar[info->bar_num]; + struct pci_epc *epc = epf->epc; + struct device *dev = &epf->dev; + int ret; + u32 dstate; + + /* + * If the notification is other than CORE_INIT and if MHI EP is not + * yet registered, then error out. + */ + if ((val != CORE_INIT) && !epf_mhi->mhi_registered) { + dev_err(dev, "MHI EP not yet registered\n"); + return NOTIFY_BAD; + } + + switch (val) { + case CORE_INIT: + epf_bar->phys_addr = epf_mhi->mmio_phys; + epf_bar->size = epf_mhi->mmio_size; + epf_bar->barno = info->bar_num; + epf_bar->flags = info->epf_flags; + ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, epf_bar); + if (ret) { + dev_err(dev, "Failed to set BAR: %d\n", ret); + return NOTIFY_BAD; + } + + ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no, order_base_2(info->msi_count)); + if (ret) { + dev_err(dev, "Failed to set MSI configuration: %d\n", ret); + return NOTIFY_BAD; + } + + ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, info->epf_header); + if (ret) { + dev_err(dev, "Failed to set Configuration header: %d\n", ret); + return NOTIFY_BAD; + } + + mhi_cntrl->mmio = epf_mhi->mmio; + mhi_cntrl->irq = epf_mhi->irq; + + /* Assign the struct dev of PCI EP as MHI controller device */ + mhi_cntrl->cntrl_dev = epc->dev.parent; + mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq; + mhi_cntrl->alloc_addr = pci_epf_mhi_alloc_addr; + mhi_cntrl->free_addr = pci_epf_mhi_free_addr; + mhi_cntrl->map_addr = pci_epf_mhi_map_addr; + mhi_cntrl->unmap_addr = pci_epf_mhi_unmap_addr; + + /* Register the MHI EP controller */ + ret = mhi_ep_register_controller(mhi_cntrl, info->config); + if (ret) { + dev_err(dev, "Failed to register MHI EP controller\n"); + return NOTIFY_BAD; + } + + epf_mhi->mhi_registered = true; + break; + case LINK_UP: + ret = mhi_ep_set_link_state(mhi_cntrl, LINK_STATE_UP); + if (ret) { + dev_err(dev, "Failed to set LINK UP state\n"); + return NOTIFY_BAD; + } + + break; + case LINK_DOWN: + ret = mhi_ep_set_link_state(mhi_cntrl, LINK_STATE_DOWN); + if (ret) { + dev_err(dev, "Failed to set LINK DOWN state\n"); + return NOTIFY_BAD; + } + + 
		mhi_ep_power_down(mhi_cntrl);
+		epf_mhi->enumerated = false;
+
+		break;
+	case BME:
+		ret = mhi_ep_set_link_state(mhi_cntrl, LINK_STATE_BME);
+		if (ret) {
+			dev_err(dev, "Failed to set BME state\n");
+			return NOTIFY_BAD;
+		}
+
+		if (!epf_mhi->enumerated) {
+			/* Power up the MHI EP controller */
+			mhi_ep_power_up(mhi_cntrl);
+			epf_mhi->enumerated = true;
+		}
+
+		break;
+	case D_STATE:
+		dstate = (*(int *)data);
+		if (dstate == 0) {
+			ret = mhi_ep_set_link_state(mhi_cntrl, LINK_STATE_D0);
+			if (ret) {
+				dev_err(dev, "Failed to set D0 state\n");
+				return NOTIFY_BAD;
+			}
+		} else if (dstate == 3) {
+			ret = mhi_ep_set_link_state(mhi_cntrl, LINK_STATE_D3);
+			if (ret) {
+				dev_err(dev, "Failed to set D3 state\n");
+				return NOTIFY_BAD;
+			}
+		} else {
+			dev_err(dev, "Invalid D state: %d\n", dstate);
+			return NOTIFY_BAD;
+		}
+
+		break;
+	default:
+		dev_err(dev, "Invalid MHI device notifier event: %lu\n", val);
+		return NOTIFY_BAD;
+	}
+
+	return NOTIFY_OK;
+}
+
+static int pci_epf_mhi_bind(struct pci_epf *epf)
+{
+	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+	struct pci_epc *epc = epf->epc;
+	struct platform_device *pdev;
+	struct device *dev = &epf->dev;
+	struct resource *res;
+	int ret;
+
+	if (WARN_ON_ONCE(!epc))
+		return -EINVAL;
+
+	/* Get MMIO physical and virtual address from controller device */
+	pdev = to_platform_device(epc->dev.parent);
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mmio");
+	if (!res) {
+		dev_err(dev, "Failed to get MMIO region from controller\n");
+		return -ENODEV;
+	}
+
+	epf_mhi->mmio_phys = res->start;
+	epf_mhi->mmio_size = resource_size(res);
+
+	/* ioremap_wc() returns NULL on failure */
+	epf_mhi->mmio = ioremap_wc(epf_mhi->mmio_phys, epf_mhi->mmio_size);
+	if (!epf_mhi->mmio)
+		return -ENOMEM;
+
+	ret = platform_get_irq_byname(pdev, "doorbell");
+	if (ret < 0) {
+		dev_err(dev, "Failed to get Doorbell IRQ\n");
+		iounmap(epf_mhi->mmio);
+		return ret;
+	}
+
+	epf_mhi->irq = ret;
+	epf->nb.notifier_call = pci_epf_mhi_notifier;
+	pci_epc_register_notifier(epc, &epf->nb);
+
+	return 0;
+}
+
+static void pci_epf_mhi_unbind(struct pci_epf *epf)
+{
+	struct pci_epc *epc = epf->epc;
+	struct pci_epf_bar *epf_bar = &epf->bar[0];
+	struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
+	struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
+
+	pci_epc_unregister_notifier(epc, &epf->nb);
+
+	if (epf_mhi->mhi_registered) {
+		mhi_ep_power_down(mhi_cntrl);
+		mhi_ep_unregister_controller(mhi_cntrl);
+		epf_mhi->mhi_registered = false;
+	}
+
+	pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, epf_bar);
+	iounmap(epf_mhi->mmio);
+}
+
+static int pci_epf_mhi_probe(struct pci_epf *epf)
+{
+	struct pci_epf_mhi_ep_info *info =
+		(struct pci_epf_mhi_ep_info *) epf->driver->id_table->driver_data;
+	struct pci_epf_mhi *epf_mhi;
+	struct device *dev = &epf->dev;
+
+	epf_mhi = devm_kzalloc(dev, sizeof(*epf_mhi), GFP_KERNEL);
+	if (!epf_mhi)
+		return -ENOMEM;
+
+	epf_mhi->info = info;
+	epf_mhi->epf = epf;
+	epf_set_drvdata(epf, epf_mhi);
+
+	return 0;
+}
+
+static const struct pci_epf_device_id pci_epf_mhi_ids[] = {
+	{
+		.name = "pci_epf_mhi", .driver_data = (kernel_ulong_t) &sdx55_info,
+	},
+	{},
+};
+
+static struct pci_epf_ops pci_epf_mhi_ops = {
+	.unbind	= pci_epf_mhi_unbind,
+	.bind	= pci_epf_mhi_bind,
+};
+
+static struct pci_epf_driver pci_epf_mhi_driver = {
+	.driver.name	= "pci_epf_mhi",
+	.probe		= pci_epf_mhi_probe,
+	.id_table	= pci_epf_mhi_ids,
+	.ops		= &pci_epf_mhi_ops,
+	.owner		= THIS_MODULE,
+};
+
+static int __init pci_epf_mhi_init(void)
+{
+	int ret;
+
+	ret = pci_epf_register_driver(&pci_epf_mhi_driver);
+	if (ret) {
+		pr_err("Failed to register PCI EPF MHI driver: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+module_init(pci_epf_mhi_init);
+
+static void __exit pci_epf_mhi_exit(void)
+{
+	pci_epf_unregister_driver(&pci_epf_mhi_driver);
+}
+module_exit(pci_epf_mhi_exit);
+
+MODULE_DESCRIPTION("PCI EPF driver for MHI Endpoint");
+MODULE_AUTHOR("Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index ecbb0fb3b653..5e7e0845de82 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -698,6 +698,23 @@ void pci_epc_linkup(struct pci_epc *epc)
 EXPORT_SYMBOL_GPL(pci_epc_linkup);
 
 /**
+ * pci_epc_linkdown() - Notify the EPF device that the EPC device has dropped the
+ *			connection with the Root Complex
+ * @epc: the EPC device which has dropped the link with the host
+ *
+ * Invoke to notify the EPF device that the EPC device has dropped the
+ * connection with the Root Complex.
+ */
+void pci_epc_linkdown(struct pci_epc *epc)
+{
+	if (!epc || IS_ERR(epc))
+		return;
+
+	atomic_notifier_call_chain(&epc->notifier, LINK_DOWN, NULL);
+}
+EXPORT_SYMBOL_GPL(pci_epc_linkdown);
+
+/**
  * pci_epc_init_notify() - Notify the EPF device that EPC device's core
  *			   initialization is completed.
  * @epc: the EPC device whose core initialization is completed
@@ -715,6 +732,78 @@ void pci_epc_init_notify(struct pci_epc *epc)
 EXPORT_SYMBOL_GPL(pci_epc_init_notify);
 
 /**
+ * pci_epc_bme_notify() - Notify the EPF device that the EPC device has received
+ *			  the BME event from the Root Complex
+ * @epc: the EPC device that received the BME event
+ *
+ * Invoke to notify the EPF device that the EPC device has received the Bus
+ * Master Enable (BME) event from the Root Complex.
+ */
+void pci_epc_bme_notify(struct pci_epc *epc)
+{
+	if (!epc || IS_ERR(epc))
+		return;
+
+	atomic_notifier_call_chain(&epc->notifier, BME, NULL);
+}
+EXPORT_SYMBOL_GPL(pci_epc_bme_notify);
+
+/**
+ * pci_epc_pme_notify() - Notify the EPF device that the EPC device has received
+ *			  the PME from the Root Complex
+ * @epc: the EPC device that received the PME
+ * @data: Data for the PME notifier
+ *
+ * Invoke to notify the EPF device that the EPC device has received the Power
+ * Management Event (PME) from the Root Complex.
+ */
+void pci_epc_pme_notify(struct pci_epc *epc, void *data)
+{
+	if (!epc || IS_ERR(epc))
+		return;
+
+	atomic_notifier_call_chain(&epc->notifier, PME, data);
+}
+EXPORT_SYMBOL_GPL(pci_epc_pme_notify);
+
+/**
+ * pci_epc_d_state_notify() - Notify the EPF device that the EPC device has
+ *			      received the Device State event from the Root Complex
+ * @epc: the EPC device that received the Device State event
+ * @data: Data for the D_STATE notifier
+ *
+ * Invoke to notify the EPF device that the EPC device has received the Device
+ * State (D_STATE) event from the Root Complex.
+ */
+void pci_epc_d_state_notify(struct pci_epc *epc, void *data)
+{
+	if (!epc || IS_ERR(epc))
+		return;
+
+	atomic_notifier_call_chain(&epc->notifier, D_STATE, data);
+}
+EXPORT_SYMBOL_GPL(pci_epc_d_state_notify);
+
+/**
+ * pci_epc_custom_notify() - Notify the EPF device that the EPC device has
+ *			     received a custom event from the Root Complex
+ * @epc: EPC device that received the custom event
+ * @data: Data for the CUSTOM notifier
+ *
+ * Invoke to notify the EPF device that the EPC device has received the Custom
+ * event from the Root Complex. The custom event is EPC/vendor specific and is
+ * shared with the EPF device.
+ */ +void pci_epc_custom_notify(struct pci_epc *epc, void *data) +{ + if (!epc || IS_ERR(epc)) + return; + + atomic_notifier_call_chain(&epc->notifier, CUSTOM, data); +} +EXPORT_SYMBOL_GPL(pci_epc_custom_notify); + +/** * pci_epc_destroy() - destroy the EPC device * @epc: the EPC device that has to be destroyed * diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h new file mode 100644 index 000000000000..1c81898f2fd3 --- /dev/null +++ b/include/linux/mhi_ep.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2021, Linaro Ltd. + * + */ +#ifndef _MHI_EP_H_ +#define _MHI_EP_H_ + +#include <linux/dma-direction.h> +#include <linux/mhi.h> + +#define MHI_EP_DEFAULT_MTU 0x4000 + +struct mhi_ep_chan; +struct mhi_ep_cmd; +struct mhi_ep_event; +struct mhi_cmd_ctx; +struct mhi_event_ctx; +struct mhi_chan_ctx; + +/* MHI Link states */ +enum mhi_ep_link_state { + LINK_STATE_UP, + LINK_STATE_DOWN, + LINK_STATE_BME, + LINK_STATE_D0, + LINK_STATE_D3, + LINK_STATE_D3_COLD, + LINK_STATE_MAX +}; + +struct mhi_ep_channel_config { + char *name; + u32 num; + u32 num_elements; + enum dma_data_direction dir; +}; + +struct mhi_ep_cntrl_config { + u32 max_channels; + u32 num_channels; + const struct mhi_ep_channel_config *ch_cfg; + u32 mhi_version; +}; + +struct mhi_ep_interrupt_state { + u32 mask; + u32 status; +}; + +struct mhi_ep_cntrl { + struct device *cntrl_dev; + struct mhi_ep_device *mhi_dev; + void __iomem *mmio; + + struct mhi_ep_chan *mhi_chan; + struct mhi_ep_cmd *mhi_cmd; + struct mhi_ep_event *mhi_event; + struct mhi_ep_sm *sm; + + /* Host control base information */ + struct mhi_chan_ctx *ch_ctx_cache; + struct mhi_event_ctx *ev_ctx_cache; + struct mhi_cmd_ctx *cmd_ctx_cache; + + u64 ch_ctx_host_pa; + u64 ev_ctx_host_pa; + u64 cmd_ctx_host_pa; + phys_addr_t ch_ctx_cache_phys; + phys_addr_t ev_ctx_cache_phys; + phys_addr_t cmd_ctx_cache_phys; + size_t ch_ctx_host_size; + size_t ev_ctx_host_size; + size_t cmd_ctx_host_size; + + struct workqueue_struct *state_wq; + struct workqueue_struct *ring_wq; + struct work_struct state_work; + struct work_struct ring_work; + struct work_struct init_work; + struct work_struct reset_work; + + struct list_head ring_transition_list; + struct list_head st_transition_list; + spinlock_t transition_lock; + spinlock_t state_lock; + + struct mutex event_lock; + + /* CHDB and EVDB device interrupt state */ + struct mhi_ep_interrupt_state chdb[4]; + struct mhi_ep_interrupt_state evdb[4]; + + void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl); + void __iomem *(*alloc_addr)(struct mhi_ep_cntrl *mhi_cntrl, + phys_addr_t *phys_addr, size_t size); + void (*free_addr)(struct mhi_ep_cntrl *mhi_cntrl, + phys_addr_t phys_addr, void __iomem *virt_addr, size_t size); + int (*map_addr)(struct mhi_ep_cntrl *mhi_cntrl, + phys_addr_t phys_addr, u64 pci_addr, size_t size); + void (*unmap_addr)(struct mhi_ep_cntrl *mhi_cntrl, + phys_addr_t phys_addr); + + enum mhi_ep_link_state link_state; + enum mhi_state mhi_state; + + u32 reg_len; + u32 version; + u32 event_rings; + u32 hw_event_rings; + u32 max_chan; + u32 channels; + u32 chdb_offset; + u32 erdb_offset; + + int irq; + int index; + bool is_enabled; +}; + +struct mhi_ep_device { + struct mhi_ep_cntrl *mhi_cntrl; + const struct mhi_device_id *id; + const char *name; + struct device dev; + struct mhi_ep_chan *ul_chan; + struct mhi_ep_chan *dl_chan; + enum mhi_device_type dev_type; + int ul_chan_id; + int dl_chan_id; +}; + +struct mhi_ep_driver { + const struct mhi_device_id *id_table; + struct 
device_driver driver; + int (*probe)(struct mhi_ep_device *mhi_ep, + const struct mhi_device_id *id); + void (*remove)(struct mhi_ep_device *mhi_ep); + void (*ul_xfer_cb)(struct mhi_ep_device *mhi_dev, + struct mhi_result *result); + void (*dl_xfer_cb)(struct mhi_ep_device *mhi_dev, + struct mhi_result *result); +}; + +#define to_mhi_ep_device(dev) container_of(dev, struct mhi_ep_device, dev) +#define to_mhi_ep_driver(drv) container_of(drv, struct mhi_ep_driver, driver) + +/* + * module_mhi_ep_driver() - Helper macro for drivers that don't do + * anything special other than using default mhi_ep_driver_register() and + * mhi_ep_driver_unregister(). This eliminates a lot of boilerplate. + * Each module may only use this macro once. + */ +#define module_mhi_ep_driver(mhi_drv) \ + module_driver(mhi_drv, mhi_ep_driver_register, \ + mhi_ep_driver_unregister) + +/* + * Macro to avoid include chaining to get THIS_MODULE + */ +#define mhi_ep_driver_register(mhi_drv) \ + __mhi_ep_driver_register(mhi_drv, THIS_MODULE) + +int __mhi_ep_driver_register(struct mhi_ep_driver *mhi_drv, struct module *owner); +void mhi_ep_driver_unregister(struct mhi_ep_driver *mhi_drv); + +int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, + const struct mhi_ep_cntrl_config *config); +void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl); +void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl); + +int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir, + struct sk_buff *skb, size_t len, enum mhi_flags mflags); +bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_direction dir); +int mhi_ep_set_link_state(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ep_link_state state); + +#endif diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index ae2e75d15b21..a85d453ebf67 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -835,6 +835,8 @@ struct wmi_device_id { #define MHI_DEVICE_MODALIAS_FMT "mhi:%s" #define MHI_NAME_SIZE 32 +#define MHI_EP_DEVICE_MODALIAS_FMT "mhi_ep:%s" + /** * struct mhi_device_id - MHI device identification * @chan: MHI channel name diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index a48778e1a4ee..ebd98233fdc6 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -198,6 +198,12 @@ pci_epc_register_notifier(struct pci_epc *epc, struct notifier_block *nb) return atomic_notifier_chain_register(&epc->notifier, nb); } +static inline int +pci_epc_unregister_notifier(struct pci_epc *epc, struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&epc->notifier, nb); +} + struct pci_epc * __devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops, struct module *owner); @@ -209,7 +215,12 @@ void pci_epc_destroy(struct pci_epc *epc); int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf, enum pci_epc_interface_type type); void pci_epc_linkup(struct pci_epc *epc); +void pci_epc_linkdown(struct pci_epc *epc); void pci_epc_init_notify(struct pci_epc *epc); +void pci_epc_bme_notify(struct pci_epc *epc); +void pci_epc_pme_notify(struct pci_epc *epc, void *data); +void pci_epc_d_state_notify(struct pci_epc *epc, void *data); +void pci_epc_custom_notify(struct pci_epc *epc, void *data); void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf, enum pci_epc_interface_type type); int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no, diff --git 
a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index 009a07147c61..da360659c322 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -20,6 +20,11 @@ enum pci_epc_interface_type;
 enum pci_notify_event {
 	CORE_INIT,
 	LINK_UP,
+	LINK_DOWN,
+	BME,
+	PME,
+	D_STATE,
+	CUSTOM,
 };
 
 enum pci_barno {
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 49aba862073e..90cda36f3159 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -1380,6 +1380,15 @@ static int do_mhi_entry(const char *filename, void *symval, char *alias)
 	return 1;
 }
 
+/* Looks like: mhi_ep:S */
+static int do_mhi_ep_entry(const char *filename, void *symval, char *alias)
+{
+	DEF_FIELD_ADDR(symval, mhi_device_id, chan);
+	sprintf(alias, MHI_EP_DEVICE_MODALIAS_FMT, *chan);
+
+	return 1;
+}
+
 static int do_auxiliary_entry(const char *filename, void *symval, char *alias)
 {
 	DEF_FIELD_ADDR(symval, auxiliary_device_id, name);
@@ -1496,6 +1505,7 @@ static const struct devtable devtable[] = {
 	{"tee", SIZE_tee_client_device_id, do_tee_entry},
 	{"wmi", SIZE_wmi_device_id, do_wmi_entry},
 	{"mhi", SIZE_mhi_device_id, do_mhi_entry},
+	{"mhi_ep", SIZE_mhi_device_id, do_mhi_ep_entry},
 	{"auxiliary", SIZE_auxiliary_device_id, do_auxiliary_entry},
 	{"ssam", SIZE_ssam_device_id, do_ssam_entry},
 	{"dfl", SIZE_dfl_device_id, do_dfl_entry},
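
For readers new to the MHI endpoint stack introduced above, the sketch below shows roughly how a client driver would sit on top of the include/linux/mhi_ep.h interface from this series and bind to the LOOPBACK channel pair advertised by pci-epf-mhi.c. Only struct mhi_ep_driver, struct mhi_device_id, module_mhi_ep_driver() and the "mhi_ep" MODULE_DEVICE_TABLE type come from the patches themselves; the driver name, the empty callback bodies and the assumption that clients are matched purely on channel name are illustrative, not part of the series.

/* Hypothetical client for the LOOPBACK channel pair; not part of this series */
#include <linux/mhi_ep.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>

static int mhi_ep_loopback_probe(struct mhi_ep_device *mhi_dev,
				 const struct mhi_device_id *id)
{
	/* Allocate per-channel state and start queueing buffers here */
	return 0;
}

static void mhi_ep_loopback_remove(struct mhi_ep_device *mhi_dev)
{
	/* Tear down whatever probe() set up */
}

static void mhi_ep_loopback_ul_xfer_cb(struct mhi_ep_device *mhi_dev,
				       struct mhi_result *result)
{
	/* Called when the host has sent data on the UL (DMA_TO_DEVICE) channel */
}

static void mhi_ep_loopback_dl_xfer_cb(struct mhi_ep_device *mhi_dev,
				       struct mhi_result *result)
{
	/* Called when data queued towards the host on the DL channel completes */
}

/* Channel name matches mhi_v1_channels[] in pci-epf-mhi.c */
static const struct mhi_device_id mhi_ep_loopback_match[] = {
	{ .chan = "LOOPBACK" },
	{},
};
MODULE_DEVICE_TABLE(mhi_ep, mhi_ep_loopback_match);

static struct mhi_ep_driver mhi_ep_loopback_driver = {
	.id_table = mhi_ep_loopback_match,
	.probe = mhi_ep_loopback_probe,
	.remove = mhi_ep_loopback_remove,
	.ul_xfer_cb = mhi_ep_loopback_ul_xfer_cb,
	.dl_xfer_cb = mhi_ep_loopback_dl_xfer_cb,
	.driver = {
		.name = "mhi_ep_loopback",
	},
};
module_mhi_ep_driver(mhi_ep_loopback_driver);

MODULE_DESCRIPTION("Illustrative MHI endpoint client driver");
MODULE_LICENSE("GPL v2");

The per-direction ul_xfer_cb()/dl_xfer_cb() callbacks mirror the DMA_TO_DEVICE/DMA_FROM_DEVICE split used in the channel table of pci-epf-mhi.c, which appears to be why struct mhi_ep_driver carries one completion hook for each direction.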